From f31e6e1926d32bf31b68beb39a4e6151beb1efd7 Mon Sep 17 00:00:00 2001 From: gayang Date: Thu, 9 Sep 2021 09:19:03 +0000 Subject: [PATCH 001/121] add response_trailer_prefix for bandwidth limit filter --- .../http/bandwidth_limit/v3alpha/bandwidth_limit.proto | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/api/envoy/extensions/filters/http/bandwidth_limit/v3alpha/bandwidth_limit.proto b/api/envoy/extensions/filters/http/bandwidth_limit/v3alpha/bandwidth_limit.proto index 4cd5f8268b704..928231c4edcfb 100644 --- a/api/envoy/extensions/filters/http/bandwidth_limit/v3alpha/bandwidth_limit.proto +++ b/api/envoy/extensions/filters/http/bandwidth_limit/v3alpha/bandwidth_limit.proto @@ -67,4 +67,14 @@ message BandwidthLimit { // Runtime flag that controls whether the filter is enabled or not. If not specified, defaults // to enabled. config.core.v3.RuntimeFeatureFlag runtime_enabled = 5; + + // Optional the prefix for the response trailers of bandwidth decode/encode delays. + // If not set, use the default value "bandwidth-request-delay-ms" or "bandwidth-response-delay-ms". + // If set, the trailer name will be set as: + // request: response_trailer_prefix + "-bandwidth-request-delay-ms" + // Delay time it took for the request stream transfer. + // response: response_trailer_prefix + "-bandwidth-response-delay-ms" + // Delay time it took for the response stream transfer. + // If EnableMode is Disabled or Decode or delay time = 0, the trailer will not be set. 
+ string response_trailer_prefix = 6; } From 0a1f752dd4f788b6d0899f9fffed83b0155f445b Mon Sep 17 00:00:00 2001 From: gayang Date: Thu, 9 Sep 2021 09:23:38 +0000 Subject: [PATCH 002/121] fix comment --- .../filters/http/bandwidth_limit/v3alpha/bandwidth_limit.proto | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/api/envoy/extensions/filters/http/bandwidth_limit/v3alpha/bandwidth_limit.proto b/api/envoy/extensions/filters/http/bandwidth_limit/v3alpha/bandwidth_limit.proto index 928231c4edcfb..2395402fd1ab6 100644 --- a/api/envoy/extensions/filters/http/bandwidth_limit/v3alpha/bandwidth_limit.proto +++ b/api/envoy/extensions/filters/http/bandwidth_limit/v3alpha/bandwidth_limit.proto @@ -69,7 +69,7 @@ message BandwidthLimit { config.core.v3.RuntimeFeatureFlag runtime_enabled = 5; // Optional the prefix for the response trailers of bandwidth decode/encode delays. - // If not set, use the default value "bandwidth-request-delay-ms" or "bandwidth-response-delay-ms". + // If not set, use the default value "bandwidth-request-delay-ms" and "bandwidth-response-delay-ms". // If set, the trailer name will be set as: // request: response_trailer_prefix + "-bandwidth-request-delay-ms" // Delay time it took for the request stream transfer. 
From 4ed93adc9b347d4b728a4008f1483879abb4d103 Mon Sep 17 00:00:00 2001 From: gayang Date: Sun, 26 Sep 2021 14:10:04 +0000 Subject: [PATCH 003/121] add response trailers for bandwidth limit filter Signed-off-by: gayang --- .../v3alpha/bandwidth_limit.proto | 2 +- .../http_filters/bandwidth_limit_filter.rst | 10 ++- docs/root/version_history/current.rst | 1 + .../http/bandwidth_limit/bandwidth_limit.cc | 51 +++++++++-- .../http/bandwidth_limit/bandwidth_limit.h | 16 +++- .../http/common/stream_rate_limiter.cc | 13 ++- .../filters/http/common/stream_rate_limiter.h | 7 +- .../filters/http/fault/fault_filter.cc | 2 +- .../http/bandwidth_limit/config_test.cc | 6 ++ .../http/bandwidth_limit/filter_test.cc | 88 +++++++++++++------ .../http/common/stream_rate_limiter_test.cc | 4 +- 11 files changed, 149 insertions(+), 51 deletions(-) diff --git a/api/envoy/extensions/filters/http/bandwidth_limit/v3alpha/bandwidth_limit.proto b/api/envoy/extensions/filters/http/bandwidth_limit/v3alpha/bandwidth_limit.proto index 2395402fd1ab6..c47878863195f 100644 --- a/api/envoy/extensions/filters/http/bandwidth_limit/v3alpha/bandwidth_limit.proto +++ b/api/envoy/extensions/filters/http/bandwidth_limit/v3alpha/bandwidth_limit.proto @@ -20,7 +20,7 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // Bandwidth limit :ref:`configuration overview `. // [#extension: envoy.filters.http.bandwidth_limit] -// [#next-free-field: 6] +// [#next-free-field: 7] message BandwidthLimit { // Defines the mode for the bandwidth limit filter. // Values represent bitmask. 
diff --git a/docs/root/configuration/http/http_filters/bandwidth_limit_filter.rst b/docs/root/configuration/http/http_filters/bandwidth_limit_filter.rst index 4576e9d3ac36a..5c032496b723a 100644 --- a/docs/root/configuration/http/http_filters/bandwidth_limit_filter.rst +++ b/docs/root/configuration/http/http_filters/bandwidth_limit_filter.rst @@ -42,14 +42,16 @@ The HTTP bandwidth limit filter outputs statistics in the ``.http_b :widths: 1, 1, 2 request_enabled, Counter, Total number of request streams for which the bandwidth limiter was consulted + request_enforced, Counter, Total number of request streams for which the bandwidth limiter was enforced request_pending, GAUGE, Number of request streams which are currently pending transfer in bandwidth limiter - request_incoming_size, GAUGE, Size in bytes of incoming request data to bandwidth limiter - request_allowed_size, GAUGE, Size in bytes of outgoing request data from bandwidth limiter + request_incoming_size, Counter, Size in bytes of incoming request data to bandwidth limiter + request_allowed_size, Counter, Size in bytes of outgoing request data from bandwidth limiter request_transfer_duration, HISTOGRAM, Total time (including added delay) it took for the request stream transfer response_enabled, Counter, Total number of response streams for which the bandwidth limiter was consulted + response_enforced, Counter, Total number of response streams for which the bandwidth limiter was enforced response_pending, GAUGE, Number of response streams which are currently pending transfer in bandwidth limiter - response_incoming_size, GAUGE, Size in bytes of incoming response data to bandwidth limiter - response_allowed_size, GAUGE, Size in bytes of outgoing response data from bandwidth limiter + response_incoming_size, Counter, Size in bytes of incoming response data to bandwidth limiter + response_allowed_size, Counter, Size in bytes of outgoing response data from bandwidth limiter response_transfer_duration, HISTOGRAM, 
Total time (including added delay) it took for the response stream transfer .. _config_http_filters_bandwidth_limit_runtime: diff --git a/docs/root/version_history/current.rst b/docs/root/version_history/current.rst index c8e227d34b58d..ef5c7170501c4 100644 --- a/docs/root/version_history/current.rst +++ b/docs/root/version_history/current.rst @@ -66,6 +66,7 @@ Minor Behavior Changes information. * listener: destroy per network filter chain stats when a network filter chain is removed during the listener in place update. * quic: enables IETF connection migration. This feature requires stable UDP packet routine in the L4 load balancer with the same first-4-bytes in connection id. It can be turned off by setting runtime guard ``envoy.reloadable_features.FLAGS_quic_reloadable_flag_quic_connection_migration_use_new_cid_v2`` to false. +* bandwidth_limit: added response trailers when request or response delay are enforced. Bug Fixes --------- diff --git a/source/extensions/filters/http/bandwidth_limit/bandwidth_limit.cc b/source/extensions/filters/http/bandwidth_limit/bandwidth_limit.cc index 6d831d6de8903..036122b45bcbc 100644 --- a/source/extensions/filters/http/bandwidth_limit/bandwidth_limit.cc +++ b/source/extensions/filters/http/bandwidth_limit/bandwidth_limit.cc @@ -16,6 +16,11 @@ namespace Extensions { namespace HttpFilters { namespace BandwidthLimitFilter { +namespace { + const Http::LowerCaseString DefaultRequestDelayTrailer = Http::LowerCaseString("bandwidth-request-delay-ms"); + const Http::LowerCaseString DefaultResponseDelayTrailer = Http::LowerCaseString("bandwidth-response-delay-ms"); +} + FilterConfig::FilterConfig(const BandwidthLimit& config, Stats::Scope& scope, Runtime::Loader& runtime, TimeSource& time_source, bool per_route) : runtime_(runtime), time_source_(time_source), enable_mode_(config.enable_mode()), @@ -23,7 +28,11 @@ FilterConfig::FilterConfig(const BandwidthLimit& config, Stats::Scope& scope, 
fill_interval_(std::chrono::milliseconds(PROTOBUF_GET_MS_OR_DEFAULT( config, fill_interval, StreamRateLimiter::DefaultFillInterval.count()))), enabled_(config.runtime_enabled(), runtime), - stats_(generateStats(config.stat_prefix(), scope)) { + stats_(generateStats(config.stat_prefix(), scope)), + request_delay_trailer_(config.response_trailer_prefix().empty() ? DefaultRequestDelayTrailer + : Http::LowerCaseString(config.response_trailer_prefix() + "-" + DefaultRequestDelayTrailer.get())), + response_delay_trailer_(config.response_trailer_prefix().empty() ? DefaultResponseDelayTrailer + : Http::LowerCaseString(config.response_trailer_prefix() + "-" + DefaultResponseDelayTrailer.get())) { if (per_route && !config.has_limit_kbps()) { throw EnvoyException("bandwidthlimitfilter: limit must be set for per route filter config"); } @@ -64,7 +73,10 @@ Http::FilterHeadersStatus BandwidthLimiter::decodeHeaders(Http::RequestHeaderMap updateStatsOnDecodeFinish(); decoder_callbacks_->continueDecoding(); }, - [config](uint64_t len) { config.stats().request_allowed_size_.set(len); }, + [&config](uint64_t len, bool limit_enforced) { + config.stats().request_allowed_size_.add(len); + if (limit_enforced) { config.stats().request_enforced_.inc(); } + }, const_cast(&config)->timeSource(), decoder_callbacks_->dispatcher(), decoder_callbacks_->scope(), config.tokenBucket(), config.fillInterval()); } @@ -82,7 +94,7 @@ Http::FilterDataStatus BandwidthLimiter::decodeData(Buffer::Instance& data, bool const_cast(&config)->timeSource()); config.stats().request_pending_.inc(); } - config.stats().request_incoming_size_.set(data.length()); + config.stats().request_incoming_size_.add(data.length()); request_limiter_->writeData(data, end_stream); return Http::FilterDataStatus::StopIterationNoBuffer; @@ -123,7 +135,10 @@ Http::FilterHeadersStatus BandwidthLimiter::encodeHeaders(Http::ResponseHeaderMa updateStatsOnEncodeFinish(); encoder_callbacks_->continueEncoding(); }, - [config](uint64_t len) { 
config.stats().response_allowed_size_.set(len); }, + [&config](uint64_t len, bool limit_enforced) { + config.stats().response_allowed_size_.add(len); + if (limit_enforced) { config.stats().response_enforced_.inc(); } + }, const_cast(&config)->timeSource(), encoder_callbacks_->dispatcher(), encoder_callbacks_->scope(), config.tokenBucket(), config.fillInterval()); } @@ -135,23 +150,33 @@ Http::FilterDataStatus BandwidthLimiter::encodeData(Buffer::Instance& data, bool if (response_limiter_ != nullptr) { const auto& config = getConfig(); + // Adds encoded trailers. May only be called in encodeData when end_stream is set to true. + // If upstream has trailers, addEncodedTrailers won't be called + bool trailer_added = false; + if (end_stream) { + trailers = &encoder_callbacks_->addEncodedTrailers(); + trailer_added = true; + } + if (!response_latency_) { response_latency_ = std::make_unique( config.stats().response_transfer_duration_, const_cast(&config)->timeSource()); config.stats().response_pending_.inc(); } - config.stats().response_incoming_size_.set(data.length()); + config.stats().response_incoming_size_.add(data.length()); - response_limiter_->writeData(data, end_stream); + response_limiter_->writeData(data, end_stream, trailer_added); return Http::FilterDataStatus::StopIterationNoBuffer; } ENVOY_LOG(debug, "BandwidthLimiter : response_limiter not set"); return Http::FilterDataStatus::Continue; } -Http::FilterTrailersStatus BandwidthLimiter::encodeTrailers(Http::ResponseTrailerMap&) { +Http::FilterTrailersStatus BandwidthLimiter::encodeTrailers(Http::ResponseTrailerMap& responseTrailers) { if (response_limiter_ != nullptr) { + trailers = &responseTrailers; + if (response_limiter_->onTrailers()) { return Http::FilterTrailersStatus::StopIteration; } else { @@ -164,6 +189,7 @@ Http::FilterTrailersStatus BandwidthLimiter::encodeTrailers(Http::ResponseTraile void BandwidthLimiter::updateStatsOnDecodeFinish() { if (request_latency_) { + request_duration_ = 
request_latency_.get()->elapsed().count(); request_latency_->complete(); request_latency_.reset(); getConfig().stats().request_pending_.dec(); @@ -172,9 +198,18 @@ void BandwidthLimiter::updateStatsOnDecodeFinish() { void BandwidthLimiter::updateStatsOnEncodeFinish() { if (response_latency_) { + const auto& config = getConfig(); + + auto response_duration = response_latency_.get()->elapsed().count(); + if (trailers != nullptr && request_duration_ > 0) { + trailers->setCopy(config.request_delay_trailer(), std::to_string(request_duration_)); + } + if (trailers != nullptr && response_duration > 0) { + trailers->setCopy(config.response_delay_trailer(), std::to_string(response_duration)); + } response_latency_->complete(); response_latency_.reset(); - getConfig().stats().response_pending_.dec(); + config.stats().response_pending_.dec(); } } diff --git a/source/extensions/filters/http/bandwidth_limit/bandwidth_limit.h b/source/extensions/filters/http/bandwidth_limit/bandwidth_limit.h index f5bac46426425..40ba10e45ef72 100644 --- a/source/extensions/filters/http/bandwidth_limit/bandwidth_limit.h +++ b/source/extensions/filters/http/bandwidth_limit/bandwidth_limit.h @@ -32,12 +32,14 @@ namespace BandwidthLimitFilter { #define ALL_BANDWIDTH_LIMIT_STATS(COUNTER, GAUGE, HISTOGRAM) \ COUNTER(request_enabled) \ COUNTER(response_enabled) \ + COUNTER(request_enforced) \ + COUNTER(response_enforced) \ GAUGE(request_pending, Accumulate) \ GAUGE(response_pending, Accumulate) \ - GAUGE(request_incoming_size, Accumulate) \ - GAUGE(response_incoming_size, Accumulate) \ - GAUGE(request_allowed_size, Accumulate) \ - GAUGE(response_allowed_size, Accumulate) \ + COUNTER(request_incoming_size) \ + COUNTER(response_incoming_size) \ + COUNTER(request_allowed_size) \ + COUNTER(response_allowed_size) \ HISTOGRAM(request_transfer_duration, Milliseconds) \ HISTOGRAM(response_transfer_duration, Milliseconds) @@ -71,6 +73,8 @@ class FilterConfig : public ::Envoy::Router::RouteSpecificFilterConfig { 
EnableMode enableMode() const { return enable_mode_; }; const std::shared_ptr tokenBucket() const { return token_bucket_; } std::chrono::milliseconds fillInterval() const { return fill_interval_; } + const Http::LowerCaseString& request_delay_trailer() const { return request_delay_trailer_; } + const Http::LowerCaseString& response_delay_trailer() const { return response_delay_trailer_; } private: friend class FilterTest; @@ -86,6 +90,8 @@ class FilterConfig : public ::Envoy::Router::RouteSpecificFilterConfig { mutable BandwidthLimitStats stats_; // Filter chain's shared token bucket std::shared_ptr token_bucket_; + const Http::LowerCaseString request_delay_trailer_; + const Http::LowerCaseString response_delay_trailer_; }; using FilterConfigSharedPtr = std::shared_ptr; @@ -142,6 +148,8 @@ class BandwidthLimiter : public Http::StreamFilter, Logger::Loggable response_limiter_; Stats::TimespanPtr request_latency_; Stats::TimespanPtr response_latency_; + uint64_t request_duration_ = 0; + Http::ResponseTrailerMap* trailers; }; } // namespace BandwidthLimitFilter diff --git a/source/extensions/filters/http/common/stream_rate_limiter.cc b/source/extensions/filters/http/common/stream_rate_limiter.cc index 6763adbeb2414..aca234224c430 100644 --- a/source/extensions/filters/http/common/stream_rate_limiter.cc +++ b/source/extensions/filters/http/common/stream_rate_limiter.cc @@ -17,7 +17,7 @@ StreamRateLimiter::StreamRateLimiter( uint64_t max_kbps, uint64_t max_buffered_data, std::function pause_data_cb, std::function resume_data_cb, std::function write_data_cb, std::function continue_cb, - std::function write_stats_cb, TimeSource& time_source, + std::function write_stats_cb, TimeSource& time_source, Event::Dispatcher& dispatcher, const ScopeTrackedObject& scope, std::shared_ptr token_bucket, std::chrono::milliseconds fill_interval) : fill_interval_(std::move(fill_interval)), write_data_cb_(write_data_cb), @@ -63,7 +63,7 @@ void StreamRateLimiter::onTokenTimer() { // Move 
the data to write into the output buffer with as little copying as possible. // NOTE: This might be moving zero bytes, but that should work fine. data_to_write.move(buffer_, bytes_to_write); - write_stats_cb_(bytes_to_write); + write_stats_cb_(bytes_to_write, buffer_.length() > 0); // If the buffer still contains data in it, we couldn't get enough tokens, so schedule the next // token available time. @@ -88,10 +88,17 @@ void StreamRateLimiter::onTokenTimer() { } } -void StreamRateLimiter::writeData(Buffer::Instance& incoming_buffer, bool end_stream) { +void StreamRateLimiter::writeData(Buffer::Instance& incoming_buffer, bool end_stream, bool trailer_added) { auto len = incoming_buffer.length(); buffer_.move(incoming_buffer); saw_end_stream_ = end_stream; + // If trailer_added is true, set saw_trailers_ to true to continue encode trailers, added + // after buffer_.move to ensure buffer has data and won't invoke continue_cb_ before + // processing the data in last data frame. + if (trailer_added) { + saw_trailers_ = true; + } + ENVOY_LOG(debug, "StreamRateLimiter : got new {} bytes of data. 
token " "timer {} scheduled.", diff --git a/source/extensions/filters/http/common/stream_rate_limiter.h b/source/extensions/filters/http/common/stream_rate_limiter.h index b8aed9ac8a72a..84035c26a6c0a 100644 --- a/source/extensions/filters/http/common/stream_rate_limiter.h +++ b/source/extensions/filters/http/common/stream_rate_limiter.h @@ -49,7 +49,8 @@ class StreamRateLimiter : Logger::Loggable { StreamRateLimiter(uint64_t max_kbps, uint64_t max_buffered_data, std::function pause_data_cb, std::function resume_data_cb, std::function write_data_cb, - std::function continue_cb, std::function write_stats_cb, + std::function continue_cb, + std::function write_stats_cb, TimeSource& time_source, Event::Dispatcher& dispatcher, const ScopeTrackedObject& scope, std::shared_ptr token_bucket = nullptr, @@ -59,7 +60,7 @@ class StreamRateLimiter : Logger::Loggable { * Called by the stream to write data. All data writes happen asynchronously, the stream should * be stopped after this call (all data will be drained from incoming_buffer). */ - void writeData(Buffer::Instance& incoming_buffer, bool end_stream); + void writeData(Buffer::Instance& incoming_buffer, bool end_stream, bool trailer_added = false); /** * Called if the stream receives trailers. 
@@ -83,7 +84,7 @@ class StreamRateLimiter : Logger::Loggable { const std::chrono::milliseconds fill_interval_; const std::function write_data_cb_; const std::function continue_cb_; - const std::function write_stats_cb_; + const std::function write_stats_cb_; const ScopeTrackedObject& scope_; std::shared_ptr token_bucket_; Event::TimerPtr token_timer_; diff --git a/source/extensions/filters/http/fault/fault_filter.cc b/source/extensions/filters/http/fault/fault_filter.cc index 7499e47dd5b8c..809b396b2b0fc 100644 --- a/source/extensions/filters/http/fault/fault_filter.cc +++ b/source/extensions/filters/http/fault/fault_filter.cc @@ -212,7 +212,7 @@ void FaultFilter::maybeSetupResponseRateLimit(const Http::RequestHeaderMap& requ encoder_callbacks_->injectEncodedDataToFilterChain(data, end_stream); }, [this] { encoder_callbacks_->continueEncoding(); }, - [](uint64_t) { + [](uint64_t, bool) { // write stats callback. }, config_->timeSource(), decoder_callbacks_->dispatcher(), decoder_callbacks_->scope()); diff --git a/test/extensions/filters/http/bandwidth_limit/config_test.cc b/test/extensions/filters/http/bandwidth_limit/config_test.cc index b98ea7ca4d67f..6c173ad17a5bc 100644 --- a/test/extensions/filters/http/bandwidth_limit/config_test.cc +++ b/test/extensions/filters/http/bandwidth_limit/config_test.cc @@ -38,6 +38,7 @@ TEST(Factory, RouteSpecificFilterConfig) { enable_mode: REQUEST_AND_RESPONSE limit_kbps: 10 fill_interval: 0.1s + response_trailer_prefix: test )"; BandwidthLimitFilterConfig factory; @@ -54,6 +55,8 @@ TEST(Factory, RouteSpecificFilterConfig) { EXPECT_EQ(config->fillInterval().count(), 100); EXPECT_EQ(config->enableMode(), EnableMode::BandwidthLimit_EnableMode_REQUEST_AND_RESPONSE); EXPECT_FALSE(config->tokenBucket() == nullptr); + EXPECT_EQ(const_cast(config)->request_delay_trailer(), Http::LowerCaseString("test-bandwidth-request-delay-ms")); + EXPECT_EQ(const_cast(config)->response_delay_trailer(), 
Http::LowerCaseString("test-bandwidth-response-delay-ms")); } TEST(Factory, RouteSpecificFilterConfigDisabledByDefault) { @@ -97,6 +100,9 @@ TEST(Factory, RouteSpecificFilterConfigDefaultFillInterval) { const auto* config = dynamic_cast(route_config.get()); EXPECT_EQ(config->limit(), 10); EXPECT_EQ(config->fillInterval().count(), 50); + //default trailers + EXPECT_EQ(const_cast(config)->request_delay_trailer(), Http::LowerCaseString("bandwidth-request-delay-ms")); + EXPECT_EQ(const_cast(config)->response_delay_trailer(), Http::LowerCaseString("bandwidth-response-delay-ms")); } TEST(Factory, PerRouteConfigNoLimits) { diff --git a/test/extensions/filters/http/bandwidth_limit/filter_test.cc b/test/extensions/filters/http/bandwidth_limit/filter_test.cc index daffe9076ea34..3977f0fb5ad3d 100644 --- a/test/extensions/filters/http/bandwidth_limit/filter_test.cc +++ b/test/extensions/filters/http/bandwidth_limit/filter_test.cc @@ -11,6 +11,7 @@ using testing::_; using testing::AnyNumber; using testing::NiceMock; using testing::Return; +using testing::ReturnRef; namespace Envoy { namespace Extensions { @@ -57,6 +58,7 @@ class FilterTest : public testing::Test { Http::TestResponseTrailerMapImpl response_trailers_; Buffer::OwnedImpl data_; Event::SimulatedTimeSystem time_system_; + Http::TestResponseTrailerMapImpl trailers_; }; TEST_F(FilterTest, Disabled) { @@ -75,11 +77,14 @@ TEST_F(FilterTest, Disabled) { EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(data_, false)); EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(request_trailers_)); EXPECT_EQ(0U, findCounter("test.http_bandwidth_limit.request_enabled")); + EXPECT_EQ(0U, findCounter("test.http_bandwidth_limit.request_enforced")); EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->encodeHeaders(response_headers_, false)); EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->encodeData(data_, false)); EXPECT_EQ(Http::FilterTrailersStatus::Continue, 
filter_->encodeTrailers(response_trailers_)); EXPECT_EQ(0U, findCounter("test.http_bandwidth_limit.response_enabled")); + EXPECT_EQ(false, response_trailers_.has("bandwidth-request-delay-ms")); + EXPECT_EQ(false, response_trailers_.has("bandwidth-response-delay-ms")); } TEST_F(FilterTest, LimitOnDecode) { @@ -90,6 +95,7 @@ TEST_F(FilterTest, LimitOnDecode) { runtime_key: foo_key enable_mode: REQUEST limit_kbps: 1 + response_trailer_prefix: test )"; setup(fmt::format(config_yaml, "1")); @@ -107,11 +113,12 @@ TEST_F(FilterTest, LimitOnDecode) { EXPECT_CALL(*token_timer, enableTimer(std::chrono::milliseconds(0), _)); EXPECT_EQ(Http::FilterDataStatus::StopIterationNoBuffer, filter_->decodeData(data1, false)); EXPECT_EQ(1, findGauge("test.http_bandwidth_limit.request_pending")); - EXPECT_EQ(5, findGauge("test.http_bandwidth_limit.request_incoming_size")); + EXPECT_EQ(5, findCounter("test.http_bandwidth_limit.request_incoming_size")); EXPECT_CALL(decoder_filter_callbacks_, injectDecodedDataToFilterChain(BufferStringEqual("hello"), false)); token_timer->invokeCallback(); - EXPECT_EQ(5, findGauge("test.http_bandwidth_limit.request_allowed_size")); + EXPECT_EQ(0, findCounter("test.http_bandwidth_limit.request_enforced")); + EXPECT_EQ(5, findCounter("test.http_bandwidth_limit.request_allowed_size")); // Advance time by 1s which should refill all tokens. 
time_system_.advanceTimeWait(std::chrono::seconds(1)); @@ -122,15 +129,16 @@ TEST_F(FilterTest, LimitOnDecode) { Buffer::OwnedImpl data2(std::string(1126, 'a')); EXPECT_EQ(Http::FilterDataStatus::StopIterationNoBuffer, filter_->decodeData(data2, false)); EXPECT_EQ(1, findGauge("test.http_bandwidth_limit.request_pending")); - EXPECT_EQ(1126, findGauge("test.http_bandwidth_limit.request_incoming_size")); + EXPECT_EQ(1131, findCounter("test.http_bandwidth_limit.request_incoming_size")); EXPECT_CALL(*token_timer, enableTimer(std::chrono::milliseconds(50), _)); EXPECT_CALL(decoder_filter_callbacks_, onDecoderFilterBelowWriteBufferLowWatermark()); EXPECT_CALL(decoder_filter_callbacks_, injectDecodedDataToFilterChain(BufferStringEqual(std::string(1024, 'a')), false)); token_timer->invokeCallback(); - EXPECT_EQ(1024, findGauge("test.http_bandwidth_limit.request_allowed_size")); - EXPECT_EQ(1126, findGauge("test.http_bandwidth_limit.request_incoming_size")); + EXPECT_EQ(1, findCounter("test.http_bandwidth_limit.request_enforced")); + EXPECT_EQ(1029, findCounter("test.http_bandwidth_limit.request_allowed_size")); + EXPECT_EQ(1131, findCounter("test.http_bandwidth_limit.request_incoming_size")); // Fire timer, also advance time. time_system_.advanceTimeWait(std::chrono::milliseconds(50)); @@ -138,14 +146,15 @@ TEST_F(FilterTest, LimitOnDecode) { EXPECT_CALL(decoder_filter_callbacks_, injectDecodedDataToFilterChain(BufferStringEqual(std::string(51, 'a')), false)); token_timer->invokeCallback(); - EXPECT_EQ(51, findGauge("test.http_bandwidth_limit.request_allowed_size")); - EXPECT_EQ(1126, findGauge("test.http_bandwidth_limit.request_incoming_size")); + EXPECT_EQ(2, findCounter("test.http_bandwidth_limit.request_enforced")); + EXPECT_EQ(1080, findCounter("test.http_bandwidth_limit.request_allowed_size")); + EXPECT_EQ(1131, findCounter("test.http_bandwidth_limit.request_incoming_size")); // Get new data with current data buffered, not end_stream. 
Buffer::OwnedImpl data3(std::string(51, 'b')); EXPECT_EQ(Http::FilterDataStatus::StopIterationNoBuffer, filter_->decodeData(data3, false)); EXPECT_EQ(1, findGauge("test.http_bandwidth_limit.request_pending")); - EXPECT_EQ(51, findGauge("test.http_bandwidth_limit.request_incoming_size")); + EXPECT_EQ(1182, findCounter("test.http_bandwidth_limit.request_incoming_size")); // Fire timer, also advance time. time_system_.advanceTimeWait(std::chrono::milliseconds(50)); @@ -153,7 +162,8 @@ TEST_F(FilterTest, LimitOnDecode) { EXPECT_CALL(decoder_filter_callbacks_, injectDecodedDataToFilterChain(BufferStringEqual(std::string(51, 'a')), false)); token_timer->invokeCallback(); - EXPECT_EQ(51, findGauge("test.http_bandwidth_limit.request_allowed_size")); + EXPECT_EQ(3, findCounter("test.http_bandwidth_limit.request_enforced")); + EXPECT_EQ(1131, findCounter("test.http_bandwidth_limit.request_allowed_size")); // Fire timer, also advance time. No timer enable because there is nothing // buffered. @@ -161,7 +171,8 @@ TEST_F(FilterTest, LimitOnDecode) { EXPECT_CALL(decoder_filter_callbacks_, injectDecodedDataToFilterChain(BufferStringEqual(std::string(51, 'b')), false)); token_timer->invokeCallback(); - EXPECT_EQ(51, findGauge("test.http_bandwidth_limit.request_allowed_size")); + EXPECT_EQ(3, findCounter("test.http_bandwidth_limit.request_enforced")); + EXPECT_EQ(1182, findCounter("test.http_bandwidth_limit.request_allowed_size")); // Advance time by 1s for a full refill. 
time_system_.advanceTimeWait(std::chrono::seconds(1)); @@ -171,12 +182,15 @@ TEST_F(FilterTest, LimitOnDecode) { EXPECT_CALL(*token_timer, enableTimer(std::chrono::milliseconds(0), _)); Buffer::OwnedImpl data4(std::string(1024, 'c')); EXPECT_EQ(Http::FilterDataStatus::StopIterationNoBuffer, filter_->decodeData(data4, true)); - EXPECT_EQ(1024, findGauge("test.http_bandwidth_limit.request_incoming_size")); + EXPECT_EQ(2206, findCounter("test.http_bandwidth_limit.request_incoming_size")); EXPECT_CALL(decoder_filter_callbacks_, injectDecodedDataToFilterChain(BufferStringEqual(std::string(1024, 'c')), true)); token_timer->invokeCallback(); - EXPECT_EQ(1024, findGauge("test.http_bandwidth_limit.request_allowed_size")); + EXPECT_EQ(3, findCounter("test.http_bandwidth_limit.request_enforced")); + EXPECT_EQ(2206, findCounter("test.http_bandwidth_limit.request_allowed_size")); EXPECT_EQ(0, findGauge("test.http_bandwidth_limit.request_pending")); + EXPECT_EQ(false, response_trailers_.has("test-bandwidth-request-delay-ms")); + EXPECT_EQ(false, response_trailers_.has("test-bandwidth-response-delay-ms")); filter_->onDestroy(); } @@ -189,10 +203,12 @@ TEST_F(FilterTest, LimitOnEncode) { runtime_key: foo_key enable_mode: RESPONSE limit_kbps: 1 + response_trailer_prefix: test )"; setup(fmt::format(config_yaml, "1")); ON_CALL(encoder_filter_callbacks_, encoderBufferLimit()).WillByDefault(Return(1100)); + ON_CALL(encoder_filter_callbacks_, addEncodedTrailers()).WillByDefault(ReturnRef(trailers_)); Event::MockTimer* token_timer = new NiceMock(&encoder_filter_callbacks_.dispatcher_); @@ -211,11 +227,12 @@ TEST_F(FilterTest, LimitOnEncode) { EXPECT_CALL(*token_timer, enableTimer(std::chrono::milliseconds(0), _)); EXPECT_EQ(Http::FilterDataStatus::StopIterationNoBuffer, filter_->encodeData(data1, false)); EXPECT_EQ(1, findGauge("test.http_bandwidth_limit.response_pending")); - EXPECT_EQ(5, findGauge("test.http_bandwidth_limit.response_incoming_size")); + EXPECT_EQ(5, 
findCounter("test.http_bandwidth_limit.response_incoming_size")); EXPECT_CALL(encoder_filter_callbacks_, injectEncodedDataToFilterChain(BufferStringEqual("hello"), false)); token_timer->invokeCallback(); - EXPECT_EQ(5, findGauge("test.http_bandwidth_limit.response_allowed_size")); + EXPECT_EQ(0, findCounter("test.http_bandwidth_limit.response_enforced")); + EXPECT_EQ(5, findCounter("test.http_bandwidth_limit.response_allowed_size")); // Advance time by 1s which should refill all tokens. time_system_.advanceTimeWait(std::chrono::seconds(1)); @@ -225,7 +242,7 @@ TEST_F(FilterTest, LimitOnEncode) { EXPECT_CALL(*token_timer, enableTimer(std::chrono::milliseconds(0), _)); Buffer::OwnedImpl data2(std::string(1126, 'a')); EXPECT_EQ(Http::FilterDataStatus::StopIterationNoBuffer, filter_->encodeData(data2, false)); - EXPECT_EQ(1126, findGauge("test.http_bandwidth_limit.response_incoming_size")); + EXPECT_EQ(1131, findCounter("test.http_bandwidth_limit.response_incoming_size")); EXPECT_CALL(*token_timer, enableTimer(std::chrono::milliseconds(50), _)); EXPECT_CALL(encoder_filter_callbacks_, onEncoderFilterBelowWriteBufferLowWatermark()); @@ -233,8 +250,9 @@ TEST_F(FilterTest, LimitOnEncode) { injectEncodedDataToFilterChain(BufferStringEqual(std::string(1024, 'a')), false)); token_timer->invokeCallback(); EXPECT_EQ(1, findGauge("test.http_bandwidth_limit.response_pending")); - EXPECT_EQ(1126, findGauge("test.http_bandwidth_limit.response_incoming_size")); - EXPECT_EQ(1024, findGauge("test.http_bandwidth_limit.response_allowed_size")); + EXPECT_EQ(1, findCounter("test.http_bandwidth_limit.response_enforced")); + EXPECT_EQ(1131, findCounter("test.http_bandwidth_limit.response_incoming_size")); + EXPECT_EQ(1029, findCounter("test.http_bandwidth_limit.response_allowed_size")); // Fire timer, also advance time. 
time_system_.advanceTimeWait(std::chrono::milliseconds(50)); @@ -242,7 +260,8 @@ TEST_F(FilterTest, LimitOnEncode) { EXPECT_CALL(encoder_filter_callbacks_, injectEncodedDataToFilterChain(BufferStringEqual(std::string(51, 'a')), false)); token_timer->invokeCallback(); - EXPECT_EQ(51, findGauge("test.http_bandwidth_limit.response_allowed_size")); + EXPECT_EQ(2, findCounter("test.http_bandwidth_limit.response_enforced")); + EXPECT_EQ(1080, findCounter("test.http_bandwidth_limit.response_allowed_size")); // Get new data with current data buffered, not end_stream. Buffer::OwnedImpl data3(std::string(51, 'b')); @@ -254,7 +273,8 @@ TEST_F(FilterTest, LimitOnEncode) { EXPECT_CALL(encoder_filter_callbacks_, injectEncodedDataToFilterChain(BufferStringEqual(std::string(51, 'a')), false)); token_timer->invokeCallback(); - EXPECT_EQ(51, findGauge("test.http_bandwidth_limit.response_allowed_size")); + EXPECT_EQ(3, findCounter("test.http_bandwidth_limit.response_enforced")); + EXPECT_EQ(1131, findCounter("test.http_bandwidth_limit.response_allowed_size")); // Fire timer, also advance time. No time enable because there is nothing // buffered. @@ -262,7 +282,8 @@ TEST_F(FilterTest, LimitOnEncode) { EXPECT_CALL(encoder_filter_callbacks_, injectEncodedDataToFilterChain(BufferStringEqual(std::string(51, 'b')), false)); token_timer->invokeCallback(); - EXPECT_EQ(51, findGauge("test.http_bandwidth_limit.response_allowed_size")); + EXPECT_EQ(3, findCounter("test.http_bandwidth_limit.response_enforced")); + EXPECT_EQ(1182, findCounter("test.http_bandwidth_limit.response_allowed_size")); // Advance time by 1s for a full refill. 
time_system_.advanceTimeWait(std::chrono::seconds(1)); @@ -272,12 +293,16 @@ TEST_F(FilterTest, LimitOnEncode) { EXPECT_CALL(*token_timer, enableTimer(std::chrono::milliseconds(0), _)); Buffer::OwnedImpl data4(std::string(1024, 'c')); EXPECT_EQ(Http::FilterDataStatus::StopIterationNoBuffer, filter_->encodeData(data4, true)); - EXPECT_EQ(1024, findGauge("test.http_bandwidth_limit.response_incoming_size")); + EXPECT_EQ(2206, findCounter("test.http_bandwidth_limit.response_incoming_size")); EXPECT_CALL(encoder_filter_callbacks_, - injectEncodedDataToFilterChain(BufferStringEqual(std::string(1024, 'c')), true)); + injectEncodedDataToFilterChain(BufferStringEqual(std::string(1024, 'c')), false)); token_timer->invokeCallback(); EXPECT_EQ(0, findGauge("test.http_bandwidth_limit.response_pending")); - EXPECT_EQ(1024, findGauge("test.http_bandwidth_limit.response_allowed_size")); + EXPECT_EQ(3, findCounter("test.http_bandwidth_limit.response_enforced")); + EXPECT_EQ(2206, findCounter("test.http_bandwidth_limit.response_allowed_size")); + + EXPECT_EQ(false, response_trailers_.has("test-bandwidth-request-delay-ms")); + EXPECT_EQ("2150", trailers_.get_("test-bandwidth-response-delay-ms")); filter_->onDestroy(); } @@ -290,11 +315,13 @@ TEST_F(FilterTest, LimitOnDecodeAndEncode) { runtime_key: foo_key enable_mode: REQUEST_AND_RESPONSE limit_kbps: 1 + response_trailer_prefix: test )"; setup(fmt::format(config_yaml, "1")); ON_CALL(decoder_filter_callbacks_, decoderBufferLimit()).WillByDefault(Return(1050)); ON_CALL(encoder_filter_callbacks_, encoderBufferLimit()).WillByDefault(Return(1100)); + ON_CALL(encoder_filter_callbacks_, addEncodedTrailers()).WillByDefault(ReturnRef(trailers_)); Event::MockTimer* request_timer = new NiceMock(&decoder_filter_callbacks_.dispatcher_); Event::MockTimer* response_timer = @@ -403,9 +430,13 @@ TEST_F(FilterTest, LimitOnDecodeAndEncode) { EXPECT_CALL(decoder_filter_callbacks_, injectDecodedDataToFilterChain(BufferStringEqual(std::string(51, 'd')), 
true)); EXPECT_CALL(encoder_filter_callbacks_, - injectEncodedDataToFilterChain(BufferStringEqual(std::string(960, 'e')), true)); - response_timer->invokeCallback(); + injectEncodedDataToFilterChain(BufferStringEqual(std::string(960, 'e')), false)); + EXPECT_CALL(encoder_filter_callbacks_, continueEncoding()); + request_timer->invokeCallback(); + response_timer->invokeCallback(); + EXPECT_EQ("2200", trailers_.get_("test-bandwidth-request-delay-ms")); + EXPECT_EQ("2200", trailers_.get_("test-bandwidth-response-delay-ms")); filter_->onDestroy(); } @@ -418,6 +449,7 @@ TEST_F(FilterTest, WithTrailers) { runtime_key: foo_key enable_mode: REQUEST_AND_RESPONSE limit_kbps: 1 + response_trailer_prefix: test )"; setup(fmt::format(config_yaml, "1")); @@ -479,6 +511,9 @@ TEST_F(FilterTest, WithTrailers) { injectEncodedDataToFilterChain(BufferStringEqual(std::string(5, 'e')), false)); response_timer->invokeCallback(); EXPECT_EQ(0, findGauge("test.http_bandwidth_limit.response_pending")); + + EXPECT_EQ("50", response_trailers_.get_("test-bandwidth-request-delay-ms")); + EXPECT_EQ("150", response_trailers_.get_("test-bandwidth-response-delay-ms")); } TEST_F(FilterTest, WithTrailersNoEndStream) { @@ -550,6 +585,9 @@ TEST_F(FilterTest, WithTrailersNoEndStream) { EXPECT_EQ(1, findGauge("test.http_bandwidth_limit.response_pending")); EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->encodeTrailers(response_trailers_)); EXPECT_EQ(0, findGauge("test.http_bandwidth_limit.response_pending")); + + EXPECT_EQ("50", response_trailers_.get_("bandwidth-request-delay-ms")); + EXPECT_EQ("150", response_trailers_.get_("bandwidth-response-delay-ms")); } } // namespace BandwidthLimitFilter diff --git a/test/extensions/filters/http/common/stream_rate_limiter_test.cc b/test/extensions/filters/http/common/stream_rate_limiter_test.cc index 97f18f5957ca1..7fc0aeca1c66a 100644 --- a/test/extensions/filters/http/common/stream_rate_limiter_test.cc +++ 
b/test/extensions/filters/http/common/stream_rate_limiter_test.cc @@ -40,7 +40,7 @@ class StreamRateLimiterTest : public testing::Test { decoder_callbacks_.injectDecodedDataToFilterChain(data, end_stream); }, [this] { decoder_callbacks_.continueDecoding(); }, - [](uint64_t /*len*/) { + [](uint64_t /*len*/, bool) { // config->stats().decode_allowed_size_.set(len); }, time_system_, decoder_callbacks_.dispatcher_, decoder_callbacks_.scope(), token_bucket, @@ -59,7 +59,7 @@ class StreamRateLimiterTest : public testing::Test { decoder_callbacks_.injectDecodedDataToFilterChain(data, end_stream); }, [this] { decoder_callbacks_.continueDecoding(); }, - [](uint64_t /*len*/) { + [](uint64_t /*len*/, bool) { // config->stats().decode_allowed_size_.set(len); }, time_system_, decoder_callbacks_.dispatcher_, decoder_callbacks_.scope()); From 40730f8282dc7447455404bbe1d2d1554ee66d13 Mon Sep 17 00:00:00 2001 From: gayang Date: Sun, 26 Sep 2021 14:35:41 +0000 Subject: [PATCH 004/121] fix format Signed-off-by: gayang --- .../http/bandwidth_limit/bandwidth_limit.cc | 37 ++++++++++++------- .../http/common/stream_rate_limiter.cc | 7 ++-- .../filters/http/common/stream_rate_limiter.h | 7 ++-- .../http/bandwidth_limit/config_test.cc | 14 ++++--- 4 files changed, 40 insertions(+), 25 deletions(-) diff --git a/source/extensions/filters/http/bandwidth_limit/bandwidth_limit.cc b/source/extensions/filters/http/bandwidth_limit/bandwidth_limit.cc index 036122b45bcbc..89c34b5b31a51 100644 --- a/source/extensions/filters/http/bandwidth_limit/bandwidth_limit.cc +++ b/source/extensions/filters/http/bandwidth_limit/bandwidth_limit.cc @@ -17,9 +17,11 @@ namespace HttpFilters { namespace BandwidthLimitFilter { namespace { - const Http::LowerCaseString DefaultRequestDelayTrailer = Http::LowerCaseString("bandwidth-request-delay-ms"); - const Http::LowerCaseString DefaultResponseDelayTrailer = Http::LowerCaseString("bandwidth-response-delay-ms"); -} +const Http::LowerCaseString 
DefaultRequestDelayTrailer = + Http::LowerCaseString("bandwidth-request-delay-ms"); +const Http::LowerCaseString DefaultResponseDelayTrailer = + Http::LowerCaseString("bandwidth-response-delay-ms"); +} // namespace FilterConfig::FilterConfig(const BandwidthLimit& config, Stats::Scope& scope, Runtime::Loader& runtime, TimeSource& time_source, bool per_route) @@ -29,10 +31,14 @@ FilterConfig::FilterConfig(const BandwidthLimit& config, Stats::Scope& scope, config, fill_interval, StreamRateLimiter::DefaultFillInterval.count()))), enabled_(config.runtime_enabled(), runtime), stats_(generateStats(config.stat_prefix(), scope)), - request_delay_trailer_(config.response_trailer_prefix().empty() ? DefaultRequestDelayTrailer - : Http::LowerCaseString(config.response_trailer_prefix() + "-" + DefaultRequestDelayTrailer.get())), - response_delay_trailer_(config.response_trailer_prefix().empty() ? DefaultResponseDelayTrailer - : Http::LowerCaseString(config.response_trailer_prefix() + "-" + DefaultResponseDelayTrailer.get())) { + request_delay_trailer_(config.response_trailer_prefix().empty() + ? DefaultRequestDelayTrailer + : Http::LowerCaseString(config.response_trailer_prefix() + "-" + + DefaultRequestDelayTrailer.get())), + response_delay_trailer_(config.response_trailer_prefix().empty() + ? 
DefaultResponseDelayTrailer + : Http::LowerCaseString(config.response_trailer_prefix() + "-" + + DefaultResponseDelayTrailer.get())) { if (per_route && !config.has_limit_kbps()) { throw EnvoyException("bandwidthlimitfilter: limit must be set for per route filter config"); } @@ -73,9 +79,11 @@ Http::FilterHeadersStatus BandwidthLimiter::decodeHeaders(Http::RequestHeaderMap updateStatsOnDecodeFinish(); decoder_callbacks_->continueDecoding(); }, - [&config](uint64_t len, bool limit_enforced) { + [&config](uint64_t len, bool limit_enforced) { config.stats().request_allowed_size_.add(len); - if (limit_enforced) { config.stats().request_enforced_.inc(); } + if (limit_enforced) { + config.stats().request_enforced_.inc(); + } }, const_cast(&config)->timeSource(), decoder_callbacks_->dispatcher(), decoder_callbacks_->scope(), config.tokenBucket(), config.fillInterval()); @@ -135,9 +143,11 @@ Http::FilterHeadersStatus BandwidthLimiter::encodeHeaders(Http::ResponseHeaderMa updateStatsOnEncodeFinish(); encoder_callbacks_->continueEncoding(); }, - [&config](uint64_t len, bool limit_enforced) { + [&config](uint64_t len, bool limit_enforced) { config.stats().response_allowed_size_.add(len); - if (limit_enforced) { config.stats().response_enforced_.inc(); } + if (limit_enforced) { + config.stats().response_enforced_.inc(); + } }, const_cast(&config)->timeSource(), encoder_callbacks_->dispatcher(), encoder_callbacks_->scope(), config.tokenBucket(), config.fillInterval()); @@ -173,7 +183,8 @@ Http::FilterDataStatus BandwidthLimiter::encodeData(Buffer::Instance& data, bool return Http::FilterDataStatus::Continue; } -Http::FilterTrailersStatus BandwidthLimiter::encodeTrailers(Http::ResponseTrailerMap& responseTrailers) { +Http::FilterTrailersStatus +BandwidthLimiter::encodeTrailers(Http::ResponseTrailerMap& responseTrailers) { if (response_limiter_ != nullptr) { trailers = &responseTrailers; @@ -199,7 +210,7 @@ void BandwidthLimiter::updateStatsOnDecodeFinish() { void 
BandwidthLimiter::updateStatsOnEncodeFinish() { if (response_latency_) { const auto& config = getConfig(); - + auto response_duration = response_latency_.get()->elapsed().count(); if (trailers != nullptr && request_duration_ > 0) { trailers->setCopy(config.request_delay_trailer(), std::to_string(request_duration_)); diff --git a/source/extensions/filters/http/common/stream_rate_limiter.cc b/source/extensions/filters/http/common/stream_rate_limiter.cc index aca234224c430..a4e215a5f1d03 100644 --- a/source/extensions/filters/http/common/stream_rate_limiter.cc +++ b/source/extensions/filters/http/common/stream_rate_limiter.cc @@ -88,17 +88,18 @@ void StreamRateLimiter::onTokenTimer() { } } -void StreamRateLimiter::writeData(Buffer::Instance& incoming_buffer, bool end_stream, bool trailer_added) { +void StreamRateLimiter::writeData(Buffer::Instance& incoming_buffer, bool end_stream, + bool trailer_added) { auto len = incoming_buffer.length(); buffer_.move(incoming_buffer); saw_end_stream_ = end_stream; - // If trailer_added is true, set saw_trailers_ to true to continue encode trailers, added + // If trailer_added is true, set saw_trailers_ to true to continue encode trailers, added // after buffer_.move to ensure buffer has data and won't invoke continue_cb_ before // processing the data in last data frame. if (trailer_added) { saw_trailers_ = true; } - + ENVOY_LOG(debug, "StreamRateLimiter : got new {} bytes of data. 
token " "timer {} scheduled.", diff --git a/source/extensions/filters/http/common/stream_rate_limiter.h b/source/extensions/filters/http/common/stream_rate_limiter.h index 84035c26a6c0a..42b6c21015f91 100644 --- a/source/extensions/filters/http/common/stream_rate_limiter.h +++ b/source/extensions/filters/http/common/stream_rate_limiter.h @@ -49,10 +49,9 @@ class StreamRateLimiter : Logger::Loggable { StreamRateLimiter(uint64_t max_kbps, uint64_t max_buffered_data, std::function pause_data_cb, std::function resume_data_cb, std::function write_data_cb, - std::function continue_cb, - std::function write_stats_cb, - TimeSource& time_source, Event::Dispatcher& dispatcher, - const ScopeTrackedObject& scope, + std::function continue_cb, + std::function write_stats_cb, TimeSource& time_source, + Event::Dispatcher& dispatcher, const ScopeTrackedObject& scope, std::shared_ptr token_bucket = nullptr, std::chrono::milliseconds fill_interval = DefaultFillInterval); diff --git a/test/extensions/filters/http/bandwidth_limit/config_test.cc b/test/extensions/filters/http/bandwidth_limit/config_test.cc index 6c173ad17a5bc..e3a16aa07a9b8 100644 --- a/test/extensions/filters/http/bandwidth_limit/config_test.cc +++ b/test/extensions/filters/http/bandwidth_limit/config_test.cc @@ -55,8 +55,10 @@ TEST(Factory, RouteSpecificFilterConfig) { EXPECT_EQ(config->fillInterval().count(), 100); EXPECT_EQ(config->enableMode(), EnableMode::BandwidthLimit_EnableMode_REQUEST_AND_RESPONSE); EXPECT_FALSE(config->tokenBucket() == nullptr); - EXPECT_EQ(const_cast(config)->request_delay_trailer(), Http::LowerCaseString("test-bandwidth-request-delay-ms")); - EXPECT_EQ(const_cast(config)->response_delay_trailer(), Http::LowerCaseString("test-bandwidth-response-delay-ms")); + EXPECT_EQ(const_cast(config)->request_delay_trailer(), + Http::LowerCaseString("test-bandwidth-request-delay-ms")); + EXPECT_EQ(const_cast(config)->response_delay_trailer(), + Http::LowerCaseString("test-bandwidth-response-delay-ms")); 
} TEST(Factory, RouteSpecificFilterConfigDisabledByDefault) { @@ -100,9 +102,11 @@ TEST(Factory, RouteSpecificFilterConfigDefaultFillInterval) { const auto* config = dynamic_cast(route_config.get()); EXPECT_EQ(config->limit(), 10); EXPECT_EQ(config->fillInterval().count(), 50); - //default trailers - EXPECT_EQ(const_cast(config)->request_delay_trailer(), Http::LowerCaseString("bandwidth-request-delay-ms")); - EXPECT_EQ(const_cast(config)->response_delay_trailer(), Http::LowerCaseString("bandwidth-response-delay-ms")); + // default trailers + EXPECT_EQ(const_cast(config)->request_delay_trailer(), + Http::LowerCaseString("bandwidth-request-delay-ms")); + EXPECT_EQ(const_cast(config)->response_delay_trailer(), + Http::LowerCaseString("bandwidth-response-delay-ms")); } TEST(Factory, PerRouteConfigNoLimits) { From ddc3b33ac906ed21c2430e5748c6282d2677fc67 Mon Sep 17 00:00:00 2001 From: gayang Date: Thu, 9 Sep 2021 09:23:38 +0000 Subject: [PATCH 005/121] fix comment Signed-off-by: gayang --- .../filters/http/bandwidth_limit/v3alpha/bandwidth_limit.proto | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/api/envoy/extensions/filters/http/bandwidth_limit/v3alpha/bandwidth_limit.proto b/api/envoy/extensions/filters/http/bandwidth_limit/v3alpha/bandwidth_limit.proto index 928231c4edcfb..2395402fd1ab6 100644 --- a/api/envoy/extensions/filters/http/bandwidth_limit/v3alpha/bandwidth_limit.proto +++ b/api/envoy/extensions/filters/http/bandwidth_limit/v3alpha/bandwidth_limit.proto @@ -69,7 +69,7 @@ message BandwidthLimit { config.core.v3.RuntimeFeatureFlag runtime_enabled = 5; // Optional the prefix for the response trailers of bandwidth decode/encode delays. - // If not set, use the default value "bandwidth-request-delay-ms" or "bandwidth-response-delay-ms". + // If not set, use the default value "bandwidth-request-delay-ms" and "bandwidth-response-delay-ms". 
// If set, the trailer name will be set as: // request: response_trailer_prefix + "-bandwidth-request-delay-ms" // Delay time it took for the request stream transfer. From 8e8b6f15c2a368d9bbd9f221936e9b18d12d7c9b Mon Sep 17 00:00:00 2001 From: phlax Date: Thu, 9 Sep 2021 18:24:44 +0100 Subject: [PATCH 006/121] protos: Cleanup unused imports (#18024) Signed-off-by: Ryan Northey Signed-off-by: gayang --- api/envoy/config/endpoint/v3/endpoint_components.proto | 1 - api/envoy/extensions/common/key_value/v3/config.proto | 3 --- .../envoy/config/endpoint/v3/endpoint_components.proto | 1 - .../envoy/extensions/common/key_value/v3/config.proto | 3 --- 4 files changed, 8 deletions(-) diff --git a/api/envoy/config/endpoint/v3/endpoint_components.proto b/api/envoy/config/endpoint/v3/endpoint_components.proto index 0a9aac105e72d..1faf64e20c2c6 100644 --- a/api/envoy/config/endpoint/v3/endpoint_components.proto +++ b/api/envoy/config/endpoint/v3/endpoint_components.proto @@ -9,7 +9,6 @@ import "envoy/config/core/v3/health_check.proto"; import "google/protobuf/wrappers.proto"; -import "udpa/annotations/migrate.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; diff --git a/api/envoy/extensions/common/key_value/v3/config.proto b/api/envoy/extensions/common/key_value/v3/config.proto index 0db9c622cd16c..66a55435437b3 100644 --- a/api/envoy/extensions/common/key_value/v3/config.proto +++ b/api/envoy/extensions/common/key_value/v3/config.proto @@ -4,9 +4,6 @@ package envoy.extensions.common.key_value.v3; import "envoy/config/core/v3/extension.proto"; -import "google/protobuf/any.proto"; -import "google/protobuf/duration.proto"; - import "udpa/annotations/status.proto"; import "validate/validate.proto"; diff --git a/generated_api_shadow/envoy/config/endpoint/v3/endpoint_components.proto b/generated_api_shadow/envoy/config/endpoint/v3/endpoint_components.proto index 0a9aac105e72d..1faf64e20c2c6 100644 --- 
a/generated_api_shadow/envoy/config/endpoint/v3/endpoint_components.proto +++ b/generated_api_shadow/envoy/config/endpoint/v3/endpoint_components.proto @@ -9,7 +9,6 @@ import "envoy/config/core/v3/health_check.proto"; import "google/protobuf/wrappers.proto"; -import "udpa/annotations/migrate.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; diff --git a/generated_api_shadow/envoy/extensions/common/key_value/v3/config.proto b/generated_api_shadow/envoy/extensions/common/key_value/v3/config.proto index 0db9c622cd16c..66a55435437b3 100644 --- a/generated_api_shadow/envoy/extensions/common/key_value/v3/config.proto +++ b/generated_api_shadow/envoy/extensions/common/key_value/v3/config.proto @@ -4,9 +4,6 @@ package envoy.extensions.common.key_value.v3; import "envoy/config/core/v3/extension.proto"; -import "google/protobuf/any.proto"; -import "google/protobuf/duration.proto"; - import "udpa/annotations/status.proto"; import "validate/validate.proto"; From 0ebd6e602b9c583bd6aca611cdf0b64e321b4740 Mon Sep 17 00:00:00 2001 From: John Esmet Date: Thu, 9 Sep 2021 13:25:10 -0400 Subject: [PATCH 007/121] Fix a typo (#18045) Signed-off-by: John Esmet Signed-off-by: gayang --- envoy/server/overload/overload_manager.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/envoy/server/overload/overload_manager.h b/envoy/server/overload/overload_manager.h index 4939897c70792..bfdbb2e260c21 100644 --- a/envoy/server/overload/overload_manager.h +++ b/envoy/server/overload/overload_manager.h @@ -46,7 +46,7 @@ using OverloadActionNames = ConstSingleton; */ class OverloadActionStatsNameValues { public: - // Count of ther number of streams the reset streams action has reset + // Count of the number of streams the reset streams action has reset const std::string ResetStreamsCount = "envoy.overload_actions.reset_high_memory_stream.count"; }; From 9038b8275a37e2f6d8455001668fdc0831042ca6 Mon Sep 17 
00:00:00 2001 From: William A Rowe Jr Date: Thu, 9 Sep 2021 16:54:30 -0500 Subject: [PATCH 008/121] Update OWNERS.md (#18054) Request add to Security Team for research, platform portability and liason to our upstream dependencies. Signed-off-by: William A Rowe Jr Signed-off-by: gayang --- OWNERS.md | 1 + 1 file changed, 1 insertion(+) diff --git a/OWNERS.md b/OWNERS.md index 74100d89c40be..f4b600c7d29d1 100644 --- a/OWNERS.md +++ b/OWNERS.md @@ -65,6 +65,7 @@ without further review. * Tony Allen ([tonya11en](https://github.com/tonya11en)) (tony@allen.gg) * Dmitri Dolguikh ([dmitri-d](https://github.com/dmitri-d)) (ddolguik@redhat.com) * Yan Avlasov ([yanavlasov](https://github.com/yanavlasov)) (yavlasov@google.com) +* William A Rowe Jr ([wrowe](https://github.com/wrowe)) (wrowe@vmware.com) # Emeritus maintainers From fdee7c6093c81488bb928b495e495915a13a46c5 Mon Sep 17 00:00:00 2001 From: alyssawilk Date: Fri, 10 Sep 2021 00:04:01 -0400 Subject: [PATCH 009/121] docs: api docs update (#17966) Signed-off-by: Alyssa Wilk Signed-off-by: gayang --- CONTRIBUTING.md | 10 ++-------- api/STYLE.md | 31 ++++++++++++++++++++----------- 2 files changed, 22 insertions(+), 19 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index c59d73c0e7222..9b6e47913f07f 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -253,15 +253,9 @@ Extension configuration should be located in a directory structure like The code for the extension should be located under the equivalent `source/extensions/area/plugin`, and include an *envoy_cc_extension* with the configuration and tagged with the appropriate security posture, and an -*envoy_cc_library* with the code. More details on how to add a new extension -API can be found [here](api/STYLE.md#adding-an-extension-configuration-to-the-api): +*envoy_cc_library* with the code. 
-Other changes will likely include - - * Editing [source/extensions/extensions_build_config.bzl](source/extensions/extensions_build_config.bzl) to include the new extensions - * Editing [source/extensions/extensions_metadata.yaml](source/extensions/extensions_metadata.yaml) to include metadata for the new extensions - * Editing [docs/root/api-v3/config/config.rst](docs/root/api-v3/config/config.rst) to add area/area - * Adding `docs/root/api-v3/config/area/area.rst` to add a table of contents for the API docs +More details on how to add a new extension API can be found [here](api/STYLE.md#adding-an-extension-configuration-to-the-api): # Adding contrib extensions diff --git a/api/STYLE.md b/api/STYLE.md index d73e17b773b24..b185be97c9687 100644 --- a/api/STYLE.md +++ b/api/STYLE.md @@ -112,10 +112,9 @@ Extensions must currently be added as v3 APIs following the [package organization](#package-organization) above. To add an extension config to the API, the steps below should be followed: -1. If this is still WiP and subject to breaking changes, use `vNalpha` instead of `vN` in steps - below. Refer to the [Cache filter config](envoy/extensions/filters/http/cache/v3alpha/cache.proto) - as an example of `v3alpha`, and the - [Buffer filter config](envoy/extensions/filters/http/buffer/v3/buffer.proto) as an example of `v3`. +1. If this is still WiP and subject to breaking changes, please tag it + `option (udpa.annotations.file_status).work_in_progress = true;` and + optionally hide it from the docs (`[#not-implemented-hide:]`. 1. Place the v3 extension configuration `.proto` in `api/envoy/extensions`, e.g. `api/envoy/extensions/filters/http/foobar/v3/foobar.proto` together with an initial BUILD file: ```bazel @@ -127,16 +126,26 @@ To add an extension config to the API, the steps below should be followed: deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], ) ``` -1. Add to the v3 extension config proto `import "udpa/annotations/status.proto";` -1. 
If this is still WiP and subject to breaking changes, set - `option (udpa.annotations.file_status).work_in_progress = true;`. -1. Add to the v3 extension config proto a file level - `option (udpa.annotations.file_status).package_version_status = ACTIVE;`. +1. Update [source/extensions/extensions_metadata.yaml](../source/extensions/extensions_metadata.yaml) + with the category, security posture, and status. The category field will have to match an + annotation of the form `// [#extension-category: your.extension.category]` + in one of the proto files for the docs build to pass. +1. Update + [source/extensions/extensions_build_config.bzl](source/extensions/extensions_build_config.bzl) + to include the new extension. +1. If the extension is not hidden, find or create a docs file with a toctree + and to reference your proto to make sure users can navigate to it from the API docs + (and to not break the docs build). + See the [key-value-store PR](https://github.com/envoyproxy/envoy/pull/17745/files) for an example of adding a new extension point to common. +1. Make sure your proto imports the v3 extension config proto (`import "udpa/annotations/status.proto";`) +1. Make sure your proto is either tracked as a work in progress + (`option (udpa.annotations.file_status).work_in_progress = true;`) + or ready to be used + (`option (udpa.annotations.file_status).package_version_status = ACTIVE;`). This is required to automatically include the config proto in [api/versioning/BUILD](versioning/BUILD). 1. Add a reference to the v3 extension config in (1) in [api/versioning/BUILD](versioning/BUILD) under `active_protos`. 1. Run `./tools/proto_format/proto_format.sh fix`. This should regenerate the `BUILD` file, - reformat `foobar.proto` as needed and also generate the v4alpha extension config (if needed), - together with shadow API protos. + reformat `foobar.proto` as needed and also generate the shadow API protos. 1. 
`git add api/ generated_api_shadow/` to add any new files to your Git index. ## API annotations From a58745283383968031fb224b1b7cc10e45996345 Mon Sep 17 00:00:00 2001 From: moderation Date: Fri, 10 Sep 2021 07:57:17 -0700 Subject: [PATCH 010/121] Update base Alpine and Distroless container images. (#18062) Signed-off-by: Michael Payne Signed-off-by: gayang --- ci/Dockerfile-envoy-alpine | 2 +- ci/Dockerfile-envoy-distroless | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/ci/Dockerfile-envoy-alpine b/ci/Dockerfile-envoy-alpine index 36a23f6e3a2aa..ae0e57cdb9e96 100644 --- a/ci/Dockerfile-envoy-alpine +++ b/ci/Dockerfile-envoy-alpine @@ -1,4 +1,4 @@ -FROM frolvlad/alpine-glibc:alpine-3.12_glibc-2.31 +FROM frolvlad/alpine-glibc:alpine-3.14_glibc-2.33 RUN mkdir -p /etc/envoy ADD configs/envoyproxy_io_proxy.yaml /etc/envoy/envoy.yaml diff --git a/ci/Dockerfile-envoy-distroless b/ci/Dockerfile-envoy-distroless index d2647f7b38557..40c2257e5b4bb 100644 --- a/ci/Dockerfile-envoy-distroless +++ b/ci/Dockerfile-envoy-distroless @@ -1,4 +1,4 @@ -FROM gcr.io/distroless/base-debian10:nonroot +FROM gcr.io/distroless/base-debian11:nonroot ADD configs/envoyproxy_io_proxy.yaml /etc/envoy/envoy.yaml From 212cbe6517de25600bd0fdb32854b4bb0660cade Mon Sep 17 00:00:00 2001 From: Le Yao Date: Fri, 10 Sep 2021 23:00:01 +0800 Subject: [PATCH 011/121] Wrong past participle words fixed in comments (#18037) Signed-off-by: Le Yao Signed-off-by: gayang --- envoy/stats/allocator.h | 11 ++++++----- envoy/stats/store.h | 9 +++++---- 2 files changed, 11 insertions(+), 9 deletions(-) diff --git a/envoy/stats/allocator.h b/envoy/stats/allocator.h index 6f9cc9715ea43..2924ebf0ab303 100644 --- a/envoy/stats/allocator.h +++ b/envoy/stats/allocator.h @@ -70,11 +70,12 @@ class Allocator { virtual void markTextReadoutForDeletion(const TextReadoutSharedPtr& text_readout) PURE; /** - * Iterate over all stats that need to be sinked. 
Note, that implementations can potentially hold - * on to a mutex that will deadlock if the passed in functors try to create or delete a stat. - * @param f_size functor that is provided the number of all sinked stats. Note this is called - * only once, prior to any calls to f_stat. - * @param f_stat functor that is provided one sinked stat at a time. + * Iterate over all stats that need to be added to a sink. Note, that implementations can + * potentially hold on to a mutex that will deadlock if the passed in functors try to create + * or delete a stat. + * @param f_size functor that is provided the number of all stats in the sink. Note this is + * called only once, prior to any calls to f_stat. + * @param f_stat functor that is provided one stat in the sink at a time. */ virtual void forEachCounter(std::function f_size, std::function f_stat) const PURE; diff --git a/envoy/stats/store.h b/envoy/stats/store.h index a682fb0cd3d5f..3d456bbe7bec9 100644 --- a/envoy/stats/store.h +++ b/envoy/stats/store.h @@ -51,10 +51,11 @@ class Store : public Scope { virtual std::vector histograms() const PURE; /** - * Iterate over all stats that need to be sinked. Note, that implementations can potentially hold - * on to a mutex that will deadlock if the passed in functors try to create or delete a stat. - * @param f_size functor that is provided the number of all sinked stats. - * @param f_stat functor that is provided one sinked stat at a time. + * Iterate over all stats that need to be added to a sink. Note, that implementations can + * potentially hold on to a mutex that will deadlock if the passed in functors try to create + * or delete a stat. + * @param f_size functor that is provided the number of all stats in the sink. + * @param f_stat functor that is provided one stat in the sink at a time. 
*/ virtual void forEachCounter(std::function f_size, std::function f_stat) const PURE; From ac396dd40fa720c5632358cfb1670e907986e41f Mon Sep 17 00:00:00 2001 From: alyssawilk Date: Fri, 10 Sep 2021 13:08:40 -0400 Subject: [PATCH 012/121] decompressor: fixing a bug for requests with added trailers (#18055) Signed-off-by: Alyssa Wilk Signed-off-by: gayang --- docs/root/version_history/current.rst | 1 + source/common/http/filter_manager.cc | 12 +++- source/common/runtime/runtime_features.cc | 1 + .../filters/http/decompressor/BUILD | 1 + .../decompressor_filter_integration_test.cc | 64 +++++++++++++++++++ 5 files changed, 78 insertions(+), 1 deletion(-) diff --git a/docs/root/version_history/current.rst b/docs/root/version_history/current.rst index c8e227d34b58d..bdc1e5bb7e920 100644 --- a/docs/root/version_history/current.rst +++ b/docs/root/version_history/current.rst @@ -76,6 +76,7 @@ Bug Fixes * aws request signer: fix the AWS Request Signer extension to correctly normalize the path and query string to be signed according to AWS' guidelines, so that the hash on the server side matches. See `AWS SigV4 documentaion `_. * cluster: delete pools when they're idle to fix unbounded memory use when using PROXY protocol upstream with tcp_proxy. This behavior can be temporarily reverted by setting the ``envoy.reloadable_features.conn_pool_delete_when_idle`` runtime guard to false. * cluster: finish cluster warming even if hosts are removed before health check initialization. This only affected clusters with :ref:`ignore_health_on_host_removal `. +* compressor: fix a bug where if trailers were added and a subsequent filter paused the filter chain, the request could be stalled. This behavior can be reverted by setting ``envoy.reloadable_features.fix_added_trailers`` to false. * dynamic forward proxy: fixing a validation bug where san and sni checks were not applied setting :ref:`http_protocol_options ` via :ref:`typed_extension_protocol_options `. 
* ext_authz: fix the ext_authz filter to correctly merge multiple same headers using the ',' as separator in the check request to the external authorization service. * ext_authz: the network ext_authz filter now correctly sets dynamic metdata returned by the authorization service for non-OK responses. This behavior now matches the http ext_authz filter. diff --git a/source/common/http/filter_manager.cc b/source/common/http/filter_manager.cc index 3369ed06c06af..fc15f8041f224 100644 --- a/source/common/http/filter_manager.cc +++ b/source/common/http/filter_manager.cc @@ -604,6 +604,9 @@ void FilterManager::decodeData(ActiveStreamDecoderFilter* filter, Buffer::Instan ScopeTrackerScopeState scope(&*this, dispatcher_); filter_manager_callbacks_.resetIdleTimer(); + const bool fix_added_trailers = + Runtime::runtimeFeatureEnabled("envoy.reloadable_features.fix_added_trailers"); + // If a response is complete or a reset has been sent, filters do not care about further body // data. Just drop it. if (state_.local_complete_) { @@ -690,6 +693,9 @@ void FilterManager::decodeData(ActiveStreamDecoderFilter* filter, Buffer::Instan if (!trailers_exists_at_start && filter_manager_callbacks_.requestTrailers() && trailers_added_entry == decoder_filters_.end()) { + if (fix_added_trailers) { + end_stream = false; + } trailers_added_entry = entry; } @@ -698,7 +704,11 @@ void FilterManager::decodeData(ActiveStreamDecoderFilter* filter, Buffer::Instan // Stop iteration IFF this is not the last filter. If it is the last filter, continue with // processing since we need to handle the case where a terminal filter wants to buffer, but // a previous filter has added trailers. 
- return; + if (fix_added_trailers) { + break; + } else { + return; + } } } diff --git a/source/common/runtime/runtime_features.cc b/source/common/runtime/runtime_features.cc index cf0aeb7ae4807..50cba15839226 100644 --- a/source/common/runtime/runtime_features.cc +++ b/source/common/runtime/runtime_features.cc @@ -64,6 +64,7 @@ constexpr const char* runtime_features[] = { "envoy.reloadable_features.disable_tls_inspector_injection", "envoy.reloadable_features.dont_add_content_length_for_bodiless_requests", "envoy.reloadable_features.enable_compression_without_content_length_header", + "envoy.reloadable_features.fix_added_trailers", "envoy.reloadable_features.grpc_bridge_stats_disabled", "envoy.reloadable_features.grpc_web_fix_non_proto_encoded_response_handling", "envoy.reloadable_features.grpc_json_transcoder_adhere_to_buffer_limits", diff --git a/test/extensions/filters/http/decompressor/BUILD b/test/extensions/filters/http/decompressor/BUILD index 6898a202ba113..6ab72ce93c916 100644 --- a/test/extensions/filters/http/decompressor/BUILD +++ b/test/extensions/filters/http/decompressor/BUILD @@ -40,6 +40,7 @@ envoy_extension_cc_test( "//source/extensions/compression/gzip/decompressor:config", "//source/extensions/filters/http/decompressor:config", "//test/integration:http_integration_lib", + "//test/integration/filters:encoder_decoder_buffer_filter_lib", "//test/mocks/server:factory_context_mocks", "//test/test_common:simulated_time_system_lib", "//test/test_common:utility_lib", diff --git a/test/extensions/filters/http/decompressor/decompressor_filter_integration_test.cc b/test/extensions/filters/http/decompressor/decompressor_filter_integration_test.cc index 30c6f5117ee75..d02ba84b3aa2c 100644 --- a/test/extensions/filters/http/decompressor/decompressor_filter_integration_test.cc +++ b/test/extensions/filters/http/decompressor/decompressor_filter_integration_test.cc @@ -286,4 +286,68 @@ TEST_P(DecompressorIntegrationTest, BidirectionalDecompressionError) { 
"http.config_test.decompressor.testlib.gzip.decompressor_library.zlib_data_error", 3); } +// Buffer the request after it's been decompressed. +TEST_P(DecompressorIntegrationTest, DecompressAndBuffer) { + // filters are prepended, so add them in reverse order + + config_helper_.addFilter("{ name: encoder-decoder-buffer-filter, typed_config: { \"@type\": " + "type.googleapis.com/google.protobuf.Empty } }"); + + config_helper_.addFilter(R"EOF( + name: envoy.filters.http.decompressor + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.decompressor.v3.Decompressor + decompressor_library: + name: gzip_default + typed_config: + "@type": "type.googleapis.com/envoy.extensions.compression.gzip.decompressor.v3.Gzip" + window_bits: 15 + chunk_size: 8192 + request_direction_config: + common_config: + enabled: + default_value: true + runtime_key: request_decompressor_enabled + response_direction_config: + common_config: + enabled: + default_value: false + runtime_key: response_decompressor_enabled + )EOF"); + + initialize(); + codec_client_ = makeHttpConnection(lookupPort("http")); + auto encoder_decoder = + codec_client_->startRequest(Http::TestRequestHeaderMapImpl{{":method", "POST"}, + {":scheme", "http"}, + {":path", "/test/long/url"}, + {"content-encoding", "gzip"}, + {":authority", "host"}}); + + auto request_encoder = &encoder_decoder.first; + auto response = std::move(encoder_decoder.second); + + // Compressed JSON. 
+ constexpr uint8_t buffer[] = {0x1f, 0x8b, 0x08, 0x00, 0x9c, 0xb3, 0x38, 0x61, 0x00, 0x03, 0xab, + 0x56, 0x50, 0xca, 0xad, 0x4c, 0x29, 0xcd, 0xcd, 0xad, 0x54, 0x52, + 0xb0, 0x52, 0x50, 0xca, 0x2a, 0xce, 0xcf, 0x53, 0x52, 0xa8, 0xe5, + 0x02, 0x00, 0xa6, 0x6a, 0x24, 0x99, 0x17, 0x00, 0x00, 0x00}; + Buffer::OwnedImpl data(buffer, 43); + codec_client_->sendData(*request_encoder, data, true); + + waitForNextUpstreamRequest(); + + upstream_request_->encodeHeaders(Http::TestResponseHeaderMapImpl{{":status", "200"}}, false); + upstream_request_->encodeData(10, true); + + ASSERT_TRUE(response->waitForEndStream()); + EXPECT_TRUE(response->complete()); + + Stats::Store& stats = test_server_->server().stats(); + Stats::CounterSharedPtr counter = TestUtility::findCounter( + stats, "http.config_test.decompressor.gzip_default.gzip.request.decompressed"); + ASSERT_NE(nullptr, counter); + EXPECT_EQ(1L, counter->value()); +} + } // namespace Envoy From 267b1217438fa8620958a013d8811d73ea092bf3 Mon Sep 17 00:00:00 2001 From: Jose Ulises Nino Rivera Date: Fri, 10 Sep 2021 10:08:59 -0700 Subject: [PATCH 013/121] dns cache: log event with addresses on resolution (#18036) Signed-off-by: Jose Nino Signed-off-by: gayang --- source/common/common/stl_helpers.h | 3 ++- .../common/dynamic_forward_proxy/dns_cache_impl.cc | 7 ++++++- 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/source/common/common/stl_helpers.h b/source/common/common/stl_helpers.h index 674372f64eaa1..9c1ab9498b330 100644 --- a/source/common/common/stl_helpers.h +++ b/source/common/common/stl_helpers.h @@ -28,7 +28,8 @@ std::string accumulateToString(const ContainerT& source, if (source.empty()) { return "[]"; } - return std::accumulate(std::next(source.begin()), source.end(), "[" + string_func(source[0]), + return std::accumulate(std::next(source.begin()), source.end(), + "[" + string_func(*source.begin()), [string_func](std::string acc, const T& element) { return acc + ", " + string_func(element); }) + diff 
--git a/source/extensions/common/dynamic_forward_proxy/dns_cache_impl.cc b/source/extensions/common/dynamic_forward_proxy/dns_cache_impl.cc index 7a208b9f1398d..604004c565cdf 100644 --- a/source/extensions/common/dynamic_forward_proxy/dns_cache_impl.cc +++ b/source/extensions/common/dynamic_forward_proxy/dns_cache_impl.cc @@ -2,6 +2,7 @@ #include "envoy/extensions/common/dynamic_forward_proxy/v3/dns_cache.pb.h" +#include "source/common/common/stl_helpers.h" #include "source/common/config/utility.h" #include "source/common/http/utility.h" #include "source/common/network/resolver_impl.h" @@ -289,7 +290,11 @@ void DnsCacheImpl::finishResolve(const std::string& host, Network::DnsResolver::ResolutionStatus status, std::list&& response, bool from_cache) { ASSERT(main_thread_dispatcher_.isThreadSafe()); - ENVOY_LOG(debug, "main thread resolve complete for host '{}'. {} results", host, response.size()); + ENVOY_LOG_EVENT(debug, "dns_cache_finish_resolve", + "main thread resolve complete for host '{}': {}", host, + accumulateToString(response, [](const auto& dns_response) { + return dns_response.address_->asString(); + })); // Functions like this one that modify primary_hosts_ are only called in the main thread so we // know it is safe to use the PrimaryHostInfo pointers outside of the lock. 
From f44c757f3da5a7ad58073d4786c6bd5f83ccd0ba Mon Sep 17 00:00:00 2001 From: Adam Kotwasinski Date: Fri, 10 Sep 2021 10:11:50 -0700 Subject: [PATCH 014/121] Kafka-mesh filter (#11936) Signed-off-by: Adam Kotwasinski Signed-off-by: gayang --- api/BUILD | 1 + .../filters/network/kafka_mesh/v3alpha/BUILD | 9 + .../kafka_mesh/v3alpha/kafka_mesh.proto | 58 ++ api/versioning/BUILD | 1 + bazel/BUILD | 5 + bazel/repository_locations.bzl | 4 +- contrib/contrib_build_config.bzl | 1 + contrib/extensions_metadata.yaml | 5 + .../kafka/filters/network/source/mesh/BUILD | 24 + .../filters/network/source/mesh/config.cc | 55 ++ .../filters/network/source/mesh/config.h | 33 + .../network/source/mesh/upstream_config.cc | 93 +++ .../network/source/mesh/upstream_config.h | 41 +- contrib/kafka/filters/network/test/mesh/BUILD | 19 + .../network/test/mesh/config_unit_test.cc | 81 +++ .../network/test/mesh/integration_test/BUILD | 31 + .../integration_test/envoy_config_yaml.j2 | 34 + .../kafka_mesh_integration_test.py | 650 ++++++++++++++++++ .../kafka_server_properties.j2 | 31 + .../integration_test/zookeeper_properties.j2 | 5 + .../test/mesh/upstream_config_unit_test.cc | 143 ++++ .../network_filters/kafka_broker_filter.rst | 4 + .../network_filters/kafka_mesh_filter.rst | 103 +++ .../network_filters/network_filters.rst | 1 + generated_api_shadow/BUILD | 1 + .../filters/network/kafka_mesh/v3alpha/BUILD | 9 + .../kafka_mesh/v3alpha/kafka_mesh.proto | 58 ++ tools/code_format/check_format.py | 2 + 28 files changed, 1497 insertions(+), 5 deletions(-) create mode 100644 api/contrib/envoy/extensions/filters/network/kafka_mesh/v3alpha/BUILD create mode 100644 api/contrib/envoy/extensions/filters/network/kafka_mesh/v3alpha/kafka_mesh.proto create mode 100644 contrib/kafka/filters/network/source/mesh/config.cc create mode 100644 contrib/kafka/filters/network/source/mesh/config.h create mode 100644 contrib/kafka/filters/network/source/mesh/upstream_config.cc create mode 100644 
contrib/kafka/filters/network/test/mesh/config_unit_test.cc create mode 100644 contrib/kafka/filters/network/test/mesh/integration_test/BUILD create mode 100644 contrib/kafka/filters/network/test/mesh/integration_test/envoy_config_yaml.j2 create mode 100644 contrib/kafka/filters/network/test/mesh/integration_test/kafka_mesh_integration_test.py create mode 100644 contrib/kafka/filters/network/test/mesh/integration_test/kafka_server_properties.j2 create mode 100644 contrib/kafka/filters/network/test/mesh/integration_test/zookeeper_properties.j2 create mode 100644 contrib/kafka/filters/network/test/mesh/upstream_config_unit_test.cc create mode 100644 docs/root/configuration/listeners/network_filters/kafka_mesh_filter.rst create mode 100644 generated_api_shadow/contrib/envoy/extensions/filters/network/kafka_mesh/v3alpha/BUILD create mode 100644 generated_api_shadow/contrib/envoy/extensions/filters/network/kafka_mesh/v3alpha/kafka_mesh.proto diff --git a/api/BUILD b/api/BUILD index 5bbde32946b63..93f9184a2b400 100644 --- a/api/BUILD +++ b/api/BUILD @@ -60,6 +60,7 @@ proto_library( "//contrib/envoy/extensions/filters/http/squash/v3:pkg", "//contrib/envoy/extensions/filters/http/sxg/v3alpha:pkg", "//contrib/envoy/extensions/filters/network/kafka_broker/v3:pkg", + "//contrib/envoy/extensions/filters/network/kafka_mesh/v3alpha:pkg", "//contrib/envoy/extensions/filters/network/mysql_proxy/v3:pkg", "//contrib/envoy/extensions/filters/network/postgres_proxy/v3alpha:pkg", "//contrib/envoy/extensions/filters/network/rocketmq_proxy/v3:pkg", diff --git a/api/contrib/envoy/extensions/filters/network/kafka_mesh/v3alpha/BUILD b/api/contrib/envoy/extensions/filters/network/kafka_mesh/v3alpha/BUILD new file mode 100644 index 0000000000000..ee92fb652582e --- /dev/null +++ b/api/contrib/envoy/extensions/filters/network/kafka_mesh/v3alpha/BUILD @@ -0,0 +1,9 @@ +# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
+ +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], +) diff --git a/api/contrib/envoy/extensions/filters/network/kafka_mesh/v3alpha/kafka_mesh.proto b/api/contrib/envoy/extensions/filters/network/kafka_mesh/v3alpha/kafka_mesh.proto new file mode 100644 index 0000000000000..03a6522852ab5 --- /dev/null +++ b/api/contrib/envoy/extensions/filters/network/kafka_mesh/v3alpha/kafka_mesh.proto @@ -0,0 +1,58 @@ +syntax = "proto3"; + +package envoy.extensions.filters.network.kafka_mesh.v3alpha; + +import "udpa/annotations/status.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.filters.network.kafka_mesh.v3alpha"; +option java_outer_classname = "KafkaMeshProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).work_in_progress = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: Kafka Mesh] +// Kafka Mesh :ref:`configuration overview `. +// [#extension: envoy.filters.network.kafka_mesh] + +message KafkaMesh { + // Envoy's host that's advertised to clients. + // Has the same meaning as corresponding Kafka broker properties. + // Usually equal to filter chain's listener config, but needs to be reachable by clients + // (so 0.0.0.0 will not work). + string advertised_host = 1 [(validate.rules).string = {min_len: 1}]; + + // Envoy's port that's advertised to clients. + int32 advertised_port = 2 [(validate.rules).int32 = {gt: 0}]; + + // Upstream clusters this filter will connect to. + repeated KafkaClusterDefinition upstream_clusters = 3; + + // Rules that will decide which cluster gets which request. + repeated ForwardingRule forwarding_rules = 4; +} + +message KafkaClusterDefinition { + // Cluster name. + string cluster_name = 1 [(validate.rules).string = {min_len: 1}]; + + // Kafka cluster address. 
+ string bootstrap_servers = 2 [(validate.rules).string = {min_len: 1}]; + + // Default number of partitions present in this cluster. + // This is especially important for clients that do not specify partition in their payloads and depend on this value for hashing. + int32 partition_count = 3 [(validate.rules).int32 = {gt: 0}]; + + // Custom configuration passed to Kafka producer. + map producer_config = 4; +} + +message ForwardingRule { + // Cluster name. + string target_cluster = 1; + + oneof trigger { + // Intended place for future types of forwarding rules. + string topic_prefix = 2; + } +} diff --git a/api/versioning/BUILD b/api/versioning/BUILD index 52cb8c09eaf81..61af4c4764680 100644 --- a/api/versioning/BUILD +++ b/api/versioning/BUILD @@ -12,6 +12,7 @@ proto_library( "//contrib/envoy/extensions/filters/http/squash/v3:pkg", "//contrib/envoy/extensions/filters/http/sxg/v3alpha:pkg", "//contrib/envoy/extensions/filters/network/kafka_broker/v3:pkg", + "//contrib/envoy/extensions/filters/network/kafka_mesh/v3alpha:pkg", "//contrib/envoy/extensions/filters/network/mysql_proxy/v3:pkg", "//contrib/envoy/extensions/filters/network/postgres_proxy/v3alpha:pkg", "//contrib/envoy/extensions/filters/network/rocketmq_proxy/v3:pkg", diff --git a/bazel/BUILD b/bazel/BUILD index 016482a577f3e..303ab531bead3 100644 --- a/bazel/BUILD +++ b/bazel/BUILD @@ -586,3 +586,8 @@ alias( name = "remote_jdk11", actual = "@bazel_tools//tools/jdk:remote_jdk11", ) + +alias( + name = "windows", + actual = "@bazel_tools//src/conditions:windows", +) diff --git a/bazel/repository_locations.bzl b/bazel/repository_locations.bzl index b817e6efffab7..75fa3544bcf83 100644 --- a/bazel/repository_locations.bzl +++ b/bazel/repository_locations.bzl @@ -935,7 +935,7 @@ REPOSITORY_LOCATIONS_SPEC = dict( strip_prefix = "kafka-{version}/clients/src/main/resources/common/message", urls = ["https://github.com/apache/kafka/archive/{version}.zip"], use_category = ["dataplane_ext"], - extensions = 
["envoy.filters.network.kafka_broker"], + extensions = ["envoy.filters.network.kafka_broker", "envoy.filters.network.kafka_mesh"], release_date = "2020-03-03", cpe = "cpe:2.3:a:apache:kafka:*", ), @@ -948,7 +948,7 @@ REPOSITORY_LOCATIONS_SPEC = dict( strip_prefix = "librdkafka-{version}", urls = ["https://github.com/edenhill/librdkafka/archive/v{version}.tar.gz"], use_category = ["dataplane_ext"], - extensions = ["envoy.filters.network.kafka_broker"], + extensions = ["envoy.filters.network.kafka_mesh"], release_date = "2021-05-10", cpe = "N/A", ), diff --git a/contrib/contrib_build_config.bzl b/contrib/contrib_build_config.bzl index 34ef00af9fd15..f27001d971be8 100644 --- a/contrib/contrib_build_config.bzl +++ b/contrib/contrib_build_config.bzl @@ -12,6 +12,7 @@ CONTRIB_EXTENSIONS = { # "envoy.filters.network.kafka_broker": "//contrib/kafka/filters/network/source:kafka_broker_config_lib", + "envoy.filters.network.kafka_mesh": "//contrib/kafka/filters/network/source/mesh:config_lib", "envoy.filters.network.mysql_proxy": "//contrib/mysql_proxy/filters/network/source:config", "envoy.filters.network.postgres_proxy": "//contrib/postgres_proxy/filters/network/source:config", "envoy.filters.network.rocketmq_proxy": "//contrib/rocketmq_proxy/filters/network/source:config", diff --git a/contrib/extensions_metadata.yaml b/contrib/extensions_metadata.yaml index c3ccc61e53ee1..8614d2dbddb83 100644 --- a/contrib/extensions_metadata.yaml +++ b/contrib/extensions_metadata.yaml @@ -13,6 +13,11 @@ envoy.filters.network.kafka_broker: - envoy.filters.network security_posture: requires_trusted_downstream_and_upstream status: wip +envoy.filters.network.kafka_mesh: + categories: + - envoy.filters.network + security_posture: requires_trusted_downstream_and_upstream + status: wip envoy.filters.network.rocketmq_proxy: categories: - envoy.filters.network diff --git a/contrib/kafka/filters/network/source/mesh/BUILD b/contrib/kafka/filters/network/source/mesh/BUILD index 
fe24168a884b0..f457afee713ea 100644 --- a/contrib/kafka/filters/network/source/mesh/BUILD +++ b/contrib/kafka/filters/network/source/mesh/BUILD @@ -1,5 +1,6 @@ load( "//bazel:envoy_build_system.bzl", + "envoy_cc_contrib_extension", "envoy_cc_library", "envoy_contrib_package", ) @@ -10,6 +11,25 @@ licenses(["notice"]) # Apache 2 envoy_contrib_package() # Kafka-mesh network filter. +# Mesh filter public docs: docs/root/configuration/listeners/network_filters/kafka_mesh_filter.rst + +envoy_cc_contrib_extension( + name = "config_lib", + srcs = ["config.cc"], + hdrs = ["config.h"], + deps = [ + "//envoy/registry", + "//source/extensions/filters/network/common:factory_base_lib", + "@envoy_api//contrib/envoy/extensions/filters/network/kafka_mesh/v3alpha:pkg_cc_proto", + ] + select({ + "//bazel:windows": [], + "//conditions:default": [ + ":filter_lib", + ":upstream_config_lib", + ":upstream_kafka_facade_lib", + ], + }), +) envoy_cc_library( name = "filter_lib", @@ -121,11 +141,15 @@ envoy_cc_library( envoy_cc_library( name = "upstream_config_lib", srcs = [ + "upstream_config.cc", ], hdrs = [ "upstream_config.h", ], tags = ["skip_on_windows"], deps = [ + "//source/common/common:assert_lib", + "//source/common/common:minimal_logger_lib", + "@envoy_api//contrib/envoy/extensions/filters/network/kafka_mesh/v3alpha:pkg_cc_proto", ], ) diff --git a/contrib/kafka/filters/network/source/mesh/config.cc b/contrib/kafka/filters/network/source/mesh/config.cc new file mode 100644 index 0000000000000..7c2a1f4e2474c --- /dev/null +++ b/contrib/kafka/filters/network/source/mesh/config.cc @@ -0,0 +1,55 @@ +#include "contrib/kafka/filters/network/source/mesh/config.h" + +#include "envoy/registry/registry.h" +#include "envoy/server/filter_config.h" +#include "envoy/stats/scope.h" + +#ifndef WIN32 +#include "contrib/kafka/filters/network/source/mesh/upstream_config.h" +#include "contrib/kafka/filters/network/source/mesh/upstream_kafka_facade.h" +#include 
"contrib/kafka/filters/network/source/mesh/filter.h" +#else +#include "envoy/common/exception.h" +#endif + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace Kafka { +namespace Mesh { + +// The mesh filter doesn't do anything special, it just sets up the shared entities. +// Any extra configuration validation is done in UpstreamKafkaConfiguration constructor. +Network::FilterFactoryCb KafkaMeshConfigFactory::createFilterFactoryFromProtoTyped( + const KafkaMeshProtoConfig& config, Server::Configuration::FactoryContext& context) { + +#ifdef WIN32 + throw EnvoyException("Kafka mesh filter is not supported on Windows"); +#else + // Shared configuration (tells us where the upstream clusters are). + const UpstreamKafkaConfigurationSharedPtr configuration = + std::make_shared(config); + + // Shared upstream facade (connects us to upstream Kafka clusters). + const UpstreamKafkaFacadeSharedPtr upstream_kafka_facade = + std::make_shared(*configuration, context.threadLocal(), + context.api().threadFactory()); + + return [configuration, upstream_kafka_facade](Network::FilterManager& filter_manager) -> void { + Network::ReadFilterSharedPtr filter = + std::make_shared(*configuration, *upstream_kafka_facade); + filter_manager.addReadFilter(filter); + }; +#endif +} + +/** + * Static registration for the Kafka filter. @see RegisterFactory. 
+ */ +REGISTER_FACTORY(KafkaMeshConfigFactory, Server::Configuration::NamedNetworkFilterConfigFactory); + +} // namespace Mesh +} // namespace Kafka +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/contrib/kafka/filters/network/source/mesh/config.h b/contrib/kafka/filters/network/source/mesh/config.h new file mode 100644 index 0000000000000..12ba71691bbca --- /dev/null +++ b/contrib/kafka/filters/network/source/mesh/config.h @@ -0,0 +1,33 @@ +#pragma once + +#include "source/extensions/filters/network/common/factory_base.h" + +#include "contrib/envoy/extensions/filters/network/kafka_mesh/v3alpha/kafka_mesh.pb.h" +#include "contrib/envoy/extensions/filters/network/kafka_mesh/v3alpha/kafka_mesh.pb.validate.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace Kafka { +namespace Mesh { + +using KafkaMeshProtoConfig = envoy::extensions::filters::network::kafka_mesh::v3alpha::KafkaMesh; + +/** + * Config registration for the Kafka mesh filter. 
+ */ +class KafkaMeshConfigFactory : public Common::FactoryBase { +public: + KafkaMeshConfigFactory() : FactoryBase("envoy.filters.network.kafka_mesh", true) {} + +private: + Network::FilterFactoryCb + createFilterFactoryFromProtoTyped(const KafkaMeshProtoConfig& config, + Server::Configuration::FactoryContext& context) override; +}; + +} // namespace Mesh +} // namespace Kafka +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/contrib/kafka/filters/network/source/mesh/upstream_config.cc b/contrib/kafka/filters/network/source/mesh/upstream_config.cc new file mode 100644 index 0000000000000..8e6917df034c8 --- /dev/null +++ b/contrib/kafka/filters/network/source/mesh/upstream_config.cc @@ -0,0 +1,93 @@ +#include "contrib/kafka/filters/network/source/mesh/upstream_config.h" + +#include "envoy/common/exception.h" + +#include "source/common/common/assert.h" + +#include "absl/strings/str_cat.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace Kafka { +namespace Mesh { + +using KafkaClusterDefinition = + envoy::extensions::filters::network::kafka_mesh::v3alpha::KafkaClusterDefinition; +using ForwardingRule = envoy::extensions::filters::network::kafka_mesh::v3alpha::ForwardingRule; + +UpstreamKafkaConfigurationImpl::UpstreamKafkaConfigurationImpl(const KafkaMeshProtoConfig& config) + : advertised_address_{config.advertised_host(), config.advertised_port()} { + + // Processing cluster data. + const auto& upstream_clusters = config.upstream_clusters(); + if (upstream_clusters.empty()) { + throw EnvoyException("kafka-mesh filter needs to have at least one upstream Kafka cluster"); + } + + // Processing cluster configuration. + std::map cluster_name_to_cluster_config; + for (const auto& upstream_cluster_definition : upstream_clusters) { + const std::string& cluster_name = upstream_cluster_definition.cluster_name(); + + // No duplicates are allowed. 
+ if (cluster_name_to_cluster_config.find(cluster_name) != cluster_name_to_cluster_config.end()) { + throw EnvoyException( + absl::StrCat("kafka-mesh filter has multiple Kafka clusters referenced by the same name", + cluster_name)); + } + + // Upstream client configuration - use all the optional custom configs provided, and then use + // the target IPs. + std::map producer_configs = { + upstream_cluster_definition.producer_config().begin(), + upstream_cluster_definition.producer_config().end()}; + producer_configs["bootstrap.servers"] = upstream_cluster_definition.bootstrap_servers(); + ClusterConfig cluster_config = {cluster_name, upstream_cluster_definition.partition_count(), + producer_configs}; + cluster_name_to_cluster_config[cluster_name] = cluster_config; + } + + // Processing forwarding rules. + const auto& forwarding_rules = config.forwarding_rules(); + if (forwarding_rules.empty()) { + throw EnvoyException("kafka-mesh filter needs to have at least one forwarding rule"); + } + + for (const auto& rule : forwarding_rules) { + const std::string& target_cluster = rule.target_cluster(); + ASSERT(rule.trigger_case() == ForwardingRule::TriggerCase::kTopicPrefix); + ENVOY_LOG(trace, "Setting up forwarding rule: {} -> {}", rule.topic_prefix(), target_cluster); + // Each forwarding rule needs to reference a cluster. + if (cluster_name_to_cluster_config.find(target_cluster) == + cluster_name_to_cluster_config.end()) { + throw EnvoyException(absl::StrCat( + "kafka-mesh filter forwarding rule is referencing unknown upstream Kafka cluster: ", + target_cluster)); + } + topic_prefix_to_cluster_config_[rule.topic_prefix()] = + cluster_name_to_cluster_config[target_cluster]; + } +} + +absl::optional +UpstreamKafkaConfigurationImpl::computeClusterConfigForTopic(const std::string& topic) const { + // We find the first matching prefix (this is why ordering is important). 
+ for (const auto& it : topic_prefix_to_cluster_config_) { + if (topic.rfind(it.first, 0) == 0) { + const ClusterConfig cluster_config = it.second; + return absl::make_optional(cluster_config); + } + } + return absl::nullopt; +} + +std::pair UpstreamKafkaConfigurationImpl::getAdvertisedAddress() const { + return advertised_address_; +} + +} // namespace Mesh +} // namespace Kafka +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/contrib/kafka/filters/network/source/mesh/upstream_config.h b/contrib/kafka/filters/network/source/mesh/upstream_config.h index 00e3e7faf32da..ad49f2f1304d6 100644 --- a/contrib/kafka/filters/network/source/mesh/upstream_config.h +++ b/contrib/kafka/filters/network/source/mesh/upstream_config.h @@ -7,7 +7,11 @@ #include "envoy/common/pure.h" +#include "source/common/common/logger.h" + #include "absl/types/optional.h" +#include "contrib/envoy/extensions/filters/network/kafka_mesh/v3alpha/kafka_mesh.pb.h" +#include "contrib/envoy/extensions/filters/network/kafka_mesh/v3alpha/kafka_mesh.pb.validate.h" namespace Envoy { namespace Extensions { @@ -15,6 +19,8 @@ namespace NetworkFilters { namespace Kafka { namespace Mesh { +using KafkaMeshProtoConfig = envoy::extensions::filters::network::kafka_mesh::v3alpha::KafkaMesh; + // Minor helper structure that contains information about upstream Kafka clusters. struct ClusterConfig { @@ -32,23 +38,52 @@ struct ClusterConfig { // This map always contains entry with key 'bootstrap.servers', as this is the only mandatory // producer property. std::map upstream_producer_properties_; + + bool operator==(const ClusterConfig& rhs) const { + return name_ == rhs.name_ && partition_count_ == rhs.partition_count_ && + upstream_producer_properties_ == rhs.upstream_producer_properties_; + } }; /** * Keeps the configuration related to upstream Kafka clusters. 
- * Impl note: current matching from topic to cluster is based on prefix matching but more complex - * rules could be added. */ class UpstreamKafkaConfiguration { public: virtual ~UpstreamKafkaConfiguration() = default; + + // Return this the host-port pair that's provided to Kafka clients. + // This value needs to follow same rules as 'advertised.address' property of Kafka broker. + virtual std::pair getAdvertisedAddress() const PURE; + + // Provides cluster for given Kafka topic, according to the rules contained within this + // configuration object. virtual absl::optional computeClusterConfigForTopic(const std::string& topic) const PURE; - virtual std::pair getAdvertisedAddress() const PURE; }; using UpstreamKafkaConfigurationSharedPtr = std::shared_ptr; +/** + * Implementation that uses only topic-prefix to figure out which Kafka cluster to use. + */ +class UpstreamKafkaConfigurationImpl : public UpstreamKafkaConfiguration, + private Logger::Loggable { +public: + UpstreamKafkaConfigurationImpl(const KafkaMeshProtoConfig& config); + + // UpstreamKafkaConfiguration + absl::optional + computeClusterConfigForTopic(const std::string& topic) const override; + + // UpstreamKafkaConfiguration + std::pair getAdvertisedAddress() const override; + +private: + const std::pair advertised_address_; + std::map topic_prefix_to_cluster_config_; +}; + } // namespace Mesh } // namespace Kafka } // namespace NetworkFilters diff --git a/contrib/kafka/filters/network/test/mesh/BUILD b/contrib/kafka/filters/network/test/mesh/BUILD index acff686d9e163..bc6e740f08842 100644 --- a/contrib/kafka/filters/network/test/mesh/BUILD +++ b/contrib/kafka/filters/network/test/mesh/BUILD @@ -13,6 +13,16 @@ licenses(["notice"]) # Apache 2 envoy_contrib_package() +envoy_cc_test( + name = "config_unit_test", + srcs = ["config_unit_test.cc"], + tags = ["skip_on_windows"], + deps = [ + "//contrib/kafka/filters/network/source/mesh:config_lib", + "//test/mocks/server:factory_context_mocks", + ], +) + 
envoy_cc_test( name = "filter_unit_test", srcs = ["filter_unit_test.cc"], @@ -73,3 +83,12 @@ envoy_cc_test_library( envoy_external_dep_path("librdkafka"), ], ) + +envoy_cc_test( + name = "upstream_config_unit_test", + srcs = ["upstream_config_unit_test.cc"], + tags = ["skip_on_windows"], + deps = [ + "//contrib/kafka/filters/network/source/mesh:upstream_config_lib", + ], +) diff --git a/contrib/kafka/filters/network/test/mesh/config_unit_test.cc b/contrib/kafka/filters/network/test/mesh/config_unit_test.cc new file mode 100644 index 0000000000000..3ac2ad70a64e6 --- /dev/null +++ b/contrib/kafka/filters/network/test/mesh/config_unit_test.cc @@ -0,0 +1,81 @@ +#include "test/mocks/server/factory_context.h" + +#include "contrib/kafka/filters/network/source/mesh/config.h" +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace Kafka { +namespace Mesh { + +class MockThreadFactory : public Thread::ThreadFactory { +public: + MOCK_METHOD(Thread::ThreadPtr, createThread, (std::function, Thread::OptionsOptConstRef)); + MOCK_METHOD(Thread::ThreadId, currentThreadId, ()); +}; + +TEST(KafkaMeshConfigFactoryUnitTest, shouldCreateFilter) { + // given + const std::string yaml = R"EOF( +advertised_host: "127.0.0.1" +advertised_port: 19092 +upstream_clusters: +- cluster_name: kafka_c1 + bootstrap_servers: 127.0.0.1:9092 + partition_count: 1 +- cluster_name: kafka_c2 + bootstrap_servers: 127.0.0.1:9093 + partition_count: 1 +- cluster_name: kafka_c3 + bootstrap_servers: 127.0.0.1:9094 + partition_count: 5 + producer_config: + acks: "1" + linger.ms: "500" +forwarding_rules: +- target_cluster: kafka_c1 + topic_prefix: apples +- target_cluster: kafka_c2 + topic_prefix: bananas +- target_cluster: kafka_c3 + topic_prefix: cherries + )EOF"; + + KafkaMeshProtoConfig proto_config; + TestUtility::loadFromYamlAndValidate(yaml, proto_config); + + testing::NiceMock context; + testing::NiceMock thread_factory; + 
ON_CALL(context.api_, threadFactory()).WillByDefault(ReturnRef(thread_factory)); + KafkaMeshConfigFactory factory; + + Network::FilterFactoryCb cb = factory.createFilterFactoryFromProto(proto_config, context); + Network::MockConnection connection; + EXPECT_CALL(connection, addReadFilter(_)); + + // when + cb(connection); + + // then - connection had `addFilter` invoked +} + +TEST(KafkaMeshConfigFactoryUnitTest, throwsIfAdvertisedPortIsMissing) { + // given + const std::string yaml = R"EOF( +advertised_host: "127.0.0.1" + )EOF"; + + KafkaMeshProtoConfig proto_config; + + // when + // then - exception gets thrown + EXPECT_THROW(TestUtility::loadFromYamlAndValidate(yaml, proto_config), ProtoValidationException); +} + +} // namespace Mesh +} // namespace Kafka +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/contrib/kafka/filters/network/test/mesh/integration_test/BUILD b/contrib/kafka/filters/network/test/mesh/integration_test/BUILD new file mode 100644 index 0000000000000..295dcd6302177 --- /dev/null +++ b/contrib/kafka/filters/network/test/mesh/integration_test/BUILD @@ -0,0 +1,31 @@ +load( + "//bazel:envoy_build_system.bzl", + "envoy_contrib_package", + "envoy_py_test", +) +load("@kafka_pip3//:requirements.bzl", "requirement") + +licenses(["notice"]) # Apache 2 + +envoy_contrib_package() + +# This test sets up multiple services, and this can take variable amount of time (30-60 seconds). 
+envoy_py_test( + name = "kafka_mesh_integration_test", + srcs = [ + "kafka_mesh_integration_test.py", + "@kafka_python_client//:all", + ], + data = [ + "//contrib/exe:envoy-static", + "//bazel:remote_jdk11", + "@kafka_server_binary//:all", + ] + glob(["*.j2"]), + flaky = True, + python_version = "PY3", + srcs_version = "PY3", + deps = [ + requirement("Jinja2"), + requirement("MarkupSafe"), + ], +) diff --git a/contrib/kafka/filters/network/test/mesh/integration_test/envoy_config_yaml.j2 b/contrib/kafka/filters/network/test/mesh/integration_test/envoy_config_yaml.j2 new file mode 100644 index 0000000000000..fbb22d2af3a96 --- /dev/null +++ b/contrib/kafka/filters/network/test/mesh/integration_test/envoy_config_yaml.j2 @@ -0,0 +1,34 @@ +static_resources: + listeners: + - address: + socket_address: + address: 127.0.0.1 + port_value: {{ data['kafka_envoy_port'] }} + filter_chains: + - filters: + - name: requesttypes + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.network.kafka_broker.v3.KafkaBroker + stat_prefix: testfilter + - name: mesh + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.network.kafka_mesh.v3alpha.KafkaMesh + advertised_host: "127.0.0.1" + advertised_port: {{ data['kafka_envoy_port'] }} + upstream_clusters: + - cluster_name: kafka_c1 + bootstrap_servers: 127.0.0.1:{{ data['kafka_real_port1'] }} + partition_count: 1 + - cluster_name: kafka_c2 + bootstrap_servers: 127.0.0.1:{{ data['kafka_real_port2'] }} + partition_count: 1 + forwarding_rules: + - target_cluster: kafka_c1 + topic_prefix: a + - target_cluster: kafka_c2 + topic_prefix: b +admin: + access_log_path: /dev/null + profile_path: /dev/null + address: + socket_address: { address: 127.0.0.1, port_value: {{ data['envoy_monitoring_port'] }} } diff --git a/contrib/kafka/filters/network/test/mesh/integration_test/kafka_mesh_integration_test.py b/contrib/kafka/filters/network/test/mesh/integration_test/kafka_mesh_integration_test.py new file mode 100644 
index 0000000000000..f21145cda99a4 --- /dev/null +++ b/contrib/kafka/filters/network/test/mesh/integration_test/kafka_mesh_integration_test.py @@ -0,0 +1,650 @@ +#!/usr/bin/python + +import random +import os +import shutil +import socket +import subprocess +import tempfile +from threading import Thread, Semaphore +import time +import unittest +import random + +from kafka import KafkaConsumer, KafkaProducer, TopicPartition +import urllib.request + + +class IntegrationTest(unittest.TestCase): + """ + All tests in this class depend on Envoy/Zookeeper/Kafka running. + For each of these tests we are going to create Kafka producers and consumers, with producers + pointing to Envoy (so the records get forwarded to target Kafka clusters) and verifying consumers + pointing to Kafka clusters directly (as mesh filter does not yet support Fetch requests). + We expect every operation to succeed (as they should reach Kafka) and the corresponding metrics + to increase on Envoy side (to show that messages were received and forwarded successfully). + """ + + services = None + + @classmethod + def setUpClass(cls): + IntegrationTest.services = ServicesHolder() + IntegrationTest.services.start() + + @classmethod + def tearDownClass(cls): + IntegrationTest.services.shut_down() + + def setUp(self): + # We want to check if our services are okay before running any kind of test. + IntegrationTest.services.check_state() + self.metrics = MetricsHolder(self) + + def tearDown(self): + # We want to check if our services are okay after running any test. 
+ IntegrationTest.services.check_state() + + @classmethod + def kafka_envoy_address(cls): + return '127.0.0.1:%s' % IntegrationTest.services.kafka_envoy_port + + @classmethod + def kafka_cluster1_address(cls): + return '127.0.0.1:%s' % IntegrationTest.services.kafka_real_port1 + + @classmethod + def kafka_cluster2_address(cls): + return '127.0.0.1:%s' % IntegrationTest.services.kafka_real_port2 + + @classmethod + def envoy_stats_address(cls): + return 'http://127.0.0.1:%s/stats' % IntegrationTest.services.envoy_monitoring_port + + def test_producing(self): + """ + This test verifies that producer can send messages through mesh filter. + We are going to send messages to two topics: 'apples' and 'bananas'. + The mesh filter is configured to forward records for topics starting with 'a' (like 'apples') + to the first cluster, and the ones starting with 'b' (so 'bananas') to the second one. + + We are going to send messages one by one, so they will not be batched in Kafka producer, + so the filter is going to receive them one by one too. + + After sending, the consumers are going to read from Kafka clusters directly to make sure that + nothing was lost. 
+ """ + + messages_to_send = 100 + partition1 = TopicPartition('apples', 0) + partition2 = TopicPartition('bananas', 0) + + producer = KafkaProducer( + bootstrap_servers=IntegrationTest.kafka_envoy_address(), api_version=(1, 0, 0)) + offset_to_payload1 = {} + offset_to_payload2 = {} + for _ in range(messages_to_send): + payload = bytearray(random.getrandbits(8) for _ in range(5)) + future1 = producer.send( + value=payload, topic=partition1.topic, partition=partition1.partition) + self.assertTrue(future1.get().offset >= 0) + offset_to_payload1[future1.get().offset] = payload + + future2 = producer.send( + value=payload, topic=partition2.topic, partition=partition2.partition) + self.assertTrue(future2.get().offset >= 0) + offset_to_payload2[future2.get().offset] = payload + self.assertTrue(len(offset_to_payload1) == messages_to_send) + self.assertTrue(len(offset_to_payload2) == messages_to_send) + producer.close() + + # Check the target clusters. + self.__verify_target_kafka_cluster( + IntegrationTest.kafka_cluster1_address(), partition1, offset_to_payload1, partition2) + self.__verify_target_kafka_cluster( + IntegrationTest.kafka_cluster2_address(), partition2, offset_to_payload2, partition1) + + # Check if requests have been received. + self.metrics.collect_final_metrics() + self.metrics.assert_metric_increase('produce', 200) + + def test_producing_with_batched_records(self): + """ + Compared to previous test, we are going to have batching in Kafka producers (this is caused by high 'linger.ms' value). + So a single request that reaches a Kafka broker might be carrying more than one record, for different partitions. + """ + messages_to_send = 100 + partition1 = TopicPartition('apricots', 0) + partition2 = TopicPartition('berries', 0) + + # This ensures that records to 'apricots' and 'berries' partitions. 
+ producer = KafkaProducer( + bootstrap_servers=IntegrationTest.kafka_envoy_address(), + api_version=(1, 0, 0), + linger_ms=1000, + batch_size=100) + future_to_payload1 = {} + future_to_payload2 = {} + for _ in range(messages_to_send): + payload = bytearray(random.getrandbits(8) for _ in range(5)) + future1 = producer.send( + value=payload, topic=partition1.topic, partition=partition1.partition) + future_to_payload1[future1] = payload + + payload = bytearray(random.getrandbits(8) for _ in range(5)) + future2 = producer.send( + value=payload, topic=partition2.topic, partition=partition2.partition) + future_to_payload2[future2] = payload + + offset_to_payload1 = {} + offset_to_payload2 = {} + for future in future_to_payload1.keys(): + offset_to_payload1[future.get().offset] = future_to_payload1[future] + self.assertTrue(future.get().offset >= 0) + for future in future_to_payload2.keys(): + offset_to_payload2[future.get().offset] = future_to_payload2[future] + self.assertTrue(future.get().offset >= 0) + self.assertTrue(len(offset_to_payload1) == messages_to_send) + self.assertTrue(len(offset_to_payload2) == messages_to_send) + producer.close() + + # Check the target clusters. + self.__verify_target_kafka_cluster( + IntegrationTest.kafka_cluster1_address(), partition1, offset_to_payload1, partition2) + self.__verify_target_kafka_cluster( + IntegrationTest.kafka_cluster2_address(), partition2, offset_to_payload2, partition1) + + # Check if requests have been received. + self.metrics.collect_final_metrics() + self.metrics.assert_metric_increase('produce', 1) + + def __verify_target_kafka_cluster( + self, bootstrap_servers, partition, offset_to_payload_map, other_partition): + # Check if records were properly forwarded to the cluster. 
+ consumer = KafkaConsumer(bootstrap_servers=bootstrap_servers, auto_offset_reset='earliest') + consumer.assign([partition]) + received_messages = [] + while (len(received_messages) < len(offset_to_payload_map)): + poll_result = consumer.poll(timeout_ms=1000) + received_messages += poll_result[partition] + self.assertTrue(len(received_messages) == len(offset_to_payload_map)) + for record in received_messages: + self.assertTrue(record.value == offset_to_payload_map[record.offset]) + + # Check that no records were incorrectly routed from the "other" partition (they would have created the topics). + self.assertTrue(other_partition.topic not in consumer.topics()) + consumer.close(False) + + +class MetricsHolder: + """ + Utility for storing Envoy metrics. + Expected to be created before the test (to get initial metrics), and then to collect them at the + end of test, so the expected increases can be verified. + """ + + def __init__(self, owner): + self.owner = owner + self.initial_requests, self.inital_responses = MetricsHolder.get_envoy_stats() + self.final_requests = None + self.final_responses = None + + def collect_final_metrics(self): + self.final_requests, self.final_responses = MetricsHolder.get_envoy_stats() + + def assert_metric_increase(self, message_type, count): + request_type = message_type + '_request' + response_type = message_type + '_response' + + initial_request_value = self.initial_requests.get(request_type, 0) + final_request_value = self.final_requests.get(request_type, 0) + self.owner.assertGreaterEqual(final_request_value, initial_request_value + count) + + initial_response_value = self.inital_responses.get(response_type, 0) + final_response_value = self.final_responses.get(response_type, 0) + self.owner.assertGreaterEqual(final_response_value, initial_response_value + count) + + @staticmethod + def get_envoy_stats(): + """ + Grab request/response metrics from envoy's stats interface. 
+ """ + + stats_url = IntegrationTest.envoy_stats_address() + requests = {} + responses = {} + with urllib.request.urlopen(stats_url) as remote_metrics_url: + payload = remote_metrics_url.read().decode() + lines = payload.splitlines() + for line in lines: + request_prefix = 'kafka.testfilter.request.' + response_prefix = 'kafka.testfilter.response.' + if line.startswith(request_prefix): + data = line[len(request_prefix):].split(': ') + requests[data[0]] = int(data[1]) + pass + if line.startswith(response_prefix) and '_response:' in line: + data = line[len(response_prefix):].split(': ') + responses[data[0]] = int(data[1]) + return [requests, responses] + + +class ServicesHolder: + """ + Utility class for setting up our external dependencies: Envoy, Zookeeper + and two Kafka clusters (single-broker each). + """ + + def __init__(self): + self.kafka_tmp_dir = None + + self.envoy_worker = None + self.zk_worker = None + self.kafka_workers = None + + @staticmethod + def get_random_listener_port(): + """ + Here we count on OS to give us some random socket. + Obviously this method will need to be invoked in a try loop anyways, as in degenerate scenario + someone else might have bound to it after we had closed the socket and before the service + that's supposed to use it binds to it. + """ + + import socket + with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as server_socket: + server_socket.bind(('0.0.0.0', 0)) + socket_port = server_socket.getsockname()[1] + print('returning %s' % socket_port) + return socket_port + + def start(self): + """ + Starts all the services we need for integration tests. + """ + + # Find java installation that we are going to use to start Zookeeper & Kafka. 
+ java_directory = ServicesHolder.find_java() + + launcher_environment = os.environ.copy() + # Make `java` visible to build script: + # https://github.com/apache/kafka/blob/2.2.0/bin/kafka-run-class.sh#L226 + new_path = os.path.abspath(java_directory) + os.pathsep + launcher_environment['PATH'] + launcher_environment['PATH'] = new_path + # Both ZK & Kafka use Kafka launcher script. + # By default it sets up JMX options: + # https://github.com/apache/kafka/blob/2.2.0/bin/kafka-run-class.sh#L167 + # But that forces the JVM to load file that is not present due to: + # https://docs.oracle.com/javase/9/management/monitoring-and-management-using-jmx-technology.htm + # Let's make it simple and just disable JMX. + launcher_environment['KAFKA_JMX_OPTS'] = ' ' + + # Setup a temporary directory, which will be used by Kafka & Zookeeper servers. + self.kafka_tmp_dir = tempfile.mkdtemp() + print('Temporary directory used for tests: ' + self.kafka_tmp_dir) + + # This directory will store the configuration files fed to services. + config_dir = self.kafka_tmp_dir + '/config' + os.mkdir(config_dir) + # This directory will store Zookeeper's data (== Kafka server metadata). + zookeeper_store_dir = self.kafka_tmp_dir + '/zookeeper_data' + os.mkdir(zookeeper_store_dir) + # These directories will store Kafka's data (== partitions). + kafka_store_dir1 = self.kafka_tmp_dir + '/kafka_data1' + os.mkdir(kafka_store_dir1) + kafka_store_dir2 = self.kafka_tmp_dir + '/kafka_data2' + os.mkdir(kafka_store_dir2) + + # Find the Kafka server 'bin' directory. + kafka_bin_dir = os.path.join('.', 'external', 'kafka_server_binary', 'bin') + + # Main initialization block: + # - generate random ports, + # - render configuration with these ports, + # - start services and check if they are running okay, + # - if anything is having problems, kill everything and start again. + while True: + + # Generate random ports. 
+ zk_port = ServicesHolder.get_random_listener_port() + kafka_envoy_port = ServicesHolder.get_random_listener_port() + kafka_real_port1 = ServicesHolder.get_random_listener_port() + kafka_real_port2 = ServicesHolder.get_random_listener_port() + envoy_monitoring_port = ServicesHolder.get_random_listener_port() + + # These ports need to be exposed to tests. + self.kafka_envoy_port = kafka_envoy_port + self.kafka_real_port1 = kafka_real_port1 + self.kafka_real_port2 = kafka_real_port2 + self.envoy_monitoring_port = envoy_monitoring_port + + # Render config file for Envoy. + template = RenderingHelper.get_template('envoy_config_yaml.j2') + contents = template.render( + data={ + 'kafka_envoy_port': kafka_envoy_port, + 'kafka_real_port1': kafka_real_port1, + 'kafka_real_port2': kafka_real_port2, + 'envoy_monitoring_port': envoy_monitoring_port + }) + envoy_config_file = os.path.join(config_dir, 'envoy_config.yaml') + with open(envoy_config_file, 'w') as fd: + fd.write(contents) + print('Envoy config file rendered at: ' + envoy_config_file) + + # Render config file for Zookeeper. + template = RenderingHelper.get_template('zookeeper_properties.j2') + contents = template.render(data={'data_dir': zookeeper_store_dir, 'zk_port': zk_port}) + zookeeper_config_file = os.path.join(config_dir, 'zookeeper.properties') + with open(zookeeper_config_file, 'w') as fd: + fd.write(contents) + print('Zookeeper config file rendered at: ' + zookeeper_config_file) + + # Render config file for Kafka cluster 1. 
+ template = RenderingHelper.get_template('kafka_server_properties.j2') + contents = template.render( + data={ + 'kafka_real_port': kafka_real_port1, + 'data_dir': kafka_store_dir1, + 'zk_port': zk_port, + 'kafka_zk_instance': 'instance1' + }) + kafka_config_file1 = os.path.join(config_dir, 'kafka_server1.properties') + with open(kafka_config_file1, 'w') as fd: + fd.write(contents) + print('Kafka config file rendered at: ' + kafka_config_file1) + + # Render config file for Kafka cluster 2. + template = RenderingHelper.get_template('kafka_server_properties.j2') + contents = template.render( + data={ + 'kafka_real_port': kafka_real_port2, + 'data_dir': kafka_store_dir2, + 'zk_port': zk_port, + 'kafka_zk_instance': 'instance2' + }) + kafka_config_file2 = os.path.join(config_dir, 'kafka_server2.properties') + with open(kafka_config_file2, 'w') as fd: + fd.write(contents) + print('Kafka config file rendered at: ' + kafka_config_file2) + + # Start the services now. + try: + + # Start Envoy in the background, pointing to rendered config file. + envoy_binary = ServicesHolder.find_envoy() + # --base-id is added to allow multiple Envoy instances to run at the same time. + envoy_args = [ + os.path.abspath(envoy_binary), '-c', envoy_config_file, '--base-id', + str(random.randint(1, 999999)) + ] + envoy_handle = subprocess.Popen( + envoy_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + self.envoy_worker = ProcessWorker( + envoy_handle, 'Envoy', 'starting main dispatch loop') + self.envoy_worker.await_startup() + + # Start Zookeeper in background, pointing to rendered config file. 
+ zk_binary = os.path.join(kafka_bin_dir, 'zookeeper-server-start.sh') + zk_args = [os.path.abspath(zk_binary), zookeeper_config_file] + zk_handle = subprocess.Popen( + zk_args, + env=launcher_environment, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE) + self.zk_worker = ProcessWorker(zk_handle, 'Zookeeper', 'binding to port') + self.zk_worker.await_startup() + + self.kafka_workers = [] + + # Start Kafka 1 in background, pointing to rendered config file. + kafka_binary = os.path.join(kafka_bin_dir, 'kafka-server-start.sh') + kafka_args = [os.path.abspath(kafka_binary), os.path.abspath(kafka_config_file1)] + kafka_handle = subprocess.Popen( + kafka_args, + env=launcher_environment, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE) + kafka_worker = ProcessWorker(kafka_handle, 'Kafka', '[KafkaServer id=0] started') + kafka_worker.await_startup() + self.kafka_workers.append(kafka_worker) + + # Start Kafka 2 in background, pointing to rendered config file. + kafka_binary = os.path.join(kafka_bin_dir, 'kafka-server-start.sh') + kafka_args = [os.path.abspath(kafka_binary), os.path.abspath(kafka_config_file2)] + kafka_handle = subprocess.Popen( + kafka_args, + env=launcher_environment, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE) + kafka_worker = ProcessWorker(kafka_handle, 'Kafka', '[KafkaServer id=0] started') + kafka_worker.await_startup() + self.kafka_workers.append(kafka_worker) + + # All services have started without problems - now we can finally finish. + break + + except Exception as e: + print('Could not start services, will try again', e) + + if self.kafka_workers: + self.kafka_worker.kill() + self.kafka_worker = None + if self.zk_worker: + self.zk_worker.kill() + self.zk_worker = None + if self.envoy_worker: + self.envoy_worker.kill() + self.envoy_worker = None + + @staticmethod + def find_java(): + """ + This method just locates the Java installation in current directory. 
+ We cannot hardcode the name, as the dirname changes as per: + https://github.com/bazelbuild/bazel/blob/master/tools/jdk/BUILD#L491 + """ + + external_dir = os.path.join('.', 'external') + for directory in os.listdir(external_dir): + if 'remotejdk11' in directory: + result = os.path.join(external_dir, directory, 'bin') + print('Using Java: ' + result) + return result + raise Exception('Could not find Java in: ' + external_dir) + + @staticmethod + def find_envoy(): + """ + This method locates envoy binary. + It's present at ./contrib/exe/envoy-static (at least for mac/bazel-asan/bazel-tsan), + or at ./external/envoy/contrib/exe/envoy-static (for bazel-compile_time_options). + """ + + candidate = os.path.join('.', 'contrib', 'exe', 'envoy-static') + if os.path.isfile(candidate): + return candidate + candidate = os.path.join('.', 'external', 'envoy', 'contrib', 'exe', 'envoy-static') + if os.path.isfile(candidate): + return candidate + raise Exception("Could not find Envoy") + + def shut_down(self): + # Teardown - kill Kafka, Zookeeper, and Envoy. Then delete their data directory. + print('Cleaning up') + + if self.kafka_workers: + for worker in self.kafka_workers: + worker.kill() + + if self.zk_worker: + self.zk_worker.kill() + + if self.envoy_worker: + self.envoy_worker.kill() + + if self.kafka_tmp_dir: + print('Removing temporary directory: ' + self.kafka_tmp_dir) + shutil.rmtree(self.kafka_tmp_dir) + + def check_state(self): + self.envoy_worker.check_state() + self.zk_worker.check_state() + for worker in self.kafka_workers: + worker.check_state() + + +class ProcessWorker: + """ + Helper class that wraps the external service process. + Provides ability to wait until service is ready to use (this is done by tracing logs) and + printing service's output to stdout. + """ + + # Service is considered to be properly initialized after it has logged its startup message + # and has been alive for INITIALIZATION_WAIT_SECONDS after that message has been seen. 
+ # This (clunky) design is needed because Zookeeper happens to log "binding to port" and then + # might fail to bind. + INITIALIZATION_WAIT_SECONDS = 3 + + def __init__(self, process_handle, name, startup_message): + # Handle to process and pretty name. + self.process_handle = process_handle + self.name = name + + self.startup_message = startup_message + self.startup_message_ts = None + + # Semaphore raised when startup has finished and information regarding startup's success. + self.initialization_semaphore = Semaphore(value=0) + self.initialization_ok = False + + self.state_worker = Thread(target=ProcessWorker.initialization_worker, args=(self,)) + self.state_worker.start() + self.out_worker = Thread( + target=ProcessWorker.pipe_handler, args=(self, self.process_handle.stdout, 'out')) + self.out_worker.start() + self.err_worker = Thread( + target=ProcessWorker.pipe_handler, args=(self, self.process_handle.stderr, 'err')) + self.err_worker.start() + + @staticmethod + def initialization_worker(owner): + """ + Worker thread. + Responsible for detecting if service died during initialization steps and ensuring if enough + time has passed since the startup message has been seen. + When either of these happens, we just raise the initialization semaphore. + """ + + while True: + status = owner.process_handle.poll() + if status: + # Service died. + print('%s did not initialize properly - finished with: %s' % (owner.name, status)) + owner.initialization_ok = False + owner.initialization_semaphore.release() + break + else: + # Service is still running. + startup_message_ts = owner.startup_message_ts + if startup_message_ts: + # The log message has been registered (by pipe_handler thread), let's just ensure that + # some time has passed and mark the service as running. 
+ current_time = int(round(time.time())) + if current_time - startup_message_ts >= ProcessWorker.INITIALIZATION_WAIT_SECONDS: + print( + 'Startup message seen %s seconds ago, and service is still running' % + (ProcessWorker.INITIALIZATION_WAIT_SECONDS), + flush=True) + owner.initialization_ok = True + owner.initialization_semaphore.release() + break + time.sleep(1) + print('Initialization worker for %s has finished' % (owner.name)) + + @staticmethod + def pipe_handler(owner, pipe, pipe_name): + """ + Worker thread. + If a service startup message is seen, then it just registers the timestamp of its appearance. + Also prints every received message. + """ + + try: + for raw_line in pipe: + line = raw_line.decode().rstrip() + print('%s(%s):' % (owner.name, pipe_name), line, flush=True) + if owner.startup_message in line: + print( + '%s initialization message [%s] has been logged' % + (owner.name, owner.startup_message)) + owner.startup_message_ts = int(round(time.time())) + finally: + pipe.close() + print('Pipe handler for %s(%s) has finished' % (owner.name, pipe_name)) + + def await_startup(self): + """ + Awaits on initialization semaphore, and then verifies the initialization state. + If everything is okay, we just continue (we can use the service), otherwise throw. + """ + + print('Waiting for %s to start...' % (self.name)) + self.initialization_semaphore.acquire() + try: + if self.initialization_ok: + print('Service %s started successfully' % (self.name)) + else: + raise Exception('%s could not start' % (self.name)) + finally: + self.initialization_semaphore.release() + + def check_state(self): + """ + Verifies if the service is still running. Throws if it is not. + """ + + status = self.process_handle.poll() + if status: + raise Exception('%s died with: %s' % (self.name, str(status))) + + def kill(self): + """ + Utility method to kill the main service thread and all related workers. + """ + + print('Stopping service %s' % self.name) + + # Kill the real process. 
+ self.process_handle.kill() + self.process_handle.wait() + + # The sub-workers are going to finish on their own, as they will detect main thread dying + # (through pipes closing, or .poll() returning a non-null value). + self.state_worker.join() + self.out_worker.join() + self.err_worker.join() + + print('Service %s has been stopped' % self.name) + + +class RenderingHelper: + """ + Helper for jinja templates. + """ + + @staticmethod + def get_template(template): + import jinja2 + import os + import sys + # Templates are resolved relatively to main start script, due to main & test templates being + # stored in different directories. + env = jinja2.Environment( + loader=jinja2.FileSystemLoader(searchpath=os.path.dirname(os.path.abspath(__file__)))) + return env.get_template(template) + + +if __name__ == '__main__': + unittest.main() diff --git a/contrib/kafka/filters/network/test/mesh/integration_test/kafka_server_properties.j2 b/contrib/kafka/filters/network/test/mesh/integration_test/kafka_server_properties.j2 new file mode 100644 index 0000000000000..021991a0d4670 --- /dev/null +++ b/contrib/kafka/filters/network/test/mesh/integration_test/kafka_server_properties.j2 @@ -0,0 +1,31 @@ +broker.id=0 +listeners=PLAINTEXT://127.0.0.1:{{ data['kafka_real_port'] }} +advertised.listeners=PLAINTEXT://127.0.0.1:{{ data['kafka_real_port'] }} + +num.network.threads=3 +num.io.threads=8 +socket.send.buffer.bytes=102400 +socket.receive.buffer.bytes=102400 +socket.request.max.bytes=104857600 + +log.dirs={{ data['data_dir'] }} +num.partitions=1 +num.recovery.threads.per.data.dir=1 + +offsets.topic.replication.factor=1 +transaction.state.log.replication.factor=1 +transaction.state.log.min.isr=1 + +log.retention.hours=168 +log.segment.bytes=1073741824 +log.retention.check.interval.ms=300000 + +# As we are going to have multiple Kafka clusters (not even brokers!), +# we need to register them at different paths in ZK. 
+zookeeper.connect=127.0.0.1:{{ data['zk_port'] }}/{{ data['kafka_zk_instance'] }} +zookeeper.connection.timeout.ms=6000 + +group.initial.rebalance.delay.ms=0 + +# The number of __consumer_offsets partitions is reduced to make logs a bit more readable. +offsets.topic.num.partitions=5 diff --git a/contrib/kafka/filters/network/test/mesh/integration_test/zookeeper_properties.j2 b/contrib/kafka/filters/network/test/mesh/integration_test/zookeeper_properties.j2 new file mode 100644 index 0000000000000..be524bea342bc --- /dev/null +++ b/contrib/kafka/filters/network/test/mesh/integration_test/zookeeper_properties.j2 @@ -0,0 +1,5 @@ +clientPort={{ data['zk_port'] }} +dataDir={{ data['data_dir'] }} +maxClientCnxns=0 +# ZK 3.5 tries to bind 8080 for introspection capability - we do not need that. +admin.enableServer=false diff --git a/contrib/kafka/filters/network/test/mesh/upstream_config_unit_test.cc b/contrib/kafka/filters/network/test/mesh/upstream_config_unit_test.cc new file mode 100644 index 0000000000000..23bfb039b9a03 --- /dev/null +++ b/contrib/kafka/filters/network/test/mesh/upstream_config_unit_test.cc @@ -0,0 +1,143 @@ +#include "source/common/protobuf/utility.h" + +#include "test/test_common/utility.h" + +#include "contrib/kafka/filters/network/source/mesh/upstream_config.h" +#include "gtest/gtest.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace Kafka { +namespace Mesh { + +TEST(UpstreamKafkaConfigurationTest, shouldThrowIfNoKafkaClusters) { + // given + KafkaMeshProtoConfig proto_config; + + // when + // then - exception gets thrown + EXPECT_THROW_WITH_REGEX(UpstreamKafkaConfigurationImpl{proto_config}, EnvoyException, + "at least one upstream Kafka cluster"); +} + +TEST(UpstreamKafkaConfigurationTest, shouldThrowIfKafkaClustersWithSameName) { + // given + const std::string yaml = R"EOF( +advertised_host: mock +advertised_port: 1 +upstream_clusters: +- cluster_name: REPEATEDNAME + bootstrap_servers: mock + 
partition_count : 1 +- cluster_name: REPEATEDNAME + bootstrap_servers: mock + partition_count : 1 +forwarding_rules: + )EOF"; + KafkaMeshProtoConfig proto_config; + TestUtility::loadFromYamlAndValidate(yaml, proto_config); + + // when + // then - exception gets thrown + EXPECT_THROW_WITH_REGEX(UpstreamKafkaConfigurationImpl{proto_config}, EnvoyException, + "multiple Kafka clusters referenced by the same name"); +} + +TEST(UpstreamKafkaConfigurationTest, shouldThrowIfNoForwardingRules) { + // given + const std::string yaml = R"EOF( +advertised_host: mock_host +advertised_port: 42 +upstream_clusters: +- cluster_name: mock + bootstrap_servers: mock + partition_count : 1 +forwarding_rules: + )EOF"; + KafkaMeshProtoConfig proto_config; + TestUtility::loadFromYamlAndValidate(yaml, proto_config); + + // when + // then - exception gets thrown + EXPECT_THROW_WITH_REGEX(UpstreamKafkaConfigurationImpl{proto_config}, EnvoyException, + "at least one forwarding rule"); +} + +TEST(UpstreamKafkaConfigurationTest, shouldThrowIfForwardingRuleWithUnknownTarget) { + // given + const std::string yaml = R"EOF( +advertised_host: mock_host +advertised_port: 42 +upstream_clusters: +- cluster_name: mock + bootstrap_servers: mock + partition_count : 1 +forwarding_rules: +- target_cluster: BADNAME + topic_prefix: mock + )EOF"; + KafkaMeshProtoConfig proto_config; + TestUtility::loadFromYamlAndValidate(yaml, proto_config); + + // when + // then - exception gets thrown + EXPECT_THROW_WITH_REGEX(UpstreamKafkaConfigurationImpl{proto_config}, EnvoyException, + "forwarding rule is referencing unknown upstream Kafka cluster"); +} + +TEST(UpstreamKafkaConfigurationTest, shouldBehaveProperly) { + // given + const std::string yaml = R"EOF( +advertised_host: mock_host +advertised_port: 42 +upstream_clusters: +- cluster_name: cluster1 + bootstrap_servers: s1 + partition_count : 1 +- cluster_name: cluster2 + bootstrap_servers: s2 + partition_count : 2 +forwarding_rules: +- target_cluster: cluster1 + 
topic_prefix: prefix1 +- target_cluster: cluster2 + topic_prefix: prefix2 + )EOF"; + KafkaMeshProtoConfig proto_config; + TestUtility::loadFromYamlAndValidate(yaml, proto_config); + const UpstreamKafkaConfiguration& testee = UpstreamKafkaConfigurationImpl{proto_config}; + + const ClusterConfig cluster1 = {"cluster1", 1, {{"bootstrap.servers", "s1"}}}; + const ClusterConfig cluster2 = {"cluster2", 2, {{"bootstrap.servers", "s2"}}}; + + // when, then (advertised address is returned properly) + const auto address = testee.getAdvertisedAddress(); + EXPECT_EQ(address.first, "mock_host"); + EXPECT_EQ(address.second, 42); + + // when, then (matching prefix with something more) + const auto res1 = testee.computeClusterConfigForTopic("prefix1somethingmore"); + ASSERT_TRUE(res1.has_value()); + EXPECT_EQ(*res1, cluster1); + + // when, then (matching prefix alone) + const auto res2 = testee.computeClusterConfigForTopic("prefix1"); + ASSERT_TRUE(res2.has_value()); + EXPECT_EQ(*res2, cluster1); + + // when, then (failing to match first rule, but then matching the second one) + const auto res3 = testee.computeClusterConfigForTopic("prefix2somethingmore"); + ASSERT_TRUE(res3.has_value()); + EXPECT_EQ(*res3, cluster2); + + // when, then (no rules match) + const auto res4 = testee.computeClusterConfigForTopic("someotherthing"); + EXPECT_FALSE(res4.has_value()); +} + +} // namespace Mesh +} // namespace Kafka +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/docs/root/configuration/listeners/network_filters/kafka_broker_filter.rst b/docs/root/configuration/listeners/network_filters/kafka_broker_filter.rst index 4753f3845a78c..2c286686f33f5 100644 --- a/docs/root/configuration/listeners/network_filters/kafka_broker_filter.rst +++ b/docs/root/configuration/listeners/network_filters/kafka_broker_filter.rst @@ -14,6 +14,10 @@ this filter) are forwarded as-is. 
* :ref:`v3 API reference ` * This filter should be configured with the name *envoy.filters.network.kafka_broker*. +.. attention:: + + The Kafka broker filter is only included in :ref:`contrib images ` + .. attention:: The kafka_broker filter is experimental and is currently under active development. diff --git a/docs/root/configuration/listeners/network_filters/kafka_mesh_filter.rst b/docs/root/configuration/listeners/network_filters/kafka_mesh_filter.rst new file mode 100644 index 0000000000000..4a8504b7d67e3 --- /dev/null +++ b/docs/root/configuration/listeners/network_filters/kafka_mesh_filter.rst @@ -0,0 +1,103 @@ +.. _config_network_filters_kafka_mesh: + +Kafka Mesh filter +=================== + +The Apache Kafka mesh filter provides a facade for `Apache Kafka `_ +producers. Produce requests sent to this filter instance can be forwarded to one of multiple +clusters, depending on configured forwarding rules. Corresponding message versions from +Kafka 2.4.0 are supported. + +* :ref:`v3 API reference ` +* This filter should be configured with the name *envoy.filters.network.kafka_mesh*. + +.. attention:: + + The Kafka mesh filter is only included in :ref:`contrib images ` + +.. attention:: + + The kafka_mesh filter is experimental and is currently under active development. + Capabilities will be expanded over time and the configuration structures are likely to change. + +.. attention:: + + The kafka_mesh filter does not work on Windows (the blocker is getting librdkafka compiled). + +.. _config_network_filters_kafka_mesh_config: + +Configuration +------------- + +The example below shows a typical filter configuration that proxies 3 Kafka clusters. +Clients are going to connect to '127.0.0.1:19092', and their messages are going to be distributed +to clusters depending on topic names. + +.. code-block:: yaml + + listeners: + - address: + socket_address: + address: 127.0.0.1 # Host that Kafka clients should connect to. 
+        port_value: 19092 # Port that Kafka clients should connect to.
+    filter_chains:
+    - filters:
+      - name: envoy.filters.network.kafka_mesh
+        typed_config:
+          "@type": type.googleapis.com/envoy.extensions.filters.network.kafka_mesh.v3alpha.KafkaMesh
+          advertised_host: "127.0.0.1"
+          advertised_port: 19092
+          upstream_clusters:
+          - cluster_name: kafka_c1
+            bootstrap_servers: cluster1_node1:9092,cluster1_node2:9092,cluster1_node3:9092
+            partition_count: 1
+          - cluster_name: kafka_c2
+            bootstrap_servers: cluster2_node1:9092,cluster2_node2:9092,cluster2_node3:9092
+            partition_count: 1
+          - cluster_name: kafka_c3
+            bootstrap_servers: cluster3_node1:9092,cluster3_node2:9092
+            partition_count: 5
+            producer_config:
+              acks: "1"
+              linger.ms: "500"
+          forwarding_rules:
+          - target_cluster: kafka_c1
+            topic_prefix: apples
+          - target_cluster: kafka_c2
+            topic_prefix: bananas
+          - target_cluster: kafka_c3
+            topic_prefix: cherries
+
+It should be noted that the Kafka broker filter can be inserted before the Kafka mesh filter in the filter
+chain to capture the request processing metrics.
+
+.. _config_network_filters_kafka_mesh_notes:
+
+Notes
+-----
+Given that this filter does its own processing of received requests, there are some changes
+in behaviour compared to an explicit connection to a Kafka cluster:
+
+#. Record headers are not sent upstream.
+#. Only ProduceRequests with version 2 are supported (which means very old producers like 0.8 are
+   not going to be supported).
+#. Python producers need to set API version of at least 1.0.0, so that the produce requests they
+   send are going to have records with magic equal to 2.
+#. Downstream handling of the Kafka producer 'acks' property is delegated to the upstream client.
+   E.g. if the upstream client is configured to use acks=0 then the response is going to be sent
+   to the downstream client as soon as possible (even if they had non-zero acks!).
+#.
As the filter splits single producer requests into separate records, it's possible that delivery
+   of only some of these records fails. In that case, the response returned to the upstream client is
+   a failure, however it is possible some of the records have been appended in the target cluster.
+#. Because of the splitting mentioned above, records are not necessarily appended one after another
+   (as they do not get sent as a single request to upstream). Users that want to avoid this scenario
+   might want to take a look into the downstream producer configs: 'linger.ms' and 'batch.size'.
+#. Produce requests that reference topics that do not match any of the rules are going to close the
+   connection and fail. This usually should not happen (clients request metadata first, and they
+   should then fail with 'no broker available' first), but is possible if someone tailors binary
+   payloads over the connection.
+#. librdkafka was compiled without ssl, lz4, gssapi, so related custom producer config options are
+   not supported.
+#. Invalid custom producer configs are not found at startup (only when appropriate clusters are
+   being sent to). Requests that would have referenced these clusters are going to close the connection
+   and fail.
diff --git a/docs/root/configuration/listeners/network_filters/network_filters.rst b/docs/root/configuration/listeners/network_filters/network_filters.rst
index a4b918ddf380b..d4dcc5e86c97a 100644
--- a/docs/root/configuration/listeners/network_filters/network_filters.rst
+++ b/docs/root/configuration/listeners/network_filters/network_filters.rst
@@ -17,6 +17,7 @@ filters.
direct_response_filter ext_authz_filter kafka_broker_filter + kafka_mesh_filter local_rate_limit_filter mongo_proxy_filter mysql_proxy_filter diff --git a/generated_api_shadow/BUILD b/generated_api_shadow/BUILD index 5bbde32946b63..93f9184a2b400 100644 --- a/generated_api_shadow/BUILD +++ b/generated_api_shadow/BUILD @@ -60,6 +60,7 @@ proto_library( "//contrib/envoy/extensions/filters/http/squash/v3:pkg", "//contrib/envoy/extensions/filters/http/sxg/v3alpha:pkg", "//contrib/envoy/extensions/filters/network/kafka_broker/v3:pkg", + "//contrib/envoy/extensions/filters/network/kafka_mesh/v3alpha:pkg", "//contrib/envoy/extensions/filters/network/mysql_proxy/v3:pkg", "//contrib/envoy/extensions/filters/network/postgres_proxy/v3alpha:pkg", "//contrib/envoy/extensions/filters/network/rocketmq_proxy/v3:pkg", diff --git a/generated_api_shadow/contrib/envoy/extensions/filters/network/kafka_mesh/v3alpha/BUILD b/generated_api_shadow/contrib/envoy/extensions/filters/network/kafka_mesh/v3alpha/BUILD new file mode 100644 index 0000000000000..ee92fb652582e --- /dev/null +++ b/generated_api_shadow/contrib/envoy/extensions/filters/network/kafka_mesh/v3alpha/BUILD @@ -0,0 +1,9 @@ +# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
+ +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], +) diff --git a/generated_api_shadow/contrib/envoy/extensions/filters/network/kafka_mesh/v3alpha/kafka_mesh.proto b/generated_api_shadow/contrib/envoy/extensions/filters/network/kafka_mesh/v3alpha/kafka_mesh.proto new file mode 100644 index 0000000000000..03a6522852ab5 --- /dev/null +++ b/generated_api_shadow/contrib/envoy/extensions/filters/network/kafka_mesh/v3alpha/kafka_mesh.proto @@ -0,0 +1,58 @@ +syntax = "proto3"; + +package envoy.extensions.filters.network.kafka_mesh.v3alpha; + +import "udpa/annotations/status.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.filters.network.kafka_mesh.v3alpha"; +option java_outer_classname = "KafkaMeshProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).work_in_progress = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: Kafka Mesh] +// Kafka Mesh :ref:`configuration overview `. +// [#extension: envoy.filters.network.kafka_mesh] + +message KafkaMesh { + // Envoy's host that's advertised to clients. + // Has the same meaning as corresponding Kafka broker properties. + // Usually equal to filter chain's listener config, but needs to be reachable by clients + // (so 0.0.0.0 will not work). + string advertised_host = 1 [(validate.rules).string = {min_len: 1}]; + + // Envoy's port that's advertised to clients. + int32 advertised_port = 2 [(validate.rules).int32 = {gt: 0}]; + + // Upstream clusters this filter will connect to. + repeated KafkaClusterDefinition upstream_clusters = 3; + + // Rules that will decide which cluster gets which request. + repeated ForwardingRule forwarding_rules = 4; +} + +message KafkaClusterDefinition { + // Cluster name. 
+ string cluster_name = 1 [(validate.rules).string = {min_len: 1}]; + + // Kafka cluster address. + string bootstrap_servers = 2 [(validate.rules).string = {min_len: 1}]; + + // Default number of partitions present in this cluster. + // This is especially important for clients that do not specify partition in their payloads and depend on this value for hashing. + int32 partition_count = 3 [(validate.rules).int32 = {gt: 0}]; + + // Custom configuration passed to Kafka producer. + map producer_config = 4; +} + +message ForwardingRule { + // Cluster name. + string target_cluster = 1; + + oneof trigger { + // Intended place for future types of forwarding rules. + string topic_prefix = 2; + } +} diff --git a/tools/code_format/check_format.py b/tools/code_format/check_format.py index d46f40af7dc2c..fa88387510e19 100755 --- a/tools/code_format/check_format.py +++ b/tools/code_format/check_format.py @@ -257,6 +257,8 @@ "extensions/filters/network/redis_proxy", "extensions/filters/network/kafka", "extensions/filters/network/kafka/broker", + "extensions/filters/network/kafka/mesh", + "extensions/filters/network/kafka/mesh/command_handlers", "extensions/filters/network/kafka/protocol", "extensions/filters/network/kafka/serialization", "extensions/filters/network/mongo_proxy", From 64650143ac2bf28c28886f02fbf57d4842fe2c8f Mon Sep 17 00:00:00 2001 From: phlax Date: Fri, 10 Sep 2021 18:16:36 +0100 Subject: [PATCH 015/121] bazel: Make use of new incremental pip installer (#18026) Signed-off-by: Ryan Northey Signed-off-by: gayang --- .github/actions/pr_notifier/requirements.in | 2 + .github/dependabot.yml | 43 +- WORKSPACE | 4 + bazel/repositories.bzl | 6 - bazel/repositories_extra.bzl | 116 +-- bazel/repository_locations.bzl | 9 +- bazel/rules_python.patch | 15 - ci/flaky_test/requirements.in | 1 + configs/BUILD | 3 +- contrib/kafka/filters/network/source/BUILD | 2 +- .../filters/network/source/requirements.txt | 38 - contrib/kafka/filters/network/test/BUILD | 2 +- 
.../test/broker/integration_test/BUILD | 2 +- .../network/thrift_proxy/requirements.in | 2 + tools/base/requirements.in | 30 + tools/base/requirements.txt | 717 +++++++++++++++++- tools/code_format/BUILD | 2 +- tools/config_validation/BUILD | 2 +- tools/config_validation/requirements.txt | 30 - tools/distribution/BUILD | 2 +- tools/distribution/requirements.txt | 382 ---------- tools/docs/BUILD | 30 +- tools/docs/requirements.txt | 239 ------ tools/git/BUILD | 2 +- tools/git/requirements.txt | 18 - tools/protodoc/BUILD | 2 +- tools/protodoc/requirements.txt | 38 - tools/testing/BUILD | 2 +- tools/testing/requirements.txt | 124 --- 29 files changed, 766 insertions(+), 1099 deletions(-) create mode 100644 .github/actions/pr_notifier/requirements.in delete mode 100644 bazel/rules_python.patch create mode 100644 ci/flaky_test/requirements.in delete mode 100644 contrib/kafka/filters/network/source/requirements.txt create mode 100644 test/extensions/filters/network/thrift_proxy/requirements.in create mode 100644 tools/base/requirements.in delete mode 100644 tools/config_validation/requirements.txt delete mode 100644 tools/distribution/requirements.txt delete mode 100644 tools/docs/requirements.txt delete mode 100644 tools/git/requirements.txt delete mode 100644 tools/protodoc/requirements.txt delete mode 100644 tools/testing/requirements.txt diff --git a/.github/actions/pr_notifier/requirements.in b/.github/actions/pr_notifier/requirements.in new file mode 100644 index 0000000000000..b27ccacba25ae --- /dev/null +++ b/.github/actions/pr_notifier/requirements.in @@ -0,0 +1,2 @@ +pygithub +slack_sdk diff --git a/.github/dependabot.yml b/.github/dependabot.yml index 36d284ee60378..4aeb246c9db95 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -7,12 +7,12 @@ updates: interval: "daily" - package-ecosystem: "pip" - directory: "/test/extensions/filters/network/thrift_proxy" + directory: "/configs" schedule: interval: "daily" - package-ecosystem: "pip" - 
directory: "/contrib/kafka/filters/network/source" + directory: "/test/extensions/filters/network/thrift_proxy" schedule: interval: "daily" @@ -27,17 +27,7 @@ updates: interval: "daily" - package-ecosystem: "pip" - directory: "/tools/docs" - schedule: - interval: "daily" - -- package-ecosystem: "pip" - directory: "/tools/git" - schedule: - interval: "daily" - -- package-ecosystem: "pip" - directory: "/tools/config_validation" + directory: "/tools/code_format" schedule: interval: "daily" @@ -46,28 +36,13 @@ updates: schedule: interval: "daily" -- package-ecosystem: "pip" - directory: "/tools/deprecate_version" - schedule: - interval: "daily" - -- package-ecosystem: "pip" - directory: "/tools/distribution" - schedule: - interval: "daily" - -- package-ecosystem: "pip" - directory: "/tools/protodoc" - schedule: - interval: "daily" - - package-ecosystem: "pip" directory: "/tools/deprecate_features" schedule: interval: "daily" - package-ecosystem: "pip" - directory: "/tools/code_format" + directory: "/tools/deprecate_version" schedule: interval: "daily" @@ -76,16 +51,6 @@ updates: schedule: interval: "daily" -- package-ecosystem: "pip" - directory: "/configs" - schedule: - interval: "daily" - -- package-ecosystem: "pip" - directory: "/tools/testing" - schedule: - interval: "daily" - - package-ecosystem: "docker" directory: "/ci" schedule: diff --git a/WORKSPACE b/WORKSPACE index a96cba5013021..65b97f124d45d 100644 --- a/WORKSPACE +++ b/WORKSPACE @@ -16,6 +16,10 @@ load("//bazel:repositories_extra.bzl", "envoy_dependencies_extra") envoy_dependencies_extra() +load("@base_pip3//:requirements.bzl", "install_deps") + +install_deps() + load("//bazel:dependency_imports.bzl", "envoy_dependency_imports") envoy_dependency_imports() diff --git a/bazel/repositories.bzl b/bazel/repositories.bzl index c53d62da1bbb6..ae04a8918212c 100644 --- a/bazel/repositories.bzl +++ b/bazel/repositories.bzl @@ -637,14 +637,8 @@ def _com_google_absl(): ) def _com_google_protobuf(): - # TODO(phlax): 
remove patch - # patch is applied to update setuptools to version (0.5.4), - # and can be removed once this has been updated in rules_python - # see https://github.com/envoyproxy/envoy/pull/15236#issuecomment-788650946 for discussion external_http_archive( name = "rules_python", - patches = ["@envoy//bazel:rules_python.patch"], - patch_args = ["-p1"], ) external_http_archive( diff --git a/bazel/repositories_extra.bzl b/bazel/repositories_extra.bzl index 6b9c483a6ea72..686e90998f16d 100644 --- a/bazel/repositories_extra.bzl +++ b/bazel/repositories_extra.bzl @@ -1,138 +1,28 @@ -load("@rules_python//python:pip.bzl", "pip_install") +load("@rules_python//python:pip.bzl", "pip_install", "pip_parse") load("@proxy_wasm_cpp_host//bazel/cargo:crates.bzl", "proxy_wasm_cpp_host_fetch_remote_crates") load("//bazel/external/cargo:crates.bzl", "raze_fetch_remote_crates") # Python dependencies. def _python_deps(): - pip_install( + pip_parse( name = "base_pip3", - requirements = "@envoy//tools/base:requirements.txt", + requirements_lock = "@envoy//tools/base:requirements.txt", extra_pip_args = ["--require-hashes"], ) - pip_install( - name = "config_validation_pip3", - requirements = "@envoy//tools/config_validation:requirements.txt", - extra_pip_args = ["--require-hashes"], - - # project_name = "PyYAML", - # project_url = "https://github.com/yaml/pyyaml", - # version = "5.4.1", - # release_date = "2021-01-20" - # use_category = ["devtools"], - # cpe = "cpe:2.3:a:pyyaml:pyyaml:*", - ) pip_install( name = "configs_pip3", requirements = "@envoy//configs:requirements.txt", extra_pip_args = ["--require-hashes"], - - # project_name = "Jinja", - # project_url = "http://palletsprojects.com/p/jinja", - # version = "2.11.2", - # release_date = "2020-04-13" - # use_category = ["test"], - # cpe = "cpe:2.3:a:palletsprojects:jinja:*", - - # project_name = "MarkupSafe", - # project_url = "https://markupsafe.palletsprojects.com/en/1.1.x/", - # version = "1.1.1", - # release_date = "2019-02-23" - 
# use_category = ["test"], - ) - pip_install( - name = "docs_pip3", - requirements = "@envoy//tools/docs:requirements.txt", - extra_pip_args = ["--require-hashes"], - ) - pip_install( - name = "deps_pip3", - requirements = "@envoy//tools/dependency:requirements.txt", - extra_pip_args = ["--require-hashes"], - ) - pip_install( - name = "distribution_pip3", - requirements = "@envoy//tools/distribution:requirements.txt", - extra_pip_args = ["--require-hashes"], - ) - pip_install( - name = "git_pip3", - requirements = "@envoy//tools/git:requirements.txt", - extra_pip_args = ["--require-hashes"], - ) - pip_install( - name = "kafka_pip3", - requirements = "@envoy//contrib/kafka/filters/network/source:requirements.txt", - extra_pip_args = ["--require-hashes"], - - # project_name = "Jinja", - # project_url = "http://palletsprojects.com/p/jinja", - # version = "2.11.2", - # release_date = "2020-04-13" - # use_category = ["test"], - # cpe = "cpe:2.3:a:palletsprojects:jinja:*", - - # project_name = "MarkupSafe", - # project_url = "https://markupsafe.palletsprojects.com/en/1.1.x/", - # version = "1.1.1", - # release_date = "2019-02-23" - # use_category = ["test"], - ) - pip_install( - name = "protodoc_pip3", - requirements = "@envoy//tools/protodoc:requirements.txt", - extra_pip_args = ["--require-hashes"], - - # project_name = "PyYAML", - # project_url = "https://github.com/yaml/pyyaml", - # version = "5.4.1", - # release_date = "2021-01-20" - # use_category = ["docs"], - # cpe = "cpe:2.3:a:pyyaml:pyyaml:*", - ) - pip_install( - name = "pylint_pip3", - requirements = "@envoy//tools/code_format:requirements.txt", - extra_pip_args = ["--require-hashes"], - ) - pip_install( - name = "testing_pip3", - requirements = "@envoy//tools/testing:requirements.txt", - extra_pip_args = ["--require-hashes"], ) pip_install( name = "thrift_pip3", requirements = "@envoy//test/extensions/filters/network/thrift_proxy:requirements.txt", extra_pip_args = ["--require-hashes"], - - # project_name = 
"Apache Thrift", - # project_url = "http://thrift.apache.org/", - # version = "0.11.0", - # release_date = "2017-12-07" - # use_category = ["test"], - # cpe = "cpe:2.3:a:apache:thrift:*", - - # project_name = "Six: Python 2 and 3 Compatibility Library", - # project_url = "https://six.readthedocs.io/", - # version = "1.15.0", - # release_date = "2020-05-21" - # use_category = ["test"], ) pip_install( name = "fuzzing_pip3", requirements = "@rules_fuzzing//fuzzing:requirements.txt", extra_pip_args = ["--require-hashes"], - - # project_name = "Abseil Python Common Libraries", - # project_url = "https://github.com/abseil/abseil-py", - # version = "0.11.0", - # release_date = "2020-10-27", - # use_category = ["test"], - - # project_name = "Six: Python 2 and 3 Compatibility Library", - # project_url = "https://six.readthedocs.io/", - # version = "1.15.0", - # release_date = "2020-05-21" - # use_category = ["test"], ) # Envoy deps that rely on a first stage of dependency loading in envoy_dependencies(). 
diff --git a/bazel/repository_locations.bzl b/bazel/repository_locations.bzl index 75fa3544bcf83..e6aa827110114 100644 --- a/bazel/repository_locations.bzl +++ b/bazel/repository_locations.bzl @@ -675,10 +675,11 @@ REPOSITORY_LOCATIONS_SPEC = dict( project_name = "Python rules for Bazel", project_desc = "Bazel rules for the Python language", project_url = "https://github.com/bazelbuild/rules_python", - version = "0.3.0", - sha256 = "934c9ceb552e84577b0faf1e5a2f0450314985b4d8712b2b70717dc679fdc01b", - release_date = "2021-06-23", - urls = ["https://github.com/bazelbuild/rules_python/releases/download/{version}/rules_python-{version}.tar.gz"], + version = "9f597623ccfbe430b0d81c82498e33b80b7aec88", + sha256 = "8d61fed6974f1e69e09243ca78c9ecf82f50fa3de64bb5df6b0b9061f9c9639b", + release_date = "2021-09-07", + strip_prefix = "rules_python-{version}", + urls = ["https://github.com/bazelbuild/rules_python/archive/{version}.tar.gz"], use_category = ["build"], ), rules_pkg = dict( diff --git a/bazel/rules_python.patch b/bazel/rules_python.patch deleted file mode 100644 index 205998745d576..0000000000000 --- a/bazel/rules_python.patch +++ /dev/null @@ -1,15 +0,0 @@ -diff --git a/python/pip_install/repositories.bzl b/python/pip_install/repositories.bzl -index 302ff0e..c40deae 100644 ---- a/python/pip_install/repositories.bzl -+++ b/python/pip_install/repositories.bzl -@@ -26,8 +26,8 @@ _RULE_DEPS = [ - ), - ( - "pypi__setuptools", -- "https://files.pythonhosted.org/packages/ab/b5/3679d7c98be5b65fa5522671ef437b792d909cf3908ba54fe9eca5d2a766/setuptools-44.1.0-py2.py3-none-any.whl", -- "992728077ca19db6598072414fb83e0a284aca1253aaf2e24bb1e55ee6db1a30", -+ "https://files.pythonhosted.org/packages/70/06/849cc805ac6332210083f2114a95b22ee252ce81ed4e1be4f1d2b87c9108/setuptools-54.0.0-py3-none-any.whl", -+ "d85b57c41e88b69ab87065c964134ec85b7573cbab0fdaa7ef32330ed764600a", - ), - ( - "pypi__wheel", diff --git a/ci/flaky_test/requirements.in b/ci/flaky_test/requirements.in new file 
mode 100644 index 0000000000000..e5704ffe9fbe6 --- /dev/null +++ b/ci/flaky_test/requirements.in @@ -0,0 +1 @@ +slackclient diff --git a/configs/BUILD b/configs/BUILD index 80583f0aa32fb..71e3fa5f9774a 100644 --- a/configs/BUILD +++ b/configs/BUILD @@ -20,8 +20,7 @@ py_binary( "*.yaml", ]), deps = [ - requirement("Jinja2"), - requirement("MarkupSafe"), + requirement("jinja2"), ], ) diff --git a/contrib/kafka/filters/network/source/BUILD b/contrib/kafka/filters/network/source/BUILD index ec196b5e9abe2..b90a1598249e5 100644 --- a/contrib/kafka/filters/network/source/BUILD +++ b/contrib/kafka/filters/network/source/BUILD @@ -5,7 +5,7 @@ load( "envoy_contrib_package", ) load("@rules_python//python:defs.bzl", "py_binary", "py_library") -load("@kafka_pip3//:requirements.bzl", "requirement") +load("@base_pip3//:requirements.bzl", "requirement") licenses(["notice"]) # Apache 2 diff --git a/contrib/kafka/filters/network/source/requirements.txt b/contrib/kafka/filters/network/source/requirements.txt deleted file mode 100644 index 1cd69909b9962..0000000000000 --- a/contrib/kafka/filters/network/source/requirements.txt +++ /dev/null @@ -1,38 +0,0 @@ -Jinja2==3.0.1 \ - --hash=sha256:1f06f2da51e7b56b8f238affdd6b4e2c61e39598a378cc49345bc1bd42a978a4 \ - --hash=sha256:703f484b47a6af502e743c9122595cc812b0271f661722403114f71a79d0f5a4 -MarkupSafe==2.0.1 \ - --hash=sha256:f9081981fe268bd86831e5c75f7de206ef275defcb82bc70740ae6dc507aee51 \ - --hash=sha256:0955295dd5eec6cb6cc2fe1698f4c6d84af2e92de33fbcac4111913cd100a6ff \ - --hash=sha256:0446679737af14f45767963a1a9ef7620189912317d095f2d9ffa183a4d25d2b \ - --hash=sha256:f826e31d18b516f653fe296d967d700fddad5901ae07c622bb3705955e1faa94 \ - --hash=sha256:fa130dd50c57d53368c9d59395cb5526eda596d3ffe36666cd81a44d56e48872 \ - --hash=sha256:905fec760bd2fa1388bb5b489ee8ee5f7291d692638ea5f67982d968366bef9f \ - --hash=sha256:6c4ca60fa24e85fe25b912b01e62cb969d69a23a5d5867682dd3e80b5b02581d \ - 
--hash=sha256:b2f4bf27480f5e5e8ce285a8c8fd176c0b03e93dcc6646477d4630e83440c6a9 \ - --hash=sha256:0717a7390a68be14b8c793ba258e075c6f4ca819f15edfc2a3a027c823718567 \ - --hash=sha256:6557b31b5e2c9ddf0de32a691f2312a32f77cd7681d8af66c2692efdbef84c18 \ - --hash=sha256:49e3ceeabbfb9d66c3aef5af3a60cc43b85c33df25ce03d0031a608b0a8b2e3f \ - --hash=sha256:d7f9850398e85aba693bb640262d3611788b1f29a79f0c93c565694658f4071f \ - --hash=sha256:6a7fae0dd14cf60ad5ff42baa2e95727c3d81ded453457771d02b7d2b3f9c0c2 \ - --hash=sha256:b7f2d075102dc8c794cbde1947378051c4e5180d52d276987b8d28a3bd58c17d \ - --hash=sha256:a30e67a65b53ea0a5e62fe23682cfe22712e01f453b95233b25502f7c61cb415 \ - --hash=sha256:611d1ad9a4288cf3e3c16014564df047fe08410e628f89805e475368bd304914 \ - --hash=sha256:be98f628055368795d818ebf93da628541e10b75b41c559fdf36d104c5787066 \ - --hash=sha256:1d609f577dc6e1aa17d746f8bd3c31aa4d258f4070d61b2aa5c4166c1539de35 \ - --hash=sha256:7d91275b0245b1da4d4cfa07e0faedd5b0812efc15b702576d103293e252af1b \ - --hash=sha256:01a9b8ea66f1658938f65b93a85ebe8bc016e6769611be228d797c9d998dd298 \ - --hash=sha256:47ab1e7b91c098ab893b828deafa1203de86d0bc6ab587b160f78fe6c4011f75 \ - --hash=sha256:97383d78eb34da7e1fa37dd273c20ad4320929af65d156e35a5e2d89566d9dfb \ - --hash=sha256:023cb26ec21ece8dc3907c0e8320058b2e0cb3c55cf9564da612bc325bed5e64 \ - --hash=sha256:984d76483eb32f1bcb536dc27e4ad56bba4baa70be32fa87152832cdd9db0833 \ - --hash=sha256:2ef54abee730b502252bcdf31b10dacb0a416229b72c18b19e24a4509f273d26 \ - --hash=sha256:3c112550557578c26af18a1ccc9e090bfe03832ae994343cfdacd287db6a6ae7 \ - --hash=sha256:53edb4da6925ad13c07b6d26c2a852bd81e364f95301c66e930ab2aef5b5ddd8 \ - --hash=sha256:f5653a225f31e113b152e56f154ccbe59eeb1c7487b39b9d9f9cdb58e6c79dc5 \ - --hash=sha256:4efca8f86c54b22348a5467704e3fec767b2db12fc39c6d963168ab1d3fc9135 \ - --hash=sha256:ab3ef638ace319fa26553db0624c4699e31a28bb2a835c5faca8f8acf6a5a902 \ - --hash=sha256:f8ba0e8349a38d3001fae7eadded3f6606f0da5d748ee53cc1dab1d6527b9509 \ - 
--hash=sha256:10f82115e21dc0dfec9ab5c0223652f7197feb168c940f3ef61563fc2d6beb74 \ - --hash=sha256:693ce3f9e70a6cf7d2fb9e6c9d8b204b6b39897a2c4a1aa65728d5ac97dcc1d8 \ - --hash=sha256:594c67807fb16238b30c44bdf74f36c02cdf22d1c8cda91ef8a0ed8dabf5620a diff --git a/contrib/kafka/filters/network/test/BUILD b/contrib/kafka/filters/network/test/BUILD index 4c620dcfe43b9..93b2d2e35a29b 100644 --- a/contrib/kafka/filters/network/test/BUILD +++ b/contrib/kafka/filters/network/test/BUILD @@ -5,7 +5,7 @@ load( "envoy_contrib_package", ) load("@rules_python//python:defs.bzl", "py_binary") -load("@kafka_pip3//:requirements.bzl", "requirement") +load("@base_pip3//:requirements.bzl", "requirement") licenses(["notice"]) # Apache 2 diff --git a/contrib/kafka/filters/network/test/broker/integration_test/BUILD b/contrib/kafka/filters/network/test/broker/integration_test/BUILD index 080c2a21a3805..cb9410485692c 100644 --- a/contrib/kafka/filters/network/test/broker/integration_test/BUILD +++ b/contrib/kafka/filters/network/test/broker/integration_test/BUILD @@ -3,7 +3,7 @@ load( "envoy_contrib_package", "envoy_py_test", ) -load("@kafka_pip3//:requirements.bzl", "requirement") +load("@base_pip3//:requirements.bzl", "requirement") licenses(["notice"]) # Apache 2 diff --git a/test/extensions/filters/network/thrift_proxy/requirements.in b/test/extensions/filters/network/thrift_proxy/requirements.in new file mode 100644 index 0000000000000..0e405bcd08bc1 --- /dev/null +++ b/test/extensions/filters/network/thrift_proxy/requirements.in @@ -0,0 +1,2 @@ +six +thrift diff --git a/tools/base/requirements.in b/tools/base/requirements.in new file mode 100644 index 0000000000000..8ec1dbb9be567 --- /dev/null +++ b/tools/base/requirements.in @@ -0,0 +1,30 @@ +aio.functional +colorama +coloredlogs +coverage +envoy.base.utils +envoy.distribution.release +envoy.distribution.verify +envoy.gpg.sign +flake8 +frozendict +gitpython +jinja2 +pep8-naming +pygithub +pyreadline +pytest +pytest-asyncio +pytest-cov 
+pytest-patches +pyyaml +setuptools +sphinx +sphinxcontrib-httpdomain +sphinxcontrib-serializinghtml +sphinx-copybutton +sphinxext-rediraffe +sphinx-rtd-theme +sphinx-tabs +verboselogs +yapf diff --git a/tools/base/requirements.txt b/tools/base/requirements.txt index 658174e0bc9ff..8d18996322e79 100644 --- a/tools/base/requirements.txt +++ b/tools/base/requirements.txt @@ -2,32 +2,566 @@ # This file is autogenerated by pip-compile # To update, run: # -# pip-compile --allow-unsafe --generate-hashes tools/base/requirements.txt +# pip-compile --allow-unsafe --generate-hashes tools/base/requirements.in # abstracts==0.0.12 \ --hash=sha256:acc01ff56c8a05fb88150dff62e295f9071fc33388c42f1dfc2787a8d1c755ff - # via aio.functional + # via + # aio.functional + # envoy.abstract.command + # envoy.base.utils + # envoy.github.abstract + # envoy.github.release aio.functional==0.0.9 \ --hash=sha256:824a997a394ad891bc9f403426babc13c9d0d1f4d1708c38e77d6aecae1cab1d - # via -r tools/base/requirements.txt + # via + # -r tools/base/requirements.in + # aio.tasks + # envoy.github.abstract + # envoy.github.release +aio.stream==0.0.2 \ + --hash=sha256:6f5baaff48f6319db134cd56c06ccf89db1f7c5f67a26382e081efc96f2f675d + # via envoy.github.release +aio.tasks==0.0.4 \ + --hash=sha256:9abd4b0881edb292c4f91a2f63b1dea7a9829a4bd4e8440225a1a412a90461fc + # via + # envoy.github.abstract + # envoy.github.release +aiodocker==0.21.0 \ + --hash=sha256:1f2e6db6377195962bb676d4822f6e3a0c525e1b5d60b8ebbab68230bff3d227 \ + --hash=sha256:6fe00135bb7dc40a407669d3157ecdfd856f3737d939df54f40a479d40cf7bdc + # via + # envoy.distribution.distrotest + # envoy.docker.utils +aiofiles==0.7.0 \ + --hash=sha256:a1c4fc9b2ff81568c83e21392a82f344ea9d23da906e4f6a52662764545e19d4 \ + --hash=sha256:c67a6823b5f23fcab0a2595a289cec7d8c863ffcb4322fb8cd6b90400aedfdbc + # via aio.stream +aiohttp==3.7.4.post0 \ + --hash=sha256:02f46fc0e3c5ac58b80d4d56eb0a7c7d97fcef69ace9326289fb9f1955e65cfe \ + 
--hash=sha256:0563c1b3826945eecd62186f3f5c7d31abb7391fedc893b7e2b26303b5a9f3fe \ + --hash=sha256:114b281e4d68302a324dd33abb04778e8557d88947875cbf4e842c2c01a030c5 \ + --hash=sha256:14762875b22d0055f05d12abc7f7d61d5fd4fe4642ce1a249abdf8c700bf1fd8 \ + --hash=sha256:15492a6368d985b76a2a5fdd2166cddfea5d24e69eefed4630cbaae5c81d89bd \ + --hash=sha256:17c073de315745a1510393a96e680d20af8e67e324f70b42accbd4cb3315c9fb \ + --hash=sha256:209b4a8ee987eccc91e2bd3ac36adee0e53a5970b8ac52c273f7f8fd4872c94c \ + --hash=sha256:230a8f7e24298dea47659251abc0fd8b3c4e38a664c59d4b89cca7f6c09c9e87 \ + --hash=sha256:2e19413bf84934d651344783c9f5e22dee452e251cfd220ebadbed2d9931dbf0 \ + --hash=sha256:393f389841e8f2dfc86f774ad22f00923fdee66d238af89b70ea314c4aefd290 \ + --hash=sha256:3cf75f7cdc2397ed4442594b935a11ed5569961333d49b7539ea741be2cc79d5 \ + --hash=sha256:3d78619672183be860b96ed96f533046ec97ca067fd46ac1f6a09cd9b7484287 \ + --hash=sha256:40eced07f07a9e60e825554a31f923e8d3997cfc7fb31dbc1328c70826e04cde \ + --hash=sha256:493d3299ebe5f5a7c66b9819eacdcfbbaaf1a8e84911ddffcdc48888497afecf \ + --hash=sha256:4b302b45040890cea949ad092479e01ba25911a15e648429c7c5aae9650c67a8 \ + --hash=sha256:515dfef7f869a0feb2afee66b957cc7bbe9ad0cdee45aec7fdc623f4ecd4fb16 \ + --hash=sha256:547da6cacac20666422d4882cfcd51298d45f7ccb60a04ec27424d2f36ba3eaf \ + --hash=sha256:5df68496d19f849921f05f14f31bd6ef53ad4b00245da3195048c69934521809 \ + --hash=sha256:64322071e046020e8797117b3658b9c2f80e3267daec409b350b6a7a05041213 \ + --hash=sha256:7615dab56bb07bff74bc865307aeb89a8bfd9941d2ef9d817b9436da3a0ea54f \ + --hash=sha256:79ebfc238612123a713a457d92afb4096e2148be17df6c50fb9bf7a81c2f8013 \ + --hash=sha256:7b18b97cf8ee5452fa5f4e3af95d01d84d86d32c5e2bfa260cf041749d66360b \ + --hash=sha256:932bb1ea39a54e9ea27fc9232163059a0b8855256f4052e776357ad9add6f1c9 \ + --hash=sha256:a00bb73540af068ca7390e636c01cbc4f644961896fa9363154ff43fd37af2f5 \ + --hash=sha256:a5ca29ee66f8343ed336816c553e82d6cade48a3ad702b9ffa6125d187e2dedb \ + 
--hash=sha256:af9aa9ef5ba1fd5b8c948bb11f44891968ab30356d65fd0cc6707d989cd521df \ + --hash=sha256:bb437315738aa441251214dad17428cafda9cdc9729499f1d6001748e1d432f4 \ + --hash=sha256:bdb230b4943891321e06fc7def63c7aace16095be7d9cf3b1e01be2f10fba439 \ + --hash=sha256:c6e9dcb4cb338d91a73f178d866d051efe7c62a7166653a91e7d9fb18274058f \ + --hash=sha256:cffe3ab27871bc3ea47df5d8f7013945712c46a3cc5a95b6bee15887f1675c22 \ + --hash=sha256:d012ad7911653a906425d8473a1465caa9f8dea7fcf07b6d870397b774ea7c0f \ + --hash=sha256:d9e13b33afd39ddeb377eff2c1c4f00544e191e1d1dee5b6c51ddee8ea6f0cf5 \ + --hash=sha256:e4b2b334e68b18ac9817d828ba44d8fcb391f6acb398bcc5062b14b2cbeac970 \ + --hash=sha256:e54962802d4b8b18b6207d4a927032826af39395a3bd9196a5af43fc4e60b009 \ + --hash=sha256:f705e12750171c0ab4ef2a3c76b9a4024a62c4103e3a55dd6f99265b9bc6fcfc \ + --hash=sha256:f881853d2643a29e643609da57b96d5f9c9b93f62429dcc1cbb413c7d07f0e1a \ + --hash=sha256:fe60131d21b31fd1a14bd43e6bb88256f69dfc3188b3a89d736d6c71ed43ec95 + # via + # aio.stream + # aiodocker + # envoy.github.abstract + # envoy.github.release +alabaster==0.7.12 \ + --hash=sha256:446438bdcca0e05bd45ea2de1668c1d9b032e1a9154c2c259092d77031ddd359 \ + --hash=sha256:a661d72d58e6ea8a57f7a86e37d86716863ee5e92788398526d58b26a4e4dc02 + # via sphinx +async-timeout==3.0.1 \ + --hash=sha256:0c3c816a028d47f659d6ff5c745cb2acf1f966da1fe5c19c77a70282b25f4c5f \ + --hash=sha256:4291ca197d287d274d0b6cb5d6f8f8f82d434ed288f962539ff18cc9012f9ea3 + # via aiohttp +attrs==21.2.0 \ + --hash=sha256:149e90d6d8ac20db7a955ad60cf0e6881a3f20d37096140088356da6c716b0b1 \ + --hash=sha256:ef6aaac3ca6cd92904cdd0d83f629a15f18053ec84e6432106f7a4d04ae4f5fb + # via + # aiohttp + # pytest +babel==2.9.1 \ + --hash=sha256:ab49e12b91d937cd11f0b67cb259a57ab4ad2b59ac7a3b41d6c06c0ac5b0def9 \ + --hash=sha256:bc0c176f9f6a994582230df350aa6e05ba2ebe4b3ac317eab29d9be5d2768da0 + # via sphinx +certifi==2021.5.30 \ + --hash=sha256:2bbf76fd432960138b3ef6dda3dde0544f27cbf8546c458e60baf371917ba9ee \ + 
--hash=sha256:50b1e4f8446b06f41be7dd6338db18e0990601dce795c2b1686458aa7e8fa7d8 + # via requests +cffi==1.14.6 \ + --hash=sha256:06c54a68935738d206570b20da5ef2b6b6d92b38ef3ec45c5422c0ebaf338d4d \ + --hash=sha256:0c0591bee64e438883b0c92a7bed78f6290d40bf02e54c5bf0978eaf36061771 \ + --hash=sha256:19ca0dbdeda3b2615421d54bef8985f72af6e0c47082a8d26122adac81a95872 \ + --hash=sha256:22b9c3c320171c108e903d61a3723b51e37aaa8c81255b5e7ce102775bd01e2c \ + --hash=sha256:26bb2549b72708c833f5abe62b756176022a7b9a7f689b571e74c8478ead51dc \ + --hash=sha256:33791e8a2dc2953f28b8d8d300dde42dd929ac28f974c4b4c6272cb2955cb762 \ + --hash=sha256:3c8d896becff2fa653dc4438b54a5a25a971d1f4110b32bd3068db3722c80202 \ + --hash=sha256:4373612d59c404baeb7cbd788a18b2b2a8331abcc84c3ba40051fcd18b17a4d5 \ + --hash=sha256:487d63e1454627c8e47dd230025780e91869cfba4c753a74fda196a1f6ad6548 \ + --hash=sha256:48916e459c54c4a70e52745639f1db524542140433599e13911b2f329834276a \ + --hash=sha256:4922cd707b25e623b902c86188aca466d3620892db76c0bdd7b99a3d5e61d35f \ + --hash=sha256:55af55e32ae468e9946f741a5d51f9896da6b9bf0bbdd326843fec05c730eb20 \ + --hash=sha256:57e555a9feb4a8460415f1aac331a2dc833b1115284f7ded7278b54afc5bd218 \ + --hash=sha256:5d4b68e216fc65e9fe4f524c177b54964af043dde734807586cf5435af84045c \ + --hash=sha256:64fda793737bc4037521d4899be780534b9aea552eb673b9833b01f945904c2e \ + --hash=sha256:6d6169cb3c6c2ad50db5b868db6491a790300ade1ed5d1da29289d73bbe40b56 \ + --hash=sha256:7bcac9a2b4fdbed2c16fa5681356d7121ecabf041f18d97ed5b8e0dd38a80224 \ + --hash=sha256:80b06212075346b5546b0417b9f2bf467fea3bfe7352f781ffc05a8ab24ba14a \ + --hash=sha256:818014c754cd3dba7229c0f5884396264d51ffb87ec86e927ef0be140bfdb0d2 \ + --hash=sha256:8eb687582ed7cd8c4bdbff3df6c0da443eb89c3c72e6e5dcdd9c81729712791a \ + --hash=sha256:99f27fefe34c37ba9875f224a8f36e31d744d8083e00f520f133cab79ad5e819 \ + --hash=sha256:9f3e33c28cd39d1b655ed1ba7247133b6f7fc16fa16887b120c0c670e35ce346 \ + 
--hash=sha256:a8661b2ce9694ca01c529bfa204dbb144b275a31685a075ce123f12331be790b \ + --hash=sha256:a9da7010cec5a12193d1af9872a00888f396aba3dc79186604a09ea3ee7c029e \ + --hash=sha256:aedb15f0a5a5949ecb129a82b72b19df97bbbca024081ed2ef88bd5c0a610534 \ + --hash=sha256:b315d709717a99f4b27b59b021e6207c64620790ca3e0bde636a6c7f14618abb \ + --hash=sha256:ba6f2b3f452e150945d58f4badd92310449876c4c954836cfb1803bdd7b422f0 \ + --hash=sha256:c33d18eb6e6bc36f09d793c0dc58b0211fccc6ae5149b808da4a62660678b156 \ + --hash=sha256:c9a875ce9d7fe32887784274dd533c57909b7b1dcadcc128a2ac21331a9765dd \ + --hash=sha256:c9e005e9bd57bc987764c32a1bee4364c44fdc11a3cc20a40b93b444984f2b87 \ + --hash=sha256:d2ad4d668a5c0645d281dcd17aff2be3212bc109b33814bbb15c4939f44181cc \ + --hash=sha256:d950695ae4381ecd856bcaf2b1e866720e4ab9a1498cba61c602e56630ca7195 \ + --hash=sha256:e22dcb48709fc51a7b58a927391b23ab37eb3737a98ac4338e2448bef8559b33 \ + --hash=sha256:e8c6a99be100371dbb046880e7a282152aa5d6127ae01783e37662ef73850d8f \ + --hash=sha256:e9dc245e3ac69c92ee4c167fbdd7428ec1956d4e754223124991ef29eb57a09d \ + --hash=sha256:eb687a11f0a7a1839719edd80f41e459cc5366857ecbed383ff376c4e3cc6afd \ + --hash=sha256:eb9e2a346c5238a30a746893f23a9535e700f8192a68c07c0258e7ece6ff3728 \ + --hash=sha256:ed38b924ce794e505647f7c331b22a693bee1538fdf46b0222c4717b42f744e7 \ + --hash=sha256:f0010c6f9d1a4011e429109fda55a225921e3206e7f62a0c22a35344bfd13cca \ + --hash=sha256:f0c5d1acbfca6ebdd6b1e3eded8d261affb6ddcf2186205518f1428b8569bb99 \ + --hash=sha256:f10afb1004f102c7868ebfe91c28f4a712227fe4cb24974350ace1f90e1febbf \ + --hash=sha256:f174135f5609428cc6e1b9090f9268f5c8935fddb1b25ccb8255a2d50de6789e \ + --hash=sha256:f3ebe6e73c319340830a9b2825d32eb6d8475c1dac020b4f0aa774ee3b898d1c \ + --hash=sha256:f627688813d0a4140153ff532537fbe4afea5a3dffce1f9deb7f91f848a832b5 \ + --hash=sha256:fd4305f86f53dfd8cd3522269ed7fc34856a8ee3709a5e28b2836b2db9d4cd69 + # via + # cryptography + # pynacl +chardet==4.0.0 \ + 
--hash=sha256:0d6f53a15db4120f2b08c94f11e7d93d2c911ee118b6b30a04ec3ee8310179fa \ + --hash=sha256:f864054d66fd9118f2e67044ac8981a54775ec5b67aed0441892edb553d21da5 + # via aiohttp +charset-normalizer==2.0.4 \ + --hash=sha256:0c8911edd15d19223366a194a513099a302055a962bca2cec0f54b8b63175d8b \ + --hash=sha256:f23667ebe1084be45f6ae0538e4a5a865206544097e4e8bbcacf42cd02a348f3 + # via requests colorama==0.4.4 \ --hash=sha256:5941b2b48a20143d2267e95b1c2a7603ce057ee39fd88e7329b0c292aa16869b \ --hash=sha256:9f47eda37229f68eee03b24b9748937c7dc3868f906e8ba69fbcbdd3bc5dc3e2 - # via -r tools/base/requirements.txt + # via -r tools/base/requirements.in coloredlogs==15.0.1 \ --hash=sha256:612ee75c546f53e92e70049c9dbfcc18c935a2b9a53b66085ce9ef6a6e5c0934 \ --hash=sha256:7c991aa71a4577af2f82600d8f8f3a89f936baeaf9b50a9c197da014e5bf16b0 - # via -r tools/base/requirements.txt + # via + # -r tools/base/requirements.in + # envoy.base.runner +coverage==5.5 \ + --hash=sha256:004d1880bed2d97151facef49f08e255a20ceb6f9432df75f4eef018fdd5a78c \ + --hash=sha256:01d84219b5cdbfc8122223b39a954820929497a1cb1422824bb86b07b74594b6 \ + --hash=sha256:040af6c32813fa3eae5305d53f18875bedd079960822ef8ec067a66dd8afcd45 \ + --hash=sha256:06191eb60f8d8a5bc046f3799f8a07a2d7aefb9504b0209aff0b47298333302a \ + --hash=sha256:13034c4409db851670bc9acd836243aeee299949bd5673e11844befcb0149f03 \ + --hash=sha256:13c4ee887eca0f4c5a247b75398d4114c37882658300e153113dafb1d76de529 \ + --hash=sha256:184a47bbe0aa6400ed2d41d8e9ed868b8205046518c52464fde713ea06e3a74a \ + --hash=sha256:18ba8bbede96a2c3dde7b868de9dcbd55670690af0988713f0603f037848418a \ + --hash=sha256:1aa846f56c3d49205c952d8318e76ccc2ae23303351d9270ab220004c580cfe2 \ + --hash=sha256:217658ec7187497e3f3ebd901afdca1af062b42cfe3e0dafea4cced3983739f6 \ + --hash=sha256:24d4a7de75446be83244eabbff746d66b9240ae020ced65d060815fac3423759 \ + --hash=sha256:2910f4d36a6a9b4214bb7038d537f015346f413a975d57ca6b43bf23d6563b53 \ + 
--hash=sha256:2949cad1c5208b8298d5686d5a85b66aae46d73eec2c3e08c817dd3513e5848a \ + --hash=sha256:2a3859cb82dcbda1cfd3e6f71c27081d18aa251d20a17d87d26d4cd216fb0af4 \ + --hash=sha256:2cafbbb3af0733db200c9b5f798d18953b1a304d3f86a938367de1567f4b5bff \ + --hash=sha256:2e0d881ad471768bf6e6c2bf905d183543f10098e3b3640fc029509530091502 \ + --hash=sha256:30c77c1dc9f253283e34c27935fded5015f7d1abe83bc7821680ac444eaf7793 \ + --hash=sha256:3487286bc29a5aa4b93a072e9592f22254291ce96a9fbc5251f566b6b7343cdb \ + --hash=sha256:372da284cfd642d8e08ef606917846fa2ee350f64994bebfbd3afb0040436905 \ + --hash=sha256:41179b8a845742d1eb60449bdb2992196e211341818565abded11cfa90efb821 \ + --hash=sha256:44d654437b8ddd9eee7d1eaee28b7219bec228520ff809af170488fd2fed3e2b \ + --hash=sha256:4a7697d8cb0f27399b0e393c0b90f0f1e40c82023ea4d45d22bce7032a5d7b81 \ + --hash=sha256:51cb9476a3987c8967ebab3f0fe144819781fca264f57f89760037a2ea191cb0 \ + --hash=sha256:52596d3d0e8bdf3af43db3e9ba8dcdaac724ba7b5ca3f6358529d56f7a166f8b \ + --hash=sha256:53194af30d5bad77fcba80e23a1441c71abfb3e01192034f8246e0d8f99528f3 \ + --hash=sha256:5fec2d43a2cc6965edc0bb9e83e1e4b557f76f843a77a2496cbe719583ce8184 \ + --hash=sha256:6c90e11318f0d3c436a42409f2749ee1a115cd8b067d7f14c148f1ce5574d701 \ + --hash=sha256:74d881fc777ebb11c63736622b60cb9e4aee5cace591ce274fb69e582a12a61a \ + --hash=sha256:7501140f755b725495941b43347ba8a2777407fc7f250d4f5a7d2a1050ba8e82 \ + --hash=sha256:796c9c3c79747146ebd278dbe1e5c5c05dd6b10cc3bcb8389dfdf844f3ead638 \ + --hash=sha256:869a64f53488f40fa5b5b9dcb9e9b2962a66a87dab37790f3fcfb5144b996ef5 \ + --hash=sha256:8963a499849a1fc54b35b1c9f162f4108017b2e6db2c46c1bed93a72262ed083 \ + --hash=sha256:8d0a0725ad7c1a0bcd8d1b437e191107d457e2ec1084b9f190630a4fb1af78e6 \ + --hash=sha256:900fbf7759501bc7807fd6638c947d7a831fc9fdf742dc10f02956ff7220fa90 \ + --hash=sha256:92b017ce34b68a7d67bd6d117e6d443a9bf63a2ecf8567bb3d8c6c7bc5014465 \ + --hash=sha256:970284a88b99673ccb2e4e334cfb38a10aab7cd44f7457564d11898a74b62d0a \ + 
--hash=sha256:972c85d205b51e30e59525694670de6a8a89691186012535f9d7dbaa230e42c3 \ + --hash=sha256:9a1ef3b66e38ef8618ce5fdc7bea3d9f45f3624e2a66295eea5e57966c85909e \ + --hash=sha256:af0e781009aaf59e25c5a678122391cb0f345ac0ec272c7961dc5455e1c40066 \ + --hash=sha256:b6d534e4b2ab35c9f93f46229363e17f63c53ad01330df9f2d6bd1187e5eaacf \ + --hash=sha256:b7895207b4c843c76a25ab8c1e866261bcfe27bfaa20c192de5190121770672b \ + --hash=sha256:c0891a6a97b09c1f3e073a890514d5012eb256845c451bd48f7968ef939bf4ae \ + --hash=sha256:c2723d347ab06e7ddad1a58b2a821218239249a9e4365eaff6649d31180c1669 \ + --hash=sha256:d1f8bf7b90ba55699b3a5e44930e93ff0189aa27186e96071fac7dd0d06a1873 \ + --hash=sha256:d1f9ce122f83b2305592c11d64f181b87153fc2c2bbd3bb4a3dde8303cfb1a6b \ + --hash=sha256:d314ed732c25d29775e84a960c3c60808b682c08d86602ec2c3008e1202e3bb6 \ + --hash=sha256:d636598c8305e1f90b439dbf4f66437de4a5e3c31fdf47ad29542478c8508bbb \ + --hash=sha256:deee1077aae10d8fa88cb02c845cfba9b62c55e1183f52f6ae6a2df6a2187160 \ + --hash=sha256:ebe78fe9a0e874362175b02371bdfbee64d8edc42a044253ddf4ee7d3c15212c \ + --hash=sha256:f030f8873312a16414c0d8e1a1ddff2d3235655a2174e3648b4fa66b3f2f1079 \ + --hash=sha256:f0b278ce10936db1a37e6954e15a3730bea96a0997c26d7fee88e6c396c2086d \ + --hash=sha256:f11642dddbb0253cc8853254301b51390ba0081750a8ac03f20ea8103f0c56b6 + # via + # -r tools/base/requirements.in + # pytest-cov +cryptography==3.4.8 \ + --hash=sha256:0a7dcbcd3f1913f664aca35d47c1331fce738d44ec34b7be8b9d332151b0b01e \ + --hash=sha256:1eb7bb0df6f6f583dd8e054689def236255161ebbcf62b226454ab9ec663746b \ + --hash=sha256:21ca464b3a4b8d8e86ba0ee5045e103a1fcfac3b39319727bc0fc58c09c6aff7 \ + --hash=sha256:34dae04a0dce5730d8eb7894eab617d8a70d0c97da76b905de9efb7128ad7085 \ + --hash=sha256:3520667fda779eb788ea00080124875be18f2d8f0848ec00733c0ec3bb8219fc \ + --hash=sha256:3fa3a7ccf96e826affdf1a0a9432be74dc73423125c8f96a909e3835a5ef194a \ + --hash=sha256:5b0fbfae7ff7febdb74b574055c7466da334a5371f253732d7e2e7525d570498 \ + 
--hash=sha256:8695456444f277af73a4877db9fc979849cd3ee74c198d04fc0776ebc3db52b9 \ + --hash=sha256:94cc5ed4ceaefcbe5bf38c8fba6a21fc1d365bb8fb826ea1688e3370b2e24a1c \ + --hash=sha256:94fff993ee9bc1b2440d3b7243d488c6a3d9724cc2b09cdb297f6a886d040ef7 \ + --hash=sha256:9965c46c674ba8cc572bc09a03f4c649292ee73e1b683adb1ce81e82e9a6a0fb \ + --hash=sha256:a00cf305f07b26c351d8d4e1af84ad7501eca8a342dedf24a7acb0e7b7406e14 \ + --hash=sha256:a305600e7a6b7b855cd798e00278161b681ad6e9b7eca94c721d5f588ab212af \ + --hash=sha256:cd65b60cfe004790c795cc35f272e41a3df4631e2fb6b35aa7ac6ef2859d554e \ + --hash=sha256:d2a6e5ef66503da51d2110edf6c403dc6b494cc0082f85db12f54e9c5d4c3ec5 \ + --hash=sha256:d9ec0e67a14f9d1d48dd87a2531009a9b251c02ea42851c060b25c782516ff06 \ + --hash=sha256:f44d141b8c4ea5eb4dbc9b3ad992d45580c1d22bf5e24363f2fbf50c2d7ae8a7 + # via pyjwt +deprecated==1.2.13 \ + --hash=sha256:43ac5335da90c31c24ba028af536a91d41d53f9e6901ddb021bcc572ce44e38d \ + --hash=sha256:64756e3e14c8c5eea9795d93c524551432a0be75629f8f29e67ab8caf076c76d + # via pygithub +docutils==0.16 \ + --hash=sha256:0c5b78adfbf7762415433f5515cd5c9e762339e23369dbe8000d84a4bf4ab3af \ + --hash=sha256:c2de3a60e9e7d07be26b7f2b00ca0309c207e06c100f9cc2a94931fc75a478fc + # via + # sphinx + # sphinx-rtd-theme + # sphinx-tabs +envoy.abstract.command==0.0.3 \ + --hash=sha256:4b7b15c91bea1f2eb7c2e8e35f95cd9437e1c8f151adc093bf7858fc85d48221 + # via + # envoy.base.runner + # envoy.distribution.release +envoy.base.checker==0.0.2 \ + --hash=sha256:2ac81efa20fd01fff644ff7dc7fadeac1c3e4dbb6210881ac7a7919ec0e048d8 + # via + # envoy.distribution.distrotest + # envoy.distribution.verify +envoy.base.runner==0.0.4 \ + --hash=sha256:4eeb2b661f1f0c402df4425852be554a8a83ef5d338bfae69ddcb9b90755379e + # via + # envoy.base.checker + # envoy.distribution.release + # envoy.github.abstract + # envoy.gpg.sign +envoy.base.utils==0.0.8 \ + --hash=sha256:b82e18ab0535207b7136d6980239c9350f7113fa5da7dda781bcb6ad1e05b3ab + # via + # -r 
tools/base/requirements.in + # envoy.distribution.distrotest + # envoy.github.release + # envoy.gpg.sign +envoy.distribution.distrotest==0.0.3 \ + --hash=sha256:c094adbd959eb1336f93afc00aedb7ee4e68e8252e2365be816a6f9ede8a3de7 + # via envoy.distribution.verify +envoy.distribution.release==0.0.4 \ + --hash=sha256:41037e0488f0593ce5173739fe0cd1b45a4775f5a47738b85d9d04024ca241a2 + # via -r tools/base/requirements.in +envoy.distribution.verify==0.0.2 \ + --hash=sha256:ae59134085de50203edf51c243dbf3301cbe5550db29f0ec6f9ea1c3b82fee1c + # via -r tools/base/requirements.in +envoy.docker.utils==0.0.2 \ + --hash=sha256:a12cb57f0b6e204d646cbf94f927b3a8f5a27ed15f60d0576176584ec16a4b76 + # via envoy.distribution.distrotest +envoy.github.abstract==0.0.16 \ + --hash=sha256:badf04104492fb6b37ba2163f2b225132ed04aba680beb218e7c7d918564f8ee + # via + # envoy.distribution.release + # envoy.github.release +envoy.github.release==0.0.8 \ + --hash=sha256:fbc4354030137eb565b8c4d679965e4ef60b01de0c09310441836e592ca0cd19 + # via envoy.distribution.release +envoy.gpg.identity==0.0.2 \ + --hash=sha256:7d32ff9133e00b9974b4dabd2512b4872b091b8c5069d0112240dcc1a56bc406 + # via envoy.gpg.sign +envoy.gpg.sign==0.0.3 \ + --hash=sha256:31667931f5d7ff05fd809b89748f277511486311c777652af4cb8889bd641049 + # via -r tools/base/requirements.in +flake8-polyfill==1.0.2 \ + --hash=sha256:12be6a34ee3ab795b19ca73505e7b55826d5f6ad7230d31b18e106400169b9e9 \ + --hash=sha256:e44b087597f6da52ec6393a709e7108b2905317d0c0b744cdca6208e670d8eda + # via pep8-naming +flake8==3.9.2 \ + --hash=sha256:07528381786f2a6237b061f6e96610a4167b226cb926e2aa2b6b1d78057c576b \ + --hash=sha256:bf8fd333346d844f616e8d47905ef3a3384edae6b4e9beb0c5101e25e3110907 + # via + # -r tools/base/requirements.in + # flake8-polyfill + # pep8-naming frozendict==2.0.6 \ --hash=sha256:3f00de72805cf4c9e81b334f3f04809278b967d2fed84552313a0fcce511beb1 \ --hash=sha256:5d3f75832c35d4df041f0e19c268964cbef29c1eb34cd3517cf883f1c2d089b9 - # via -r 
tools/base/requirements.txt + # via + # -r tools/base/requirements.in + # envoy.base.runner +gidgethub==5.0.1 \ + --hash=sha256:3efbd6998600254ec7a2869318bd3ffde38edc3a0d37be0c14bc46b45947b682 \ + --hash=sha256:67245e93eb0918b37df038148af675df43b62e832c529d7f859f6b90d9f3e70d + # via + # envoy.github.abstract + # envoy.github.release +gitdb==4.0.7 \ + --hash=sha256:6c4cc71933456991da20917998acbe6cf4fb41eeaab7d6d67fbc05ecd4c865b0 \ + --hash=sha256:96bf5c08b157a666fec41129e6d327235284cca4c81e92109260f353ba138005 + # via gitpython +gitpython==3.1.18 \ + --hash=sha256:b838a895977b45ab6f0cc926a9045c8d1c44e2b653c1fcc39fe91f42c6e8f05b \ + --hash=sha256:fce760879cd2aebd2991b3542876dc5c4a909b30c9d69dfc488e504a8db37ee8 + # via -r tools/base/requirements.in humanfriendly==9.2 \ --hash=sha256:332da98c24cc150efcc91b5508b19115209272bfdf4b0764a56795932f854271 \ --hash=sha256:f7dba53ac7935fd0b4a2fc9a29e316ddd9ea135fb3052d3d0279d10c18ff9c48 + # via coloredlogs +idna==3.2 \ + --hash=sha256:14475042e284991034cb48e06f6851428fb14c4dc953acd9be9a5e95c7b6dd7a \ + --hash=sha256:467fbad99067910785144ce333826c71fb0e63a425657295239737f7ecd125f3 + # via + # requests + # yarl +imagesize==1.2.0 \ + --hash=sha256:6965f19a6a2039c7d48bca7dba2473069ff854c36ae6f19d2cde309d998228a1 \ + --hash=sha256:b1f6b5a4eab1f73479a50fb79fcf729514a900c341d8503d62a62dbc4127a2b1 + # via sphinx +iniconfig==1.1.1 \ + --hash=sha256:011e24c64b7f47f6ebd835bb12a743f2fbe9a26d4cecaa7f53bc4f35ee9da8b3 \ + --hash=sha256:bc3af051d7d14b2ee5ef9969666def0cd1a000e121eaea580d4a313df4b37f32 + # via pytest +jinja2==3.0.1 \ + --hash=sha256:1f06f2da51e7b56b8f238affdd6b4e2c61e39598a378cc49345bc1bd42a978a4 \ + --hash=sha256:703f484b47a6af502e743c9122595cc812b0271f661722403114f71a79d0f5a4 + # via + # -r tools/base/requirements.in + # sphinx +markupsafe==2.0.1 \ + --hash=sha256:01a9b8ea66f1658938f65b93a85ebe8bc016e6769611be228d797c9d998dd298 \ + --hash=sha256:023cb26ec21ece8dc3907c0e8320058b2e0cb3c55cf9564da612bc325bed5e64 \ + 
--hash=sha256:0446679737af14f45767963a1a9ef7620189912317d095f2d9ffa183a4d25d2b \ + --hash=sha256:0717a7390a68be14b8c793ba258e075c6f4ca819f15edfc2a3a027c823718567 \ + --hash=sha256:0955295dd5eec6cb6cc2fe1698f4c6d84af2e92de33fbcac4111913cd100a6ff \ + --hash=sha256:0d4b31cc67ab36e3392bbf3862cfbadac3db12bdd8b02a2731f509ed5b829724 \ + --hash=sha256:10f82115e21dc0dfec9ab5c0223652f7197feb168c940f3ef61563fc2d6beb74 \ + --hash=sha256:168cd0a3642de83558a5153c8bd34f175a9a6e7f6dc6384b9655d2697312a646 \ + --hash=sha256:1d609f577dc6e1aa17d746f8bd3c31aa4d258f4070d61b2aa5c4166c1539de35 \ + --hash=sha256:1f2ade76b9903f39aa442b4aadd2177decb66525062db244b35d71d0ee8599b6 \ + --hash=sha256:2a7d351cbd8cfeb19ca00de495e224dea7e7d919659c2841bbb7f420ad03e2d6 \ + --hash=sha256:2d7d807855b419fc2ed3e631034685db6079889a1f01d5d9dac950f764da3dad \ + --hash=sha256:2ef54abee730b502252bcdf31b10dacb0a416229b72c18b19e24a4509f273d26 \ + --hash=sha256:36bc903cbb393720fad60fc28c10de6acf10dc6cc883f3e24ee4012371399a38 \ + --hash=sha256:37205cac2a79194e3750b0af2a5720d95f786a55ce7df90c3af697bfa100eaac \ + --hash=sha256:3c112550557578c26af18a1ccc9e090bfe03832ae994343cfdacd287db6a6ae7 \ + --hash=sha256:3dd007d54ee88b46be476e293f48c85048603f5f516008bee124ddd891398ed6 \ + --hash=sha256:47ab1e7b91c098ab893b828deafa1203de86d0bc6ab587b160f78fe6c4011f75 \ + --hash=sha256:49e3ceeabbfb9d66c3aef5af3a60cc43b85c33df25ce03d0031a608b0a8b2e3f \ + --hash=sha256:4efca8f86c54b22348a5467704e3fec767b2db12fc39c6d963168ab1d3fc9135 \ + --hash=sha256:53edb4da6925ad13c07b6d26c2a852bd81e364f95301c66e930ab2aef5b5ddd8 \ + --hash=sha256:5855f8438a7d1d458206a2466bf82b0f104a3724bf96a1c781ab731e4201731a \ + --hash=sha256:594c67807fb16238b30c44bdf74f36c02cdf22d1c8cda91ef8a0ed8dabf5620a \ + --hash=sha256:5bb28c636d87e840583ee3adeb78172efc47c8b26127267f54a9c0ec251d41a9 \ + --hash=sha256:60bf42e36abfaf9aff1f50f52644b336d4f0a3fd6d8a60ca0d054ac9f713a864 \ + --hash=sha256:611d1ad9a4288cf3e3c16014564df047fe08410e628f89805e475368bd304914 \ + 
--hash=sha256:6557b31b5e2c9ddf0de32a691f2312a32f77cd7681d8af66c2692efdbef84c18 \ + --hash=sha256:693ce3f9e70a6cf7d2fb9e6c9d8b204b6b39897a2c4a1aa65728d5ac97dcc1d8 \ + --hash=sha256:6a7fae0dd14cf60ad5ff42baa2e95727c3d81ded453457771d02b7d2b3f9c0c2 \ + --hash=sha256:6c4ca60fa24e85fe25b912b01e62cb969d69a23a5d5867682dd3e80b5b02581d \ + --hash=sha256:6fcf051089389abe060c9cd7caa212c707e58153afa2c649f00346ce6d260f1b \ + --hash=sha256:7d91275b0245b1da4d4cfa07e0faedd5b0812efc15b702576d103293e252af1b \ + --hash=sha256:905fec760bd2fa1388bb5b489ee8ee5f7291d692638ea5f67982d968366bef9f \ + --hash=sha256:97383d78eb34da7e1fa37dd273c20ad4320929af65d156e35a5e2d89566d9dfb \ + --hash=sha256:984d76483eb32f1bcb536dc27e4ad56bba4baa70be32fa87152832cdd9db0833 \ + --hash=sha256:99df47edb6bda1249d3e80fdabb1dab8c08ef3975f69aed437cb69d0a5de1e28 \ + --hash=sha256:a30e67a65b53ea0a5e62fe23682cfe22712e01f453b95233b25502f7c61cb415 \ + --hash=sha256:ab3ef638ace319fa26553db0624c4699e31a28bb2a835c5faca8f8acf6a5a902 \ + --hash=sha256:add36cb2dbb8b736611303cd3bfcee00afd96471b09cda130da3581cbdc56a6d \ + --hash=sha256:b2f4bf27480f5e5e8ce285a8c8fd176c0b03e93dcc6646477d4630e83440c6a9 \ + --hash=sha256:b7f2d075102dc8c794cbde1947378051c4e5180d52d276987b8d28a3bd58c17d \ + --hash=sha256:baa1a4e8f868845af802979fcdbf0bb11f94f1cb7ced4c4b8a351bb60d108145 \ + --hash=sha256:be98f628055368795d818ebf93da628541e10b75b41c559fdf36d104c5787066 \ + --hash=sha256:bf5d821ffabf0ef3533c39c518f3357b171a1651c1ff6827325e4489b0e46c3c \ + --hash=sha256:c47adbc92fc1bb2b3274c4b3a43ae0e4573d9fbff4f54cd484555edbf030baf1 \ + --hash=sha256:d7f9850398e85aba693bb640262d3611788b1f29a79f0c93c565694658f4071f \ + --hash=sha256:d8446c54dc28c01e5a2dbac5a25f071f6653e6e40f3a8818e8b45d790fe6ef53 \ + --hash=sha256:e0f138900af21926a02425cf736db95be9f4af72ba1bb21453432a07f6082134 \ + --hash=sha256:e9936f0b261d4df76ad22f8fee3ae83b60d7c3e871292cd42f40b81b70afae85 \ + --hash=sha256:f5653a225f31e113b152e56f154ccbe59eeb1c7487b39b9d9f9cdb58e6c79dc5 \ + 
--hash=sha256:f826e31d18b516f653fe296d967d700fddad5901ae07c622bb3705955e1faa94 \ + --hash=sha256:f8ba0e8349a38d3001fae7eadded3f6606f0da5d748ee53cc1dab1d6527b9509 \ + --hash=sha256:f9081981fe268bd86831e5c75f7de206ef275defcb82bc70740ae6dc507aee51 \ + --hash=sha256:fa130dd50c57d53368c9d59395cb5526eda596d3ffe36666cd81a44d56e48872 + # via jinja2 +mccabe==0.6.1 \ + --hash=sha256:ab8a6258860da4b6677da4bd2fe5dc2c659cff31b3ee4f7f5d64e79735b80d42 \ + --hash=sha256:dd8d182285a0fe56bace7f45b5e7d1a6ebcbf524e8f3bd87eb0f125271b8831f + # via flake8 +multidict==5.1.0 \ + --hash=sha256:018132dbd8688c7a69ad89c4a3f39ea2f9f33302ebe567a879da8f4ca73f0d0a \ + --hash=sha256:051012ccee979b2b06be928a6150d237aec75dd6bf2d1eeeb190baf2b05abc93 \ + --hash=sha256:05c20b68e512166fddba59a918773ba002fdd77800cad9f55b59790030bab632 \ + --hash=sha256:07b42215124aedecc6083f1ce6b7e5ec5b50047afa701f3442054373a6deb656 \ + --hash=sha256:0e3c84e6c67eba89c2dbcee08504ba8644ab4284863452450520dad8f1e89b79 \ + --hash=sha256:0e929169f9c090dae0646a011c8b058e5e5fb391466016b39d21745b48817fd7 \ + --hash=sha256:1ab820665e67373de5802acae069a6a05567ae234ddb129f31d290fc3d1aa56d \ + --hash=sha256:25b4e5f22d3a37ddf3effc0710ba692cfc792c2b9edfb9c05aefe823256e84d5 \ + --hash=sha256:2e68965192c4ea61fff1b81c14ff712fc7dc15d2bd120602e4a3494ea6584224 \ + --hash=sha256:2f1a132f1c88724674271d636e6b7351477c27722f2ed789f719f9e3545a3d26 \ + --hash=sha256:37e5438e1c78931df5d3c0c78ae049092877e5e9c02dd1ff5abb9cf27a5914ea \ + --hash=sha256:3a041b76d13706b7fff23b9fc83117c7b8fe8d5fe9e6be45eee72b9baa75f348 \ + --hash=sha256:3a4f32116f8f72ecf2a29dabfb27b23ab7cdc0ba807e8459e59a93a9be9506f6 \ + --hash=sha256:46c73e09ad374a6d876c599f2328161bcd95e280f84d2060cf57991dec5cfe76 \ + --hash=sha256:46dd362c2f045095c920162e9307de5ffd0a1bfbba0a6e990b344366f55a30c1 \ + --hash=sha256:4b186eb7d6ae7c06eb4392411189469e6a820da81447f46c0072a41c748ab73f \ + --hash=sha256:54fd1e83a184e19c598d5e70ba508196fd0bbdd676ce159feb412a4a6664f952 \ + 
--hash=sha256:585fd452dd7782130d112f7ddf3473ffdd521414674c33876187e101b588738a \ + --hash=sha256:5cf3443199b83ed9e955f511b5b241fd3ae004e3cb81c58ec10f4fe47c7dce37 \ + --hash=sha256:6a4d5ce640e37b0efcc8441caeea8f43a06addace2335bd11151bc02d2ee31f9 \ + --hash=sha256:7df80d07818b385f3129180369079bd6934cf70469f99daaebfac89dca288359 \ + --hash=sha256:806068d4f86cb06af37cd65821554f98240a19ce646d3cd24e1c33587f313eb8 \ + --hash=sha256:830f57206cc96ed0ccf68304141fec9481a096c4d2e2831f311bde1c404401da \ + --hash=sha256:929006d3c2d923788ba153ad0de8ed2e5ed39fdbe8e7be21e2f22ed06c6783d3 \ + --hash=sha256:9436dc58c123f07b230383083855593550c4d301d2532045a17ccf6eca505f6d \ + --hash=sha256:9dd6e9b1a913d096ac95d0399bd737e00f2af1e1594a787e00f7975778c8b2bf \ + --hash=sha256:ace010325c787c378afd7f7c1ac66b26313b3344628652eacd149bdd23c68841 \ + --hash=sha256:b47a43177a5e65b771b80db71e7be76c0ba23cc8aa73eeeb089ed5219cdbe27d \ + --hash=sha256:b797515be8743b771aa868f83563f789bbd4b236659ba52243b735d80b29ed93 \ + --hash=sha256:b7993704f1a4b204e71debe6095150d43b2ee6150fa4f44d6d966ec356a8d61f \ + --hash=sha256:d5c65bdf4484872c4af3150aeebe101ba560dcfb34488d9a8ff8dbcd21079647 \ + --hash=sha256:d81eddcb12d608cc08081fa88d046c78afb1bf8107e6feab5d43503fea74a635 \ + --hash=sha256:dc862056f76443a0db4509116c5cd480fe1b6a2d45512a653f9a855cc0517456 \ + --hash=sha256:ecc771ab628ea281517e24fd2c52e8f31c41e66652d07599ad8818abaad38cda \ + --hash=sha256:f200755768dc19c6f4e2b672421e0ebb3dd54c38d5a4f262b872d8cfcc9e93b5 \ + --hash=sha256:f21756997ad8ef815d8ef3d34edd98804ab5ea337feedcd62fb52d22bf531281 \ + --hash=sha256:fc13a9524bc18b6fb6e0dbec3533ba0496bbed167c56d0aabefd965584557d80 + # via + # aiohttp + # yarl +packaging==21.0 \ + --hash=sha256:7dc96269f53a4ccec5c0670940a4281106dd0bb343f47b7471f779df49c2fbe7 \ + --hash=sha256:c86254f9220d55e31cc94d69bade760f0847da8000def4dfe1c6b872fd14ff14 # via - # -r tools/base/requirements.txt - # coloredlogs + # envoy.github.release + # pytest + # sphinx +pep8-naming==0.12.1 \ + 
--hash=sha256:4a8daeaeb33cfcde779309fc0c9c0a68a3bbe2ad8a8308b763c5068f86eb9f37 \ + --hash=sha256:bb2455947757d162aa4cad55dba4ce029005cd1692f2899a21d51d8630ca7841 + # via -r tools/base/requirements.in +pluggy==1.0.0 \ + --hash=sha256:4224373bacce55f955a878bf9cfa763c1e360858e330072059e10bad68531159 \ + --hash=sha256:74134bbf457f031a36d68416e1509f34bd5ccc019f0bcc952c7b909d06b37bd3 + # via pytest +py==1.10.0 \ + --hash=sha256:21b81bda15b66ef5e1a777a21c4dcd9c20ad3efd0b3f817e7a809035269e1bd3 \ + --hash=sha256:3b80836aa6d1feeaa108e046da6423ab8f6ceda6468545ae8d02d9d58d18818a + # via pytest +pycodestyle==2.7.0 \ + --hash=sha256:514f76d918fcc0b55c6680472f0a37970994e07bbb80725808c17089be302068 \ + --hash=sha256:c389c1d06bf7904078ca03399a4816f974a1d590090fecea0c63ec26ebaf1cef + # via flake8 +pycparser==2.20 \ + --hash=sha256:2d475327684562c3a96cc71adf7dc8c4f0565175cf86b6d7a404ff4c771f15f0 \ + --hash=sha256:7582ad22678f0fcd81102833f60ef8d0e57288b6b5fb00323d101be910e35705 + # via cffi +pyflakes==2.3.1 \ + --hash=sha256:7893783d01b8a89811dd72d7dfd4d84ff098e5eed95cfa8905b22bbffe52efc3 \ + --hash=sha256:f5bc8ecabc05bb9d291eb5203d6810b49040f6ff446a756326104746cc00c1db + # via flake8 +pygithub==1.55 \ + --hash=sha256:1bbfff9372047ff3f21d5cd8e07720f3dbfdaf6462fcaed9d815f528f1ba7283 \ + --hash=sha256:2caf0054ea079b71e539741ae56c5a95e073b81fa472ce222e81667381b9601b + # via -r tools/base/requirements.in +pygments==2.10.0 \ + --hash=sha256:b8e67fe6af78f492b3c4b3e2970c0624cbf08beb1e493b2c99b9fa1b67a20380 \ + --hash=sha256:f398865f7eb6874156579fdf36bc840a03cab64d1cde9e93d68f46a425ec52c6 + # via + # sphinx + # sphinx-tabs +pyjwt[crypto]==2.1.0 \ + --hash=sha256:934d73fbba91b0483d3857d1aff50e96b2a892384ee2c17417ed3203f173fca1 \ + --hash=sha256:fba44e7898bbca160a2b2b501f492824fc8382485d3a6f11ba5d0c1937ce6130 + # via + # gidgethub + # pygithub +pynacl==1.4.0 \ + --hash=sha256:06cbb4d9b2c4bd3c8dc0d267416aaed79906e7b33f114ddbf0911969794b1cc4 \ + 
--hash=sha256:11335f09060af52c97137d4ac54285bcb7df0cef29014a1a4efe64ac065434c4 \ + --hash=sha256:2fe0fc5a2480361dcaf4e6e7cea00e078fcda07ba45f811b167e3f99e8cff574 \ + --hash=sha256:30f9b96db44e09b3304f9ea95079b1b7316b2b4f3744fe3aaecccd95d547063d \ + --hash=sha256:4e10569f8cbed81cb7526ae137049759d2a8d57726d52c1a000a3ce366779634 \ + --hash=sha256:511d269ee845037b95c9781aa702f90ccc36036f95d0f31373a6a79bd8242e25 \ + --hash=sha256:537a7ccbea22905a0ab36ea58577b39d1fa9b1884869d173b5cf111f006f689f \ + --hash=sha256:54e9a2c849c742006516ad56a88f5c74bf2ce92c9f67435187c3c5953b346505 \ + --hash=sha256:757250ddb3bff1eecd7e41e65f7f833a8405fede0194319f87899690624f2122 \ + --hash=sha256:7757ae33dae81c300487591c68790dfb5145c7d03324000433d9a2c141f82af7 \ + --hash=sha256:7c6092102219f59ff29788860ccb021e80fffd953920c4a8653889c029b2d420 \ + --hash=sha256:8122ba5f2a2169ca5da936b2e5a511740ffb73979381b4229d9188f6dcb22f1f \ + --hash=sha256:9c4a7ea4fb81536c1b1f5cc44d54a296f96ae78c1ebd2311bd0b60be45a48d96 \ + --hash=sha256:c914f78da4953b33d4685e3cdc7ce63401247a21425c16a39760e282075ac4a6 \ + --hash=sha256:cd401ccbc2a249a47a3a1724c2918fcd04be1f7b54eb2a5a71ff915db0ac51c6 \ + --hash=sha256:d452a6746f0a7e11121e64625109bc4468fc3100452817001dbe018bb8b08514 \ + --hash=sha256:ea6841bc3a76fa4942ce00f3bda7d436fda21e2d91602b9e21b7ca9ecab8f3ff \ + --hash=sha256:f8851ab9041756003119368c1e6cd0b9c631f46d686b3904b18c0139f4419f80 + # via pygithub +pyparsing==2.4.7 \ + --hash=sha256:c203ec8783bf771a155b207279b9bccb8dea02d8f0c9e5f8ead507bc3246ecc1 \ + --hash=sha256:ef9d7589ef3c200abe66653d3f1ab1033c3c419ae9b9bdb1240a85b024efc88b + # via packaging +pyreadline==2.1 \ + --hash=sha256:4530592fc2e85b25b1a9f79664433da09237c1a270e4d78ea5aa3a2c7229e2d1 + # via -r tools/base/requirements.in +pytest-asyncio==0.15.1 \ + --hash=sha256:2564ceb9612bbd560d19ca4b41347b54e7835c2f792c504f698e05395ed63f6f \ + --hash=sha256:3042bcdf1c5d978f6b74d96a151c4cfb9dcece65006198389ccd7e6c60eb1eea + # via -r tools/base/requirements.in 
+pytest-cov==2.12.1 \ + --hash=sha256:261bb9e47e65bd099c89c3edf92972865210c36813f80ede5277dceb77a4a62a \ + --hash=sha256:261ceeb8c227b726249b376b8526b600f38667ee314f910353fa318caa01f4d7 + # via -r tools/base/requirements.in +pytest-patches==0.0.3 \ + --hash=sha256:6f8cdc8641c708c4812f58ae48d410f373a6fd16cd6cc4dc4d3fb8951df9c92a + # via -r tools/base/requirements.in +pytest==6.2.5 \ + --hash=sha256:131b36680866a76e6781d13f101efb86cf674ebb9762eb70d3082b6f29889e89 \ + --hash=sha256:7310f8d27bc79ced999e760ca304d69f6ba6c6649c0b60fb0e04a4a77cacc134 + # via + # -r tools/base/requirements.in + # pytest-asyncio + # pytest-cov + # pytest-patches +python-gnupg==0.4.7 \ + --hash=sha256:2061f56b1942c29b92727bf9aecbd3cea3893acc9cccbdc7eb4604285efe4ac7 \ + --hash=sha256:3ff5b1bf5e397de6e1fe41a7c0f403dad4e242ac92b345f440eaecfb72a7ebae + # via envoy.gpg.identity +pytz==2021.1 \ + --hash=sha256:83a4a90894bf38e243cf052c8b58f381bfe9a7a483f6a9cab140bc7f702ac4da \ + --hash=sha256:eb10ce3e7736052ed3623d49975ce333bcd712c7bb19a58b9e2089d4057d0798 + # via babel pyyaml==5.4.1 \ --hash=sha256:08682f6b72c722394747bddaf0aa62277e02557c0fd1c42cb853016a38f8dedf \ --hash=sha256:0f5f5786c0e09baddcd8b4b45f20a7b5d61a7e7e99846e3c799b05c7c53fa696 \ @@ -58,14 +592,169 @@ pyyaml==5.4.1 \ --hash=sha256:fd7f6999a8070df521b6384004ef42833b9bd62cfee11a09bda1079b4b704247 \ --hash=sha256:fdc842473cd33f45ff6bce46aea678a54e3d21f1b61a7750ce3c498eedfe25d6 \ --hash=sha256:fe69978f3f768926cfa37b867e3843918e012cf83f680806599ddce33c2c68b0 - # via -r tools/base/requirements.txt + # via + # -r tools/base/requirements.in + # envoy.base.utils +requests==2.26.0 \ + --hash=sha256:6c1246513ecd5ecd4528a0906f910e8f0f9c6b8ec72030dc9fd154dc1a6efd24 \ + --hash=sha256:b8aa58f8cf793ffd8782d3d8cb19e66ef36f7aba4353eec859e74678b01b07a7 + # via + # pygithub + # sphinx +six==1.16.0 \ + --hash=sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926 \ + 
--hash=sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254 + # via + # pynacl + # sphinxcontrib-httpdomain +smmap==4.0.0 \ + --hash=sha256:7e65386bd122d45405ddf795637b7f7d2b532e7e401d46bbe3fb49b9986d5182 \ + --hash=sha256:a9a7479e4c572e2e775c404dcd3080c8dc49f39918c2cf74913d30c4c478e3c2 + # via gitdb +snowballstemmer==2.1.0 \ + --hash=sha256:b51b447bea85f9968c13b650126a888aabd4cb4463fca868ec596826325dedc2 \ + --hash=sha256:e997baa4f2e9139951b6f4c631bad912dfd3c792467e2f03d7239464af90e914 + # via sphinx +sphinx-copybutton==0.4.0 \ + --hash=sha256:4340d33c169dac6dd82dce2c83333412aa786a42dd01a81a8decac3b130dc8b0 \ + --hash=sha256:8daed13a87afd5013c3a9af3575cc4d5bec052075ccd3db243f895c07a689386 + # via -r tools/base/requirements.in +sphinx-rtd-theme==0.5.2 \ + --hash=sha256:32bd3b5d13dc8186d7a42fc816a23d32e83a4827d7d9882948e7b837c232da5a \ + --hash=sha256:4a05bdbe8b1446d77a01e20a23ebc6777c74f43237035e76be89699308987d6f + # via -r tools/base/requirements.in +sphinx-tabs==3.2.0 \ + --hash=sha256:1e1b1846c80137bd81a78e4a69b02664b98b1e1da361beb30600b939dfc75065 \ + --hash=sha256:33137914ed9b276e6a686d7a337310ee77b1dae316fdcbce60476913a152e0a4 + # via -r tools/base/requirements.in +sphinx==4.1.2 \ + --hash=sha256:3092d929cd807926d846018f2ace47ba2f3b671b309c7a89cd3306e80c826b13 \ + --hash=sha256:46d52c6cee13fec44744b8c01ed692c18a640f6910a725cbb938bc36e8d64544 + # via + # -r tools/base/requirements.in + # sphinx-copybutton + # sphinx-rtd-theme + # sphinx-tabs + # sphinxcontrib-httpdomain + # sphinxext-rediraffe +sphinxcontrib-applehelp==1.0.2 \ + --hash=sha256:806111e5e962be97c29ec4c1e7fe277bfd19e9652fb1a4392105b43e01af885a \ + --hash=sha256:a072735ec80e7675e3f432fcae8610ecf509c5f1869d17e2eecff44389cdbc58 + # via sphinx +sphinxcontrib-devhelp==1.0.2 \ + --hash=sha256:8165223f9a335cc1af7ffe1ed31d2871f325254c0423bc0c4c7cd1c1e4734a2e \ + --hash=sha256:ff7f1afa7b9642e7060379360a67e9c41e8f3121f2ce9164266f61b9f4b338e4 + # via sphinx +sphinxcontrib-htmlhelp==2.0.0 
\ + --hash=sha256:d412243dfb797ae3ec2b59eca0e52dac12e75a241bf0e4eb861e450d06c6ed07 \ + --hash=sha256:f5f8bb2d0d629f398bf47d0d69c07bc13b65f75a81ad9e2f71a63d4b7a2f6db2 + # via sphinx +sphinxcontrib-httpdomain==1.7.0 \ + --hash=sha256:1fb5375007d70bf180cdd1c79e741082be7aa2d37ba99efe561e1c2e3f38191e \ + --hash=sha256:ac40b4fba58c76b073b03931c7b8ead611066a6aebccafb34dc19694f4eb6335 + # via -r tools/base/requirements.in +sphinxcontrib-jsmath==1.0.1 \ + --hash=sha256:2ec2eaebfb78f3f2078e73666b1415417a116cc848b72e5172e596c871103178 \ + --hash=sha256:a9925e4a4587247ed2191a22df5f6970656cb8ca2bd6284309578f2153e0c4b8 + # via sphinx +sphinxcontrib-qthelp==1.0.3 \ + --hash=sha256:4c33767ee058b70dba89a6fc5c1892c0d57a54be67ddd3e7875a18d14cba5a72 \ + --hash=sha256:bd9fc24bcb748a8d51fd4ecaade681350aa63009a347a8c14e637895444dfab6 + # via sphinx +sphinxcontrib-serializinghtml==1.1.5 \ + --hash=sha256:352a9a00ae864471d3a7ead8d7d79f5fc0b57e8b3f95e9867eb9eb28999b92fd \ + --hash=sha256:aa5f6de5dfdf809ef505c4895e51ef5c9eac17d0f287933eb49ec495280b6952 + # via + # -r tools/base/requirements.in + # sphinx +sphinxext-rediraffe==0.2.7 \ + --hash=sha256:651dcbfae5ffda9ffd534dfb8025f36120e5efb6ea1a33f5420023862b9f725d \ + --hash=sha256:9e430a52d4403847f4ffb3a8dd6dfc34a9fe43525305131f52ed899743a5fd8c + # via -r tools/base/requirements.in +toml==0.10.2 \ + --hash=sha256:806143ae5bfb6a3c6e736a764057db0e6a0e05e338b5630894a5f779cabb4f9b \ + --hash=sha256:b3bda1d108d5dd99f4a20d24d9c348e91c4db7ab1b749200bded2f839ccbe68f + # via + # pytest + # pytest-cov +trycast==0.3.0 \ + --hash=sha256:1b7b4c0d4b0d674770a53f34a762e52a6cd6879eb251ab21625602699920080d \ + --hash=sha256:687185b812e8d1c45f2ba841e8de7bdcdee0695dcf3464f206800505d4c65f26 + # via envoy.base.utils +typing-extensions==3.10.0.2 \ + --hash=sha256:49f75d16ff11f1cd258e1b988ccff82a3ca5570217d7ad8c5f48205dd99a677e \ + --hash=sha256:d8226d10bc02a29bcc81df19a26e56a9647f8b0a6d4a83924139f4a8b01f17b7 \ + 
--hash=sha256:f1d25edafde516b146ecd0613dabcc61409817af4766fbbcfb8d1ad4ec441a34 + # via + # aiodocker + # aiohttp +uritemplate==3.0.1 \ + --hash=sha256:07620c3f3f8eed1f12600845892b0e036a2420acf513c53f7de0abd911a5894f \ + --hash=sha256:5af8ad10cec94f215e3f48112de2022e1d5a37ed427fbd88652fa908f2ab7cae + # via gidgethub +urllib3==1.26.6 \ + --hash=sha256:39fb8672126159acb139a7718dd10806104dec1e2f0f6c88aab05d17df10c8d4 \ + --hash=sha256:f57b4c16c62fa2760b7e3d97c35b255512fb6b59a259730f36ba32ce9f8e342f + # via requests verboselogs==1.7 \ --hash=sha256:d63f23bf568295b95d3530c6864a0b580cec70e7ff974177dead1e4ffbc6ff49 \ --hash=sha256:e33ddedcdfdafcb3a174701150430b11b46ceb64c2a9a26198c76a156568e427 - # via -r tools/base/requirements.txt + # via + # -r tools/base/requirements.in + # envoy.base.runner + # envoy.github.abstract + # envoy.github.release +wrapt==1.12.1 \ + --hash=sha256:b62ffa81fb85f4332a4f609cab4ac40709470da05643a082ec1eb88e6d9b97d7 + # via deprecated +yapf==0.31.0 \ + --hash=sha256:408fb9a2b254c302f49db83c59f9aa0b4b0fd0ec25be3a5c51181327922ff63d \ + --hash=sha256:e3a234ba8455fe201eaa649cdac872d590089a18b661e39bbac7020978dd9c2e + # via -r tools/base/requirements.in +yarl==1.6.3 \ + --hash=sha256:00d7ad91b6583602eb9c1d085a2cf281ada267e9a197e8b7cae487dadbfa293e \ + --hash=sha256:0355a701b3998dcd832d0dc47cc5dedf3874f966ac7f870e0f3a6788d802d434 \ + --hash=sha256:15263c3b0b47968c1d90daa89f21fcc889bb4b1aac5555580d74565de6836366 \ + --hash=sha256:2ce4c621d21326a4a5500c25031e102af589edb50c09b321049e388b3934eec3 \ + --hash=sha256:31ede6e8c4329fb81c86706ba8f6bf661a924b53ba191b27aa5fcee5714d18ec \ + --hash=sha256:324ba3d3c6fee56e2e0b0d09bf5c73824b9f08234339d2b788af65e60040c959 \ + --hash=sha256:329412812ecfc94a57cd37c9d547579510a9e83c516bc069470db5f75684629e \ + --hash=sha256:4736eaee5626db8d9cda9eb5282028cc834e2aeb194e0d8b50217d707e98bb5c \ + --hash=sha256:4953fb0b4fdb7e08b2f3b3be80a00d28c5c8a2056bb066169de00e6501b986b6 \ + 
--hash=sha256:4c5bcfc3ed226bf6419f7a33982fb4b8ec2e45785a0561eb99274ebbf09fdd6a \ + --hash=sha256:547f7665ad50fa8563150ed079f8e805e63dd85def6674c97efd78eed6c224a6 \ + --hash=sha256:5b883e458058f8d6099e4420f0cc2567989032b5f34b271c0827de9f1079a424 \ + --hash=sha256:63f90b20ca654b3ecc7a8d62c03ffa46999595f0167d6450fa8383bab252987e \ + --hash=sha256:68dc568889b1c13f1e4745c96b931cc94fdd0defe92a72c2b8ce01091b22e35f \ + --hash=sha256:69ee97c71fee1f63d04c945f56d5d726483c4762845400a6795a3b75d56b6c50 \ + --hash=sha256:6d6283d8e0631b617edf0fd726353cb76630b83a089a40933043894e7f6721e2 \ + --hash=sha256:72a660bdd24497e3e84f5519e57a9ee9220b6f3ac4d45056961bf22838ce20cc \ + --hash=sha256:73494d5b71099ae8cb8754f1df131c11d433b387efab7b51849e7e1e851f07a4 \ + --hash=sha256:7356644cbed76119d0b6bd32ffba704d30d747e0c217109d7979a7bc36c4d970 \ + --hash=sha256:8a9066529240171b68893d60dca86a763eae2139dd42f42106b03cf4b426bf10 \ + --hash=sha256:8aa3decd5e0e852dc68335abf5478a518b41bf2ab2f330fe44916399efedfae0 \ + --hash=sha256:97b5bdc450d63c3ba30a127d018b866ea94e65655efaf889ebeabc20f7d12406 \ + --hash=sha256:9ede61b0854e267fd565e7527e2f2eb3ef8858b301319be0604177690e1a3896 \ + --hash=sha256:b2e9a456c121e26d13c29251f8267541bd75e6a1ccf9e859179701c36a078643 \ + --hash=sha256:b5dfc9a40c198334f4f3f55880ecf910adebdcb2a0b9a9c23c9345faa9185721 \ + --hash=sha256:bafb450deef6861815ed579c7a6113a879a6ef58aed4c3a4be54400ae8871478 \ + --hash=sha256:c49ff66d479d38ab863c50f7bb27dee97c6627c5fe60697de15529da9c3de724 \ + --hash=sha256:ce3beb46a72d9f2190f9e1027886bfc513702d748047b548b05dab7dfb584d2e \ + --hash=sha256:d26608cf178efb8faa5ff0f2d2e77c208f471c5a3709e577a7b3fd0445703ac8 \ + --hash=sha256:d597767fcd2c3dc49d6eea360c458b65643d1e4dbed91361cf5e36e53c1f8c96 \ + --hash=sha256:d5c32c82990e4ac4d8150fd7652b972216b204de4e83a122546dce571c1bdf25 \ + --hash=sha256:d8d07d102f17b68966e2de0e07bfd6e139c7c02ef06d3a0f8d2f0f055e13bb76 \ + --hash=sha256:e46fba844f4895b36f4c398c5af062a9808d1f26b2999c58909517384d5deda2 \ + 
--hash=sha256:e6b5460dc5ad42ad2b36cca524491dfcaffbfd9c8df50508bddc354e787b8dc2 \ + --hash=sha256:f040bcc6725c821a4c0665f3aa96a4d0805a7aaf2caf266d256b8ed71b9f041c \ + --hash=sha256:f0b059678fd549c66b89bed03efcabb009075bd131c248ecdf087bdb6faba24a \ + --hash=sha256:fcbb48a93e8699eae920f8d92f7160c03567b421bc17362a9ffbbd706a816f71 + # via aiohttp # The following packages are considered to be unsafe in a requirements file: -setuptools==57.4.0 \ - --hash=sha256:6bac238ffdf24e8806c61440e755192470352850f3419a52f26ffe0a1a64f465 \ - --hash=sha256:a49230977aa6cfb9d933614d2f7b79036e9945c4cdd7583163f4e920b83418d6 - # via -r tools/base/requirements.txt +setuptools==58.0.3 \ + --hash=sha256:1ceadf3ea9a821ef305505db995f2e21550ea62500900164278c4b23109204f3 \ + --hash=sha256:5e4c36f55012a46c1b3e4b67a8236d1d73856a90fc7b3207d29bedb7d2bac417 + # via + # -r tools/base/requirements.in + # sphinx diff --git a/tools/code_format/BUILD b/tools/code_format/BUILD index bb9683c151330..ba9de5fce8557 100644 --- a/tools/code_format/BUILD +++ b/tools/code_format/BUILD @@ -1,4 +1,4 @@ -load("@pylint_pip3//:requirements.bzl", "requirement") +load("@base_pip3//:requirements.bzl", "requirement") load("//bazel:envoy_build_system.bzl", "envoy_package") load("//tools/base:envoy_python.bzl", "envoy_py_binary") diff --git a/tools/config_validation/BUILD b/tools/config_validation/BUILD index 5ca3d0ef9a4af..ce2c1d3afbc55 100644 --- a/tools/config_validation/BUILD +++ b/tools/config_validation/BUILD @@ -1,5 +1,5 @@ load("@rules_python//python:defs.bzl", "py_binary") -load("@config_validation_pip3//:requirements.bzl", "requirement") +load("@base_pip3//:requirements.bzl", "requirement") licenses(["notice"]) # Apache 2 diff --git a/tools/config_validation/requirements.txt b/tools/config_validation/requirements.txt deleted file mode 100644 index 34601fe949b16..0000000000000 --- a/tools/config_validation/requirements.txt +++ /dev/null @@ -1,30 +0,0 @@ -PyYAML==5.4.1 \ - 
--hash=sha256:08682f6b72c722394747bddaf0aa62277e02557c0fd1c42cb853016a38f8dedf \ - --hash=sha256:0f5f5786c0e09baddcd8b4b45f20a7b5d61a7e7e99846e3c799b05c7c53fa696 \ - --hash=sha256:129def1b7c1bf22faffd67b8f3724645203b79d8f4cc81f674654d9902cb4393 \ - --hash=sha256:294db365efa064d00b8d1ef65d8ea2c3426ac366c0c4368d930bf1c5fb497f77 \ - --hash=sha256:3b2b1824fe7112845700f815ff6a489360226a5609b96ec2190a45e62a9fc922 \ - --hash=sha256:3bd0e463264cf257d1ffd2e40223b197271046d09dadf73a0fe82b9c1fc385a5 \ - --hash=sha256:4465124ef1b18d9ace298060f4eccc64b0850899ac4ac53294547536533800c8 \ - --hash=sha256:49d4cdd9065b9b6e206d0595fee27a96b5dd22618e7520c33204a4a3239d5b10 \ - --hash=sha256:4e0583d24c881e14342eaf4ec5fbc97f934b999a6828693a99157fde912540cc \ - --hash=sha256:5accb17103e43963b80e6f837831f38d314a0495500067cb25afab2e8d7a4018 \ - --hash=sha256:607774cbba28732bfa802b54baa7484215f530991055bb562efbed5b2f20a45e \ - --hash=sha256:6c78645d400265a062508ae399b60b8c167bf003db364ecb26dcab2bda048253 \ - --hash=sha256:72a01f726a9c7851ca9bfad6fd09ca4e090a023c00945ea05ba1638c09dc3347 \ - --hash=sha256:74c1485f7707cf707a7aef42ef6322b8f97921bd89be2ab6317fd782c2d53183 \ - --hash=sha256:895f61ef02e8fed38159bb70f7e100e00f471eae2bc838cd0f4ebb21e28f8541 \ - --hash=sha256:8c1be557ee92a20f184922c7b6424e8ab6691788e6d86137c5d93c1a6ec1b8fb \ - --hash=sha256:bb4191dfc9306777bc594117aee052446b3fa88737cd13b7188d0e7aa8162185 \ - --hash=sha256:bfb51918d4ff3d77c1c856a9699f8492c612cde32fd3bcd344af9be34999bfdc \ - --hash=sha256:c20cfa2d49991c8b4147af39859b167664f2ad4561704ee74c1de03318e898db \ - --hash=sha256:cb333c16912324fd5f769fff6bc5de372e9e7a202247b48870bc251ed40239aa \ - --hash=sha256:d2d9808ea7b4af864f35ea216be506ecec180628aced0704e34aca0b040ffe46 \ - --hash=sha256:d483ad4e639292c90170eb6f7783ad19490e7a8defb3e46f97dfe4bacae89122 \ - --hash=sha256:dd5de0646207f053eb0d6c74ae45ba98c3395a571a2891858e87df7c9b9bd51b \ - --hash=sha256:e1d4970ea66be07ae37a3c2e48b5ec63f7ba6804bdddfdbd3cfd954d25a82e63 \ - 
--hash=sha256:e4fac90784481d221a8e4b1162afa7c47ed953be40d31ab4629ae917510051df \ - --hash=sha256:fa5ae20527d8e831e8230cbffd9f8fe952815b2b7dae6ffec25318803a7528fc \ - --hash=sha256:fd7f6999a8070df521b6384004ef42833b9bd62cfee11a09bda1079b4b704247 \ - --hash=sha256:fdc842473cd33f45ff6bce46aea678a54e3d21f1b61a7750ce3c498eedfe25d6 \ - --hash=sha256:fe69978f3f768926cfa37b867e3843918e012cf83f680806599ddce33c2c68b0 diff --git a/tools/distribution/BUILD b/tools/distribution/BUILD index 11142e4324f0e..6b60dda875708 100644 --- a/tools/distribution/BUILD +++ b/tools/distribution/BUILD @@ -1,6 +1,6 @@ load("//bazel:envoy_build_system.bzl", "envoy_package") load("//tools/base:envoy_python.bzl", "envoy_py_script") -load("@distribution_pip3//:requirements.bzl", "requirement") +load("@base_pip3//:requirements.bzl", "requirement") licenses(["notice"]) # Apache 2 diff --git a/tools/distribution/requirements.txt b/tools/distribution/requirements.txt deleted file mode 100644 index dcd1a7600ba2f..0000000000000 --- a/tools/distribution/requirements.txt +++ /dev/null @@ -1,382 +0,0 @@ -# -# This file is autogenerated by pip-compile -# To update, run: -# -# pip-compile --generate-hashes tools/distribution/requirements.txt -# -abstracts==0.0.12 \ - --hash=sha256:acc01ff56c8a05fb88150dff62e295f9071fc33388c42f1dfc2787a8d1c755ff - # via - # aio.functional - # envoy.abstract.command - # envoy.github.abstract - # envoy.github.release -aio.functional==0.0.9 \ - --hash=sha256:824a997a394ad891bc9f403426babc13c9d0d1f4d1708c38e77d6aecae1cab1d - # via - # aio.tasks - # envoy.github.abstract - # envoy.github.release -aio.stream==0.0.2 \ - --hash=sha256:6f5baaff48f6319db134cd56c06ccf89db1f7c5f67a26382e081efc96f2f675d - # via envoy.github.release -aio.tasks==0.0.4 \ - --hash=sha256:9abd4b0881edb292c4f91a2f63b1dea7a9829a4bd4e8440225a1a412a90461fc - # via - # envoy.github.abstract - # envoy.github.release -aiodocker==0.21.0 \ - --hash=sha256:1f2e6db6377195962bb676d4822f6e3a0c525e1b5d60b8ebbab68230bff3d227 
\ - --hash=sha256:6fe00135bb7dc40a407669d3157ecdfd856f3737d939df54f40a479d40cf7bdc - # via - # envoy.distribution.distrotest - # envoy.docker.utils -aiofiles==0.7.0 \ - --hash=sha256:a1c4fc9b2ff81568c83e21392a82f344ea9d23da906e4f6a52662764545e19d4 \ - --hash=sha256:c67a6823b5f23fcab0a2595a289cec7d8c863ffcb4322fb8cd6b90400aedfdbc - # via aio.stream -aiohttp==3.7.4.post0 \ - --hash=sha256:02f46fc0e3c5ac58b80d4d56eb0a7c7d97fcef69ace9326289fb9f1955e65cfe \ - --hash=sha256:0563c1b3826945eecd62186f3f5c7d31abb7391fedc893b7e2b26303b5a9f3fe \ - --hash=sha256:114b281e4d68302a324dd33abb04778e8557d88947875cbf4e842c2c01a030c5 \ - --hash=sha256:14762875b22d0055f05d12abc7f7d61d5fd4fe4642ce1a249abdf8c700bf1fd8 \ - --hash=sha256:15492a6368d985b76a2a5fdd2166cddfea5d24e69eefed4630cbaae5c81d89bd \ - --hash=sha256:17c073de315745a1510393a96e680d20af8e67e324f70b42accbd4cb3315c9fb \ - --hash=sha256:209b4a8ee987eccc91e2bd3ac36adee0e53a5970b8ac52c273f7f8fd4872c94c \ - --hash=sha256:230a8f7e24298dea47659251abc0fd8b3c4e38a664c59d4b89cca7f6c09c9e87 \ - --hash=sha256:2e19413bf84934d651344783c9f5e22dee452e251cfd220ebadbed2d9931dbf0 \ - --hash=sha256:393f389841e8f2dfc86f774ad22f00923fdee66d238af89b70ea314c4aefd290 \ - --hash=sha256:3cf75f7cdc2397ed4442594b935a11ed5569961333d49b7539ea741be2cc79d5 \ - --hash=sha256:3d78619672183be860b96ed96f533046ec97ca067fd46ac1f6a09cd9b7484287 \ - --hash=sha256:40eced07f07a9e60e825554a31f923e8d3997cfc7fb31dbc1328c70826e04cde \ - --hash=sha256:493d3299ebe5f5a7c66b9819eacdcfbbaaf1a8e84911ddffcdc48888497afecf \ - --hash=sha256:4b302b45040890cea949ad092479e01ba25911a15e648429c7c5aae9650c67a8 \ - --hash=sha256:515dfef7f869a0feb2afee66b957cc7bbe9ad0cdee45aec7fdc623f4ecd4fb16 \ - --hash=sha256:547da6cacac20666422d4882cfcd51298d45f7ccb60a04ec27424d2f36ba3eaf \ - --hash=sha256:5df68496d19f849921f05f14f31bd6ef53ad4b00245da3195048c69934521809 \ - --hash=sha256:64322071e046020e8797117b3658b9c2f80e3267daec409b350b6a7a05041213 \ - 
--hash=sha256:7615dab56bb07bff74bc865307aeb89a8bfd9941d2ef9d817b9436da3a0ea54f \ - --hash=sha256:79ebfc238612123a713a457d92afb4096e2148be17df6c50fb9bf7a81c2f8013 \ - --hash=sha256:7b18b97cf8ee5452fa5f4e3af95d01d84d86d32c5e2bfa260cf041749d66360b \ - --hash=sha256:932bb1ea39a54e9ea27fc9232163059a0b8855256f4052e776357ad9add6f1c9 \ - --hash=sha256:a00bb73540af068ca7390e636c01cbc4f644961896fa9363154ff43fd37af2f5 \ - --hash=sha256:a5ca29ee66f8343ed336816c553e82d6cade48a3ad702b9ffa6125d187e2dedb \ - --hash=sha256:af9aa9ef5ba1fd5b8c948bb11f44891968ab30356d65fd0cc6707d989cd521df \ - --hash=sha256:bb437315738aa441251214dad17428cafda9cdc9729499f1d6001748e1d432f4 \ - --hash=sha256:bdb230b4943891321e06fc7def63c7aace16095be7d9cf3b1e01be2f10fba439 \ - --hash=sha256:c6e9dcb4cb338d91a73f178d866d051efe7c62a7166653a91e7d9fb18274058f \ - --hash=sha256:cffe3ab27871bc3ea47df5d8f7013945712c46a3cc5a95b6bee15887f1675c22 \ - --hash=sha256:d012ad7911653a906425d8473a1465caa9f8dea7fcf07b6d870397b774ea7c0f \ - --hash=sha256:d9e13b33afd39ddeb377eff2c1c4f00544e191e1d1dee5b6c51ddee8ea6f0cf5 \ - --hash=sha256:e4b2b334e68b18ac9817d828ba44d8fcb391f6acb398bcc5062b14b2cbeac970 \ - --hash=sha256:e54962802d4b8b18b6207d4a927032826af39395a3bd9196a5af43fc4e60b009 \ - --hash=sha256:f705e12750171c0ab4ef2a3c76b9a4024a62c4103e3a55dd6f99265b9bc6fcfc \ - --hash=sha256:f881853d2643a29e643609da57b96d5f9c9b93f62429dcc1cbb413c7d07f0e1a \ - --hash=sha256:fe60131d21b31fd1a14bd43e6bb88256f69dfc3188b3a89d736d6c71ed43ec95 - # via - # aio.stream - # aiodocker - # envoy.github.abstract - # envoy.github.release -async-timeout==3.0.1 \ - --hash=sha256:0c3c816a028d47f659d6ff5c745cb2acf1f966da1fe5c19c77a70282b25f4c5f \ - --hash=sha256:4291ca197d287d274d0b6cb5d6f8f8f82d434ed288f962539ff18cc9012f9ea3 - # via aiohttp -attrs==21.2.0 \ - --hash=sha256:149e90d6d8ac20db7a955ad60cf0e6881a3f20d37096140088356da6c716b0b1 \ - --hash=sha256:ef6aaac3ca6cd92904cdd0d83f629a15f18053ec84e6432106f7a4d04ae4f5fb - # via aiohttp -cffi==1.14.6 \ - 
--hash=sha256:06c54a68935738d206570b20da5ef2b6b6d92b38ef3ec45c5422c0ebaf338d4d \ - --hash=sha256:0c0591bee64e438883b0c92a7bed78f6290d40bf02e54c5bf0978eaf36061771 \ - --hash=sha256:19ca0dbdeda3b2615421d54bef8985f72af6e0c47082a8d26122adac81a95872 \ - --hash=sha256:22b9c3c320171c108e903d61a3723b51e37aaa8c81255b5e7ce102775bd01e2c \ - --hash=sha256:26bb2549b72708c833f5abe62b756176022a7b9a7f689b571e74c8478ead51dc \ - --hash=sha256:33791e8a2dc2953f28b8d8d300dde42dd929ac28f974c4b4c6272cb2955cb762 \ - --hash=sha256:3c8d896becff2fa653dc4438b54a5a25a971d1f4110b32bd3068db3722c80202 \ - --hash=sha256:4373612d59c404baeb7cbd788a18b2b2a8331abcc84c3ba40051fcd18b17a4d5 \ - --hash=sha256:487d63e1454627c8e47dd230025780e91869cfba4c753a74fda196a1f6ad6548 \ - --hash=sha256:48916e459c54c4a70e52745639f1db524542140433599e13911b2f329834276a \ - --hash=sha256:4922cd707b25e623b902c86188aca466d3620892db76c0bdd7b99a3d5e61d35f \ - --hash=sha256:55af55e32ae468e9946f741a5d51f9896da6b9bf0bbdd326843fec05c730eb20 \ - --hash=sha256:57e555a9feb4a8460415f1aac331a2dc833b1115284f7ded7278b54afc5bd218 \ - --hash=sha256:5d4b68e216fc65e9fe4f524c177b54964af043dde734807586cf5435af84045c \ - --hash=sha256:64fda793737bc4037521d4899be780534b9aea552eb673b9833b01f945904c2e \ - --hash=sha256:6d6169cb3c6c2ad50db5b868db6491a790300ade1ed5d1da29289d73bbe40b56 \ - --hash=sha256:7bcac9a2b4fdbed2c16fa5681356d7121ecabf041f18d97ed5b8e0dd38a80224 \ - --hash=sha256:80b06212075346b5546b0417b9f2bf467fea3bfe7352f781ffc05a8ab24ba14a \ - --hash=sha256:818014c754cd3dba7229c0f5884396264d51ffb87ec86e927ef0be140bfdb0d2 \ - --hash=sha256:8eb687582ed7cd8c4bdbff3df6c0da443eb89c3c72e6e5dcdd9c81729712791a \ - --hash=sha256:99f27fefe34c37ba9875f224a8f36e31d744d8083e00f520f133cab79ad5e819 \ - --hash=sha256:9f3e33c28cd39d1b655ed1ba7247133b6f7fc16fa16887b120c0c670e35ce346 \ - --hash=sha256:a8661b2ce9694ca01c529bfa204dbb144b275a31685a075ce123f12331be790b \ - --hash=sha256:a9da7010cec5a12193d1af9872a00888f396aba3dc79186604a09ea3ee7c029e \ - 
--hash=sha256:aedb15f0a5a5949ecb129a82b72b19df97bbbca024081ed2ef88bd5c0a610534 \ - --hash=sha256:b315d709717a99f4b27b59b021e6207c64620790ca3e0bde636a6c7f14618abb \ - --hash=sha256:ba6f2b3f452e150945d58f4badd92310449876c4c954836cfb1803bdd7b422f0 \ - --hash=sha256:c33d18eb6e6bc36f09d793c0dc58b0211fccc6ae5149b808da4a62660678b156 \ - --hash=sha256:c9a875ce9d7fe32887784274dd533c57909b7b1dcadcc128a2ac21331a9765dd \ - --hash=sha256:c9e005e9bd57bc987764c32a1bee4364c44fdc11a3cc20a40b93b444984f2b87 \ - --hash=sha256:d2ad4d668a5c0645d281dcd17aff2be3212bc109b33814bbb15c4939f44181cc \ - --hash=sha256:d950695ae4381ecd856bcaf2b1e866720e4ab9a1498cba61c602e56630ca7195 \ - --hash=sha256:e22dcb48709fc51a7b58a927391b23ab37eb3737a98ac4338e2448bef8559b33 \ - --hash=sha256:e8c6a99be100371dbb046880e7a282152aa5d6127ae01783e37662ef73850d8f \ - --hash=sha256:e9dc245e3ac69c92ee4c167fbdd7428ec1956d4e754223124991ef29eb57a09d \ - --hash=sha256:eb687a11f0a7a1839719edd80f41e459cc5366857ecbed383ff376c4e3cc6afd \ - --hash=sha256:eb9e2a346c5238a30a746893f23a9535e700f8192a68c07c0258e7ece6ff3728 \ - --hash=sha256:ed38b924ce794e505647f7c331b22a693bee1538fdf46b0222c4717b42f744e7 \ - --hash=sha256:f0010c6f9d1a4011e429109fda55a225921e3206e7f62a0c22a35344bfd13cca \ - --hash=sha256:f0c5d1acbfca6ebdd6b1e3eded8d261affb6ddcf2186205518f1428b8569bb99 \ - --hash=sha256:f10afb1004f102c7868ebfe91c28f4a712227fe4cb24974350ace1f90e1febbf \ - --hash=sha256:f174135f5609428cc6e1b9090f9268f5c8935fddb1b25ccb8255a2d50de6789e \ - --hash=sha256:f3ebe6e73c319340830a9b2825d32eb6d8475c1dac020b4f0aa774ee3b898d1c \ - --hash=sha256:f627688813d0a4140153ff532537fbe4afea5a3dffce1f9deb7f91f848a832b5 \ - --hash=sha256:fd4305f86f53dfd8cd3522269ed7fc34856a8ee3709a5e28b2836b2db9d4cd69 - # via cryptography -chardet==4.0.0 \ - --hash=sha256:0d6f53a15db4120f2b08c94f11e7d93d2c911ee118b6b30a04ec3ee8310179fa \ - --hash=sha256:f864054d66fd9118f2e67044ac8981a54775ec5b67aed0441892edb553d21da5 - # via aiohttp -coloredlogs==15.0.1 \ - 
--hash=sha256:612ee75c546f53e92e70049c9dbfcc18c935a2b9a53b66085ce9ef6a6e5c0934 \ - --hash=sha256:7c991aa71a4577af2f82600d8f8f3a89f936baeaf9b50a9c197da014e5bf16b0 - # via envoy.base.runner -cryptography==3.4.8 \ - --hash=sha256:0a7dcbcd3f1913f664aca35d47c1331fce738d44ec34b7be8b9d332151b0b01e \ - --hash=sha256:1eb7bb0df6f6f583dd8e054689def236255161ebbcf62b226454ab9ec663746b \ - --hash=sha256:21ca464b3a4b8d8e86ba0ee5045e103a1fcfac3b39319727bc0fc58c09c6aff7 \ - --hash=sha256:34dae04a0dce5730d8eb7894eab617d8a70d0c97da76b905de9efb7128ad7085 \ - --hash=sha256:3520667fda779eb788ea00080124875be18f2d8f0848ec00733c0ec3bb8219fc \ - --hash=sha256:3fa3a7ccf96e826affdf1a0a9432be74dc73423125c8f96a909e3835a5ef194a \ - --hash=sha256:5b0fbfae7ff7febdb74b574055c7466da334a5371f253732d7e2e7525d570498 \ - --hash=sha256:8695456444f277af73a4877db9fc979849cd3ee74c198d04fc0776ebc3db52b9 \ - --hash=sha256:94cc5ed4ceaefcbe5bf38c8fba6a21fc1d365bb8fb826ea1688e3370b2e24a1c \ - --hash=sha256:94fff993ee9bc1b2440d3b7243d488c6a3d9724cc2b09cdb297f6a886d040ef7 \ - --hash=sha256:9965c46c674ba8cc572bc09a03f4c649292ee73e1b683adb1ce81e82e9a6a0fb \ - --hash=sha256:a00cf305f07b26c351d8d4e1af84ad7501eca8a342dedf24a7acb0e7b7406e14 \ - --hash=sha256:a305600e7a6b7b855cd798e00278161b681ad6e9b7eca94c721d5f588ab212af \ - --hash=sha256:cd65b60cfe004790c795cc35f272e41a3df4631e2fb6b35aa7ac6ef2859d554e \ - --hash=sha256:d2a6e5ef66503da51d2110edf6c403dc6b494cc0082f85db12f54e9c5d4c3ec5 \ - --hash=sha256:d9ec0e67a14f9d1d48dd87a2531009a9b251c02ea42851c060b25c782516ff06 \ - --hash=sha256:f44d141b8c4ea5eb4dbc9b3ad992d45580c1d22bf5e24363f2fbf50c2d7ae8a7 - # via pyjwt -envoy.abstract.command==0.0.3 \ - --hash=sha256:4b7b15c91bea1f2eb7c2e8e35f95cd9437e1c8f151adc093bf7858fc85d48221 - # via - # envoy.base.runner - # envoy.distribution.release -envoy.base.checker==0.0.2 \ - --hash=sha256:2ac81efa20fd01fff644ff7dc7fadeac1c3e4dbb6210881ac7a7919ec0e048d8 - # via - # envoy.distribution.distrotest - # envoy.distribution.verify 
-envoy.base.runner==0.0.4 \ - --hash=sha256:4eeb2b661f1f0c402df4425852be554a8a83ef5d338bfae69ddcb9b90755379e - # via - # envoy.base.checker - # envoy.distribution.release - # envoy.github.abstract - # envoy.gpg.sign -envoy.base.utils==0.0.6 \ - --hash=sha256:58ed057137ebe80d78db90997efc59822115ee616e435a9afc3d7a19069bb53c - # via - # envoy.distribution.distrotest - # envoy.github.release - # envoy.gpg.sign -envoy.distribution.distrotest==0.0.3 \ - --hash=sha256:c094adbd959eb1336f93afc00aedb7ee4e68e8252e2365be816a6f9ede8a3de7 - # via envoy.distribution.verify -envoy.distribution.release==0.0.4 \ - --hash=sha256:41037e0488f0593ce5173739fe0cd1b45a4775f5a47738b85d9d04024ca241a2 - # via -r tools/distribution/requirements.txt -envoy.distribution.verify==0.0.2 \ - --hash=sha256:ae59134085de50203edf51c243dbf3301cbe5550db29f0ec6f9ea1c3b82fee1c - # via -r tools/distribution/requirements.txt -envoy.docker.utils==0.0.2 \ - --hash=sha256:a12cb57f0b6e204d646cbf94f927b3a8f5a27ed15f60d0576176584ec16a4b76 - # via envoy.distribution.distrotest -envoy.github.abstract==0.0.16 \ - --hash=sha256:badf04104492fb6b37ba2163f2b225132ed04aba680beb218e7c7d918564f8ee - # via - # envoy.distribution.release - # envoy.github.release -envoy.github.release==0.0.8 \ - --hash=sha256:fbc4354030137eb565b8c4d679965e4ef60b01de0c09310441836e592ca0cd19 - # via envoy.distribution.release -envoy.gpg.identity==0.0.2 \ - --hash=sha256:7d32ff9133e00b9974b4dabd2512b4872b091b8c5069d0112240dcc1a56bc406 - # via envoy.gpg.sign -envoy.gpg.sign==0.0.3 \ - --hash=sha256:31667931f5d7ff05fd809b89748f277511486311c777652af4cb8889bd641049 - # via -r tools/distribution/requirements.txt -frozendict==2.0.6 \ - --hash=sha256:3f00de72805cf4c9e81b334f3f04809278b967d2fed84552313a0fcce511beb1 \ - --hash=sha256:5d3f75832c35d4df041f0e19c268964cbef29c1eb34cd3517cf883f1c2d089b9 - # via envoy.base.runner -gidgethub==5.0.1 \ - --hash=sha256:3efbd6998600254ec7a2869318bd3ffde38edc3a0d37be0c14bc46b45947b682 \ - 
--hash=sha256:67245e93eb0918b37df038148af675df43b62e832c529d7f859f6b90d9f3e70d - # via - # envoy.github.abstract - # envoy.github.release -humanfriendly==9.2 \ - --hash=sha256:332da98c24cc150efcc91b5508b19115209272bfdf4b0764a56795932f854271 \ - --hash=sha256:f7dba53ac7935fd0b4a2fc9a29e316ddd9ea135fb3052d3d0279d10c18ff9c48 - # via coloredlogs -idna==3.2 \ - --hash=sha256:14475042e284991034cb48e06f6851428fb14c4dc953acd9be9a5e95c7b6dd7a \ - --hash=sha256:467fbad99067910785144ce333826c71fb0e63a425657295239737f7ecd125f3 - # via yarl -multidict==5.1.0 \ - --hash=sha256:018132dbd8688c7a69ad89c4a3f39ea2f9f33302ebe567a879da8f4ca73f0d0a \ - --hash=sha256:051012ccee979b2b06be928a6150d237aec75dd6bf2d1eeeb190baf2b05abc93 \ - --hash=sha256:05c20b68e512166fddba59a918773ba002fdd77800cad9f55b59790030bab632 \ - --hash=sha256:07b42215124aedecc6083f1ce6b7e5ec5b50047afa701f3442054373a6deb656 \ - --hash=sha256:0e3c84e6c67eba89c2dbcee08504ba8644ab4284863452450520dad8f1e89b79 \ - --hash=sha256:0e929169f9c090dae0646a011c8b058e5e5fb391466016b39d21745b48817fd7 \ - --hash=sha256:1ab820665e67373de5802acae069a6a05567ae234ddb129f31d290fc3d1aa56d \ - --hash=sha256:25b4e5f22d3a37ddf3effc0710ba692cfc792c2b9edfb9c05aefe823256e84d5 \ - --hash=sha256:2e68965192c4ea61fff1b81c14ff712fc7dc15d2bd120602e4a3494ea6584224 \ - --hash=sha256:2f1a132f1c88724674271d636e6b7351477c27722f2ed789f719f9e3545a3d26 \ - --hash=sha256:37e5438e1c78931df5d3c0c78ae049092877e5e9c02dd1ff5abb9cf27a5914ea \ - --hash=sha256:3a041b76d13706b7fff23b9fc83117c7b8fe8d5fe9e6be45eee72b9baa75f348 \ - --hash=sha256:3a4f32116f8f72ecf2a29dabfb27b23ab7cdc0ba807e8459e59a93a9be9506f6 \ - --hash=sha256:46c73e09ad374a6d876c599f2328161bcd95e280f84d2060cf57991dec5cfe76 \ - --hash=sha256:46dd362c2f045095c920162e9307de5ffd0a1bfbba0a6e990b344366f55a30c1 \ - --hash=sha256:4b186eb7d6ae7c06eb4392411189469e6a820da81447f46c0072a41c748ab73f \ - --hash=sha256:54fd1e83a184e19c598d5e70ba508196fd0bbdd676ce159feb412a4a6664f952 \ - 
--hash=sha256:585fd452dd7782130d112f7ddf3473ffdd521414674c33876187e101b588738a \ - --hash=sha256:5cf3443199b83ed9e955f511b5b241fd3ae004e3cb81c58ec10f4fe47c7dce37 \ - --hash=sha256:6a4d5ce640e37b0efcc8441caeea8f43a06addace2335bd11151bc02d2ee31f9 \ - --hash=sha256:7df80d07818b385f3129180369079bd6934cf70469f99daaebfac89dca288359 \ - --hash=sha256:806068d4f86cb06af37cd65821554f98240a19ce646d3cd24e1c33587f313eb8 \ - --hash=sha256:830f57206cc96ed0ccf68304141fec9481a096c4d2e2831f311bde1c404401da \ - --hash=sha256:929006d3c2d923788ba153ad0de8ed2e5ed39fdbe8e7be21e2f22ed06c6783d3 \ - --hash=sha256:9436dc58c123f07b230383083855593550c4d301d2532045a17ccf6eca505f6d \ - --hash=sha256:9dd6e9b1a913d096ac95d0399bd737e00f2af1e1594a787e00f7975778c8b2bf \ - --hash=sha256:ace010325c787c378afd7f7c1ac66b26313b3344628652eacd149bdd23c68841 \ - --hash=sha256:b47a43177a5e65b771b80db71e7be76c0ba23cc8aa73eeeb089ed5219cdbe27d \ - --hash=sha256:b797515be8743b771aa868f83563f789bbd4b236659ba52243b735d80b29ed93 \ - --hash=sha256:b7993704f1a4b204e71debe6095150d43b2ee6150fa4f44d6d966ec356a8d61f \ - --hash=sha256:d5c65bdf4484872c4af3150aeebe101ba560dcfb34488d9a8ff8dbcd21079647 \ - --hash=sha256:d81eddcb12d608cc08081fa88d046c78afb1bf8107e6feab5d43503fea74a635 \ - --hash=sha256:dc862056f76443a0db4509116c5cd480fe1b6a2d45512a653f9a855cc0517456 \ - --hash=sha256:ecc771ab628ea281517e24fd2c52e8f31c41e66652d07599ad8818abaad38cda \ - --hash=sha256:f200755768dc19c6f4e2b672421e0ebb3dd54c38d5a4f262b872d8cfcc9e93b5 \ - --hash=sha256:f21756997ad8ef815d8ef3d34edd98804ab5ea337feedcd62fb52d22bf531281 \ - --hash=sha256:fc13a9524bc18b6fb6e0dbec3533ba0496bbed167c56d0aabefd965584557d80 - # via - # aiohttp - # yarl -packaging==21.0 \ - --hash=sha256:7dc96269f53a4ccec5c0670940a4281106dd0bb343f47b7471f779df49c2fbe7 \ - --hash=sha256:c86254f9220d55e31cc94d69bade760f0847da8000def4dfe1c6b872fd14ff14 - # via envoy.github.release -pycparser==2.20 \ - --hash=sha256:2d475327684562c3a96cc71adf7dc8c4f0565175cf86b6d7a404ff4c771f15f0 \ 
- --hash=sha256:7582ad22678f0fcd81102833f60ef8d0e57288b6b5fb00323d101be910e35705 - # via cffi -pyjwt[crypto]==2.1.0 \ - --hash=sha256:934d73fbba91b0483d3857d1aff50e96b2a892384ee2c17417ed3203f173fca1 \ - --hash=sha256:fba44e7898bbca160a2b2b501f492824fc8382485d3a6f11ba5d0c1937ce6130 - # via gidgethub -pyparsing==2.4.7 \ - --hash=sha256:c203ec8783bf771a155b207279b9bccb8dea02d8f0c9e5f8ead507bc3246ecc1 \ - --hash=sha256:ef9d7589ef3c200abe66653d3f1ab1033c3c419ae9b9bdb1240a85b024efc88b - # via packaging -python-gnupg==0.4.7 \ - --hash=sha256:2061f56b1942c29b92727bf9aecbd3cea3893acc9cccbdc7eb4604285efe4ac7 \ - --hash=sha256:3ff5b1bf5e397de6e1fe41a7c0f403dad4e242ac92b345f440eaecfb72a7ebae - # via envoy.gpg.identity -pyyaml==5.4.1 \ - --hash=sha256:08682f6b72c722394747bddaf0aa62277e02557c0fd1c42cb853016a38f8dedf \ - --hash=sha256:0f5f5786c0e09baddcd8b4b45f20a7b5d61a7e7e99846e3c799b05c7c53fa696 \ - --hash=sha256:129def1b7c1bf22faffd67b8f3724645203b79d8f4cc81f674654d9902cb4393 \ - --hash=sha256:294db365efa064d00b8d1ef65d8ea2c3426ac366c0c4368d930bf1c5fb497f77 \ - --hash=sha256:3b2b1824fe7112845700f815ff6a489360226a5609b96ec2190a45e62a9fc922 \ - --hash=sha256:3bd0e463264cf257d1ffd2e40223b197271046d09dadf73a0fe82b9c1fc385a5 \ - --hash=sha256:4465124ef1b18d9ace298060f4eccc64b0850899ac4ac53294547536533800c8 \ - --hash=sha256:49d4cdd9065b9b6e206d0595fee27a96b5dd22618e7520c33204a4a3239d5b10 \ - --hash=sha256:4e0583d24c881e14342eaf4ec5fbc97f934b999a6828693a99157fde912540cc \ - --hash=sha256:5accb17103e43963b80e6f837831f38d314a0495500067cb25afab2e8d7a4018 \ - --hash=sha256:607774cbba28732bfa802b54baa7484215f530991055bb562efbed5b2f20a45e \ - --hash=sha256:6c78645d400265a062508ae399b60b8c167bf003db364ecb26dcab2bda048253 \ - --hash=sha256:72a01f726a9c7851ca9bfad6fd09ca4e090a023c00945ea05ba1638c09dc3347 \ - --hash=sha256:74c1485f7707cf707a7aef42ef6322b8f97921bd89be2ab6317fd782c2d53183 \ - --hash=sha256:895f61ef02e8fed38159bb70f7e100e00f471eae2bc838cd0f4ebb21e28f8541 \ - 
--hash=sha256:8c1be557ee92a20f184922c7b6424e8ab6691788e6d86137c5d93c1a6ec1b8fb \ - --hash=sha256:bb4191dfc9306777bc594117aee052446b3fa88737cd13b7188d0e7aa8162185 \ - --hash=sha256:bfb51918d4ff3d77c1c856a9699f8492c612cde32fd3bcd344af9be34999bfdc \ - --hash=sha256:c20cfa2d49991c8b4147af39859b167664f2ad4561704ee74c1de03318e898db \ - --hash=sha256:cb333c16912324fd5f769fff6bc5de372e9e7a202247b48870bc251ed40239aa \ - --hash=sha256:d2d9808ea7b4af864f35ea216be506ecec180628aced0704e34aca0b040ffe46 \ - --hash=sha256:d483ad4e639292c90170eb6f7783ad19490e7a8defb3e46f97dfe4bacae89122 \ - --hash=sha256:dd5de0646207f053eb0d6c74ae45ba98c3395a571a2891858e87df7c9b9bd51b \ - --hash=sha256:e1d4970ea66be07ae37a3c2e48b5ec63f7ba6804bdddfdbd3cfd954d25a82e63 \ - --hash=sha256:e4fac90784481d221a8e4b1162afa7c47ed953be40d31ab4629ae917510051df \ - --hash=sha256:fa5ae20527d8e831e8230cbffd9f8fe952815b2b7dae6ffec25318803a7528fc \ - --hash=sha256:fd7f6999a8070df521b6384004ef42833b9bd62cfee11a09bda1079b4b704247 \ - --hash=sha256:fdc842473cd33f45ff6bce46aea678a54e3d21f1b61a7750ce3c498eedfe25d6 \ - --hash=sha256:fe69978f3f768926cfa37b867e3843918e012cf83f680806599ddce33c2c68b0 - # via envoy.base.utils -trycast==0.3.0 \ - --hash=sha256:1b7b4c0d4b0d674770a53f34a762e52a6cd6879eb251ab21625602699920080d \ - --hash=sha256:687185b812e8d1c45f2ba841e8de7bdcdee0695dcf3464f206800505d4c65f26 - # via envoy.base.utils -typing-extensions==3.10.0.2 \ - --hash=sha256:49f75d16ff11f1cd258e1b988ccff82a3ca5570217d7ad8c5f48205dd99a677e \ - --hash=sha256:d8226d10bc02a29bcc81df19a26e56a9647f8b0a6d4a83924139f4a8b01f17b7 \ - --hash=sha256:f1d25edafde516b146ecd0613dabcc61409817af4766fbbcfb8d1ad4ec441a34 - # via - # aiodocker - # aiohttp -uritemplate==3.0.1 \ - --hash=sha256:07620c3f3f8eed1f12600845892b0e036a2420acf513c53f7de0abd911a5894f \ - --hash=sha256:5af8ad10cec94f215e3f48112de2022e1d5a37ed427fbd88652fa908f2ab7cae - # via gidgethub -verboselogs==1.7 \ - 
--hash=sha256:d63f23bf568295b95d3530c6864a0b580cec70e7ff974177dead1e4ffbc6ff49 \ - --hash=sha256:e33ddedcdfdafcb3a174701150430b11b46ceb64c2a9a26198c76a156568e427 - # via - # envoy.base.runner - # envoy.github.abstract - # envoy.github.release -yarl==1.6.3 \ - --hash=sha256:00d7ad91b6583602eb9c1d085a2cf281ada267e9a197e8b7cae487dadbfa293e \ - --hash=sha256:0355a701b3998dcd832d0dc47cc5dedf3874f966ac7f870e0f3a6788d802d434 \ - --hash=sha256:15263c3b0b47968c1d90daa89f21fcc889bb4b1aac5555580d74565de6836366 \ - --hash=sha256:2ce4c621d21326a4a5500c25031e102af589edb50c09b321049e388b3934eec3 \ - --hash=sha256:31ede6e8c4329fb81c86706ba8f6bf661a924b53ba191b27aa5fcee5714d18ec \ - --hash=sha256:324ba3d3c6fee56e2e0b0d09bf5c73824b9f08234339d2b788af65e60040c959 \ - --hash=sha256:329412812ecfc94a57cd37c9d547579510a9e83c516bc069470db5f75684629e \ - --hash=sha256:4736eaee5626db8d9cda9eb5282028cc834e2aeb194e0d8b50217d707e98bb5c \ - --hash=sha256:4953fb0b4fdb7e08b2f3b3be80a00d28c5c8a2056bb066169de00e6501b986b6 \ - --hash=sha256:4c5bcfc3ed226bf6419f7a33982fb4b8ec2e45785a0561eb99274ebbf09fdd6a \ - --hash=sha256:547f7665ad50fa8563150ed079f8e805e63dd85def6674c97efd78eed6c224a6 \ - --hash=sha256:5b883e458058f8d6099e4420f0cc2567989032b5f34b271c0827de9f1079a424 \ - --hash=sha256:63f90b20ca654b3ecc7a8d62c03ffa46999595f0167d6450fa8383bab252987e \ - --hash=sha256:68dc568889b1c13f1e4745c96b931cc94fdd0defe92a72c2b8ce01091b22e35f \ - --hash=sha256:69ee97c71fee1f63d04c945f56d5d726483c4762845400a6795a3b75d56b6c50 \ - --hash=sha256:6d6283d8e0631b617edf0fd726353cb76630b83a089a40933043894e7f6721e2 \ - --hash=sha256:72a660bdd24497e3e84f5519e57a9ee9220b6f3ac4d45056961bf22838ce20cc \ - --hash=sha256:73494d5b71099ae8cb8754f1df131c11d433b387efab7b51849e7e1e851f07a4 \ - --hash=sha256:7356644cbed76119d0b6bd32ffba704d30d747e0c217109d7979a7bc36c4d970 \ - --hash=sha256:8a9066529240171b68893d60dca86a763eae2139dd42f42106b03cf4b426bf10 \ - --hash=sha256:8aa3decd5e0e852dc68335abf5478a518b41bf2ab2f330fe44916399efedfae0 
\ - --hash=sha256:97b5bdc450d63c3ba30a127d018b866ea94e65655efaf889ebeabc20f7d12406 \ - --hash=sha256:9ede61b0854e267fd565e7527e2f2eb3ef8858b301319be0604177690e1a3896 \ - --hash=sha256:b2e9a456c121e26d13c29251f8267541bd75e6a1ccf9e859179701c36a078643 \ - --hash=sha256:b5dfc9a40c198334f4f3f55880ecf910adebdcb2a0b9a9c23c9345faa9185721 \ - --hash=sha256:bafb450deef6861815ed579c7a6113a879a6ef58aed4c3a4be54400ae8871478 \ - --hash=sha256:c49ff66d479d38ab863c50f7bb27dee97c6627c5fe60697de15529da9c3de724 \ - --hash=sha256:ce3beb46a72d9f2190f9e1027886bfc513702d748047b548b05dab7dfb584d2e \ - --hash=sha256:d26608cf178efb8faa5ff0f2d2e77c208f471c5a3709e577a7b3fd0445703ac8 \ - --hash=sha256:d597767fcd2c3dc49d6eea360c458b65643d1e4dbed91361cf5e36e53c1f8c96 \ - --hash=sha256:d5c32c82990e4ac4d8150fd7652b972216b204de4e83a122546dce571c1bdf25 \ - --hash=sha256:d8d07d102f17b68966e2de0e07bfd6e139c7c02ef06d3a0f8d2f0f055e13bb76 \ - --hash=sha256:e46fba844f4895b36f4c398c5af062a9808d1f26b2999c58909517384d5deda2 \ - --hash=sha256:e6b5460dc5ad42ad2b36cca524491dfcaffbfd9c8df50508bddc354e787b8dc2 \ - --hash=sha256:f040bcc6725c821a4c0665f3aa96a4d0805a7aaf2caf266d256b8ed71b9f041c \ - --hash=sha256:f0b059678fd549c66b89bed03efcabb009075bd131c248ecdf087bdb6faba24a \ - --hash=sha256:fcbb48a93e8699eae920f8d92f7160c03567b421bc17362a9ffbbd706a816f71 - # via aiohttp diff --git a/tools/docs/BUILD b/tools/docs/BUILD index 56892499d24d5..4f82feb9de76e 100644 --- a/tools/docs/BUILD +++ b/tools/docs/BUILD @@ -1,6 +1,6 @@ load("@rules_python//python:defs.bzl", "py_binary") load("//bazel:envoy_build_system.bzl", "envoy_package") -load("@docs_pip3//:requirements.bzl", "requirement") +load("@base_pip3//:requirements.bzl", "requirement") load("//tools/base:envoy_python.bzl", "envoy_py_binary") licenses(["notice"]) # Apache 2 @@ -38,40 +38,14 @@ envoy_py_binary( deps = [ "//tools/base:runner", "//tools/base:utils", - requirement("alabaster"), - requirement("Babel"), - requirement("certifi"), - requirement("chardet"), 
requirement("colorama"), - requirement("docutils"), - requirement("gitdb"), - requirement("GitPython"), - requirement("idna"), - requirement("imagesize"), - requirement("Jinja2"), - requirement("MarkupSafe"), - requirement("packaging"), - requirement("Pygments"), - requirement("pyparsing"), - requirement("pytz"), - requirement("requests"), - requirement("setuptools"), - requirement("six"), - requirement("smmap"), - requirement("snowballstemmer"), requirement("Sphinx"), requirement("sphinx-copybutton"), requirement("sphinx-rtd-theme"), requirement("sphinx-tabs"), - requirement("sphinxcontrib-applehelp"), - requirement("sphinxcontrib-devhelp"), - requirement("sphinxcontrib-htmlhelp"), requirement("sphinxcontrib-httpdomain"), - requirement("sphinxcontrib-jsmath"), - requirement("sphinxcontrib-qthelp"), - requirement("sphinxext-rediraffe"), requirement("sphinxcontrib-serializinghtml"), - requirement("urllib3"), + requirement("sphinxext-rediraffe"), ], ) diff --git a/tools/docs/requirements.txt b/tools/docs/requirements.txt deleted file mode 100644 index 075bb65491822..0000000000000 --- a/tools/docs/requirements.txt +++ /dev/null @@ -1,239 +0,0 @@ -# -# This file is autogenerated by pip-compile -# To update, run: -# -# pip-compile --allow-unsafe --generate-hashes tools/docs/requirements.txt -# -alabaster==0.7.12 \ - --hash=sha256:446438bdcca0e05bd45ea2de1668c1d9b032e1a9154c2c259092d77031ddd359 \ - --hash=sha256:a661d72d58e6ea8a57f7a86e37d86716863ee5e92788398526d58b26a4e4dc02 - # via - # -r tools/docs/requirements.txt - # sphinx -babel==2.9.1 \ - --hash=sha256:ab49e12b91d937cd11f0b67cb259a57ab4ad2b59ac7a3b41d6c06c0ac5b0def9 \ - --hash=sha256:bc0c176f9f6a994582230df350aa6e05ba2ebe4b3ac317eab29d9be5d2768da0 - # via - # -r tools/docs/requirements.txt - # sphinx -certifi==2021.5.30 \ - --hash=sha256:2bbf76fd432960138b3ef6dda3dde0544f27cbf8546c458e60baf371917ba9ee \ - --hash=sha256:50b1e4f8446b06f41be7dd6338db18e0990601dce795c2b1686458aa7e8fa7d8 - # via - # -r 
tools/docs/requirements.txt - # requests -chardet==4.0.0 \ - --hash=sha256:0d6f53a15db4120f2b08c94f11e7d93d2c911ee118b6b30a04ec3ee8310179fa \ - --hash=sha256:f864054d66fd9118f2e67044ac8981a54775ec5b67aed0441892edb553d21da5 - # via -r tools/docs/requirements.txt -charset-normalizer==2.0.4 \ - --hash=sha256:0c8911edd15d19223366a194a513099a302055a962bca2cec0f54b8b63175d8b \ - --hash=sha256:f23667ebe1084be45f6ae0538e4a5a865206544097e4e8bbcacf42cd02a348f3 - # via requests -colorama==0.4.4 \ - --hash=sha256:5941b2b48a20143d2267e95b1c2a7603ce057ee39fd88e7329b0c292aa16869b \ - --hash=sha256:9f47eda37229f68eee03b24b9748937c7dc3868f906e8ba69fbcbdd3bc5dc3e2 - # via -r tools/docs/requirements.txt -docutils==0.16 \ - --hash=sha256:0c5b78adfbf7762415433f5515cd5c9e762339e23369dbe8000d84a4bf4ab3af \ - --hash=sha256:c2de3a60e9e7d07be26b7f2b00ca0309c207e06c100f9cc2a94931fc75a478fc - # via - # -r tools/docs/requirements.txt - # sphinx - # sphinx-rtd-theme - # sphinx-tabs -gitdb==4.0.7 \ - --hash=sha256:6c4cc71933456991da20917998acbe6cf4fb41eeaab7d6d67fbc05ecd4c865b0 \ - --hash=sha256:96bf5c08b157a666fec41129e6d327235284cca4c81e92109260f353ba138005 - # via - # -r tools/docs/requirements.txt - # gitpython -gitpython==3.1.18 \ - --hash=sha256:b838a895977b45ab6f0cc926a9045c8d1c44e2b653c1fcc39fe91f42c6e8f05b \ - --hash=sha256:fce760879cd2aebd2991b3542876dc5c4a909b30c9d69dfc488e504a8db37ee8 - # via -r tools/docs/requirements.txt -idna==2.10 \ - --hash=sha256:b307872f855b18632ce0c21c5e45be78c0ea7ae4c15c828c20788b26921eb3f6 \ - --hash=sha256:b97d804b1e9b523befed77c48dacec60e6dcb0b5391d57af6a65a312a90648c0 - # via - # -r tools/docs/requirements.txt - # requests -imagesize==1.2.0 \ - --hash=sha256:6965f19a6a2039c7d48bca7dba2473069ff854c36ae6f19d2cde309d998228a1 \ - --hash=sha256:b1f6b5a4eab1f73479a50fb79fcf729514a900c341d8503d62a62dbc4127a2b1 - # via - # -r tools/docs/requirements.txt - # sphinx -jinja2==3.0.1 \ - --hash=sha256:1f06f2da51e7b56b8f238affdd6b4e2c61e39598a378cc49345bc1bd42a978a4 \ 
- --hash=sha256:703f484b47a6af502e743c9122595cc812b0271f661722403114f71a79d0f5a4 - # via - # -r tools/docs/requirements.txt - # sphinx -markupsafe==2.0.1 \ - --hash=sha256:01a9b8ea66f1658938f65b93a85ebe8bc016e6769611be228d797c9d998dd298 \ - --hash=sha256:023cb26ec21ece8dc3907c0e8320058b2e0cb3c55cf9564da612bc325bed5e64 \ - --hash=sha256:0446679737af14f45767963a1a9ef7620189912317d095f2d9ffa183a4d25d2b \ - --hash=sha256:0717a7390a68be14b8c793ba258e075c6f4ca819f15edfc2a3a027c823718567 \ - --hash=sha256:0955295dd5eec6cb6cc2fe1698f4c6d84af2e92de33fbcac4111913cd100a6ff \ - --hash=sha256:10f82115e21dc0dfec9ab5c0223652f7197feb168c940f3ef61563fc2d6beb74 \ - --hash=sha256:1d609f577dc6e1aa17d746f8bd3c31aa4d258f4070d61b2aa5c4166c1539de35 \ - --hash=sha256:2ef54abee730b502252bcdf31b10dacb0a416229b72c18b19e24a4509f273d26 \ - --hash=sha256:3c112550557578c26af18a1ccc9e090bfe03832ae994343cfdacd287db6a6ae7 \ - --hash=sha256:47ab1e7b91c098ab893b828deafa1203de86d0bc6ab587b160f78fe6c4011f75 \ - --hash=sha256:49e3ceeabbfb9d66c3aef5af3a60cc43b85c33df25ce03d0031a608b0a8b2e3f \ - --hash=sha256:4efca8f86c54b22348a5467704e3fec767b2db12fc39c6d963168ab1d3fc9135 \ - --hash=sha256:53edb4da6925ad13c07b6d26c2a852bd81e364f95301c66e930ab2aef5b5ddd8 \ - --hash=sha256:594c67807fb16238b30c44bdf74f36c02cdf22d1c8cda91ef8a0ed8dabf5620a \ - --hash=sha256:611d1ad9a4288cf3e3c16014564df047fe08410e628f89805e475368bd304914 \ - --hash=sha256:6557b31b5e2c9ddf0de32a691f2312a32f77cd7681d8af66c2692efdbef84c18 \ - --hash=sha256:693ce3f9e70a6cf7d2fb9e6c9d8b204b6b39897a2c4a1aa65728d5ac97dcc1d8 \ - --hash=sha256:6a7fae0dd14cf60ad5ff42baa2e95727c3d81ded453457771d02b7d2b3f9c0c2 \ - --hash=sha256:6c4ca60fa24e85fe25b912b01e62cb969d69a23a5d5867682dd3e80b5b02581d \ - --hash=sha256:7d91275b0245b1da4d4cfa07e0faedd5b0812efc15b702576d103293e252af1b \ - --hash=sha256:905fec760bd2fa1388bb5b489ee8ee5f7291d692638ea5f67982d968366bef9f \ - --hash=sha256:97383d78eb34da7e1fa37dd273c20ad4320929af65d156e35a5e2d89566d9dfb \ - 
--hash=sha256:984d76483eb32f1bcb536dc27e4ad56bba4baa70be32fa87152832cdd9db0833 \ - --hash=sha256:a30e67a65b53ea0a5e62fe23682cfe22712e01f453b95233b25502f7c61cb415 \ - --hash=sha256:ab3ef638ace319fa26553db0624c4699e31a28bb2a835c5faca8f8acf6a5a902 \ - --hash=sha256:b2f4bf27480f5e5e8ce285a8c8fd176c0b03e93dcc6646477d4630e83440c6a9 \ - --hash=sha256:b7f2d075102dc8c794cbde1947378051c4e5180d52d276987b8d28a3bd58c17d \ - --hash=sha256:be98f628055368795d818ebf93da628541e10b75b41c559fdf36d104c5787066 \ - --hash=sha256:d7f9850398e85aba693bb640262d3611788b1f29a79f0c93c565694658f4071f \ - --hash=sha256:f5653a225f31e113b152e56f154ccbe59eeb1c7487b39b9d9f9cdb58e6c79dc5 \ - --hash=sha256:f826e31d18b516f653fe296d967d700fddad5901ae07c622bb3705955e1faa94 \ - --hash=sha256:f8ba0e8349a38d3001fae7eadded3f6606f0da5d748ee53cc1dab1d6527b9509 \ - --hash=sha256:f9081981fe268bd86831e5c75f7de206ef275defcb82bc70740ae6dc507aee51 \ - --hash=sha256:fa130dd50c57d53368c9d59395cb5526eda596d3ffe36666cd81a44d56e48872 - # via - # -r tools/docs/requirements.txt - # jinja2 -packaging==21.0 \ - --hash=sha256:7dc96269f53a4ccec5c0670940a4281106dd0bb343f47b7471f779df49c2fbe7 \ - --hash=sha256:c86254f9220d55e31cc94d69bade760f0847da8000def4dfe1c6b872fd14ff14 - # via - # -r tools/docs/requirements.txt - # sphinx -pygments==2.10.0 \ - --hash=sha256:b8e67fe6af78f492b3c4b3e2970c0624cbf08beb1e493b2c99b9fa1b67a20380 \ - --hash=sha256:f398865f7eb6874156579fdf36bc840a03cab64d1cde9e93d68f46a425ec52c6 - # via - # -r tools/docs/requirements.txt - # sphinx - # sphinx-tabs -pyparsing==2.4.7 \ - --hash=sha256:c203ec8783bf771a155b207279b9bccb8dea02d8f0c9e5f8ead507bc3246ecc1 \ - --hash=sha256:ef9d7589ef3c200abe66653d3f1ab1033c3c419ae9b9bdb1240a85b024efc88b - # via - # -r tools/docs/requirements.txt - # packaging -pytz==2021.1 \ - --hash=sha256:83a4a90894bf38e243cf052c8b58f381bfe9a7a483f6a9cab140bc7f702ac4da \ - --hash=sha256:eb10ce3e7736052ed3623d49975ce333bcd712c7bb19a58b9e2089d4057d0798 - # via - # -r 
tools/docs/requirements.txt - # babel -requests==2.26.0 \ - --hash=sha256:6c1246513ecd5ecd4528a0906f910e8f0f9c6b8ec72030dc9fd154dc1a6efd24 \ - --hash=sha256:b8aa58f8cf793ffd8782d3d8cb19e66ef36f7aba4353eec859e74678b01b07a7 - # via - # -r tools/docs/requirements.txt - # sphinx -six==1.16.0 \ - --hash=sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926 \ - --hash=sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254 - # via - # -r tools/docs/requirements.txt - # sphinxcontrib-httpdomain -smmap==4.0.0 \ - --hash=sha256:7e65386bd122d45405ddf795637b7f7d2b532e7e401d46bbe3fb49b9986d5182 \ - --hash=sha256:a9a7479e4c572e2e775c404dcd3080c8dc49f39918c2cf74913d30c4c478e3c2 - # via - # -r tools/docs/requirements.txt - # gitdb -snowballstemmer==2.1.0 \ - --hash=sha256:b51b447bea85f9968c13b650126a888aabd4cb4463fca868ec596826325dedc2 \ - --hash=sha256:e997baa4f2e9139951b6f4c631bad912dfd3c792467e2f03d7239464af90e914 - # via - # -r tools/docs/requirements.txt - # sphinx -sphinx-copybutton==0.4.0 \ - --hash=sha256:4340d33c169dac6dd82dce2c83333412aa786a42dd01a81a8decac3b130dc8b0 \ - --hash=sha256:8daed13a87afd5013c3a9af3575cc4d5bec052075ccd3db243f895c07a689386 - # via -r tools/docs/requirements.txt -sphinx-rtd-theme==0.5.2 \ - --hash=sha256:32bd3b5d13dc8186d7a42fc816a23d32e83a4827d7d9882948e7b837c232da5a \ - --hash=sha256:4a05bdbe8b1446d77a01e20a23ebc6777c74f43237035e76be89699308987d6f - # via -r tools/docs/requirements.txt -sphinx-tabs==3.2.0 \ - --hash=sha256:1e1b1846c80137bd81a78e4a69b02664b98b1e1da361beb30600b939dfc75065 \ - --hash=sha256:33137914ed9b276e6a686d7a337310ee77b1dae316fdcbce60476913a152e0a4 - # via -r tools/docs/requirements.txt -sphinx==4.1.1 \ - --hash=sha256:23c846a1841af998cb736218539bb86d16f5eb95f5760b1966abcd2d584e62b8 \ - --hash=sha256:3d513088236eef51e5b0adb78b0492eb22cc3b8ccdb0b36dd021173b365d4454 - # via - # -r tools/docs/requirements.txt - # sphinx-copybutton - # sphinx-rtd-theme - # sphinx-tabs - # 
sphinxcontrib-httpdomain - # sphinxext-rediraffe -sphinxcontrib-applehelp==1.0.2 \ - --hash=sha256:806111e5e962be97c29ec4c1e7fe277bfd19e9652fb1a4392105b43e01af885a \ - --hash=sha256:a072735ec80e7675e3f432fcae8610ecf509c5f1869d17e2eecff44389cdbc58 - # via - # -r tools/docs/requirements.txt - # sphinx -sphinxcontrib-devhelp==1.0.2 \ - --hash=sha256:8165223f9a335cc1af7ffe1ed31d2871f325254c0423bc0c4c7cd1c1e4734a2e \ - --hash=sha256:ff7f1afa7b9642e7060379360a67e9c41e8f3121f2ce9164266f61b9f4b338e4 - # via - # -r tools/docs/requirements.txt - # sphinx -sphinxcontrib-htmlhelp==2.0.0 \ - --hash=sha256:d412243dfb797ae3ec2b59eca0e52dac12e75a241bf0e4eb861e450d06c6ed07 \ - --hash=sha256:f5f8bb2d0d629f398bf47d0d69c07bc13b65f75a81ad9e2f71a63d4b7a2f6db2 - # via - # -r tools/docs/requirements.txt - # sphinx -sphinxcontrib-httpdomain==1.7.0 \ - --hash=sha256:1fb5375007d70bf180cdd1c79e741082be7aa2d37ba99efe561e1c2e3f38191e \ - --hash=sha256:ac40b4fba58c76b073b03931c7b8ead611066a6aebccafb34dc19694f4eb6335 - # via -r tools/docs/requirements.txt -sphinxcontrib-jsmath==1.0.1 \ - --hash=sha256:2ec2eaebfb78f3f2078e73666b1415417a116cc848b72e5172e596c871103178 \ - --hash=sha256:a9925e4a4587247ed2191a22df5f6970656cb8ca2bd6284309578f2153e0c4b8 - # via - # -r tools/docs/requirements.txt - # sphinx -sphinxcontrib-qthelp==1.0.3 \ - --hash=sha256:4c33767ee058b70dba89a6fc5c1892c0d57a54be67ddd3e7875a18d14cba5a72 \ - --hash=sha256:bd9fc24bcb748a8d51fd4ecaade681350aa63009a347a8c14e637895444dfab6 - # via - # -r tools/docs/requirements.txt - # sphinx -sphinxcontrib-serializinghtml==1.1.5 \ - --hash=sha256:352a9a00ae864471d3a7ead8d7d79f5fc0b57e8b3f95e9867eb9eb28999b92fd \ - --hash=sha256:aa5f6de5dfdf809ef505c4895e51ef5c9eac17d0f287933eb49ec495280b6952 - # via - # -r tools/docs/requirements.txt - # sphinx -sphinxext-rediraffe==0.2.7 \ - --hash=sha256:651dcbfae5ffda9ffd534dfb8025f36120e5efb6ea1a33f5420023862b9f725d \ - --hash=sha256:9e430a52d4403847f4ffb3a8dd6dfc34a9fe43525305131f52ed899743a5fd8c - # via 
-r tools/docs/requirements.txt -urllib3==1.26.6 \ - --hash=sha256:39fb8672126159acb139a7718dd10806104dec1e2f0f6c88aab05d17df10c8d4 \ - --hash=sha256:f57b4c16c62fa2760b7e3d97c35b255512fb6b59a259730f36ba32ce9f8e342f - # via - # -r tools/docs/requirements.txt - # requests - -# The following packages are considered to be unsafe in a requirements file: -setuptools==57.0.0 \ - --hash=sha256:401cbf33a7bf817d08014d51560fc003b895c4cdc1a5b521ad2969e928a07535 \ - --hash=sha256:c8b9f1a457949002e358fea7d3f2a1e1b94ddc0354b2e40afc066bf95d21bf7b - # via - # -r tools/docs/requirements.txt - # sphinx diff --git a/tools/git/BUILD b/tools/git/BUILD index d7baad83ad3f3..46f740a6b6641 100644 --- a/tools/git/BUILD +++ b/tools/git/BUILD @@ -1,4 +1,4 @@ -load("@git_pip3//:requirements.bzl", "requirement") +load("@base_pip3//:requirements.bzl", "requirement") load("//bazel:envoy_build_system.bzl", "envoy_package") load("//tools/base:envoy_python.bzl", "envoy_py_library") diff --git a/tools/git/requirements.txt b/tools/git/requirements.txt deleted file mode 100644 index 886a35eec12ab..0000000000000 --- a/tools/git/requirements.txt +++ /dev/null @@ -1,18 +0,0 @@ -# -# This file is autogenerated by pip-compile -# To update, run: -# -# pip-compile --generate-hashes tools/git/requirements.txt -# -gitdb==4.0.7 \ - --hash=sha256:6c4cc71933456991da20917998acbe6cf4fb41eeaab7d6d67fbc05ecd4c865b0 \ - --hash=sha256:96bf5c08b157a666fec41129e6d327235284cca4c81e92109260f353ba138005 - # via gitpython -gitpython==3.1.18 \ - --hash=sha256:b838a895977b45ab6f0cc926a9045c8d1c44e2b653c1fcc39fe91f42c6e8f05b \ - --hash=sha256:fce760879cd2aebd2991b3542876dc5c4a909b30c9d69dfc488e504a8db37ee8 - # via -r tools/git/requirements.txt -smmap==4.0.0 \ - --hash=sha256:7e65386bd122d45405ddf795637b7f7d2b532e7e401d46bbe3fb49b9986d5182 \ - --hash=sha256:a9a7479e4c572e2e775c404dcd3080c8dc49f39918c2cf74913d30c4c478e3c2 - # via gitdb diff --git a/tools/protodoc/BUILD b/tools/protodoc/BUILD index 0636bd08fbc75..4eb0e6f5cf9ec 100644 
--- a/tools/protodoc/BUILD +++ b/tools/protodoc/BUILD @@ -1,5 +1,5 @@ load("@rules_python//python:defs.bzl", "py_binary") -load("@protodoc_pip3//:requirements.bzl", "requirement") +load("@base_pip3//:requirements.bzl", "requirement") load("//bazel:envoy_build_system.bzl", "envoy_package", "envoy_proto_library") load("//tools/protodoc:protodoc.bzl", "protodoc_rule") diff --git a/tools/protodoc/requirements.txt b/tools/protodoc/requirements.txt deleted file mode 100644 index 1cd69909b9962..0000000000000 --- a/tools/protodoc/requirements.txt +++ /dev/null @@ -1,38 +0,0 @@ -Jinja2==3.0.1 \ - --hash=sha256:1f06f2da51e7b56b8f238affdd6b4e2c61e39598a378cc49345bc1bd42a978a4 \ - --hash=sha256:703f484b47a6af502e743c9122595cc812b0271f661722403114f71a79d0f5a4 -MarkupSafe==2.0.1 \ - --hash=sha256:f9081981fe268bd86831e5c75f7de206ef275defcb82bc70740ae6dc507aee51 \ - --hash=sha256:0955295dd5eec6cb6cc2fe1698f4c6d84af2e92de33fbcac4111913cd100a6ff \ - --hash=sha256:0446679737af14f45767963a1a9ef7620189912317d095f2d9ffa183a4d25d2b \ - --hash=sha256:f826e31d18b516f653fe296d967d700fddad5901ae07c622bb3705955e1faa94 \ - --hash=sha256:fa130dd50c57d53368c9d59395cb5526eda596d3ffe36666cd81a44d56e48872 \ - --hash=sha256:905fec760bd2fa1388bb5b489ee8ee5f7291d692638ea5f67982d968366bef9f \ - --hash=sha256:6c4ca60fa24e85fe25b912b01e62cb969d69a23a5d5867682dd3e80b5b02581d \ - --hash=sha256:b2f4bf27480f5e5e8ce285a8c8fd176c0b03e93dcc6646477d4630e83440c6a9 \ - --hash=sha256:0717a7390a68be14b8c793ba258e075c6f4ca819f15edfc2a3a027c823718567 \ - --hash=sha256:6557b31b5e2c9ddf0de32a691f2312a32f77cd7681d8af66c2692efdbef84c18 \ - --hash=sha256:49e3ceeabbfb9d66c3aef5af3a60cc43b85c33df25ce03d0031a608b0a8b2e3f \ - --hash=sha256:d7f9850398e85aba693bb640262d3611788b1f29a79f0c93c565694658f4071f \ - --hash=sha256:6a7fae0dd14cf60ad5ff42baa2e95727c3d81ded453457771d02b7d2b3f9c0c2 \ - --hash=sha256:b7f2d075102dc8c794cbde1947378051c4e5180d52d276987b8d28a3bd58c17d \ - 
--hash=sha256:a30e67a65b53ea0a5e62fe23682cfe22712e01f453b95233b25502f7c61cb415 \ - --hash=sha256:611d1ad9a4288cf3e3c16014564df047fe08410e628f89805e475368bd304914 \ - --hash=sha256:be98f628055368795d818ebf93da628541e10b75b41c559fdf36d104c5787066 \ - --hash=sha256:1d609f577dc6e1aa17d746f8bd3c31aa4d258f4070d61b2aa5c4166c1539de35 \ - --hash=sha256:7d91275b0245b1da4d4cfa07e0faedd5b0812efc15b702576d103293e252af1b \ - --hash=sha256:01a9b8ea66f1658938f65b93a85ebe8bc016e6769611be228d797c9d998dd298 \ - --hash=sha256:47ab1e7b91c098ab893b828deafa1203de86d0bc6ab587b160f78fe6c4011f75 \ - --hash=sha256:97383d78eb34da7e1fa37dd273c20ad4320929af65d156e35a5e2d89566d9dfb \ - --hash=sha256:023cb26ec21ece8dc3907c0e8320058b2e0cb3c55cf9564da612bc325bed5e64 \ - --hash=sha256:984d76483eb32f1bcb536dc27e4ad56bba4baa70be32fa87152832cdd9db0833 \ - --hash=sha256:2ef54abee730b502252bcdf31b10dacb0a416229b72c18b19e24a4509f273d26 \ - --hash=sha256:3c112550557578c26af18a1ccc9e090bfe03832ae994343cfdacd287db6a6ae7 \ - --hash=sha256:53edb4da6925ad13c07b6d26c2a852bd81e364f95301c66e930ab2aef5b5ddd8 \ - --hash=sha256:f5653a225f31e113b152e56f154ccbe59eeb1c7487b39b9d9f9cdb58e6c79dc5 \ - --hash=sha256:4efca8f86c54b22348a5467704e3fec767b2db12fc39c6d963168ab1d3fc9135 \ - --hash=sha256:ab3ef638ace319fa26553db0624c4699e31a28bb2a835c5faca8f8acf6a5a902 \ - --hash=sha256:f8ba0e8349a38d3001fae7eadded3f6606f0da5d748ee53cc1dab1d6527b9509 \ - --hash=sha256:10f82115e21dc0dfec9ab5c0223652f7197feb168c940f3ef61563fc2d6beb74 \ - --hash=sha256:693ce3f9e70a6cf7d2fb9e6c9d8b204b6b39897a2c4a1aa65728d5ac97dcc1d8 \ - --hash=sha256:594c67807fb16238b30c44bdf74f36c02cdf22d1c8cda91ef8a0ed8dabf5620a diff --git a/tools/testing/BUILD b/tools/testing/BUILD index a740e89ad5e32..35b9cf843a286 100644 --- a/tools/testing/BUILD +++ b/tools/testing/BUILD @@ -1,5 +1,5 @@ load("@rules_python//python:defs.bzl", "py_library") -load("@testing_pip3//:requirements.bzl", "requirement") +load("@base_pip3//:requirements.bzl", "requirement") 
load("//bazel:envoy_build_system.bzl", "envoy_package") load("//tools/base:envoy_python.bzl", "envoy_py_binary") diff --git a/tools/testing/requirements.txt b/tools/testing/requirements.txt deleted file mode 100644 index 0c4dc20def2bb..0000000000000 --- a/tools/testing/requirements.txt +++ /dev/null @@ -1,124 +0,0 @@ -# -# This file is autogenerated by pip-compile -# To update, run: -# -# pip-compile --generate-hashes tools/testing/requirements.txt -# -attrs==21.2.0 \ - --hash=sha256:149e90d6d8ac20db7a955ad60cf0e6881a3f20d37096140088356da6c716b0b1 \ - --hash=sha256:ef6aaac3ca6cd92904cdd0d83f629a15f18053ec84e6432106f7a4d04ae4f5fb - # via - # -r tools/testing/requirements.txt - # pytest -coverage==5.5 \ - --hash=sha256:004d1880bed2d97151facef49f08e255a20ceb6f9432df75f4eef018fdd5a78c \ - --hash=sha256:01d84219b5cdbfc8122223b39a954820929497a1cb1422824bb86b07b74594b6 \ - --hash=sha256:040af6c32813fa3eae5305d53f18875bedd079960822ef8ec067a66dd8afcd45 \ - --hash=sha256:06191eb60f8d8a5bc046f3799f8a07a2d7aefb9504b0209aff0b47298333302a \ - --hash=sha256:13034c4409db851670bc9acd836243aeee299949bd5673e11844befcb0149f03 \ - --hash=sha256:13c4ee887eca0f4c5a247b75398d4114c37882658300e153113dafb1d76de529 \ - --hash=sha256:184a47bbe0aa6400ed2d41d8e9ed868b8205046518c52464fde713ea06e3a74a \ - --hash=sha256:18ba8bbede96a2c3dde7b868de9dcbd55670690af0988713f0603f037848418a \ - --hash=sha256:1aa846f56c3d49205c952d8318e76ccc2ae23303351d9270ab220004c580cfe2 \ - --hash=sha256:217658ec7187497e3f3ebd901afdca1af062b42cfe3e0dafea4cced3983739f6 \ - --hash=sha256:24d4a7de75446be83244eabbff746d66b9240ae020ced65d060815fac3423759 \ - --hash=sha256:2910f4d36a6a9b4214bb7038d537f015346f413a975d57ca6b43bf23d6563b53 \ - --hash=sha256:2949cad1c5208b8298d5686d5a85b66aae46d73eec2c3e08c817dd3513e5848a \ - --hash=sha256:2a3859cb82dcbda1cfd3e6f71c27081d18aa251d20a17d87d26d4cd216fb0af4 \ - --hash=sha256:2cafbbb3af0733db200c9b5f798d18953b1a304d3f86a938367de1567f4b5bff \ - 
--hash=sha256:2e0d881ad471768bf6e6c2bf905d183543f10098e3b3640fc029509530091502 \ - --hash=sha256:30c77c1dc9f253283e34c27935fded5015f7d1abe83bc7821680ac444eaf7793 \ - --hash=sha256:3487286bc29a5aa4b93a072e9592f22254291ce96a9fbc5251f566b6b7343cdb \ - --hash=sha256:372da284cfd642d8e08ef606917846fa2ee350f64994bebfbd3afb0040436905 \ - --hash=sha256:41179b8a845742d1eb60449bdb2992196e211341818565abded11cfa90efb821 \ - --hash=sha256:44d654437b8ddd9eee7d1eaee28b7219bec228520ff809af170488fd2fed3e2b \ - --hash=sha256:4a7697d8cb0f27399b0e393c0b90f0f1e40c82023ea4d45d22bce7032a5d7b81 \ - --hash=sha256:51cb9476a3987c8967ebab3f0fe144819781fca264f57f89760037a2ea191cb0 \ - --hash=sha256:52596d3d0e8bdf3af43db3e9ba8dcdaac724ba7b5ca3f6358529d56f7a166f8b \ - --hash=sha256:53194af30d5bad77fcba80e23a1441c71abfb3e01192034f8246e0d8f99528f3 \ - --hash=sha256:5fec2d43a2cc6965edc0bb9e83e1e4b557f76f843a77a2496cbe719583ce8184 \ - --hash=sha256:6c90e11318f0d3c436a42409f2749ee1a115cd8b067d7f14c148f1ce5574d701 \ - --hash=sha256:74d881fc777ebb11c63736622b60cb9e4aee5cace591ce274fb69e582a12a61a \ - --hash=sha256:7501140f755b725495941b43347ba8a2777407fc7f250d4f5a7d2a1050ba8e82 \ - --hash=sha256:796c9c3c79747146ebd278dbe1e5c5c05dd6b10cc3bcb8389dfdf844f3ead638 \ - --hash=sha256:869a64f53488f40fa5b5b9dcb9e9b2962a66a87dab37790f3fcfb5144b996ef5 \ - --hash=sha256:8963a499849a1fc54b35b1c9f162f4108017b2e6db2c46c1bed93a72262ed083 \ - --hash=sha256:8d0a0725ad7c1a0bcd8d1b437e191107d457e2ec1084b9f190630a4fb1af78e6 \ - --hash=sha256:900fbf7759501bc7807fd6638c947d7a831fc9fdf742dc10f02956ff7220fa90 \ - --hash=sha256:92b017ce34b68a7d67bd6d117e6d443a9bf63a2ecf8567bb3d8c6c7bc5014465 \ - --hash=sha256:970284a88b99673ccb2e4e334cfb38a10aab7cd44f7457564d11898a74b62d0a \ - --hash=sha256:972c85d205b51e30e59525694670de6a8a89691186012535f9d7dbaa230e42c3 \ - --hash=sha256:9a1ef3b66e38ef8618ce5fdc7bea3d9f45f3624e2a66295eea5e57966c85909e \ - --hash=sha256:af0e781009aaf59e25c5a678122391cb0f345ac0ec272c7961dc5455e1c40066 \ - 
--hash=sha256:b6d534e4b2ab35c9f93f46229363e17f63c53ad01330df9f2d6bd1187e5eaacf \ - --hash=sha256:b7895207b4c843c76a25ab8c1e866261bcfe27bfaa20c192de5190121770672b \ - --hash=sha256:c0891a6a97b09c1f3e073a890514d5012eb256845c451bd48f7968ef939bf4ae \ - --hash=sha256:c2723d347ab06e7ddad1a58b2a821218239249a9e4365eaff6649d31180c1669 \ - --hash=sha256:d1f8bf7b90ba55699b3a5e44930e93ff0189aa27186e96071fac7dd0d06a1873 \ - --hash=sha256:d1f9ce122f83b2305592c11d64f181b87153fc2c2bbd3bb4a3dde8303cfb1a6b \ - --hash=sha256:d314ed732c25d29775e84a960c3c60808b682c08d86602ec2c3008e1202e3bb6 \ - --hash=sha256:d636598c8305e1f90b439dbf4f66437de4a5e3c31fdf47ad29542478c8508bbb \ - --hash=sha256:deee1077aae10d8fa88cb02c845cfba9b62c55e1183f52f6ae6a2df6a2187160 \ - --hash=sha256:ebe78fe9a0e874362175b02371bdfbee64d8edc42a044253ddf4ee7d3c15212c \ - --hash=sha256:f030f8873312a16414c0d8e1a1ddff2d3235655a2174e3648b4fa66b3f2f1079 \ - --hash=sha256:f0b278ce10936db1a37e6954e15a3730bea96a0997c26d7fee88e6c396c2086d \ - --hash=sha256:f11642dddbb0253cc8853254301b51390ba0081750a8ac03f20ea8103f0c56b6 - # via - # -r tools/testing/requirements.txt - # pytest-cov -iniconfig==1.1.1 \ - --hash=sha256:011e24c64b7f47f6ebd835bb12a743f2fbe9a26d4cecaa7f53bc4f35ee9da8b3 \ - --hash=sha256:bc3af051d7d14b2ee5ef9969666def0cd1a000e121eaea580d4a313df4b37f32 - # via - # -r tools/testing/requirements.txt - # pytest -packaging==21.0 \ - --hash=sha256:7dc96269f53a4ccec5c0670940a4281106dd0bb343f47b7471f779df49c2fbe7 \ - --hash=sha256:c86254f9220d55e31cc94d69bade760f0847da8000def4dfe1c6b872fd14ff14 - # via - # -r tools/testing/requirements.txt - # pytest -pluggy==1.0.0 \ - --hash=sha256:74134bbf457f031a36d68416e1509f34bd5ccc019f0bcc952c7b909d06b37bd3 \ - --hash=sha256:4224373bacce55f955a878bf9cfa763c1e360858e330072059e10bad68531159 - # via - # -r tools/testing/requirements.txt - # pytest -py==1.10.0 \ - --hash=sha256:21b81bda15b66ef5e1a777a21c4dcd9c20ad3efd0b3f817e7a809035269e1bd3 \ - 
--hash=sha256:3b80836aa6d1feeaa108e046da6423ab8f6ceda6468545ae8d02d9d58d18818a - # via - # -r tools/testing/requirements.txt - # pytest -pyparsing==2.4.7 \ - --hash=sha256:c203ec8783bf771a155b207279b9bccb8dea02d8f0c9e5f8ead507bc3246ecc1 \ - --hash=sha256:ef9d7589ef3c200abe66653d3f1ab1033c3c419ae9b9bdb1240a85b024efc88b - # via - # -r tools/testing/requirements.txt - # packaging -pytest-asyncio==0.15.1 \ - --hash=sha256:2564ceb9612bbd560d19ca4b41347b54e7835c2f792c504f698e05395ed63f6f \ - --hash=sha256:3042bcdf1c5d978f6b74d96a151c4cfb9dcece65006198389ccd7e6c60eb1eea - # via -r tools/testing/requirements.txt -pytest-cov==2.12.1 \ - --hash=sha256:261bb9e47e65bd099c89c3edf92972865210c36813f80ede5277dceb77a4a62a \ - --hash=sha256:261ceeb8c227b726249b376b8526b600f38667ee314f910353fa318caa01f4d7 - # via -r tools/testing/requirements.txt -pytest-patches==0.0.3 \ - --hash=sha256:6f8cdc8641c708c4812f58ae48d410f373a6fd16cd6cc4dc4d3fb8951df9c92a - # via -r tools/testing/requirements.txt -pytest==6.2.5 \ - --hash=sha256:7310f8d27bc79ced999e760ca304d69f6ba6c6649c0b60fb0e04a4a77cacc134 \ - --hash=sha256:131b36680866a76e6781d13f101efb86cf674ebb9762eb70d3082b6f29889e89 - # via - # -r tools/testing/requirements.txt - # pytest-asyncio - # pytest-cov - # pytest-patches -toml==0.10.2 \ - --hash=sha256:806143ae5bfb6a3c6e736a764057db0e6a0e05e338b5630894a5f779cabb4f9b \ - --hash=sha256:b3bda1d108d5dd99f4a20d24d9c348e91c4db7ab1b749200bded2f839ccbe68f - # via - # -r tools/testing/requirements.txt - # pytest - # pytest-cov From 97b0271cd4db3a062e8bd9d5dac0d4c379b29ccd Mon Sep 17 00:00:00 2001 From: phlax Date: Fri, 10 Sep 2021 19:55:11 +0100 Subject: [PATCH 016/121] deps: Fix for kafka requirement (#18072) Signed-off-by: Ryan Northey Signed-off-by: gayang --- contrib/kafka/filters/network/test/mesh/integration_test/BUILD | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contrib/kafka/filters/network/test/mesh/integration_test/BUILD 
b/contrib/kafka/filters/network/test/mesh/integration_test/BUILD index 295dcd6302177..449098f15e15f 100644 --- a/contrib/kafka/filters/network/test/mesh/integration_test/BUILD +++ b/contrib/kafka/filters/network/test/mesh/integration_test/BUILD @@ -3,7 +3,7 @@ load( "envoy_contrib_package", "envoy_py_test", ) -load("@kafka_pip3//:requirements.bzl", "requirement") +load("@base_pip3//:requirements.bzl", "requirement") licenses(["notice"]) # Apache 2 From 2c753c1cc3e9e7198acfc629777c89b05f29c5e0 Mon Sep 17 00:00:00 2001 From: phlax Date: Fri, 10 Sep 2021 20:14:25 +0100 Subject: [PATCH 017/121] docker: Add distroless image as an artefact (#18068) Signed-off-by: Ryan Northey Signed-off-by: gayang --- ci/docker_ci.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ci/docker_ci.sh b/ci/docker_ci.sh index 7fb99271c63cc..9eb97f75afe62 100755 --- a/ci/docker_ci.sh +++ b/ci/docker_ci.sh @@ -147,7 +147,7 @@ for BUILD_TYPE in "${BUILD_TYPES[@]}"; do build_images "${BUILD_TYPE}" "$image_tag" if ! 
is_windows; then - if [[ "$BUILD_TYPE" == "" || "$BUILD_TYPE" == "-contrib" || "$BUILD_TYPE" == "-alpine" ]]; then + if [[ "$BUILD_TYPE" == "" || "$BUILD_TYPE" == "-contrib" || "$BUILD_TYPE" == "-alpine" || "$BUILD_TYPE" == "-distroless" ]]; then # verify_examples expects the base and alpine images, and for them to be named `-dev` dev_image="envoyproxy/envoy${BUILD_TYPE}-dev:latest" docker tag "$image_tag" "$dev_image" From 96fab554c14fc6a2d125138a82e4686404199e69 Mon Sep 17 00:00:00 2001 From: phlax Date: Fri, 10 Sep 2021 22:30:01 +0100 Subject: [PATCH 018/121] cve: Suppress false +ves (#18070) Signed-off-by: Ryan Northey Signed-off-by: gayang --- tools/dependency/cve_scan.py | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/tools/dependency/cve_scan.py b/tools/dependency/cve_scan.py index ca2d2a144253c..5cdee9a90af33 100755 --- a/tools/dependency/cve_scan.py +++ b/tools/dependency/cve_scan.py @@ -47,6 +47,9 @@ 'CVE-2020-8169', 'CVE-2020-8177', 'CVE-2020-8284', + # Low severity Curl issue with incorrect re-use of connections due to case + # in/sensitivity + 'CVE-2021-22924', # Node.js issue unrelated to http-parser (Node TLS). 'CVE-2020-8265', # Node.js request smuggling. @@ -64,8 +67,14 @@ # Node.js issues unrelated to http-parser. # See https://nvd.nist.gov/vuln/detail/CVE-2021-22918 # See https://nvd.nist.gov/vuln/detail/CVE-2021-22921 + # See https://nvd.nist.gov/vuln/detail/CVE-2021-22931 + # See https://nvd.nist.gov/vuln/detail/CVE-2021-22939 + # See https://nvd.nist.gov/vuln/detail/CVE-2021-22940 'CVE-2021-22918', 'CVE-2021-22921', + 'CVE-2021-22931', + 'CVE-2021-22939', + 'CVE-2021-22940', ]) # Subset of CVE fields that are useful below. 
From be6d058f55e910e166f87ae0acff38f4a59ceb9f Mon Sep 17 00:00:00 2001 From: phlax Date: Fri, 10 Sep 2021 23:51:54 +0100 Subject: [PATCH 019/121] tooling: Fix sync_assignable (#17997) Signed-off-by: Ryan Northey Signed-off-by: gayang --- tools/github/BUILD | 9 +++++++++ tools/github/sync_assignable.sh | 7 ------- 2 files changed, 9 insertions(+), 7 deletions(-) delete mode 100755 tools/github/sync_assignable.sh diff --git a/tools/github/BUILD b/tools/github/BUILD index 779d1695d3b7c..ae7eae1cf310d 100644 --- a/tools/github/BUILD +++ b/tools/github/BUILD @@ -1 +1,10 @@ +load("@rules_python//python:defs.bzl", "py_binary") +load("@base_pip3//:requirements.bzl", "requirement") + licenses(["notice"]) # Apache 2 + +py_binary( + name = "sync_assignable", + srcs = ["sync_assignable.py"], + deps = [requirement("pygithub")], +) diff --git a/tools/github/sync_assignable.sh b/tools/github/sync_assignable.sh deleted file mode 100755 index ac11d9ccc3c86..0000000000000 --- a/tools/github/sync_assignable.sh +++ /dev/null @@ -1,7 +0,0 @@ -#!/bin/bash - -. 
tools/shell_utils.sh - -set -e - -python_venv sync_assignable From cf190770f0f69ecf7969be2bfe135ebedf46f633 Mon Sep 17 00:00:00 2001 From: Yuchen Dai Date: Fri, 10 Sep 2021 15:54:32 -0700 Subject: [PATCH 020/121] add empty() to PriorityConnPoolMap (#18057) Signed-off-by: Yuchen Dai Signed-off-by: gayang --- source/common/upstream/cluster_manager_impl.cc | 6 +++--- source/common/upstream/conn_pool_map.h | 5 +++++ source/common/upstream/conn_pool_map_impl.h | 5 +++++ source/common/upstream/priority_conn_pool_map.h | 5 +++++ source/common/upstream/priority_conn_pool_map_impl.h | 10 ++++++++++ .../upstream/priority_conn_pool_map_impl_test.cc | 4 ++-- 6 files changed, 30 insertions(+), 5 deletions(-) diff --git a/source/common/upstream/cluster_manager_impl.cc b/source/common/upstream/cluster_manager_impl.cc index cbe3507907f5d..6e1f4e1b73587 100644 --- a/source/common/upstream/cluster_manager_impl.cc +++ b/source/common/upstream/cluster_manager_impl.cc @@ -1200,7 +1200,7 @@ void ClusterManagerImpl::ThreadLocalClusterManagerImpl::drainConnPools( pools->drainConnections(Envoy::ConnectionPool::DrainBehavior::DrainAndDelete); container.do_not_delete_ = false; - if (container.pools_->size() == 0) { + if (container.pools_->empty()) { host_http_conn_pool_map_.erase(old_host); } } @@ -1393,7 +1393,7 @@ void ClusterManagerImpl::ThreadLocalClusterManagerImpl::drainAllConnPoolsWorker( Envoy::ConnectionPool::DrainBehavior::DrainExistingConnections); container->do_not_delete_ = false; - if (container->pools_->size() == 0) { + if (container->pools_->empty()) { host_http_conn_pool_map_.erase(host); } } @@ -1539,7 +1539,7 @@ void ClusterManagerImpl::ThreadLocalClusterManagerImpl::httpConnPoolIsIdle( // Guard deletion of the container with `do_not_delete_` to avoid deletion while // iterating through the container in `container->pools_->startDrain()`. See // comment in `ClusterManagerImpl::ThreadLocalClusterManagerImpl::drainConnPools`. 
- if (!container->do_not_delete_ && container->pools_->size() == 0) { + if (!container->do_not_delete_ && container->pools_->empty()) { ENVOY_LOG(trace, "Pool container empty for host {}, erasing host entry", host); host_http_conn_pool_map_.erase( host); // NOTE: `container` is erased after this point in the lambda. diff --git a/source/common/upstream/conn_pool_map.h b/source/common/upstream/conn_pool_map.h index b3840c3600cd6..6b9891cc84f64 100644 --- a/source/common/upstream/conn_pool_map.h +++ b/source/common/upstream/conn_pool_map.h @@ -46,6 +46,11 @@ template class ConnPoolMap { */ size_t size() const; + /** + * @return true if the pools are empty. + */ + size_t empty() const; + /** * Destroys all mapped pools. */ diff --git a/source/common/upstream/conn_pool_map_impl.h b/source/common/upstream/conn_pool_map_impl.h index 63db84a047412..6df615ce013cf 100644 --- a/source/common/upstream/conn_pool_map_impl.h +++ b/source/common/upstream/conn_pool_map_impl.h @@ -80,6 +80,11 @@ size_t ConnPoolMap::size() const { return active_pools_.size(); } +template +size_t ConnPoolMap::empty() const { + return active_pools_.empty(); +} + template void ConnPoolMap::clear() { Common::AutoDebugRecursionChecker assert_not_in(recursion_checker_); for (auto& pool_pair : active_pools_) { diff --git a/source/common/upstream/priority_conn_pool_map.h b/source/common/upstream/priority_conn_pool_map.h index d3c3c66bd4714..fc69b19ea6e23 100644 --- a/source/common/upstream/priority_conn_pool_map.h +++ b/source/common/upstream/priority_conn_pool_map.h @@ -38,6 +38,11 @@ template class PriorityConnPoolMap { */ size_t size() const; + /** + * @return true if the pools across all priorities are empty. + */ + bool empty() const; + /** * Destroys all mapped pools. 
*/ diff --git a/source/common/upstream/priority_conn_pool_map_impl.h b/source/common/upstream/priority_conn_pool_map_impl.h index a706938b8e182..855da91d0bfc0 100644 --- a/source/common/upstream/priority_conn_pool_map_impl.h +++ b/source/common/upstream/priority_conn_pool_map_impl.h @@ -40,6 +40,16 @@ size_t PriorityConnPoolMap::size() const { return size; } +template +bool PriorityConnPoolMap::empty() const { + for (const auto& pool_map : conn_pool_maps_) { + if (!pool_map->empty()) { + return false; + } + } + return true; +} + template void PriorityConnPoolMap::clear() { for (auto& pool_map : conn_pool_maps_) { diff --git a/test/common/upstream/priority_conn_pool_map_impl_test.cc b/test/common/upstream/priority_conn_pool_map_impl_test.cc index db48e6afc799d..03856fd544f82 100644 --- a/test/common/upstream/priority_conn_pool_map_impl_test.cc +++ b/test/common/upstream/priority_conn_pool_map_impl_test.cc @@ -108,7 +108,7 @@ TEST_F(PriorityConnPoolMapImplTest, TestClearEmptiesOut) { test_map->getPool(ResourcePriority::Default, 2, getBasicFactory()); test_map->clear(); - EXPECT_EQ(test_map->size(), 0); + EXPECT_TRUE(test_map->empty()); } TEST_F(PriorityConnPoolMapImplTest, TestErase) { @@ -124,7 +124,7 @@ TEST_F(PriorityConnPoolMapImplTest, TestErase) { EXPECT_EQ(2, test_map->size()); EXPECT_TRUE(test_map->erasePool(ResourcePriority::Default, 1)); EXPECT_TRUE(test_map->erasePool(ResourcePriority::High, 1)); - EXPECT_EQ(0, test_map->size()); + EXPECT_TRUE(test_map->empty()); EXPECT_NE(pool_ptr, &test_map->getPool(ResourcePriority::High, 1, getBasicFactory()).value().get()); } From e34cd21372c195188cdd87fb2d75288016a3bae3 Mon Sep 17 00:00:00 2001 From: alyssawilk Date: Fri, 10 Sep 2021 18:54:56 -0400 Subject: [PATCH 021/121] quic: attempting to fix non-linux QUIC builds (#18027) Signed-off-by: Alyssa Wilk Signed-off-by: gayang --- source/common/quic/udp_gso_batch_writer.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/source/common/quic/udp_gso_batch_writer.h b/source/common/quic/udp_gso_batch_writer.h index 367f248bd7b4e..06dfc06cf4620 100644 --- a/source/common/quic/udp_gso_batch_writer.h +++ b/source/common/quic/udp_gso_batch_writer.h @@ -1,6 +1,6 @@ #pragma once -#if !defined(__linux__) +#if !defined(__linux__) || defined(__ANDROID_API__) #define UDP_GSO_BATCH_WRITER_COMPILETIME_SUPPORT 0 #else #define UDP_GSO_BATCH_WRITER_COMPILETIME_SUPPORT 1 From fa5dcce026012b154a3bade17f57c77beb0c09bb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Guti=C3=A9rrez=20Segal=C3=A9s?= Date: Fri, 10 Sep 2021 20:07:41 -0400 Subject: [PATCH 022/121] tests: rename addFilter() to prependFilter() (#18059) To avoid confusion when writing integration tests, make it clear that filters are prepended. Signed-off-by: Raul Gutierrez Segales Signed-off-by: gayang --- .../test/squash_filter_integration_test.cc | 2 +- test/config/utility.cc | 4 +- test/config/utility.h | 12 +- ...tive_concurrency_filter_integration_test.h | 2 +- .../admission_control_integration_test.cc | 2 +- .../filter_integration_test.cc | 2 +- .../aws_lambda_filter_integration_test.cc | 2 +- .../buffer/buffer_filter_integration_test.cc | 20 ++-- .../cache/cache_filter_integration_test.cc | 2 +- .../http/cdn_loop/filter_integration_test.cc | 16 +-- .../composite_filter_integration_test.cc | 2 +- .../compressor_filter_integration_test.cc | 2 +- .../compressor_integration_tests.cc | 4 +- .../http/cors/cors_filter_integration_test.cc | 2 +- .../http/csrf/csrf_filter_integration_test.cc | 22 ++-- .../decompressor_filter_integration_test.cc | 9 +- .../proxy_filter_integration_test.cc | 2 +- .../ext_authz/ext_authz_integration_test.cc | 6 +- .../http/ext_proc/ext_proc_grpc_fuzz.cc | 2 +- .../ext_proc/ext_proc_integration_test.cc | 2 +- .../ext_proc/streaming_integration_test.cc | 2 +- .../fault/fault_filter_integration_test.cc | 2 +- .../reverse_bridge_integration_test.cc | 2 +- .../grpc_json_transcoder_integration_test.cc | 20 ++-- 
.../grpc_web_filter_integration_test.cc | 2 +- .../http/jwt_authn/filter_integration_test.cc | 22 ++-- .../kill_request/crash_integration_test.cc | 18 +-- .../kill_request_filter_integration_test.cc | 2 +- .../local_ratelimit_integration_test.cc | 2 +- .../filters/http/lua/lua_integration_test.cc | 6 +- .../http/oauth2/oauth_integration_test.cc | 2 +- .../ratelimit/ratelimit_integration_test.cc | 2 +- .../http/rbac/rbac_filter_integration_test.cc | 30 ++--- .../http/tap/tap_filter_integration_test.cc | 4 +- ...reserve_case_formatter_integration_test.cc | 2 +- .../alts/alts_integration_test.cc | 2 +- test/integration/README.md | 2 +- .../drain_close_integration_test.cc | 4 +- test/integration/eds_integration_test.cc | 2 +- .../http2_flood_integration_test.cc | 2 +- test/integration/http_integration.cc | 4 +- .../idle_timeout_integration_test.cc | 4 +- test/integration/integration_admin_test.cc | 2 +- test/integration/integration_admin_test.h | 2 +- test/integration/integration_test.cc | 32 +++--- .../multiplexed_integration_test.cc | 20 ++-- .../multiplexed_integration_test.h | 7 +- .../multiplexed_upstream_integration_test.cc | 6 +- test/integration/protocol_integration_test.cc | 108 +++++++++--------- test/integration/redirect_integration_test.cc | 2 +- .../scoped_rds_integration_test.cc | 12 +- .../sds_generic_secret_integration_test.cc | 2 +- test/integration/version_integration_test.cc | 8 +- .../integration/websocket_integration_test.cc | 2 +- 54 files changed, 231 insertions(+), 227 deletions(-) diff --git a/contrib/squash/filters/http/test/squash_filter_integration_test.cc b/contrib/squash/filters/http/test/squash_filter_integration_test.cc index 83f10a7069c66..938d2e6a59d40 100644 --- a/contrib/squash/filters/http/test/squash_filter_integration_test.cc +++ b/contrib/squash/filters/http/test/squash_filter_integration_test.cc @@ -82,7 +82,7 @@ class SquashFilterIntegrationTest : public testing::TestWithParamadd_clusters(); diff --git a/test/config/utility.cc 
b/test/config/utility.cc index 4c39561331fa1..0928efd680b86 100644 --- a/test/config/utility.cc +++ b/test/config/utility.cc @@ -1037,7 +1037,9 @@ void ConfigHelper::addVirtualHost(const envoy::config::route::v3::VirtualHost& v storeHttpConnectionManager(hcm_config); } -void ConfigHelper::addFilter(const std::string& config) { +void ConfigHelper::addFilter(const std::string& config) { prependFilter(config); } + +void ConfigHelper::prependFilter(const std::string& config) { RELEASE_ASSERT(!finalized_, ""); envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager hcm_config; diff --git a/test/config/utility.h b/test/config/utility.h index 06865cc36518a..f421c95ba8c10 100644 --- a/test/config/utility.h +++ b/test/config/utility.h @@ -127,13 +127,13 @@ class ConfigHelper { static std::string httpProxyConfig(bool downstream_use_quic = false); // A basic configuration for L7 proxying with QUIC transport. static std::string quicHttpProxyConfig(); - // A string for a basic buffer filter, which can be used with addFilter() + // A string for a basic buffer filter, which can be used with prependFilter() static std::string defaultBufferFilter(); - // A string for a small buffer filter, which can be used with addFilter() + // A string for a small buffer filter, which can be used with prependFilter() static std::string smallBufferFilter(); - // A string for a health check filter which can be used with addFilter() + // A string for a health check filter which can be used with prependFilter() static std::string defaultHealthCheckFilter(); - // A string for a squash filter which can be used with addFilter() + // A string for a squash filter which can be used with prependFilter() static std::string defaultSquashFilter(); // A string for startTls transport socket config. 
static std::string startTlsConfig(); @@ -213,6 +213,10 @@ class ConfigHelper { void addVirtualHost(const envoy::config::route::v3::VirtualHost& vhost); // Add an HTTP filter prior to existing filters. + void prependFilter(const std::string& filter_yaml); + + // Add an HTTP filter prior to existing filters. + // TODO(rgs1): remove once envoy-filter-example has been updated. void addFilter(const std::string& filter_yaml); // Add a network filter prior to existing filters. diff --git a/test/extensions/filters/http/adaptive_concurrency/adaptive_concurrency_filter_integration_test.h b/test/extensions/filters/http/adaptive_concurrency/adaptive_concurrency_filter_integration_test.h index 35a107450298e..022ad5a114bb1 100644 --- a/test/extensions/filters/http/adaptive_concurrency/adaptive_concurrency_filter_integration_test.h +++ b/test/extensions/filters/http/adaptive_concurrency/adaptive_concurrency_filter_integration_test.h @@ -41,7 +41,7 @@ class AdaptiveConcurrencyIntegrationTest void customInit() { setDownstreamProtocol(Http::CodecType::HTTP2); setUpstreamProtocol(Http::CodecType::HTTP2); - config_helper_.addFilter(ADAPTIVE_CONCURRENCY_CONFIG); + config_helper_.prependFilter(ADAPTIVE_CONCURRENCY_CONFIG); initialize(); codec_client_ = makeHttpConnection(lookupPort("http")); } diff --git a/test/extensions/filters/http/admission_control/admission_control_integration_test.cc b/test/extensions/filters/http/admission_control/admission_control_integration_test.cc index 8fb07712f747b..e59dba7611f65 100644 --- a/test/extensions/filters/http/admission_control/admission_control_integration_test.cc +++ b/test/extensions/filters/http/admission_control/admission_control_integration_test.cc @@ -46,7 +46,7 @@ class AdmissionControlIntegrationTest : public Event::TestUsingSimulatedTime, void initialize() override { config_helper_.addConfigModifier(setEnableDownstreamTrailersHttp1()); - config_helper_.addFilter(ADMISSION_CONTROL_CONFIG); + 
config_helper_.prependFilter(ADMISSION_CONTROL_CONFIG); HttpIntegrationTest::initialize(); } diff --git a/test/extensions/filters/http/alternate_protocols_cache/filter_integration_test.cc b/test/extensions/filters/http/alternate_protocols_cache/filter_integration_test.cc index 74660d6a2c6b2..0bfc84ac19fae 100644 --- a/test/extensions/filters/http/alternate_protocols_cache/filter_integration_test.cc +++ b/test/extensions/filters/http/alternate_protocols_cache/filter_integration_test.cc @@ -25,7 +25,7 @@ name: alternate_protocols_cache alternate_protocols_cache_options: name: default_alternate_protocols_cache )EOF"; - config_helper_.addFilter(filter); + config_helper_.prependFilter(filter); upstream_tls_ = true; config_helper_.configureUpstreamTls(/*use_alpn=*/true, /*http3=*/true, diff --git a/test/extensions/filters/http/aws_lambda/aws_lambda_filter_integration_test.cc b/test/extensions/filters/http/aws_lambda/aws_lambda_filter_integration_test.cc index db3227d9a2df4..ce86d677eb696 100644 --- a/test/extensions/filters/http/aws_lambda/aws_lambda_filter_integration_test.cc +++ b/test/extensions/filters/http/aws_lambda/aws_lambda_filter_integration_test.cc @@ -37,7 +37,7 @@ class AwsLambdaFilterIntegrationTest : public testing::TestWithParamset_seconds(2000 * 1000); }); - config_helper_.addFilter(ConfigHelper::smallBufferFilter()); + config_helper_.prependFilter(ConfigHelper::smallBufferFilter()); initialize(); codec_client_ = makeHttpConnection(lookupPort("http")); @@ -142,7 +142,7 @@ ConfigHelper::HttpModifierFunction overrideConfig(const std::string& json_config TEST_P(BufferIntegrationTest, RouteDisabled) { ConfigHelper::HttpModifierFunction mod = overrideConfig(R"EOF({"disabled": true})EOF"); config_helper_.addConfigModifier(mod); - config_helper_.addFilter(ConfigHelper::smallBufferFilter()); + config_helper_.prependFilter(ConfigHelper::smallBufferFilter()); config_helper_.setBufferLimits(1024, 1024); initialize(); @@ -169,7 +169,7 @@ 
TEST_P(BufferIntegrationTest, RouteOverride) { "max_request_bytes": 5242880 }})EOF"); config_helper_.addConfigModifier(mod); - config_helper_.addFilter(ConfigHelper::smallBufferFilter()); + config_helper_.prependFilter(ConfigHelper::smallBufferFilter()); initialize(); diff --git a/test/extensions/filters/http/cache/cache_filter_integration_test.cc b/test/extensions/filters/http/cache/cache_filter_integration_test.cc index d275df8cc48f3..5f09027b73900 100644 --- a/test/extensions/filters/http/cache/cache_filter_integration_test.cc +++ b/test/extensions/filters/http/cache/cache_filter_integration_test.cc @@ -33,7 +33,7 @@ class CacheIntegrationTest : public Event::TestUsingSimulatedTime, } void initializeFilter(const std::string& config) { - config_helper_.addFilter(config); + config_helper_.prependFilter(config); initialize(); codec_client_ = makeHttpConnection(makeClientConnection((lookupPort("http")))); } diff --git a/test/extensions/filters/http/cdn_loop/filter_integration_test.cc b/test/extensions/filters/http/cdn_loop/filter_integration_test.cc index 5e61749d0bf28..1403d84aa5adc 100644 --- a/test/extensions/filters/http/cdn_loop/filter_integration_test.cc +++ b/test/extensions/filters/http/cdn_loop/filter_integration_test.cc @@ -31,7 +31,7 @@ name: envoy.filters.http.cdn_loop class CdnLoopFilterIntegrationTest : public HttpProtocolIntegrationTest {}; TEST_P(CdnLoopFilterIntegrationTest, NoCdnLoopHeader) { - config_helper_.addFilter(MaxDefaultConfig); + config_helper_.prependFilter(MaxDefaultConfig); initialize(); codec_client_ = makeHttpConnection(makeClientConnection(lookupPort("http"))); @@ -48,7 +48,7 @@ TEST_P(CdnLoopFilterIntegrationTest, NoCdnLoopHeader) { } TEST_P(CdnLoopFilterIntegrationTest, CdnLoopHeaderWithOtherCdns) { - config_helper_.addFilter(MaxDefaultConfig); + config_helper_.prependFilter(MaxDefaultConfig); initialize(); codec_client_ = makeHttpConnection(makeClientConnection(lookupPort("http"))); @@ -68,7 +68,7 @@ 
TEST_P(CdnLoopFilterIntegrationTest, CdnLoopHeaderWithOtherCdns) { } TEST_P(CdnLoopFilterIntegrationTest, MultipleCdnLoopHeaders) { - config_helper_.addFilter(MaxDefaultConfig); + config_helper_.prependFilter(MaxDefaultConfig); initialize(); codec_client_ = makeHttpConnection(makeClientConnection(lookupPort("http"))); @@ -86,7 +86,7 @@ TEST_P(CdnLoopFilterIntegrationTest, MultipleCdnLoopHeaders) { } TEST_P(CdnLoopFilterIntegrationTest, CdnLoop0Allowed1Seen) { - config_helper_.addFilter(MaxDefaultConfig); + config_helper_.prependFilter(MaxDefaultConfig); initialize(); codec_client_ = makeHttpConnection(makeClientConnection(lookupPort("http"))); @@ -103,7 +103,7 @@ TEST_P(CdnLoopFilterIntegrationTest, CdnLoop0Allowed1Seen) { } TEST_P(CdnLoopFilterIntegrationTest, UnparseableHeader) { - config_helper_.addFilter(MaxDefaultConfig); + config_helper_.prependFilter(MaxDefaultConfig); initialize(); codec_client_ = makeHttpConnection(makeClientConnection(lookupPort("http"))); @@ -120,7 +120,7 @@ TEST_P(CdnLoopFilterIntegrationTest, UnparseableHeader) { } TEST_P(CdnLoopFilterIntegrationTest, CdnLoop2Allowed1Seen) { - config_helper_.addFilter(MaxOf2Config); + config_helper_.prependFilter(MaxOf2Config); initialize(); codec_client_ = makeHttpConnection(makeClientConnection(lookupPort("http"))); @@ -140,7 +140,7 @@ TEST_P(CdnLoopFilterIntegrationTest, CdnLoop2Allowed1Seen) { } TEST_P(CdnLoopFilterIntegrationTest, CdnLoop2Allowed2Seen) { - config_helper_.addFilter(MaxOf2Config); + config_helper_.prependFilter(MaxOf2Config); initialize(); codec_client_ = makeHttpConnection(makeClientConnection(lookupPort("http"))); @@ -160,7 +160,7 @@ TEST_P(CdnLoopFilterIntegrationTest, CdnLoop2Allowed2Seen) { } TEST_P(CdnLoopFilterIntegrationTest, CdnLoop2Allowed3Seen) { - config_helper_.addFilter(MaxOf2Config); + config_helper_.prependFilter(MaxOf2Config); initialize(); codec_client_ = makeHttpConnection(makeClientConnection(lookupPort("http"))); diff --git 
a/test/extensions/filters/http/composite/composite_filter_integration_test.cc b/test/extensions/filters/http/composite/composite_filter_integration_test.cc index cd7ffec42e28d..551112736898c 100644 --- a/test/extensions/filters/http/composite/composite_filter_integration_test.cc +++ b/test/extensions/filters/http/composite/composite_filter_integration_test.cc @@ -16,7 +16,7 @@ class CompositeFilterIntegrationTest : public testing::TestWithParam void { hcm.mutable_http2_protocol_options()->set_allow_connect(true); }); } - config_helper_.addFilter(compressorFilterConfig); + config_helper_.prependFilter(compressorFilterConfig); HttpProtocolIntegrationTest::initialize(); } @@ -247,7 +247,7 @@ void CompressorProxyingConnectIntegrationTest::initialize() { config_helper_.addConfigModifier( [&](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager& hcm) -> void { ConfigHelper::setConnectConfig(hcm, false, false); }); - config_helper_.addFilter(compressorFilterConfig); + config_helper_.prependFilter(compressorFilterConfig); HttpProtocolIntegrationTest::initialize(); } diff --git a/test/extensions/filters/http/cors/cors_filter_integration_test.cc b/test/extensions/filters/http/cors/cors_filter_integration_test.cc index 77a0522b4bed2..00bd075b37a92 100644 --- a/test/extensions/filters/http/cors/cors_filter_integration_test.cc +++ b/test/extensions/filters/http/cors/cors_filter_integration_test.cc @@ -14,7 +14,7 @@ class CorsFilterIntegrationTest : public testing::TestWithParam void { diff --git a/test/extensions/filters/http/csrf/csrf_filter_integration_test.cc b/test/extensions/filters/http/csrf/csrf_filter_integration_test.cc index 3b8055e79b5e5..c2dc6f4f10dc6 100644 --- a/test/extensions/filters/http/csrf/csrf_filter_integration_test.cc +++ b/test/extensions/filters/http/csrf/csrf_filter_integration_test.cc @@ -79,7 +79,7 @@ INSTANTIATE_TEST_SUITE_P(Protocols, CsrfFilterIntegrationTest, 
HttpProtocolIntegrationTest::protocolTestParamsToString); TEST_P(CsrfFilterIntegrationTest, TestCsrfSuccess) { - config_helper_.addFilter(CSRF_FILTER_ENABLED_CONFIG); + config_helper_.prependFilter(CSRF_FILTER_ENABLED_CONFIG); Http::TestRequestHeaderMapImpl headers = {{ {":method", "PUT"}, {":path", "/"}, @@ -93,7 +93,7 @@ TEST_P(CsrfFilterIntegrationTest, TestCsrfSuccess) { } TEST_P(CsrfFilterIntegrationTest, TestCsrfDisabled) { - config_helper_.addFilter(CSRF_DISABLED_CONFIG); + config_helper_.prependFilter(CSRF_DISABLED_CONFIG); Http::TestRequestHeaderMapImpl headers = {{ {":method", "PUT"}, {":path", "/"}, @@ -107,7 +107,7 @@ TEST_P(CsrfFilterIntegrationTest, TestCsrfDisabled) { } TEST_P(CsrfFilterIntegrationTest, TestNonMutationMethod) { - config_helper_.addFilter(CSRF_FILTER_ENABLED_CONFIG); + config_helper_.prependFilter(CSRF_FILTER_ENABLED_CONFIG); Http::TestRequestHeaderMapImpl headers = {{ {":method", "GET"}, {":path", "/"}, @@ -121,7 +121,7 @@ TEST_P(CsrfFilterIntegrationTest, TestNonMutationMethod) { } TEST_P(CsrfFilterIntegrationTest, TestOriginMismatch) { - config_helper_.addFilter(CSRF_FILTER_ENABLED_CONFIG); + config_helper_.prependFilter(CSRF_FILTER_ENABLED_CONFIG); Http::TestRequestHeaderMapImpl headers = {{ {":method", "PUT"}, {":path", "/"}, @@ -135,7 +135,7 @@ TEST_P(CsrfFilterIntegrationTest, TestOriginMismatch) { } TEST_P(CsrfFilterIntegrationTest, TestEnforcesPost) { - config_helper_.addFilter(CSRF_FILTER_ENABLED_CONFIG); + config_helper_.prependFilter(CSRF_FILTER_ENABLED_CONFIG); Http::TestRequestHeaderMapImpl headers = {{ {":method", "POST"}, {":path", "/"}, @@ -149,7 +149,7 @@ TEST_P(CsrfFilterIntegrationTest, TestEnforcesPost) { } TEST_P(CsrfFilterIntegrationTest, TestEnforcesDelete) { - config_helper_.addFilter(CSRF_FILTER_ENABLED_CONFIG); + config_helper_.prependFilter(CSRF_FILTER_ENABLED_CONFIG); Http::TestRequestHeaderMapImpl headers = {{ {":method", "DELETE"}, {":path", "/"}, @@ -163,7 +163,7 @@ TEST_P(CsrfFilterIntegrationTest, 
TestEnforcesDelete) { } TEST_P(CsrfFilterIntegrationTest, TestEnforcesPatch) { - config_helper_.addFilter(CSRF_FILTER_ENABLED_CONFIG); + config_helper_.prependFilter(CSRF_FILTER_ENABLED_CONFIG); Http::TestRequestHeaderMapImpl headers = {{ {":method", "PATCH"}, {":path", "/"}, @@ -177,7 +177,7 @@ TEST_P(CsrfFilterIntegrationTest, TestEnforcesPatch) { } TEST_P(CsrfFilterIntegrationTest, TestRefererFallback) { - config_helper_.addFilter(CSRF_FILTER_ENABLED_CONFIG); + config_helper_.prependFilter(CSRF_FILTER_ENABLED_CONFIG); Http::TestRequestHeaderMapImpl headers = {{":method", "DELETE"}, {":path", "/"}, {":scheme", "http"}, @@ -189,7 +189,7 @@ TEST_P(CsrfFilterIntegrationTest, TestRefererFallback) { } TEST_P(CsrfFilterIntegrationTest, TestMissingOrigin) { - config_helper_.addFilter(CSRF_FILTER_ENABLED_CONFIG); + config_helper_.prependFilter(CSRF_FILTER_ENABLED_CONFIG); Http::TestRequestHeaderMapImpl headers = { {{":method", "DELETE"}, {":path", "/"}, {":scheme", "http"}, {"host", "test-origin"}}}; const auto& response = sendRequest(headers); @@ -198,7 +198,7 @@ TEST_P(CsrfFilterIntegrationTest, TestMissingOrigin) { } TEST_P(CsrfFilterIntegrationTest, TestShadowOnlyMode) { - config_helper_.addFilter(CSRF_SHADOW_ENABLED_CONFIG); + config_helper_.prependFilter(CSRF_SHADOW_ENABLED_CONFIG); Http::TestRequestHeaderMapImpl headers = {{ {":method", "PUT"}, {":path", "/"}, @@ -212,7 +212,7 @@ TEST_P(CsrfFilterIntegrationTest, TestShadowOnlyMode) { } TEST_P(CsrfFilterIntegrationTest, TestFilterAndShadowEnabled) { - config_helper_.addFilter(CSRF_ENABLED_CONFIG); + config_helper_.prependFilter(CSRF_ENABLED_CONFIG); Http::TestRequestHeaderMapImpl headers = {{ {":method", "PUT"}, {":path", "/"}, diff --git a/test/extensions/filters/http/decompressor/decompressor_filter_integration_test.cc b/test/extensions/filters/http/decompressor/decompressor_filter_integration_test.cc index d02ba84b3aa2c..2aa2cf3d9a342 100644 --- 
a/test/extensions/filters/http/decompressor/decompressor_filter_integration_test.cc +++ b/test/extensions/filters/http/decompressor/decompressor_filter_integration_test.cc @@ -30,7 +30,7 @@ class DecompressorIntegrationTest : public testing::TestWithParamPackFrom(proto_config_); - config_helper_.addFilter(MessageUtil::getJsonStringFromMessageOrDie(ext_authz_filter)); + config_helper_.prependFilter(MessageUtil::getJsonStringFromMessageOrDie(ext_authz_filter)); }); } @@ -506,7 +506,7 @@ class ExtAuthzHttpIntegrationTest : public HttpIntegrationTest, ext_authz_filter.set_name("envoy.filters.http.ext_authz"); ext_authz_filter.mutable_typed_config()->PackFrom(proto_config_); - config_helper_.addFilter(MessageUtil::getJsonStringFromMessageOrDie(ext_authz_filter)); + config_helper_.prependFilter(MessageUtil::getJsonStringFromMessageOrDie(ext_authz_filter)); }); } @@ -795,7 +795,7 @@ TEST_P(ExtAuthzLocalReplyIntegrationTest, DeniedHeaderTest) { envoy::config::listener::v3::Filter ext_authz_filter; ext_authz_filter.set_name("envoy.filters.http.ext_authz"); ext_authz_filter.mutable_typed_config()->PackFrom(proto_config); - config_helper_.addFilter(MessageUtil::getJsonStringFromMessageOrDie(ext_authz_filter)); + config_helper_.prependFilter(MessageUtil::getJsonStringFromMessageOrDie(ext_authz_filter)); }); const std::string local_reply_yaml = R"EOF( diff --git a/test/extensions/filters/http/ext_proc/ext_proc_grpc_fuzz.cc b/test/extensions/filters/http/ext_proc/ext_proc_grpc_fuzz.cc index 85f01a408c12f..c03cafa52ada7 100644 --- a/test/extensions/filters/http/ext_proc/ext_proc_grpc_fuzz.cc +++ b/test/extensions/filters/http/ext_proc/ext_proc_grpc_fuzz.cc @@ -101,7 +101,7 @@ class ExtProcIntegrationFuzz : public HttpIntegrationTest, envoy::config::listener::v3::Filter ext_proc_filter; ext_proc_filter.set_name("envoy.filters.http.ext_proc"); ext_proc_filter.mutable_typed_config()->PackFrom(proto_config_); - 
config_helper_.addFilter(MessageUtil::getJsonStringFromMessageOrDie(ext_proc_filter)); + config_helper_.prependFilter(MessageUtil::getJsonStringFromMessageOrDie(ext_proc_filter)); }); // Make sure that we have control over when buffers will fill up. diff --git a/test/extensions/filters/http/ext_proc/ext_proc_integration_test.cc b/test/extensions/filters/http/ext_proc/ext_proc_integration_test.cc index f84e0342e52c4..7dc977d926406 100644 --- a/test/extensions/filters/http/ext_proc/ext_proc_integration_test.cc +++ b/test/extensions/filters/http/ext_proc/ext_proc_integration_test.cc @@ -86,7 +86,7 @@ class ExtProcIntegrationTest : public HttpIntegrationTest, envoy::config::listener::v3::Filter ext_proc_filter; ext_proc_filter.set_name("envoy.filters.http.ext_proc"); ext_proc_filter.mutable_typed_config()->PackFrom(proto_config_); - config_helper_.addFilter(MessageUtil::getJsonStringFromMessageOrDie(ext_proc_filter)); + config_helper_.prependFilter(MessageUtil::getJsonStringFromMessageOrDie(ext_proc_filter)); }); setUpstreamProtocol(Http::CodecType::HTTP2); setDownstreamProtocol(Http::CodecType::HTTP2); diff --git a/test/extensions/filters/http/ext_proc/streaming_integration_test.cc b/test/extensions/filters/http/ext_proc/streaming_integration_test.cc index 8bbc35672982c..4e47554838335 100644 --- a/test/extensions/filters/http/ext_proc/streaming_integration_test.cc +++ b/test/extensions/filters/http/ext_proc/streaming_integration_test.cc @@ -74,7 +74,7 @@ class StreamingIntegrationTest : public HttpIntegrationTest, envoy::config::listener::v3::Filter ext_proc_filter; ext_proc_filter.set_name("envoy.filters.http.ext_proc"); ext_proc_filter.mutable_typed_config()->PackFrom(proto_config_); - config_helper_.addFilter(MessageUtil::getJsonStringFromMessageOrDie(ext_proc_filter)); + config_helper_.prependFilter(MessageUtil::getJsonStringFromMessageOrDie(ext_proc_filter)); }); // Make sure that we have control over when buffers will fill up. 
diff --git a/test/extensions/filters/http/fault/fault_filter_integration_test.cc b/test/extensions/filters/http/fault/fault_filter_integration_test.cc index 370c662438bee..ed4eaf587af6c 100644 --- a/test/extensions/filters/http/fault/fault_filter_integration_test.cc +++ b/test/extensions/filters/http/fault/fault_filter_integration_test.cc @@ -13,7 +13,7 @@ class FaultIntegrationTest : public Event::TestUsingSimulatedTime, public HttpProtocolIntegrationTest { public: void initializeFilter(const std::string& filter_config) { - config_helper_.addFilter(filter_config); + config_helper_.prependFilter(filter_config); initialize(); } diff --git a/test/extensions/filters/http/grpc_http1_reverse_bridge/reverse_bridge_integration_test.cc b/test/extensions/filters/http/grpc_http1_reverse_bridge/reverse_bridge_integration_test.cc index 12e1b7b836d56..9a05608688b5d 100644 --- a/test/extensions/filters/http/grpc_http1_reverse_bridge/reverse_bridge_integration_test.cc +++ b/test/extensions/filters/http/grpc_http1_reverse_bridge/reverse_bridge_integration_test.cc @@ -42,7 +42,7 @@ name: grpc_http1_reverse_bridge response_size_header: "{}" )EOF", response_size_header ? 
*response_size_header : ""); - config_helper_.addFilter(filter); + config_helper_.prependFilter(filter); auto vhost = config_helper_.createVirtualHost("disabled"); envoy::extensions::filters::http::grpc_http1_reverse_bridge::v3::FilterConfigPerRoute diff --git a/test/extensions/filters/http/grpc_json_transcoder/grpc_json_transcoder_integration_test.cc b/test/extensions/filters/http/grpc_json_transcoder/grpc_json_transcoder_integration_test.cc index 2dd86455f58b7..99baf6b1789b1 100644 --- a/test/extensions/filters/http/grpc_json_transcoder/grpc_json_transcoder_integration_test.cc +++ b/test/extensions/filters/http/grpc_json_transcoder/grpc_json_transcoder_integration_test.cc @@ -40,7 +40,7 @@ class GrpcJsonTranscoderIntegrationTest proto_descriptor : "{}" services : "bookstore.Bookstore" )EOF"; - config_helper_.addFilter( + config_helper_.prependFilter( fmt::format(filter, TestEnvironment::runfilesPath("test/proto/bookstore.descriptor"))); } @@ -509,7 +509,7 @@ TEST_P(GrpcJsonTranscoderIntegrationTest, UnaryGetError1) { services : "bookstore.Bookstore" ignore_unknown_query_parameters : true )EOF"; - config_helper_.addFilter( + config_helper_.prependFilter( fmt::format(filter, TestEnvironment::runfilesPath("test/proto/bookstore.descriptor"))); HttpIntegrationTest::initialize(); testTranscoding( @@ -533,7 +533,7 @@ TEST_P(GrpcJsonTranscoderIntegrationTest, UnaryErrorConvertedToJson) { services: "bookstore.Bookstore" convert_grpc_status: true )EOF"; - config_helper_.addFilter( + config_helper_.prependFilter( fmt::format(filter, TestEnvironment::runfilesPath("test/proto/bookstore.descriptor"))); HttpIntegrationTest::initialize(); testTranscoding( @@ -558,7 +558,7 @@ TEST_P(GrpcJsonTranscoderIntegrationTest, UnaryErrorInTrailerConvertedToJson) { services: "bookstore.Bookstore" convert_grpc_status: true )EOF"; - config_helper_.addFilter( + config_helper_.prependFilter( fmt::format(filter, TestEnvironment::runfilesPath("test/proto/bookstore.descriptor"))); 
HttpIntegrationTest::initialize(); testTranscoding( @@ -583,7 +583,7 @@ TEST_P(GrpcJsonTranscoderIntegrationTest, StreamingErrorConvertedToJson) { services: "bookstore.Bookstore" convert_grpc_status: true )EOF"; - config_helper_.addFilter( + config_helper_.prependFilter( fmt::format(filter, TestEnvironment::runfilesPath("test/proto/bookstore.descriptor"))); HttpIntegrationTest::initialize(); testTranscoding( @@ -977,7 +977,7 @@ TEST_P(GrpcJsonTranscoderIntegrationTest, RejectUnknownMethod) { request_validation_options: reject_unknown_method: true )EOF"; - config_helper_.addFilter( + config_helper_.prependFilter( fmt::format(filter, TestEnvironment::runfilesPath("test/proto/bookstore.descriptor"))); HttpIntegrationTest::initialize(); @@ -1030,7 +1030,7 @@ TEST_P(GrpcJsonTranscoderIntegrationTest, RejectUnknownQueryParam) { request_validation_options: reject_unknown_query_parameters: true )EOF"; - config_helper_.addFilter( + config_helper_.prependFilter( fmt::format(filter, TestEnvironment::runfilesPath("test/proto/bookstore.descriptor"))); HttpIntegrationTest::initialize(); @@ -1086,7 +1086,7 @@ TEST_P(GrpcJsonTranscoderIntegrationTest, EnableRequestValidationIgnoreQueryPara reject_unknown_method: true reject_unknown_query_parameters: true )EOF"; - config_helper_.addFilter( + config_helper_.prependFilter( fmt::format(filter, TestEnvironment::runfilesPath("test/proto/bookstore.descriptor"))); HttpIntegrationTest::initialize(); @@ -1258,7 +1258,7 @@ class OverrideConfigGrpcJsonTranscoderIntegrationTest : public GrpcJsonTranscode "@type": type.googleapis.com/envoy.extensions.filters.http.grpc_json_transcoder.v3.GrpcJsonTranscoder "proto_descriptor": "" )EOF"; - config_helper_.addFilter(filter); + config_helper_.prependFilter(filter); } }; INSTANTIATE_TEST_SUITE_P(IpVersions, OverrideConfigGrpcJsonTranscoderIntegrationTest, @@ -1306,7 +1306,7 @@ class BufferLimitsDisabledGrpcJsonTranscoderIntegrationTest proto_descriptor : "{}" services : "bookstore.Bookstore" )EOF"; - 
config_helper_.addFilter( + config_helper_.prependFilter( fmt::format(filter, TestEnvironment::runfilesPath("test/proto/bookstore.descriptor"))); // Disable runtime feature. diff --git a/test/extensions/filters/http/grpc_web/grpc_web_filter_integration_test.cc b/test/extensions/filters/http/grpc_web/grpc_web_filter_integration_test.cc index b7ba884e65965..235168207001c 100644 --- a/test/extensions/filters/http/grpc_web/grpc_web_filter_integration_test.cc +++ b/test/extensions/filters/http/grpc_web/grpc_web_filter_integration_test.cc @@ -27,7 +27,7 @@ class GrpcWebFilterIntegrationTest : public testing::TestWithParam, void SetUp() override { setUpstreamProtocol(Http::CodecType::HTTP2); - config_helper_.addFilter("name: envoy.filters.http.grpc_web"); + config_helper_.prependFilter("name: envoy.filters.http.grpc_web"); } void initialize() override { diff --git a/test/extensions/filters/http/jwt_authn/filter_integration_test.cc b/test/extensions/filters/http/jwt_authn/filter_integration_test.cc index f0ef92af55616..98fea932bc868 100644 --- a/test/extensions/filters/http/jwt_authn/filter_integration_test.cc +++ b/test/extensions/filters/http/jwt_authn/filter_integration_test.cc @@ -107,7 +107,7 @@ INSTANTIATE_TEST_SUITE_P(Protocols, LocalJwksIntegrationTest, // With local Jwks, this test verifies a request is passed with a good Jwt token. TEST_P(LocalJwksIntegrationTest, WithGoodToken) { - config_helper_.addFilter(getFilterConfig(true)); + config_helper_.prependFilter(getFilterConfig(true)); initialize(); codec_client_ = makeHttpConnection(lookupPort("http")); @@ -135,7 +135,7 @@ TEST_P(LocalJwksIntegrationTest, WithGoodToken) { // With local Jwks, this test verifies a request is rejected with an expired Jwt token. 
TEST_P(LocalJwksIntegrationTest, ExpiredToken) { - config_helper_.addFilter(getFilterConfig(true)); + config_helper_.prependFilter(getFilterConfig(true)); initialize(); codec_client_ = makeHttpConnection(lookupPort("http")); @@ -158,7 +158,7 @@ TEST_P(LocalJwksIntegrationTest, ExpiredToken) { } TEST_P(LocalJwksIntegrationTest, MissingToken) { - config_helper_.addFilter(getFilterConfig(true)); + config_helper_.prependFilter(getFilterConfig(true)); initialize(); codec_client_ = makeHttpConnection(lookupPort("http")); @@ -179,7 +179,7 @@ TEST_P(LocalJwksIntegrationTest, MissingToken) { } TEST_P(LocalJwksIntegrationTest, ExpiredTokenHeadReply) { - config_helper_.addFilter(getFilterConfig(true)); + config_helper_.prependFilter(getFilterConfig(true)); initialize(); codec_client_ = makeHttpConnection(lookupPort("http")); @@ -205,7 +205,7 @@ TEST_P(LocalJwksIntegrationTest, ExpiredTokenHeadReply) { // This test verifies a request is passed with a path that don't match any requirements. TEST_P(LocalJwksIntegrationTest, NoRequiresPath) { - config_helper_.addFilter(getFilterConfig(true)); + config_helper_.prependFilter(getFilterConfig(true)); initialize(); codec_client_ = makeHttpConnection(lookupPort("http")); @@ -227,7 +227,7 @@ TEST_P(LocalJwksIntegrationTest, NoRequiresPath) { // This test verifies a CORS preflight request without JWT token is allowed. 
TEST_P(LocalJwksIntegrationTest, CorsPreflight) { - config_helper_.addFilter(getFilterConfig(true)); + config_helper_.prependFilter(getFilterConfig(true)); initialize(); codec_client_ = makeHttpConnection(lookupPort("http")); @@ -264,8 +264,8 @@ TEST_P(LocalJwksIntegrationTest, FilterStateRequirement) { provider_name: example_provider )"; - config_helper_.addFilter(getAuthFilterConfig(auth_filter_conf, true)); - config_helper_.addFilter(absl::StrCat("name: ", HeaderToFilterStateFilterName)); + config_helper_.prependFilter(getAuthFilterConfig(auth_filter_conf, true)); + config_helper_.prependFilter(absl::StrCat("name: ", HeaderToFilterStateFilterName)); initialize(); codec_client_ = makeHttpConnection(lookupPort("http")); @@ -336,7 +336,7 @@ class RemoteJwksIntegrationTest : public HttpProtocolIntegrationTest { } void initializeFilter(bool add_cluster) { - config_helper_.addFilter(getFilterConfig(false)); + config_helper_.prependFilter(getFilterConfig(false)); if (add_cluster) { config_helper_.addConfigModifier([](envoy::config::bootstrap::v3::Bootstrap& bootstrap) { @@ -352,7 +352,7 @@ class RemoteJwksIntegrationTest : public HttpProtocolIntegrationTest { } void initializeAsyncFetchFilter(bool fast_listener) { - config_helper_.addFilter(getAsyncFetchFilterConfig(ExampleConfig, fast_listener)); + config_helper_.prependFilter(getAsyncFetchFilterConfig(ExampleConfig, fast_listener)); config_helper_.addConfigModifier([](envoy::config::bootstrap::v3::Bootstrap& bootstrap) { auto* jwks_cluster = bootstrap.mutable_static_resources()->add_clusters(); @@ -605,7 +605,7 @@ TEST_P(RemoteJwksIntegrationTest, WithFailedJwksAsyncFetchFast) { class PerRouteIntegrationTest : public HttpProtocolIntegrationTest { public: void setup(const std::string& filter_config, const PerRouteConfig& per_route) { - config_helper_.addFilter(getAuthFilterConfig(filter_config, true)); + config_helper_.prependFilter(getAuthFilterConfig(filter_config, true)); config_helper_.addConfigModifier( 
[per_route]( diff --git a/test/extensions/filters/http/kill_request/crash_integration_test.cc b/test/extensions/filters/http/kill_request/crash_integration_test.cc index ec68ab95e298e..6eda4c89effda 100644 --- a/test/extensions/filters/http/kill_request/crash_integration_test.cc +++ b/test/extensions/filters/http/kill_request/crash_integration_test.cc @@ -24,7 +24,7 @@ class CrashIntegrationTest : public Event::TestUsingSimulatedTime, public HttpProtocolIntegrationTest { protected: void initializeFilter(const std::string& filter_config) { - config_helper_.addFilter(filter_config); + config_helper_.prependFilter(filter_config); initialize(); } }; @@ -102,7 +102,7 @@ TEST_P(CrashIntegrationTestAllProtocols, DecodeContinueDoesNotAddTrackedObjectIf probability: numerator: 100 )EOF"; - config_helper_.addFilter(request_kill_config); + config_helper_.prependFilter(request_kill_config); // This will stop iteration, and continue via a callback. const std::string stop_and_continue_config = R"EOF( @@ -111,7 +111,7 @@ TEST_P(CrashIntegrationTestAllProtocols, DecodeContinueDoesNotAddTrackedObjectIf "@type": type.googleapis.com/test.integration.filters.StopAndContinueConfig installScopeTrackedObject: true )EOF"; - config_helper_.addFilter(stop_and_continue_config); + config_helper_.prependFilter(stop_and_continue_config); initialize(); @@ -137,7 +137,7 @@ TEST_P(CrashIntegrationTestAllProtocols, DecodeContinueAddsCrashContextIfNoneExi probability: numerator: 100 )EOF"; - config_helper_.addFilter(request_kill_config); + config_helper_.prependFilter(request_kill_config); // This will stop iteration, and continue via a callback. 
const std::string stop_and_continue_config = R"EOF( @@ -146,7 +146,7 @@ TEST_P(CrashIntegrationTestAllProtocols, DecodeContinueAddsCrashContextIfNoneExi "@type": type.googleapis.com/test.integration.filters.StopAndContinueConfig installScopeTrackedObject: false )EOF"; - config_helper_.addFilter(stop_and_continue_config); + config_helper_.prependFilter(stop_and_continue_config); initialize(); @@ -172,7 +172,7 @@ TEST_P(CrashIntegrationTestAllProtocols, EncodeContinueDoesNotAddTrackedObjectIf "@type": type.googleapis.com/test.integration.filters.StopAndContinueConfig installScopeTrackedObject: true )EOF"; - config_helper_.addFilter(stop_and_continue_config); + config_helper_.prependFilter(stop_and_continue_config); const std::string request_kill_config = R"EOF( @@ -183,7 +183,7 @@ TEST_P(CrashIntegrationTestAllProtocols, EncodeContinueDoesNotAddTrackedObjectIf numerator: 100 direction: RESPONSE )EOF"; - config_helper_.addFilter(request_kill_config); + config_helper_.prependFilter(request_kill_config); initialize(); @@ -205,7 +205,7 @@ TEST_P(CrashIntegrationTestAllProtocols, EncodeContinueAddsCrashContextIfNoneExi "@type": type.googleapis.com/test.integration.filters.StopAndContinueConfig installScopeTrackedObject: false )EOF"; - config_helper_.addFilter(stop_and_continue_config); + config_helper_.prependFilter(stop_and_continue_config); const std::string request_kill_config = R"EOF( @@ -216,7 +216,7 @@ TEST_P(CrashIntegrationTestAllProtocols, EncodeContinueAddsCrashContextIfNoneExi numerator: 100 direction: RESPONSE )EOF"; - config_helper_.addFilter(request_kill_config); + config_helper_.prependFilter(request_kill_config); initialize(); diff --git a/test/extensions/filters/http/kill_request/kill_request_filter_integration_test.cc b/test/extensions/filters/http/kill_request/kill_request_filter_integration_test.cc index d995c10aad043..6d9a173b99658 100644 --- a/test/extensions/filters/http/kill_request/kill_request_filter_integration_test.cc +++ 
b/test/extensions/filters/http/kill_request/kill_request_filter_integration_test.cc @@ -12,7 +12,7 @@ class KillRequestFilterIntegrationTest : public Event::TestUsingSimulatedTime, public HttpProtocolIntegrationTest { protected: void initializeFilter(const std::string& filter_config) { - config_helper_.addFilter(filter_config); + config_helper_.prependFilter(filter_config); initialize(); } diff --git a/test/extensions/filters/http/local_ratelimit/local_ratelimit_integration_test.cc b/test/extensions/filters/http/local_ratelimit/local_ratelimit_integration_test.cc index 06a8b0ca1b3f3..784dc656f3cb9 100644 --- a/test/extensions/filters/http/local_ratelimit/local_ratelimit_integration_test.cc +++ b/test/extensions/filters/http/local_ratelimit/local_ratelimit_integration_test.cc @@ -9,7 +9,7 @@ class LocalRateLimitFilterIntegrationTest : public Event::TestUsingSimulatedTime public HttpProtocolIntegrationTest { protected: void initializeFilter(const std::string& filter_config) { - config_helper_.addFilter(filter_config); + config_helper_.prependFilter(filter_config); initialize(); } diff --git a/test/extensions/filters/http/lua/lua_integration_test.cc b/test/extensions/filters/http/lua/lua_integration_test.cc index a1ec02924953c..35bd0a9267670 100644 --- a/test/extensions/filters/http/lua/lua_integration_test.cc +++ b/test/extensions/filters/http/lua/lua_integration_test.cc @@ -26,7 +26,7 @@ class LuaIntegrationTest : public testing::TestWithParamPackFrom(proto_config_); - config_helper_.addFilter(MessageUtil::getJsonStringFromMessageOrDie(ratelimit_filter)); + config_helper_.prependFilter(MessageUtil::getJsonStringFromMessageOrDie(ratelimit_filter)); }); config_helper_.addConfigModifier( [](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager& diff --git a/test/extensions/filters/http/rbac/rbac_filter_integration_test.cc b/test/extensions/filters/http/rbac/rbac_filter_integration_test.cc index 90bbeb81006d4..242ad7684071d 100644 --- 
a/test/extensions/filters/http/rbac/rbac_filter_integration_test.cc +++ b/test/extensions/filters/http/rbac/rbac_filter_integration_test.cc @@ -152,7 +152,7 @@ INSTANTIATE_TEST_SUITE_P(Protocols, RBACIntegrationTest, TEST_P(RBACIntegrationTest, Allowed) { useAccessLog("%RESPONSE_CODE_DETAILS%"); - config_helper_.addFilter(RBAC_CONFIG); + config_helper_.prependFilter(RBAC_CONFIG); initialize(); codec_client_ = makeHttpConnection(lookupPort("http")); @@ -177,7 +177,7 @@ TEST_P(RBACIntegrationTest, Allowed) { TEST_P(RBACIntegrationTest, Denied) { useAccessLog("%RESPONSE_CODE_DETAILS%"); - config_helper_.addFilter(RBAC_CONFIG); + config_helper_.prependFilter(RBAC_CONFIG); initialize(); codec_client_ = makeHttpConnection(lookupPort("http")); @@ -200,7 +200,7 @@ TEST_P(RBACIntegrationTest, Denied) { TEST_P(RBACIntegrationTest, DeniedWithDenyAction) { useAccessLog("%RESPONSE_CODE_DETAILS%"); - config_helper_.addFilter(RBAC_CONFIG_WITH_DENY_ACTION); + config_helper_.prependFilter(RBAC_CONFIG_WITH_DENY_ACTION); initialize(); codec_client_ = makeHttpConnection(lookupPort("http")); @@ -226,7 +226,7 @@ TEST_P(RBACIntegrationTest, DeniedWithPrefixRule) { config_helper_.addConfigModifier( [](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager& cfg) { cfg.mutable_normalize_path()->set_value(false); }); - config_helper_.addFilter(RBAC_CONFIG_WITH_PREFIX_MATCH); + config_helper_.prependFilter(RBAC_CONFIG_WITH_PREFIX_MATCH); initialize(); codec_client_ = makeHttpConnection(lookupPort("http")); @@ -252,7 +252,7 @@ TEST_P(RBACIntegrationTest, RbacPrefixRuleUseNormalizePath) { config_helper_.addConfigModifier( [](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager& cfg) { cfg.mutable_normalize_path()->set_value(true); }); - config_helper_.addFilter(RBAC_CONFIG_WITH_PREFIX_MATCH); + config_helper_.prependFilter(RBAC_CONFIG_WITH_PREFIX_MATCH); initialize(); codec_client_ = makeHttpConnection(lookupPort("http")); @@ 
-273,7 +273,7 @@ TEST_P(RBACIntegrationTest, RbacPrefixRuleUseNormalizePath) { } TEST_P(RBACIntegrationTest, DeniedHeadReply) { - config_helper_.addFilter(RBAC_CONFIG); + config_helper_.prependFilter(RBAC_CONFIG); initialize(); codec_client_ = makeHttpConnection(lookupPort("http")); @@ -309,7 +309,7 @@ TEST_P(RBACIntegrationTest, RouteOverride) { (*config)["envoy.filters.http.rbac"].PackFrom(per_route_config); }); - config_helper_.addFilter(RBAC_CONFIG); + config_helper_.prependFilter(RBAC_CONFIG); initialize(); @@ -333,7 +333,7 @@ TEST_P(RBACIntegrationTest, RouteOverride) { } TEST_P(RBACIntegrationTest, PathWithQueryAndFragmentWithOverride) { - config_helper_.addFilter(RBAC_CONFIG_WITH_PATH_EXACT_MATCH); + config_helper_.prependFilter(RBAC_CONFIG_WITH_PATH_EXACT_MATCH); config_helper_.addRuntimeOverride("envoy.reloadable_features.http_reject_path_with_fragment", "false"); initialize(); @@ -362,7 +362,7 @@ TEST_P(RBACIntegrationTest, PathWithQueryAndFragmentWithOverride) { } TEST_P(RBACIntegrationTest, PathWithFragmentRejectedByDefault) { - config_helper_.addFilter(RBAC_CONFIG_WITH_PATH_EXACT_MATCH); + config_helper_.prependFilter(RBAC_CONFIG_WITH_PATH_EXACT_MATCH); initialize(); codec_client_ = makeHttpConnection(lookupPort("http")); @@ -385,7 +385,7 @@ TEST_P(RBACIntegrationTest, PathWithFragmentRejectedByDefault) { // This test ensures that the exact match deny rule is not affected by fragment and query // when Envoy is configured to strip both fragment and query. 
TEST_P(RBACIntegrationTest, DenyExactMatchIgnoresQueryAndFragment) { - config_helper_.addFilter(RBAC_CONFIG_DENY_WITH_PATH_EXACT_MATCH); + config_helper_.prependFilter(RBAC_CONFIG_DENY_WITH_PATH_EXACT_MATCH); config_helper_.addRuntimeOverride("envoy.reloadable_features.http_reject_path_with_fragment", "false"); initialize(); @@ -418,7 +418,7 @@ TEST_P(RBACIntegrationTest, DenyExactMatchIgnoresQueryAndFragment) { } TEST_P(RBACIntegrationTest, PathIgnoreCase) { - config_helper_.addFilter(RBAC_CONFIG_WITH_PATH_IGNORE_CASE_MATCH); + config_helper_.prependFilter(RBAC_CONFIG_WITH_PATH_IGNORE_CASE_MATCH); initialize(); codec_client_ = makeHttpConnection(lookupPort("http")); @@ -445,7 +445,7 @@ TEST_P(RBACIntegrationTest, PathIgnoreCase) { } TEST_P(RBACIntegrationTest, LogConnectionAllow) { - config_helper_.addFilter(RBAC_CONFIG_WITH_LOG_ACTION); + config_helper_.prependFilter(RBAC_CONFIG_WITH_LOG_ACTION); initialize(); codec_client_ = makeHttpConnection(lookupPort("http")); @@ -469,7 +469,7 @@ TEST_P(RBACIntegrationTest, LogConnectionAllow) { // Basic CEL match on a header value. TEST_P(RBACIntegrationTest, HeaderMatchCondition) { - config_helper_.addFilter(fmt::format(RBAC_CONFIG_HEADER_MATCH_CONDITION, "yyy")); + config_helper_.prependFilter(fmt::format(RBAC_CONFIG_HEADER_MATCH_CONDITION, "yyy")); initialize(); codec_client_ = makeHttpConnection(lookupPort("http")); @@ -494,7 +494,7 @@ TEST_P(RBACIntegrationTest, HeaderMatchCondition) { // CEL match on a header value in which the header is a duplicate. Verifies we handle string // copying correctly inside the CEL expression. 
TEST_P(RBACIntegrationTest, HeaderMatchConditionDuplicateHeaderNoMatch) { - config_helper_.addFilter(fmt::format(RBAC_CONFIG_HEADER_MATCH_CONDITION, "yyy")); + config_helper_.prependFilter(fmt::format(RBAC_CONFIG_HEADER_MATCH_CONDITION, "yyy")); initialize(); codec_client_ = makeHttpConnection(lookupPort("http")); @@ -517,7 +517,7 @@ TEST_P(RBACIntegrationTest, HeaderMatchConditionDuplicateHeaderNoMatch) { // CEL match on a header value in which the header is a duplicate. Verifies we handle string // copying correctly inside the CEL expression. TEST_P(RBACIntegrationTest, HeaderMatchConditionDuplicateHeaderMatch) { - config_helper_.addFilter(fmt::format(RBAC_CONFIG_HEADER_MATCH_CONDITION, "yyy,zzz")); + config_helper_.prependFilter(fmt::format(RBAC_CONFIG_HEADER_MATCH_CONDITION, "yyy,zzz")); initialize(); codec_client_ = makeHttpConnection(lookupPort("http")); diff --git a/test/extensions/filters/http/tap/tap_filter_integration_test.cc b/test/extensions/filters/http/tap/tap_filter_integration_test.cc index 403c5ca023847..cfd7f764453f5 100644 --- a/test/extensions/filters/http/tap/tap_filter_integration_test.cc +++ b/test/extensions/filters/http/tap/tap_filter_integration_test.cc @@ -26,7 +26,7 @@ class TapIntegrationTest : public testing::TestWithParamwaitForCounterGe("listener_manager.listener_create_success", 2); diff --git a/test/extensions/http/header_formatters/preserve_case/preserve_case_formatter_integration_test.cc b/test/extensions/http/header_formatters/preserve_case/preserve_case_formatter_integration_test.cc index 92c72f43bb660..c4a82dea9eadc 100644 --- a/test/extensions/http/header_formatters/preserve_case/preserve_case_formatter_integration_test.cc +++ b/test/extensions/http/header_formatters/preserve_case/preserve_case_formatter_integration_test.cc @@ -70,7 +70,7 @@ INSTANTIATE_TEST_SUITE_P(IpVersions, PreserveCaseIntegrationTest, // Verify that we preserve case in both directions. 
TEST_P(PreserveCaseIntegrationTest, EndToEnd) { - config_helper_.addFilter(R"EOF( + config_helper_.prependFilter(R"EOF( name: preserve-case-filter )EOF"); initialize(); diff --git a/test/extensions/transport_sockets/alts/alts_integration_test.cc b/test/extensions/transport_sockets/alts/alts_integration_test.cc index bb572db3cde8d..62a960272a8e7 100644 --- a/test/extensions/transport_sockets/alts/alts_integration_test.cc +++ b/test/extensions/transport_sockets/alts/alts_integration_test.cc @@ -115,7 +115,7 @@ class AltsIntegrationTestBase : public Event::TestUsingSimulatedTime, transport_socket->mutable_typed_config()->PackFrom(alts_config); }); - config_helper_.addFilter(R"EOF( + config_helper_.prependFilter(R"EOF( name: decode-dynamic-metadata-filter typed_config: "@type": type.googleapis.com/google.protobuf.Empty diff --git a/test/integration/README.md b/test/integration/README.md index e36b9d4d031eb..99badc23e76b3 100644 --- a/test/integration/README.md +++ b/test/integration/README.md @@ -57,7 +57,7 @@ or ```c++ // Add a buffering filter on the request path -config_helper_.addFilter(ConfigHelper::DEFAULT_BUFFER_FILTER); +config_helper_.prependFilter(ConfigHelper::DEFAULT_BUFFER_FILTER); ``` For other edits which are less likely reusable, one can add config modifiers. Config modifiers diff --git a/test/integration/drain_close_integration_test.cc b/test/integration/drain_close_integration_test.cc index de57d7fcba64c..65b84f28bf0e9 100644 --- a/test/integration/drain_close_integration_test.cc +++ b/test/integration/drain_close_integration_test.cc @@ -11,7 +11,7 @@ TEST_P(DrainCloseIntegrationTest, DrainCloseGradual) { // the probability will be very low, but the rapid retries prevent this from // increasing total test time. 
drain_time_ = std::chrono::seconds(100); - config_helper_.addFilter(ConfigHelper::defaultHealthCheckFilter()); + config_helper_.prependFilter(ConfigHelper::defaultHealthCheckFilter()); initialize(); absl::Notification drain_sequence_started; @@ -45,7 +45,7 @@ TEST_P(DrainCloseIntegrationTest, DrainCloseGradual) { TEST_P(DrainCloseIntegrationTest, DrainCloseImmediate) { drain_strategy_ = Server::DrainStrategy::Immediate; drain_time_ = std::chrono::seconds(100); - config_helper_.addFilter(ConfigHelper::defaultHealthCheckFilter()); + config_helper_.prependFilter(ConfigHelper::defaultHealthCheckFilter()); initialize(); absl::Notification drain_sequence_started; diff --git a/test/integration/eds_integration_test.cc b/test/integration/eds_integration_test.cc index 2f2b47d69302d..bfe7ec4c24a06 100644 --- a/test/integration/eds_integration_test.cc +++ b/test/integration/eds_integration_test.cc @@ -408,7 +408,7 @@ TEST_P(EdsIntegrationTest, BatchMemberUpdateCb) { } TEST_P(EdsIntegrationTest, StatsReadyFilter) { - config_helper_.addFilter("name: eds-ready-filter"); + config_helper_.prependFilter("name: eds-ready-filter"); initializeTest(false); // Initial state: no healthy endpoints diff --git a/test/integration/http2_flood_integration_test.cc b/test/integration/http2_flood_integration_test.cc index 64db06468019e..a5c9d864fb7d2 100644 --- a/test/integration/http2_flood_integration_test.cc +++ b/test/integration/http2_flood_integration_test.cc @@ -577,7 +577,7 @@ TEST_P(Http2FloodMitigationTest, Trailers) { // Verify flood detection by the WINDOW_UPDATE frame when a decoder filter is resuming reading from // the downstream via DecoderFilterBelowWriteBufferLowWatermark. 
TEST_P(Http2FloodMitigationTest, WindowUpdateOnLowWatermarkFlood) { - config_helper_.addFilter(R"EOF( + config_helper_.prependFilter(R"EOF( name: backpressure-filter )EOF"); config_helper_.setBufferLimits(1024 * 1024 * 1024, 1024 * 1024 * 1024); diff --git a/test/integration/http_integration.cc b/test/integration/http_integration.cc index 3cb411a0c730a..4403d39b23171 100644 --- a/test/integration/http_integration.cc +++ b/test/integration/http_integration.cc @@ -1000,7 +1000,7 @@ void HttpIntegrationTest::testEnvoyProxying1xx(bool continue_before_upstream_com bool with_multiple_1xx_headers) { if (with_encoder_filter) { // Add a filter to make sure 100s play well with them. - config_helper_.addFilter("name: passthrough-filter"); + config_helper_.prependFilter("name: passthrough-filter"); } config_helper_.addConfigModifier( [&](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager& @@ -1071,7 +1071,7 @@ void HttpIntegrationTest::testTwoRequests(bool network_backup) { // created while the socket appears to be in the high watermark state, and regression tests that // flow control will be corrected as the socket "becomes unblocked" if (network_backup) { - config_helper_.addFilter( + config_helper_.prependFilter( fmt::format(R"EOF( name: pause-filter{} typed_config: diff --git a/test/integration/idle_timeout_integration_test.cc b/test/integration/idle_timeout_integration_test.cc index 9a8e0f75e1972..46cbe957e6ae5 100644 --- a/test/integration/idle_timeout_integration_test.cc +++ b/test/integration/idle_timeout_integration_test.cc @@ -194,7 +194,7 @@ TEST_P(IdleTimeoutIntegrationTest, PerStreamIdleTimeoutAfterDownstreamHeaders) { // Per-stream idle timeout with reads disabled. 
TEST_P(IdleTimeoutIntegrationTest, PerStreamIdleTimeoutWithLargeBuffer) { - config_helper_.addFilter(R"EOF( + config_helper_.prependFilter(R"EOF( name: backpressure-filter )EOF"); enable_per_stream_idle_timeout_ = true; @@ -413,7 +413,7 @@ TEST_P(IdleTimeoutIntegrationTest, RequestTimeoutIsNotDisarmedByEncode100Continu // Per-stream idle timeout reset from within a filter. TEST_P(IdleTimeoutIntegrationTest, PerStreamIdleTimeoutResetFromFilter) { - config_helper_.addFilter(R"EOF( + config_helper_.prependFilter(R"EOF( name: reset-idle-timer-filter )EOF"); enable_per_stream_idle_timeout_ = true; diff --git a/test/integration/integration_admin_test.cc b/test/integration/integration_admin_test.cc index 578017e301112..6fdc3bd7467cd 100644 --- a/test/integration/integration_admin_test.cc +++ b/test/integration/integration_admin_test.cc @@ -69,7 +69,7 @@ TEST_P(IntegrationAdminTest, HealthCheckWithoutServerStats) { } TEST_P(IntegrationAdminTest, HealthCheckWithBufferFilter) { - config_helper_.addFilter(ConfigHelper::defaultBufferFilter()); + config_helper_.prependFilter(ConfigHelper::defaultBufferFilter()); initialize(); BufferingStreamDecoderPtr response; diff --git a/test/integration/integration_admin_test.h b/test/integration/integration_admin_test.h index 194c54d02ae00..b190cef6edbaf 100644 --- a/test/integration/integration_admin_test.h +++ b/test/integration/integration_admin_test.h @@ -15,7 +15,7 @@ namespace Envoy { class IntegrationAdminTest : public HttpProtocolIntegrationTest { public: void initialize() override { - config_helper_.addFilter(ConfigHelper::defaultHealthCheckFilter()); + config_helper_.prependFilter(ConfigHelper::defaultHealthCheckFilter()); config_helper_.addConfigModifier( [](envoy::config::bootstrap::v3::Bootstrap& bootstrap) -> void { auto& hist_settings = diff --git a/test/integration/integration_test.cc b/test/integration/integration_test.cc index 450e787dd71e1..794a49adda065 100644 --- a/test/integration/integration_test.cc +++ 
b/test/integration/integration_test.cc @@ -303,7 +303,7 @@ TEST_P(IntegrationTest, RouterDirectResponseEmptyBody) { } TEST_P(IntegrationTest, ConnectionClose) { - config_helper_.addFilter(ConfigHelper::defaultHealthCheckFilter()); + config_helper_.prependFilter(ConfigHelper::defaultHealthCheckFilter()); initialize(); codec_client_ = makeHttpConnection(lookupPort("http")); @@ -419,7 +419,7 @@ TEST_P(IntegrationTest, EnvoyProxyingLate100ContinueWithEncoderFilter) { // Regression test for https://github.com/envoyproxy/envoy/issues/10923. TEST_P(IntegrationTest, EnvoyProxying100ContinueWithDecodeDataPause) { - config_helper_.addFilter(R"EOF( + config_helper_.prependFilter(R"EOF( name: stop-iteration-and-continue-filter typed_config: "@type": type.googleapis.com/test.integration.filters.StopAndContinueConfig @@ -433,7 +433,7 @@ TEST_P(IntegrationTest, MatchingHttpFilterConstruction) { concurrency_ = 2; config_helper_.addRuntimeOverride("envoy.reloadable_features.experimental_matching_api", "true"); - config_helper_.addFilter(R"EOF( + config_helper_.prependFilter(R"EOF( name: matcher typed_config: "@type": type.googleapis.com/envoy.extensions.common.matching.v3.ExtensionWithMatcher @@ -500,7 +500,7 @@ TEST_P(IntegrationTest, MatchingHttpFilterConstructionNewProto) { concurrency_ = 2; config_helper_.addRuntimeOverride("envoy.reloadable_features.experimental_matching_api", "true"); - config_helper_.addFilter(R"EOF( + config_helper_.prependFilter(R"EOF( name: matcher typed_config: "@type": type.googleapis.com/envoy.extensions.common.matching.v3.ExtensionWithMatcher @@ -625,7 +625,7 @@ TEST_P(IntegrationTest, UpstreamDisconnectWithTwoRequests) { // Test hitting the bridge filter with too many response bytes to buffer. Given // the headers are not proxied, the connection manager will send a local error reply. 
TEST_P(IntegrationTest, HittingGrpcFilterLimitBufferingHeaders) { - config_helper_.addFilter( + config_helper_.prependFilter( "{ name: grpc_http1_bridge, typed_config: { \"@type\": " "type.googleapis.com/envoy.extensions.filters.http.grpc_http1_bridge.v3.Config } }"); config_helper_.setBufferLimits(1024, 1024); @@ -1480,8 +1480,8 @@ TEST_P(IntegrationTest, TestDelayedConnectionTeardownOnGracefulClose) { hcm) { hcm.mutable_delayed_close_timeout()->set_seconds(1); }); // This test will trigger an early 413 Payload Too Large response due to buffer limits being // exceeded. The following filter is needed since the router filter will never trigger a 413. - config_helper_.addFilter("{ name: encoder-decoder-buffer-filter, typed_config: { \"@type\": " - "type.googleapis.com/google.protobuf.Empty } }"); + config_helper_.prependFilter("{ name: encoder-decoder-buffer-filter, typed_config: { \"@type\": " + "type.googleapis.com/google.protobuf.Empty } }"); config_helper_.setBufferLimits(1024, 1024); initialize(); @@ -1513,8 +1513,8 @@ TEST_P(IntegrationTest, TestDelayedConnectionTeardownOnGracefulClose) { // Test configuration of the delayed close timeout on downstream HTTP/1.1 connections. A value of 0 // disables delayed close processing. TEST_P(IntegrationTest, TestDelayedConnectionTeardownConfig) { - config_helper_.addFilter("{ name: encoder-decoder-buffer-filter, typed_config: { \"@type\": " - "type.googleapis.com/google.protobuf.Empty } }"); + config_helper_.prependFilter("{ name: encoder-decoder-buffer-filter, typed_config: { \"@type\": " + "type.googleapis.com/google.protobuf.Empty } }"); config_helper_.setBufferLimits(1024, 1024); config_helper_.addConfigModifier( [](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager& @@ -1547,8 +1547,8 @@ TEST_P(IntegrationTest, TestDelayedConnectionTeardownConfig) { // Test that if the route cache is cleared, it doesn't cause problems. 
TEST_P(IntegrationTest, TestClearingRouteCacheFilter) { - config_helper_.addFilter("{ name: clear-route-cache, typed_config: { \"@type\": " - "type.googleapis.com/google.protobuf.Empty } }"); + config_helper_.prependFilter("{ name: clear-route-cache, typed_config: { \"@type\": " + "type.googleapis.com/google.protobuf.Empty } }"); initialize(); codec_client_ = makeHttpConnection(lookupPort("http")); sendRequestAndWaitForResponse(default_request_headers_, 0, default_response_headers_, 0); @@ -1585,8 +1585,8 @@ TEST_P(IntegrationTest, NoConnectionPoolsFree) { } TEST_P(IntegrationTest, ProcessObjectHealthy) { - config_helper_.addFilter("{ name: process-context-filter, typed_config: { \"@type\": " - "type.googleapis.com/google.protobuf.Empty } }"); + config_helper_.prependFilter("{ name: process-context-filter, typed_config: { \"@type\": " + "type.googleapis.com/google.protobuf.Empty } }"); ProcessObjectForFilter healthy_object(true); process_object_ = healthy_object; @@ -1606,8 +1606,8 @@ TEST_P(IntegrationTest, ProcessObjectHealthy) { } TEST_P(IntegrationTest, ProcessObjectUnealthy) { - config_helper_.addFilter("{ name: process-context-filter, typed_config: { \"@type\": " - "type.googleapis.com/google.protobuf.Empty } }"); + config_helper_.prependFilter("{ name: process-context-filter, typed_config: { \"@type\": " + "type.googleapis.com/google.protobuf.Empty } }"); ProcessObjectForFilter unhealthy_object(false); process_object_ = unhealthy_object; @@ -2103,7 +2103,7 @@ TEST_P(IntegrationTest, RandomPreconnect) { TEST_P(IntegrationTest, SetRouteToDelegatingRouteWithClusterOverride) { useAccessLog("%UPSTREAM_CLUSTER%\n"); - config_helper_.addFilter(R"EOF( + config_helper_.prependFilter(R"EOF( name: set-route-filter )EOF"); diff --git a/test/integration/multiplexed_integration_test.cc b/test/integration/multiplexed_integration_test.cc index c55fc1dabfbeb..0357402a6993a 100644 --- a/test/integration/multiplexed_integration_test.cc +++ 
b/test/integration/multiplexed_integration_test.cc @@ -408,7 +408,7 @@ void verifyExpectedMetadata(Http::MetadataMap metadata_map, std::set void { hcm.set_proxy_100_continue(true); }); @@ -652,7 +652,7 @@ name: request-metadata-filter )EOF"; TEST_P(Http2MetadataIntegrationTest, ConsumeAndInsertRequestMetadata) { - addFilters({request_metadata_filter}); + prependFilters({request_metadata_filter}); config_helper_.addConfigModifier( [&](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager& hcm) -> void { hcm.set_proxy_100_continue(true); }); @@ -799,7 +799,7 @@ void Http2MetadataIntegrationTest::verifyHeadersOnlyTest() { } TEST_P(Http2MetadataIntegrationTest, HeadersOnlyRequestWithRequestMetadata) { - addFilters({request_metadata_filter}); + prependFilters({request_metadata_filter}); // Send a headers only request. runHeaderOnlyTest(false, 0); verifyHeadersOnlyTest(); @@ -842,17 +842,17 @@ name: metadata-stop-all-filter )EOF"; TEST_P(Http2MetadataIntegrationTest, RequestMetadataWithStopAllFilterBeforeMetadataFilter) { - addFilters({request_metadata_filter, metadata_stop_all_filter}); + prependFilters({request_metadata_filter, metadata_stop_all_filter}); testRequestMetadataWithStopAllFilter(); } TEST_P(Http2MetadataIntegrationTest, RequestMetadataWithStopAllFilterAfterMetadataFilter) { - addFilters({metadata_stop_all_filter, request_metadata_filter}); + prependFilters({metadata_stop_all_filter, request_metadata_filter}); testRequestMetadataWithStopAllFilter(); } TEST_P(Http2MetadataIntegrationTest, TestAddEncodedMetadata) { - config_helper_.addFilter(R"EOF( + config_helper_.prependFilter(R"EOF( name: encode-headers-return-stop-all-filter )EOF"); @@ -957,7 +957,7 @@ TEST_P(Http2IntegrationTest, BadFrame) { // response are received. 
TEST_P(Http2IntegrationTest, GoAway) { EXCLUDE_DOWNSTREAM_HTTP3; // QuicHttpClientConnectionImpl::goAway NOT_REACHED_GCOVR_EXCL_LINE - config_helper_.addFilter(ConfigHelper::defaultHealthCheckFilter()); + config_helper_.prependFilter(ConfigHelper::defaultHealthCheckFilter()); initialize(); codec_client_ = makeHttpConnection(lookupPort("http")); @@ -1319,7 +1319,7 @@ TEST_P(Http2IntegrationTest, DelayedCloseDisabled) { } TEST_P(Http2IntegrationTest, PauseAndResume) { - config_helper_.addFilter(R"EOF( + config_helper_.prependFilter(R"EOF( name: stop-iteration-and-continue-filter typed_config: "@type": type.googleapis.com/test.integration.filters.StopAndContinueConfig @@ -1349,7 +1349,7 @@ TEST_P(Http2IntegrationTest, PauseAndResume) { } TEST_P(Http2IntegrationTest, PauseAndResumeHeadersOnly) { - config_helper_.addFilter(R"EOF( + config_helper_.prependFilter(R"EOF( name: stop-iteration-and-continue-filter typed_config: "@type": type.googleapis.com/test.integration.filters.StopAndContinueConfig @@ -1829,7 +1829,7 @@ name: on-local-reply-filter )EOF"; TEST_P(Http2IntegrationTest, OnLocalReply) { - config_helper_.addFilter(on_local_reply_filter); + config_helper_.prependFilter(on_local_reply_filter); initialize(); codec_client_ = makeHttpConnection(lookupPort("http")); diff --git a/test/integration/multiplexed_integration_test.h b/test/integration/multiplexed_integration_test.h index f2bd7ce3267b3..81876304b44c6 100644 --- a/test/integration/multiplexed_integration_test.h +++ b/test/integration/multiplexed_integration_test.h @@ -16,10 +16,11 @@ class Http2IntegrationTest : public HttpProtocolIntegrationTest { void simultaneousRequest(int32_t request1_bytes, int32_t request2_bytes); protected: - // Utility function to add filters. - void addFilters(std::vector filters) { + // Utility function to prepend filters. Note that the filters + // are added in reverse order. 
+ void prependFilters(std::vector filters) { for (const auto& filter : filters) { - config_helper_.addFilter(filter); + config_helper_.prependFilter(filter); } } }; diff --git a/test/integration/multiplexed_upstream_integration_test.cc b/test/integration/multiplexed_upstream_integration_test.cc index 694aebb9d41c7..6dedddbdea612 100644 --- a/test/integration/multiplexed_upstream_integration_test.cc +++ b/test/integration/multiplexed_upstream_integration_test.cc @@ -334,7 +334,7 @@ TEST_P(Http2UpstreamIntegrationTest, ManyLargeSimultaneousRequestWithRandomBacku // receiving flow control window updates. return; } - config_helper_.addFilter( + config_helper_.prependFilter( fmt::format(R"EOF( name: pause-filter{} typed_config: @@ -432,8 +432,8 @@ name: router // As with ProtocolIntegrationTest.HittingEncoderFilterLimit use a filter // which buffers response data but in this case, make sure the sendLocalReply // is gRPC. - config_helper_.addFilter("{ name: encoder-decoder-buffer-filter, typed_config: { \"@type\": " - "type.googleapis.com/google.protobuf.Empty } }"); + config_helper_.prependFilter("{ name: encoder-decoder-buffer-filter, typed_config: { \"@type\": " + "type.googleapis.com/google.protobuf.Empty } }"); config_helper_.setBufferLimits(1024, 1024); initialize(); diff --git a/test/integration/protocol_integration_test.cc b/test/integration/protocol_integration_test.cc index 2b1bfb5485efe..e6349bd3b8f88 100644 --- a/test/integration/protocol_integration_test.cc +++ b/test/integration/protocol_integration_test.cc @@ -172,7 +172,7 @@ TEST_P(ProtocolIntegrationTest, UnknownResponsecode) { // Add a health check filter and verify correct computation of health based on upstream status. 
TEST_P(DownstreamProtocolIntegrationTest, ComputedHealthCheck) { - config_helper_.addFilter(R"EOF( + config_helper_.prependFilter(R"EOF( name: health_check typed_config: "@type": type.googleapis.com/envoy.extensions.filters.http.health_check.v3.HealthCheck @@ -193,7 +193,7 @@ name: health_check // Add a health check filter and verify correct computation of health based on upstream status. TEST_P(DownstreamProtocolIntegrationTest, ModifyBuffer) { - config_helper_.addFilter(R"EOF( + config_helper_.prependFilter(R"EOF( name: health_check typed_config: "@type": type.googleapis.com/envoy.extensions.filters.http.health_check.v3.HealthCheck @@ -214,11 +214,10 @@ name: health_check // Verifies behavior for https://github.com/envoyproxy/envoy/pull/11248 TEST_P(ProtocolIntegrationTest, AddBodyToRequestAndWaitForIt) { - // filters are prepended, so add them in reverse order - config_helper_.addFilter(R"EOF( + config_helper_.prependFilter(R"EOF( name: wait-for-whole-request-and-response-filter )EOF"); - config_helper_.addFilter(R"EOF( + config_helper_.prependFilter(R"EOF( name: add-body-filter )EOF"); initialize(); @@ -238,11 +237,10 @@ TEST_P(ProtocolIntegrationTest, AddBodyToRequestAndWaitForIt) { } TEST_P(ProtocolIntegrationTest, AddBodyToResponseAndWaitForIt) { - // filters are prepended, so add them in reverse order - config_helper_.addFilter(R"EOF( + config_helper_.prependFilter(R"EOF( name: add-body-filter )EOF"); - config_helper_.addFilter(R"EOF( + config_helper_.prependFilter(R"EOF( name: wait-for-whole-request-and-response-filter )EOF"); initialize(); @@ -260,7 +258,7 @@ TEST_P(ProtocolIntegrationTest, AddBodyToResponseAndWaitForIt) { } TEST_P(ProtocolIntegrationTest, ContinueHeadersOnlyInjectBodyFilter) { - config_helper_.addFilter(R"EOF( + config_helper_.prependFilter(R"EOF( name: continue-headers-only-inject-body-filter typed_config: "@type": type.googleapis.com/google.protobuf.Empty @@ -288,7 +286,7 @@ TEST_P(ProtocolIntegrationTest, 
ContinueHeadersOnlyInjectBodyFilter) { } TEST_P(ProtocolIntegrationTest, AddEncodedTrailers) { - config_helper_.addFilter(R"EOF( + config_helper_.prependFilter(R"EOF( name: add-trailers-filter typed_config: "@type": type.googleapis.com/google.protobuf.Empty @@ -552,8 +550,8 @@ TEST_P(DownstreamProtocolIntegrationTest, DownstreamRequestWithFaultyFilter) { autonomous_upstream_ = true; } useAccessLog("%RESPONSE_CODE_DETAILS%"); - config_helper_.addFilter("{ name: invalid-header-filter, typed_config: { \"@type\": " - "type.googleapis.com/google.protobuf.Empty } }"); + config_helper_.prependFilter("{ name: invalid-header-filter, typed_config: { \"@type\": " + "type.googleapis.com/google.protobuf.Empty } }"); initialize(); codec_client_ = makeHttpConnection(lookupPort("http")); @@ -604,8 +602,8 @@ TEST_P(DownstreamProtocolIntegrationTest, FaultyFilterWithConnect) { old_listener->set_name("http_forward"); }); useAccessLog("%RESPONSE_CODE_DETAILS%"); - config_helper_.addFilter("{ name: invalid-header-filter, typed_config: { \"@type\": " - "type.googleapis.com/google.protobuf.Empty } }"); + config_helper_.prependFilter("{ name: invalid-header-filter, typed_config: { \"@type\": " + "type.googleapis.com/google.protobuf.Empty } }"); initialize(); codec_client_ = makeHttpConnection(lookupPort("http")); @@ -625,8 +623,8 @@ TEST_P(DownstreamProtocolIntegrationTest, FaultyFilterWithConnect) { TEST_P(DownstreamProtocolIntegrationTest, MissingHeadersLocalReply) { useAccessLog("%RESPONSE_CODE_DETAILS%"); - config_helper_.addFilter("{ name: invalid-header-filter, typed_config: { \"@type\": " - "type.googleapis.com/google.protobuf.Empty } }"); + config_helper_.prependFilter("{ name: invalid-header-filter, typed_config: { \"@type\": " + "type.googleapis.com/google.protobuf.Empty } }"); initialize(); codec_client_ = makeHttpConnection(lookupPort("http")); @@ -646,8 +644,8 @@ TEST_P(DownstreamProtocolIntegrationTest, MissingHeadersLocalReply) { TEST_P(DownstreamProtocolIntegrationTest, 
MissingHeadersLocalReplyWithBody) { useAccessLog("%RESPONSE_CODE_DETAILS%"); - config_helper_.addFilter("{ name: invalid-header-filter, typed_config: { \"@type\": " - "type.googleapis.com/google.protobuf.Empty } }"); + config_helper_.prependFilter("{ name: invalid-header-filter, typed_config: { \"@type\": " + "type.googleapis.com/google.protobuf.Empty } }"); initialize(); codec_client_ = makeHttpConnection(lookupPort("http")); @@ -1180,8 +1178,8 @@ TEST_P(ProtocolIntegrationTest, RetryHittingRouteLimits) { // Test hitting the decoder buffer filter with too many request bytes to buffer. Ensure the // connection manager sends a 413. TEST_P(DownstreamProtocolIntegrationTest, HittingDecoderFilterLimit) { - config_helper_.addFilter("{ name: encoder-decoder-buffer-filter, typed_config: { \"@type\": " - "type.googleapis.com/google.protobuf.Empty } }"); + config_helper_.prependFilter("{ name: encoder-decoder-buffer-filter, typed_config: { \"@type\": " + "type.googleapis.com/google.protobuf.Empty } }"); config_helper_.setBufferLimits(1024, 1024); initialize(); @@ -1223,8 +1221,8 @@ TEST_P(ProtocolIntegrationTest, HittingEncoderFilterLimit) { }); useAccessLog(); - config_helper_.addFilter("{ name: encoder-decoder-buffer-filter, typed_config: { \"@type\": " - "type.googleapis.com/google.protobuf.Empty } }"); + config_helper_.prependFilter("{ name: encoder-decoder-buffer-filter, typed_config: { \"@type\": " + "type.googleapis.com/google.protobuf.Empty } }"); config_helper_.setBufferLimits(1024, 1024); initialize(); @@ -1863,7 +1861,7 @@ TEST_P(DownstreamProtocolIntegrationTest, MultipleContentLengthsAllowed) { } TEST_P(DownstreamProtocolIntegrationTest, LocalReplyDuringEncoding) { - config_helper_.addFilter(R"EOF( + config_helper_.prependFilter(R"EOF( name: local-reply-during-encode )EOF"); initialize(); @@ -1887,7 +1885,7 @@ name: local-reply-during-encode } TEST_P(DownstreamProtocolIntegrationTest, LocalReplyDuringEncodingData) { - config_helper_.addFilter(R"EOF( + 
config_helper_.prependFilter(R"EOF( name: local-reply-during-encode-data )EOF"); initialize(); @@ -2125,13 +2123,13 @@ TEST_P(ProtocolIntegrationTest, LargeRequestMethod) { // Tests StopAllIterationAndBuffer. Verifies decode-headers-return-stop-all-filter calls decodeData // once after iteration is resumed. TEST_P(DownstreamProtocolIntegrationTest, TestDecodeHeadersReturnsStopAll) { - config_helper_.addFilter(R"EOF( + config_helper_.prependFilter(R"EOF( name: call-decodedata-once-filter )EOF"); - config_helper_.addFilter(R"EOF( + config_helper_.prependFilter(R"EOF( name: decode-headers-return-stop-all-filter )EOF"); - config_helper_.addFilter(R"EOF( + config_helper_.prependFilter(R"EOF( name: passthrough-filter )EOF"); @@ -2176,10 +2174,10 @@ name: passthrough-filter // Tests StopAllIterationAndWatermark. decode-headers-return-stop-all-watermark-filter sets buffer // limit to 100. Verifies data pause when limit is reached, and resume after iteration continues. TEST_P(DownstreamProtocolIntegrationTest, TestDecodeHeadersReturnsStopAllWatermark) { - config_helper_.addFilter(R"EOF( + config_helper_.prependFilter(R"EOF( name: decode-headers-return-stop-all-filter )EOF"); - config_helper_.addFilter(R"EOF( + config_helper_.prependFilter(R"EOF( name: passthrough-filter )EOF"); @@ -2234,13 +2232,13 @@ name: passthrough-filter // Test two filters that return StopAllIterationAndBuffer back-to-back. TEST_P(DownstreamProtocolIntegrationTest, TestTwoFiltersDecodeHeadersReturnsStopAll) { - config_helper_.addFilter(R"EOF( + config_helper_.prependFilter(R"EOF( name: decode-headers-return-stop-all-filter )EOF"); - config_helper_.addFilter(R"EOF( + config_helper_.prependFilter(R"EOF( name: decode-headers-return-stop-all-filter )EOF"); - config_helper_.addFilter(R"EOF( + config_helper_.prependFilter(R"EOF( name: passthrough-filter )EOF"); @@ -2282,7 +2280,7 @@ name: passthrough-filter // Tests encodeHeaders() returns StopAllIterationAndBuffer. 
TEST_P(DownstreamProtocolIntegrationTest, TestEncodeHeadersReturnsStopAll) { - config_helper_.addFilter(R"EOF( + config_helper_.prependFilter(R"EOF( name: encode-headers-return-stop-all-filter )EOF"); config_helper_.addConfigModifier( @@ -2316,7 +2314,7 @@ name: encode-headers-return-stop-all-filter // Tests encodeHeaders() returns StopAllIterationAndWatermark. TEST_P(DownstreamProtocolIntegrationTest, TestEncodeHeadersReturnsStopAllWatermark) { - config_helper_.addFilter(R"EOF( + config_helper_.prependFilter(R"EOF( name: encode-headers-return-stop-all-filter )EOF"); config_helper_.addConfigModifier( @@ -2386,8 +2384,8 @@ TEST_P(ProtocolIntegrationTest, MultipleCookiesAndSetCookies) { // Test that delay closed connections are eventually force closed when the timeout triggers. TEST_P(DownstreamProtocolIntegrationTest, TestDelayedConnectionTeardownTimeoutTrigger) { - config_helper_.addFilter("{ name: encoder-decoder-buffer-filter, typed_config: { \"@type\": " - "type.googleapis.com/google.protobuf.Empty } }"); + config_helper_.prependFilter("{ name: encoder-decoder-buffer-filter, typed_config: { \"@type\": " + "type.googleapis.com/google.protobuf.Empty } }"); config_helper_.setBufferLimits(1024, 1024); config_helper_.addConfigModifier( [](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager& @@ -2730,7 +2728,7 @@ TEST_P(DownstreamProtocolIntegrationTest, HeaderNormalizationRejection) { // Tests a filter that returns a FilterHeadersStatus::Continue after a local reply without // processing new metadata generated in decodeHeader TEST_P(DownstreamProtocolIntegrationTest, LocalReplyWithMetadata) { - config_helper_.addFilter(R"EOF( + config_helper_.prependFilter(R"EOF( name: local-reply-with-metadata-filter typed_config: "@type": type.googleapis.com/google.protobuf.Empty @@ -2807,7 +2805,7 @@ name: remove-response-headers-filter )EOF"; TEST_P(ProtocolIntegrationTest, HeadersOnlyRequestWithRemoveResponseHeadersFilter) { - 
config_helper_.addFilter(remove_response_headers_filter); + config_helper_.prependFilter(remove_response_headers_filter); initialize(); codec_client_ = makeHttpConnection(lookupPort("http")); @@ -2824,7 +2822,7 @@ TEST_P(ProtocolIntegrationTest, HeadersOnlyRequestWithRemoveResponseHeadersFilte } TEST_P(ProtocolIntegrationTest, RemoveResponseHeadersFilter) { - config_helper_.addFilter(remove_response_headers_filter); + config_helper_.prependFilter(remove_response_headers_filter); initialize(); codec_client_ = makeHttpConnection(lookupPort("http")); @@ -2866,14 +2864,14 @@ TEST_P(ProtocolIntegrationTest, ReqRespSizeStats) { // filter chain is aborted and 500 is sent to the client. TEST_P(ProtocolIntegrationTest, OverflowEncoderBufferFromEncodeHeaders) { config_helper_.setBufferLimits(64 * 1024, 64 * 1024); - config_helper_.addFilter(R"EOF( + config_helper_.prependFilter(R"EOF( name: add-body-filter typed_config: "@type": type.googleapis.com/test.integration.filters.AddBodyFilterConfig where_to_add_body: ENCODE_HEADERS body_size: 70000 )EOF"); - config_helper_.addFilter(R"EOF( + config_helper_.prependFilter(R"EOF( name: crash-filter typed_config: "@type": type.googleapis.com/test.integration.filters.CrashFilterConfig @@ -2898,10 +2896,10 @@ TEST_P(ProtocolIntegrationTest, OverflowEncoderBufferFromEncodeDataWithResponseH config_helper_.setBufferLimits(64 * 1024, 64 * 1024); // Buffer filter will stop iteration from encodeHeaders preventing response headers from being // sent downstream. 
- config_helper_.addFilter(R"EOF( + config_helper_.prependFilter(R"EOF( name: encoder-decoder-buffer-filter )EOF"); - config_helper_.addFilter(R"EOF( + config_helper_.prependFilter(R"EOF( name: crash-filter typed_config: "@type": type.googleapis.com/test.integration.filters.CrashFilterConfig @@ -2930,7 +2928,7 @@ TEST_P(ProtocolIntegrationTest, OverflowEncoderBufferFromEncodeDataWithResponseH TEST_P(ProtocolIntegrationTest, OverflowEncoderBufferFromEncodeData) { config_helper_.setBufferLimits(64 * 1024, 64 * 1024); // Make the add-body-filter stop iteration from encodeData. Headers should be sent to the client. - config_helper_.addFilter(R"EOF( + config_helper_.prependFilter(R"EOF( name: add-body-filter typed_config: "@type": type.googleapis.com/test.integration.filters.AddBodyFilterConfig @@ -2938,7 +2936,7 @@ TEST_P(ProtocolIntegrationTest, OverflowEncoderBufferFromEncodeData) { where_to_stop_and_buffer: ENCODE_DATA body_size: 16384 )EOF"); - config_helper_.addFilter(R"EOF( + config_helper_.prependFilter(R"EOF( name: crash-filter typed_config: "@type": type.googleapis.com/test.integration.filters.CrashFilterConfig @@ -2965,13 +2963,13 @@ TEST_P(ProtocolIntegrationTest, OverflowEncoderBufferFromEncodeData) { // filter chain is aborted and 413 is sent to the client. TEST_P(DownstreamProtocolIntegrationTest, OverflowDecoderBufferFromDecodeHeaders) { config_helper_.setBufferLimits(64 * 1024, 64 * 1024); - config_helper_.addFilter(R"EOF( + config_helper_.prependFilter(R"EOF( name: crash-filter typed_config: "@type": type.googleapis.com/test.integration.filters.CrashFilterConfig crash_in_decode_headers: true )EOF"); - config_helper_.addFilter(R"EOF( + config_helper_.prependFilter(R"EOF( name: add-body-filter typed_config: "@type": type.googleapis.com/test.integration.filters.AddBodyFilterConfig @@ -2991,7 +2989,7 @@ TEST_P(DownstreamProtocolIntegrationTest, OverflowDecoderBufferFromDecodeHeaders // filter chain is aborted and 413 is sent to the client. 
TEST_P(DownstreamProtocolIntegrationTest, OverflowDecoderBufferFromDecodeData) { config_helper_.setBufferLimits(64 * 1024, 64 * 1024); - config_helper_.addFilter(R"EOF( + config_helper_.prependFilter(R"EOF( name: crash-filter typed_config: "@type": type.googleapis.com/test.integration.filters.CrashFilterConfig @@ -2999,7 +2997,7 @@ TEST_P(DownstreamProtocolIntegrationTest, OverflowDecoderBufferFromDecodeData) { crash_in_decode_data: true )EOF"); // Buffer filter causes filter manager to buffer data - config_helper_.addFilter(R"EOF( + config_helper_.prependFilter(R"EOF( name: encoder-decoder-buffer-filter )EOF"); initialize(); @@ -3027,21 +3025,21 @@ TEST_P(DownstreamProtocolIntegrationTest, OverflowDecoderBufferFromDecodeData) { // manager's internal state is slightly different. TEST_P(DownstreamProtocolIntegrationTest, OverflowDecoderBufferFromDecodeDataContinueIteration) { config_helper_.setBufferLimits(64 * 1024, 64 * 1024); - config_helper_.addFilter(R"EOF( + config_helper_.prependFilter(R"EOF( name: crash-filter typed_config: "@type": type.googleapis.com/test.integration.filters.CrashFilterConfig crash_in_decode_headers: false crash_in_decode_data: true )EOF"); - config_helper_.addFilter(R"EOF( + config_helper_.prependFilter(R"EOF( name: add-body-filter typed_config: "@type": type.googleapis.com/test.integration.filters.AddBodyFilterConfig where_to_add_body: DECODE_DATA body_size: 70000 )EOF"); - config_helper_.addFilter(R"EOF( + config_helper_.prependFilter(R"EOF( name: encoder-decoder-buffer-filter )EOF"); initialize(); @@ -3073,7 +3071,7 @@ TEST_P(DownstreamProtocolIntegrationTest, return; } config_helper_.setBufferLimits(64 * 1024, 64 * 1024); - config_helper_.addFilter(R"EOF( + config_helper_.prependFilter(R"EOF( name: add-body-filter typed_config: "@type": type.googleapis.com/test.integration.filters.AddBodyFilterConfig @@ -3112,7 +3110,7 @@ TEST_P(DownstreamProtocolIntegrationTest, OverflowDecoderBufferFromDecodeTrailer return; } 
config_helper_.setBufferLimits(64 * 1024, 64 * 1024); - config_helper_.addFilter(R"EOF( + config_helper_.prependFilter(R"EOF( name: crash-filter typed_config: "@type": type.googleapis.com/test.integration.filters.CrashFilterConfig @@ -3120,7 +3118,7 @@ TEST_P(DownstreamProtocolIntegrationTest, OverflowDecoderBufferFromDecodeTrailer crash_in_decode_data: true crash_in_decode_trailers: true )EOF"); - config_helper_.addFilter(R"EOF( + config_helper_.prependFilter(R"EOF( name: add-body-filter typed_config: "@type": type.googleapis.com/test.integration.filters.AddBodyFilterConfig diff --git a/test/integration/redirect_integration_test.cc b/test/integration/redirect_integration_test.cc index ff48c8922b704..6c9f983438f28 100644 --- a/test/integration/redirect_integration_test.cc +++ b/test/integration/redirect_integration_test.cc @@ -535,7 +535,7 @@ TEST_P(RedirectIntegrationTest, InternalRedirectToDestinationWithResponseBody) { config_helper_.addConfigModifier( [](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager& hcm) { hcm.set_via("via_value"); }); - config_helper_.addFilter(R"EOF( + config_helper_.prependFilter(R"EOF( name: pause-filter typed_config: "@type": type.googleapis.com/google.protobuf.Empty diff --git a/test/integration/scoped_rds_integration_test.cc b/test/integration/scoped_rds_integration_test.cc index c1a96010e6c57..b76a18cda3b37 100644 --- a/test/integration/scoped_rds_integration_test.cc +++ b/test/integration/scoped_rds_integration_test.cc @@ -648,7 +648,7 @@ route_configuration_name: foo_route1 // Test that a scoped route config update is performed on demand and http request will succeed. 
TEST_P(ScopedRdsIntegrationTest, OnDemandUpdateSuccess) { - config_helper_.addFilter(R"EOF( + config_helper_.prependFilter(R"EOF( name: envoy.filters.http.on_demand )EOF"); const std::string scope_route1 = R"EOF( @@ -700,7 +700,7 @@ on_demand: true // With on demand update filter configured, scope not match should still return 404 TEST_P(ScopedRdsIntegrationTest, OnDemandUpdateScopeNotMatch) { - config_helper_.addFilter(R"EOF( + config_helper_.prependFilter(R"EOF( name: envoy.filters.http.on_demand )EOF"); @@ -750,7 +750,7 @@ route_configuration_name: {} // return 404 TEST_P(ScopedRdsIntegrationTest, OnDemandUpdatePrimaryVirtualHostNotMatch) { - config_helper_.addFilter(R"EOF( + config_helper_.prependFilter(R"EOF( name: envoy.filters.http.on_demand )EOF"); @@ -800,7 +800,7 @@ route_configuration_name: {} // return 404 TEST_P(ScopedRdsIntegrationTest, OnDemandUpdateVirtualHostNotMatch) { - config_helper_.addFilter(R"EOF( + config_helper_.prependFilter(R"EOF( name: envoy.filters.http.on_demand )EOF"); @@ -854,7 +854,7 @@ on_demand: true // Eager and lazy scopes share the same route configuration TEST_P(ScopedRdsIntegrationTest, DifferentPriorityScopeShareRoute) { - config_helper_.addFilter(R"EOF( + config_helper_.prependFilter(R"EOF( name: envoy.filters.http.on_demand )EOF"); @@ -910,7 +910,7 @@ on_demand: true } TEST_P(ScopedRdsIntegrationTest, OnDemandUpdateAfterActiveStreamDestroyed) { - config_helper_.addFilter(R"EOF( + config_helper_.prependFilter(R"EOF( name: envoy.filters.http.on_demand )EOF"); const std::string scope_route1 = R"EOF( diff --git a/test/integration/sds_generic_secret_integration_test.cc b/test/integration/sds_generic_secret_integration_test.cc index 5d9c64c921b2b..629213397112a 100644 --- a/test/integration/sds_generic_secret_integration_test.cc +++ b/test/integration/sds_generic_secret_integration_test.cc @@ -101,7 +101,7 @@ class SdsGenericSecretIntegrationTest : public Grpc::GrpcClientIntegrationParamT ConfigHelper::setHttp2(*sds_cluster); 
}); - config_helper_.addFilter("{ name: sds-generic-secret-test }"); + config_helper_.prependFilter("{ name: sds-generic-secret-test }"); create_xds_upstream_ = true; HttpIntegrationTest::initialize(); diff --git a/test/integration/version_integration_test.cc b/test/integration/version_integration_test.cc index 2ee09525e7519..a4362b80e5595 100644 --- a/test/integration/version_integration_test.cc +++ b/test/integration/version_integration_test.cc @@ -26,25 +26,25 @@ const char ExampleIpTaggingConfig[] = R"EOF( // envoy.filters.http.ip_tagging from v3 TypedStruct config. TEST_P(VersionIntegrationTest, IpTaggingV3StaticTypedStructConfig) { - config_helper_.addFilter(absl::StrCat(R"EOF( + config_helper_.prependFilter(absl::StrCat(R"EOF( name: ip_tagging typed_config: "@type": type.googleapis.com/udpa.type.v1.TypedStruct type_url: type.googleapis.com/envoy.extensions.filters.http.ip_tagging.v3.IPTagging value: )EOF", - ExampleIpTaggingConfig)); + ExampleIpTaggingConfig)); initialize(); } // envoy.filters.http.ip_tagging from v3 typed Any config. TEST_P(VersionIntegrationTest, IpTaggingV3StaticTypedConfig) { - config_helper_.addFilter(absl::StrCat(R"EOF( + config_helper_.prependFilter(absl::StrCat(R"EOF( name: ip_tagging typed_config: "@type": type.googleapis.com/envoy.extensions.filters.http.ip_tagging.v3.IPTagging )EOF", - ExampleIpTaggingConfig)); + ExampleIpTaggingConfig)); initialize(); } diff --git a/test/integration/websocket_integration_test.cc b/test/integration/websocket_integration_test.cc index c7159200ab9e0..1ac084679cd4f 100644 --- a/test/integration/websocket_integration_test.cc +++ b/test/integration/websocket_integration_test.cc @@ -350,7 +350,7 @@ TEST_P(WebsocketIntegrationTest, WebsocketCustomFilterChain) { // Add a small buffer filter to the standard HTTP filter chain. Websocket // upgrades will use the HTTP filter chain so will also have small buffers. 
- config_helper_.addFilter(ConfigHelper::smallBufferFilter()); + config_helper_.prependFilter(ConfigHelper::smallBufferFilter()); // Add a second upgrade type which goes directly to the router filter. config_helper_.addConfigModifier( From cc5ef14e2e93012ca028502de1806837c09f6e53 Mon Sep 17 00:00:00 2001 From: Dhi Aurrahman Date: Mon, 13 Sep 2021 22:27:40 +0700 Subject: [PATCH 023/121] ext_authz: Use 403 as default for denied response (#18010) Before this, when a gRPC server sends out DeniedResponse as a check response for a request but without setting the HttpResponse.DeniedResponse.Status, HTTP ext_authz filter translates that as "0" (empty/unknown HTTP status code). This patch makes sure we reply with a valid 403 Forbidden HTTP status code (the current default status code for denied response). Signed-off-by: Dhi Aurrahman Signed-off-by: gayang --- api/envoy/service/auth/v3/external_auth.proto | 10 +++--- docs/root/version_history/current.rst | 3 +- .../envoy/service/auth/v3/external_auth.proto | 10 +++--- .../common/ext_authz/ext_authz_grpc_impl.cc | 12 ++++--- .../ext_authz/ext_authz_grpc_impl_test.cc | 33 +++++++++++++++++++ 5 files changed, 55 insertions(+), 13 deletions(-) diff --git a/api/envoy/service/auth/v3/external_auth.proto b/api/envoy/service/auth/v3/external_auth.proto index b627fcb314751..31adbc161b881 100644 --- a/api/envoy/service/auth/v3/external_auth.proto +++ b/api/envoy/service/auth/v3/external_auth.proto @@ -46,9 +46,9 @@ message DeniedHttpResponse { option (udpa.annotations.versioning).previous_message_type = "envoy.service.auth.v2.DeniedHttpResponse"; - // This field allows the authorization service to send a HTTP response status - // code to the downstream client other than 403 (Forbidden). - type.v3.HttpStatus status = 1 [(validate.rules).message = {required: true}]; + // This field allows the authorization service to send an HTTP response status code to the + // downstream client. 
If not set, Envoy sends ``403 Forbidden`` HTTP status code by default. + type.v3.HttpStatus status = 1; // This field allows the authorization service to send HTTP response headers // to the downstream client. Note that the :ref:`append field in HeaderValueOption ` defaults to @@ -110,7 +110,9 @@ message CheckResponse { option (udpa.annotations.versioning).previous_message_type = "envoy.service.auth.v2.CheckResponse"; - // Status `OK` allows the request. Any other status indicates the request should be denied. + // Status `OK` allows the request. Any other status indicates the request should be denied, and + // for HTTP filter, if not overridden by :ref:`denied HTTP response status ` + // Envoy sends ``403 Forbidden`` HTTP status code by default. google.rpc.Status status = 1; // An message that contains HTTP response attributes. This message is diff --git a/docs/root/version_history/current.rst b/docs/root/version_history/current.rst index bdc1e5bb7e920..d340597c8366a 100644 --- a/docs/root/version_history/current.rst +++ b/docs/root/version_history/current.rst @@ -79,7 +79,8 @@ Bug Fixes * compressor: fix a bug where if trailers were added and a subsequent filter paused the filter chain, the request could be stalled. This behavior can be reverted by setting ``envoy.reloadable_features.fix_added_trailers`` to false. * dynamic forward proxy: fixing a validation bug where san and sni checks were not applied setting :ref:`http_protocol_options ` via :ref:`typed_extension_protocol_options `. * ext_authz: fix the ext_authz filter to correctly merge multiple same headers using the ',' as separator in the check request to the external authorization service. -* ext_authz: the network ext_authz filter now correctly sets dynamic metdata returned by the authorization service for non-OK responses. This behavior now matches the http ext_authz filter. 
+* ext_authz: fix the HTTP ext_authz filter to respond with ``403 Forbidden`` when a gRPC auth server sends a denied check response with an empty HTTP status code. +* ext_authz: the network ext_authz filter now correctly sets dynamic metadata returned by the authorization service for non-OK responses. This behavior now matches the http ext_authz filter. * hcm: remove deprecation for :ref:`xff_num_trusted_hops ` and forbid mixing ip detection extensions with old related knobs. * http: limit use of deferred resets in the http2 codec to server-side connections. Use of deferred reset for client connections can result in incorrect behavior and performance problems. * listener: fixed an issue on Windows where connections are not handled by all worker threads. diff --git a/generated_api_shadow/envoy/service/auth/v3/external_auth.proto b/generated_api_shadow/envoy/service/auth/v3/external_auth.proto index b627fcb314751..31adbc161b881 100644 --- a/generated_api_shadow/envoy/service/auth/v3/external_auth.proto +++ b/generated_api_shadow/envoy/service/auth/v3/external_auth.proto @@ -46,9 +46,9 @@ message DeniedHttpResponse { option (udpa.annotations.versioning).previous_message_type = "envoy.service.auth.v2.DeniedHttpResponse"; - // This field allows the authorization service to send a HTTP response status - // code to the downstream client other than 403 (Forbidden). - type.v3.HttpStatus status = 1 [(validate.rules).message = {required: true}]; + // This field allows the authorization service to send an HTTP response status code to the + // downstream client. If not set, Envoy sends ``403 Forbidden`` HTTP status code by default. + type.v3.HttpStatus status = 1; // This field allows the authorization service to send HTTP response headers // to the downstream client. 
Note that the :ref:`append field in HeaderValueOption ` defaults to @@ -110,7 +110,9 @@ message CheckResponse { option (udpa.annotations.versioning).previous_message_type = "envoy.service.auth.v2.CheckResponse"; - // Status `OK` allows the request. Any other status indicates the request should be denied. + // Status `OK` allows the request. Any other status indicates the request should be denied, and + // for HTTP filter, if not overridden by :ref:`denied HTTP response status ` + // Envoy sends ``403 Forbidden`` HTTP status code by default. google.rpc.Status status = 1; // An message that contains HTTP response attributes. This message is diff --git a/source/extensions/filters/common/ext_authz/ext_authz_grpc_impl.cc b/source/extensions/filters/common/ext_authz/ext_authz_grpc_impl.cc index d462a5179572b..a1155ac0528dd 100644 --- a/source/extensions/filters/common/ext_authz/ext_authz_grpc_impl.cc +++ b/source/extensions/filters/common/ext_authz/ext_authz_grpc_impl.cc @@ -67,13 +67,17 @@ void GrpcClientImpl::onSuccess(std::unique_ptrstatus = CheckStatus::Denied; + + // The default HTTP status code for denied response is 403 Forbidden. 
+ authz_response->status_code = Http::Code::Forbidden; if (response->has_denied_response()) { toAuthzResponseHeader(authz_response, response->denied_response().headers()); - authz_response->status_code = - static_cast(response->denied_response().status().code()); + + const uint32_t status_code = response->denied_response().status().code(); + if (status_code > 0) { + authz_response->status_code = static_cast(status_code); + } authz_response->body = response->denied_response().body(); - } else { - authz_response->status_code = Http::Code::Forbidden; } } diff --git a/test/extensions/filters/common/ext_authz/ext_authz_grpc_impl_test.cc b/test/extensions/filters/common/ext_authz/ext_authz_grpc_impl_test.cc index 8d776a73b061f..ca45b70ca26b3 100644 --- a/test/extensions/filters/common/ext_authz/ext_authz_grpc_impl_test.cc +++ b/test/extensions/filters/common/ext_authz/ext_authz_grpc_impl_test.cc @@ -205,6 +205,39 @@ TEST_F(ExtAuthzGrpcClientTest, AuthorizationDeniedWithAllAttributes) { client_->onSuccess(std::move(check_response), span_); } +// Test the client when a denied response with unknown HTTP status code (i.e. if +// DeniedResponse.status is not set by the auth server implementation). The response sent to client +// is set with the default HTTP status code for denied response (403 Forbidden). 
+TEST_F(ExtAuthzGrpcClientTest, AuthorizationDeniedWithEmptyDeniedResponseStatus) { + initialize(); + + const std::string expected_body{"test"}; + const auto expected_headers = + TestCommon::makeHeaderValueOption({{"foo", "bar", false}, {"foobar", "bar", true}}); + const auto expected_downstream_headers = TestCommon::makeHeaderValueOption({}); + auto check_response = TestCommon::makeCheckResponse( + Grpc::Status::WellKnownGrpcStatus::PermissionDenied, envoy::type::v3::Empty, expected_body, + expected_headers, expected_downstream_headers); + // When the check response gives unknown denied response HTTP status code, the filter sets the + // response HTTP status code with 403 Forbidden (default). + auto authz_response = + TestCommon::makeAuthzResponse(CheckStatus::Denied, Http::Code::Forbidden, expected_body, + expected_headers, expected_downstream_headers); + + envoy::service::auth::v3::CheckRequest request; + expectCallSend(request); + client_->check(request_callbacks_, request, Tracing::NullSpan::instance(), stream_info_); + + Http::TestRequestHeaderMapImpl headers; + client_->onCreateInitialMetadata(headers); + EXPECT_EQ(nullptr, headers.RequestId()); + EXPECT_CALL(span_, setTag(Eq("ext_authz_status"), Eq("ext_authz_unauthorized"))); + EXPECT_CALL(request_callbacks_, + onComplete_(WhenDynamicCastTo(AuthzDeniedResponse(authz_response)))); + + client_->onSuccess(std::move(check_response), span_); +} + // Test the client when an unknown error occurs. 
TEST_F(ExtAuthzGrpcClientTest, UnknownError) { initialize(); From bcb4869a4532b25117c261e5da552bf5a976bb82 Mon Sep 17 00:00:00 2001 From: David Schinazi Date: Mon, 13 Sep 2021 08:41:28 -0700 Subject: [PATCH 024/121] Update QUICHE to e8ddc3873182355137862b4d6417add2b2b8a31d (#18060) Signed-off-by: David Schinazi Signed-off-by: gayang --- bazel/external/quiche.BUILD | 1 + bazel/repository_locations.bzl | 7 +++---- source/common/quic/envoy_quic_proof_source.cc | 5 ++++- source/common/quic/envoy_quic_proof_source.h | 3 ++- .../quic/envoy_quic_server_connection.cc | 3 ++- test/common/quic/envoy_quic_alarm_test.cc | 2 +- .../quic/envoy_quic_proof_source_test.cc | 21 +++++++++++++------ test/common/quic/test_proof_source.h | 5 +++-- 8 files changed, 31 insertions(+), 16 deletions(-) diff --git a/bazel/external/quiche.BUILD b/bazel/external/quiche.BUILD index f8a1079ac93cd..662ae2cd1ef88 100644 --- a/bazel/external/quiche.BUILD +++ b/bazel/external/quiche.BUILD @@ -1266,6 +1266,7 @@ envoy_cc_library( visibility = ["//visibility:public"], deps = [ ":quic_core_arena_scoped_ptr_lib", + ":quic_core_connection_context_lib", ":quic_core_time_lib", ], ) diff --git a/bazel/repository_locations.bzl b/bazel/repository_locations.bzl index e6aa827110114..b01a14f881ba1 100644 --- a/bazel/repository_locations.bzl +++ b/bazel/repository_locations.bzl @@ -822,13 +822,12 @@ REPOSITORY_LOCATIONS_SPEC = dict( project_name = "QUICHE", project_desc = "QUICHE (QUIC, HTTP/2, Etc) is Google‘s implementation of QUIC and related protocols", project_url = "https://github.com/google/quiche", - version = "8d5eb27ee2e3f009f7180e8ace0ff97830d9c3e9", - sha256 = "88cc71556b96bbec953a716a12c26f88b8af4d5e9a83cf3ec38aba4caed6bf52", - # Static snapshot of https://quiche.googlesource.com/quiche/+archive/{version}.tar.gz + version = "e8ddc3873182355137862b4d6417add2b2b8a31d", + sha256 = "f1d17b033a9e7449ef84f0c7392319061981439fa15c5be3007c4dea4b58ebc3", urls = 
["https://github.com/google/quiche/archive/{version}.tar.gz"], strip_prefix = "quiche-{version}", use_category = ["dataplane_core"], - release_date = "2021-08-31", + release_date = "2021-09-09", cpe = "N/A", ), com_googlesource_googleurl = dict( diff --git a/source/common/quic/envoy_quic_proof_source.cc b/source/common/quic/envoy_quic_proof_source.cc index 67d9e0ce3ce55..d755be071e9f5 100644 --- a/source/common/quic/envoy_quic_proof_source.cc +++ b/source/common/quic/envoy_quic_proof_source.cc @@ -16,7 +16,10 @@ namespace Quic { quic::QuicReferenceCountedPointer EnvoyQuicProofSource::GetCertChain(const quic::QuicSocketAddress& server_address, const quic::QuicSocketAddress& client_address, - const std::string& hostname) { + const std::string& hostname, bool* cert_matched_sni) { + // TODO(DavidSchinazi) parse the certificate to correctly fill in |cert_matched_sni|. + *cert_matched_sni = false; + CertConfigWithFilterChain res = getTlsCertConfigAndFilterChain(server_address, client_address, hostname); absl::optional> cert_config_ref = diff --git a/source/common/quic/envoy_quic_proof_source.h b/source/common/quic/envoy_quic_proof_source.h index fcf388c609140..69d62fd549184 100644 --- a/source/common/quic/envoy_quic_proof_source.h +++ b/source/common/quic/envoy_quic_proof_source.h @@ -22,7 +22,8 @@ class EnvoyQuicProofSource : public EnvoyQuicProofSourceBase { // quic::ProofSource quic::QuicReferenceCountedPointer GetCertChain(const quic::QuicSocketAddress& server_address, - const quic::QuicSocketAddress& client_address, const std::string& hostname) override; + const quic::QuicSocketAddress& client_address, const std::string& hostname, + bool* cert_matched_sni) override; protected: // quic::ProofSource diff --git a/source/common/quic/envoy_quic_server_connection.cc b/source/common/quic/envoy_quic_server_connection.cc index 963032e041f00..6a4d691009361 100644 --- a/source/common/quic/envoy_quic_server_connection.cc +++ b/source/common/quic/envoy_quic_server_connection.cc 
@@ -38,7 +38,8 @@ bool EnvoyQuicServerConnection::OnPacketHeader(const quic::QuicPacketHeader& hea std::unique_ptr EnvoyQuicServerConnection::MakeSelfIssuedConnectionIdManager() { return std::make_unique( - quic::kMinNumOfActiveConnectionIds, connection_id(), clock(), alarm_factory(), this); + quic::kMinNumOfActiveConnectionIds, connection_id(), clock(), alarm_factory(), this, + context()); } quic::QuicConnectionId EnvoyQuicSelfIssuedConnectionIdManager::GenerateNewConnectionId( diff --git a/test/common/quic/envoy_quic_alarm_test.cc b/test/common/quic/envoy_quic_alarm_test.cc index f90f8eb2a4e69..529902376d843 100644 --- a/test/common/quic/envoy_quic_alarm_test.cc +++ b/test/common/quic/envoy_quic_alarm_test.cc @@ -14,7 +14,7 @@ using quic::QuicTime; namespace Envoy { namespace Quic { -class TestDelegate : public quic::QuicAlarm::Delegate { +class TestDelegate : public quic::QuicAlarm::DelegateWithoutContext { public: TestDelegate() = default; diff --git a/test/common/quic/envoy_quic_proof_source_test.cc b/test/common/quic/envoy_quic_proof_source_test.cc index 75230db0f504f..a2deedcb29d16 100644 --- a/test/common/quic/envoy_quic_proof_source_test.cc +++ b/test/common/quic/envoy_quic_proof_source_test.cc @@ -193,8 +193,9 @@ class EnvoyQuicProofSourceTest : public ::testing::Test { TEST_F(EnvoyQuicProofSourceTest, TestGetCerChainAndSignatureAndVerify) { expectCertChainAndPrivateKey(expected_certs_, true); + bool cert_matched_sni; quic::QuicReferenceCountedPointer chain = - proof_source_.GetCertChain(server_address_, client_address_, hostname_); + proof_source_.GetCertChain(server_address_, client_address_, hostname_, &cert_matched_sni); EXPECT_EQ(2, chain->certs.size()); std::string error_details; @@ -216,7 +217,9 @@ TEST_F(EnvoyQuicProofSourceTest, GetCertChainFailBadConfig) { EXPECT_CALL(listen_socket_, ioHandle()).Times(3); EXPECT_CALL(filter_chain_manager_, findFilterChain(_)) .WillOnce(Invoke([&](const Network::ConnectionSocket&) { return nullptr; })); - 
EXPECT_EQ(nullptr, proof_source_.GetCertChain(server_address_, client_address_, hostname_)); + bool cert_matched_sni; + EXPECT_EQ(nullptr, proof_source_.GetCertChain(server_address_, client_address_, hostname_, + &cert_matched_sni)); // Cert not ready. EXPECT_CALL(filter_chain_manager_, findFilterChain(_)) @@ -224,7 +227,8 @@ TEST_F(EnvoyQuicProofSourceTest, GetCertChainFailBadConfig) { EXPECT_CALL(filter_chain_, transportSocketFactory()) .WillOnce(ReturnRef(*transport_socket_factory_)); EXPECT_CALL(*mock_context_config_, isReady()).WillOnce(Return(false)); - EXPECT_EQ(nullptr, proof_source_.GetCertChain(server_address_, client_address_, hostname_)); + EXPECT_EQ(nullptr, proof_source_.GetCertChain(server_address_, client_address_, hostname_, + &cert_matched_sni)); // No certs in config. EXPECT_CALL(filter_chain_manager_, findFilterChain(_)) @@ -242,7 +246,8 @@ TEST_F(EnvoyQuicProofSourceTest, GetCertChainFailBadConfig) { EXPECT_CALL(*mock_context_config_, isReady()).WillOnce(Return(true)); std::vector> tls_cert_configs{}; EXPECT_CALL(*mock_context_config_, tlsCertificates()).WillOnce(Return(tls_cert_configs)); - EXPECT_EQ(nullptr, proof_source_.GetCertChain(server_address_, client_address_, hostname_)); + EXPECT_EQ(nullptr, proof_source_.GetCertChain(server_address_, client_address_, hostname_, + &cert_matched_sni)); } TEST_F(EnvoyQuicProofSourceTest, GetCertChainFailInvalidCert) { @@ -250,7 +255,9 @@ TEST_F(EnvoyQuicProofSourceTest, GetCertChainFailInvalidCert) { invalid certificate -----END CERTIFICATE-----)"}; expectCertChainAndPrivateKey(invalid_cert, false); - EXPECT_EQ(nullptr, proof_source_.GetCertChain(server_address_, client_address_, hostname_)); + bool cert_matched_sni; + EXPECT_EQ(nullptr, proof_source_.GetCertChain(server_address_, client_address_, hostname_, + &cert_matched_sni)); } TEST_F(EnvoyQuicProofSourceTest, GetCertChainFailInvalidPublicKeyInCert) { @@ -275,7 +282,9 @@ x96rVeUbRJ/qU4//nNM/XQa9vIAIcTZ0jFhmb0c3R4rmoqqC3vkSDwtaE5yuS5T4 
GUy+n0vQNB0cXGzgcGI= -----END CERTIFICATE-----)"}; expectCertChainAndPrivateKey(cert_with_rsa_1024, false); - EXPECT_EQ(nullptr, proof_source_.GetCertChain(server_address_, client_address_, hostname_)); + bool cert_matched_sni; + EXPECT_EQ(nullptr, proof_source_.GetCertChain(server_address_, client_address_, hostname_, + &cert_matched_sni)); } TEST_F(EnvoyQuicProofSourceTest, ComputeSignatureFailNoFilterChain) { diff --git a/test/common/quic/test_proof_source.h b/test/common/quic/test_proof_source.h index b4a8a8348223b..434c15fecada5 100644 --- a/test/common/quic/test_proof_source.h +++ b/test/common/quic/test_proof_source.h @@ -27,8 +27,9 @@ class TestProofSource : public EnvoyQuicProofSourceBase { public: quic::QuicReferenceCountedPointer GetCertChain(const quic::QuicSocketAddress& /*server_address*/, - const quic::QuicSocketAddress& /*client_address*/, - const std::string& /*hostname*/) override { + const quic::QuicSocketAddress& /*client_address*/, const std::string& /*hostname*/, + bool* cert_matched_sni) override { + *cert_matched_sni = true; return cert_chain_; } From 5ba8363c894feabcc17c4e2e857d37bb572d396f Mon Sep 17 00:00:00 2001 From: Yuchen Dai Date: Mon, 13 Sep 2021 09:14:42 -0700 Subject: [PATCH 025/121] stats: comments clean up (#18077) Signed-off-by: Yuchen Dai Signed-off-by: gayang --- envoy/stats/symbol_table.h | 4 +-- source/common/stats/allocator_impl.cc | 4 +-- source/common/stats/thread_local_store.cc | 33 ++--------------------- source/common/stats/utility.cc | 5 ---- source/common/stats/utility.h | 16 ----------- 5 files changed, 6 insertions(+), 56 deletions(-) diff --git a/envoy/stats/symbol_table.h b/envoy/stats/symbol_table.h index e7f171b6f8c91..e7fcb3526b487 100644 --- a/envoy/stats/symbol_table.h +++ b/envoy/stats/symbol_table.h @@ -56,7 +56,7 @@ class SymbolTable { * into the SymbolTable, which will not be optimal, but in practice appears * to be pretty good. 
* - * This is exposed in the interface for the benefit of join(), which which is + * This is exposed in the interface for the benefit of join(), which is * used in the hot-path to append two stat-names into a temp without taking * locks. This is used then in thread-local cache lookup, so that once warm, * no locks are taken when looking up stats. @@ -128,7 +128,7 @@ class SymbolTable { * * @param names A pointer to the first name in an array, allocated by the caller. * @param num_names The number of names. - * @param symbol_table The symbol table in which to encode the names. + * @param list The StatNameList representing the stat names. */ virtual void populateList(const StatName* names, uint32_t num_names, StatNameList& list) PURE; diff --git a/source/common/stats/allocator_impl.cc b/source/common/stats/allocator_impl.cc index 4464f41a344e6..9e8a37705e4d2 100644 --- a/source/common/stats/allocator_impl.cc +++ b/source/common/stats/allocator_impl.cc @@ -65,8 +65,8 @@ void AllocatorImpl::debugPrint() { // which we need in order to clean up the counter and gauge maps in that class // when they are destroyed. // -// We implement the RefcountInterface API, using 16 bits that would otherwise be -// wasted in the alignment padding next to flags_. +// We implement the RefcountInterface API to avoid weak counter and destructor overhead in +// shared_ptr. template class StatsSharedImpl : public MetricImpl { public: StatsSharedImpl(StatName name, AllocatorImpl& alloc, StatName tag_extracted_name, diff --git a/source/common/stats/thread_local_store.cc b/source/common/stats/thread_local_store.cc index 0e88476e5e47f..2b79daa7ee86b 100644 --- a/source/common/stats/thread_local_store.cc +++ b/source/common/stats/thread_local_store.cc @@ -539,14 +539,6 @@ Counter& ThreadLocalStoreImpl::ScopeImpl::counterFromStatNameWithTags( } // Determine the final name based on the prefix and the passed name. 
- // - // Note that we can do map.find(final_name.c_str()), but we cannot do - // map[final_name.c_str()] as the char*-keyed maps would then save the pointer - // to a temporary, and address sanitization errors would follow. Instead we - // must do a find() first, using the value if it succeeds. If it fails, then - // after we construct the stat we can insert it into the required maps. This - // strategy costs an extra hash lookup for each miss, but saves time - // re-copying the string and significant memory overhead. TagUtility::TagStatNameJoiner joiner(prefix_.statName(), name, stat_name_tags, symbolTable()); Stats::StatName final_stat_name = joiner.nameWithTags(); @@ -600,12 +592,6 @@ Gauge& ThreadLocalStoreImpl::ScopeImpl::gaugeFromStatNameWithTags( // See comments in counter(). There is no super clean way (via templates or otherwise) to // share this code so I'm leaving it largely duplicated for now. - // - // Note that we can do map.find(final_name.c_str()), but we cannot do - // map[final_name.c_str()] as the char*-keyed maps would then save the pointer to - // a temporary, and address sanitization errors would follow. Instead we must - // do a find() first, using that if it succeeds. If it fails, then after we - // construct the stat we can insert it into the required maps. 
TagUtility::TagStatNameJoiner joiner(prefix_.statName(), name, stat_name_tags, symbolTable()); StatName final_stat_name = joiner.nameWithTags(); @@ -617,7 +603,7 @@ Gauge& ThreadLocalStoreImpl::ScopeImpl::gaugeFromStatNameWithTags( StatRefMap* tls_cache = nullptr; StatNameHashSet* tls_rejected_stats = nullptr; if (!parent_.shutting_down_ && parent_.tls_cache_) { - TlsCacheEntry& entry = parent_.tlsCache().scope_cache_[this->scope_id_]; + TlsCacheEntry& entry = parent_.tlsCache().insertScope(this->scope_id_); tls_cache = &entry.gauges_; tls_rejected_stats = &entry.rejected_stats_; } @@ -642,13 +628,6 @@ Histogram& ThreadLocalStoreImpl::ScopeImpl::histogramFromStatNameWithTags( // See comments in counter(). There is no super clean way (via templates or otherwise) to // share this code so I'm leaving it largely duplicated for now. - // - // Note that we can do map.find(final_name.c_str()), but we cannot do - // map[final_name.c_str()] as the char*-keyed maps would then save the pointer to - // a temporary, and address sanitization errors would follow. Instead we must - // do a find() first, using that if it succeeds. If it fails, then after we - // construct the stat we can insert it into the required maps. 
- TagUtility::TagStatNameJoiner joiner(prefix_.statName(), name, stat_name_tags, symbolTable()); StatName final_stat_name = joiner.nameWithTags(); @@ -660,7 +639,7 @@ Histogram& ThreadLocalStoreImpl::ScopeImpl::histogramFromStatNameWithTags( StatNameHashMap* tls_cache = nullptr; StatNameHashSet* tls_rejected_stats = nullptr; if (!parent_.shutting_down_ && parent_.tls_cache_) { - TlsCacheEntry& entry = parent_.tlsCache().scope_cache_[this->scope_id_]; + TlsCacheEntry& entry = parent_.tlsCache().insertScope(this->scope_id_); tls_cache = &entry.parent_histograms_; auto iter = tls_cache->find(final_stat_name); if (iter != tls_cache->end()) { @@ -720,14 +699,6 @@ TextReadout& ThreadLocalStoreImpl::ScopeImpl::textReadoutFromStatNameWithTags( } // Determine the final name based on the prefix and the passed name. - // - // Note that we can do map.find(final_name.c_str()), but we cannot do - // map[final_name.c_str()] as the char*-keyed maps would then save the pointer - // to a temporary, and address sanitization errors would follow. Instead we - // must do a find() first, using the value if it succeeds. If it fails, then - // after we construct the stat we can insert it into the required maps. This - // strategy costs an extra hash lookup for each miss, but saves time - // re-copying the string and significant memory overhead. 
TagUtility::TagStatNameJoiner joiner(prefix_.statName(), name, stat_name_tags, symbolTable()); Stats::StatName final_stat_name = joiner.nameWithTags(); diff --git a/source/common/stats/utility.cc b/source/common/stats/utility.cc index a4e5574713e23..69fe60d516ba1 100644 --- a/source/common/stats/utility.cc +++ b/source/common/stats/utility.cc @@ -68,11 +68,6 @@ struct ElementVisitor { namespace Utility { -ScopePtr scopeFromElements(Scope& scope, const ElementVec& elements) { - ElementVisitor visitor(scope.symbolTable(), elements); - return scope.scopeFromStatName(visitor.statName()); -} - ScopePtr scopeFromStatNames(Scope& scope, const StatNameVec& elements) { SymbolTable::StoragePtr joined = scope.symbolTable().join(elements); return scope.scopeFromStatName(StatName(joined.get())); diff --git a/source/common/stats/utility.h b/source/common/stats/utility.h index 2ef817b61c171..5c067f628f103 100644 --- a/source/common/stats/utility.h +++ b/source/common/stats/utility.h @@ -63,22 +63,6 @@ std::string sanitizeStatsName(absl::string_view name); */ absl::optional findTag(const Metric& metric, StatName find_tag_name); -/** - * Creates a nested scope from a vector of tokens which are used to create the - * name. The tokens can be specified as DynamicName or StatName. For - * tokens specified as DynamicName, a dynamic StatName will be created. See - * https://github.com/envoyproxy/envoy/blob/main/source/docs/stats.md#dynamic-stat-tokens - * for more detail on why symbolic StatNames are preferred when possible. - * - * See also scopeFromStatNames, which is slightly faster but does not allow - * passing DynamicName(string)s as names. - * - * @param scope The scope in which to create the counter. - * @param elements The vector of mixed DynamicName and StatName - * @return A scope named using the joined elements. 
- */ -ScopePtr scopeFromElements(Scope& scope, const ElementVec& elements); - /** * Creates a nested scope from a vector of StatNames which are used to create the * name. From 0142d1e489383b9523b4205f39eaec35733a0952 Mon Sep 17 00:00:00 2001 From: phlax Date: Mon, 13 Sep 2021 18:45:51 +0100 Subject: [PATCH 026/121] dependabot: Updates (#18013) * build(deps): bump deprecated in /.github/actions/pr_notifier Bumps [deprecated](https://github.com/tantale/deprecated) from 1.2.12 to 1.2.13. - [Release notes](https://github.com/tantale/deprecated/releases) - [Changelog](https://github.com/tantale/deprecated/blob/master/CHANGELOG.rst) - [Commits](https://github.com/tantale/deprecated/compare/v1.2.12...v1.2.13) --- updated-dependencies: - dependency-name: deprecated dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Signed-off-by: Ryan Northey * build(deps): bump deprecated from 1.2.12 to 1.2.13 in /tools/dependency Bumps [deprecated](https://github.com/tantale/deprecated) from 1.2.12 to 1.2.13. - [Release notes](https://github.com/tantale/deprecated/releases) - [Changelog](https://github.com/tantale/deprecated/blob/master/CHANGELOG.rst) - [Commits](https://github.com/tantale/deprecated/compare/v1.2.12...v1.2.13) --- updated-dependencies: - dependency-name: deprecated dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Signed-off-by: Ryan Northey * build(deps): bump deprecated in /tools/deprecate_version Bumps [deprecated](https://github.com/tantale/deprecated) from 1.2.12 to 1.2.13. - [Release notes](https://github.com/tantale/deprecated/releases) - [Changelog](https://github.com/tantale/deprecated/blob/master/CHANGELOG.rst) - [Commits](https://github.com/tantale/deprecated/compare/v1.2.12...v1.2.13) --- updated-dependencies: - dependency-name: deprecated dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Signed-off-by: Ryan Northey * build(deps): bump setuptools from 57.4.0 to 58.0.4 in /tools/base Bumps [setuptools](https://github.com/pypa/setuptools) from 57.4.0 to 58.0.4. - [Release notes](https://github.com/pypa/setuptools/releases) - [Changelog](https://github.com/pypa/setuptools/blob/main/CHANGES.rst) - [Commits](https://github.com/pypa/setuptools/compare/v57.4.0...v58.0.4) --- updated-dependencies: - dependency-name: setuptools dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] Signed-off-by: Ryan Northey Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Signed-off-by: gayang --- .github/actions/pr_notifier/requirements.txt | 6 +- tools/base/requirements.txt | 106 +++++++++---------- tools/dependency/requirements.txt | 6 +- tools/deprecate_version/requirements.txt | 6 +- 4 files changed, 62 insertions(+), 62 deletions(-) diff --git a/.github/actions/pr_notifier/requirements.txt b/.github/actions/pr_notifier/requirements.txt index 2fa1aad74b299..ed12c964aeec4 100644 --- a/.github/actions/pr_notifier/requirements.txt +++ b/.github/actions/pr_notifier/requirements.txt @@ -63,9 +63,9 @@ chardet==4.0.0 \ --hash=sha256:0d6f53a15db4120f2b08c94f11e7d93d2c911ee118b6b30a04ec3ee8310179fa \ --hash=sha256:f864054d66fd9118f2e67044ac8981a54775ec5b67aed0441892edb553d21da5 # via requests -deprecated==1.2.12 \ - --hash=sha256:08452d69b6b5bc66e8330adde0a4f8642e969b9e1702904d137eeb29c8ffc771 \ - --hash=sha256:6d2de2de7931a968874481ef30208fd4e08da39177d61d3d4ebdf4366e7dbca1 +deprecated==1.2.13 \ + --hash=sha256:64756e3e14c8c5eea9795d93c524551432a0be75629f8f29e67ab8caf076c76d \ + --hash=sha256:43ac5335da90c31c24ba028af536a91d41d53f9e6901ddb021bcc572ce44e38d # via pygithub idna==2.10 \ --hash=sha256:b307872f855b18632ce0c21c5e45be78c0ea7ae4c15c828c20788b26921eb3f6 \ diff --git a/tools/base/requirements.txt b/tools/base/requirements.txt index 
8d18996322e79..06e91962896a6 100644 --- a/tools/base/requirements.txt +++ b/tools/base/requirements.txt @@ -15,7 +15,7 @@ abstracts==0.0.12 \ aio.functional==0.0.9 \ --hash=sha256:824a997a394ad891bc9f403426babc13c9d0d1f4d1708c38e77d6aecae1cab1d # via - # -r tools/base/requirements.in + # -r requirements.in # aio.tasks # envoy.github.abstract # envoy.github.release @@ -162,12 +162,12 @@ charset-normalizer==2.0.4 \ colorama==0.4.4 \ --hash=sha256:5941b2b48a20143d2267e95b1c2a7603ce057ee39fd88e7329b0c292aa16869b \ --hash=sha256:9f47eda37229f68eee03b24b9748937c7dc3868f906e8ba69fbcbdd3bc5dc3e2 - # via -r tools/base/requirements.in + # via -r requirements.in coloredlogs==15.0.1 \ --hash=sha256:612ee75c546f53e92e70049c9dbfcc18c935a2b9a53b66085ce9ef6a6e5c0934 \ --hash=sha256:7c991aa71a4577af2f82600d8f8f3a89f936baeaf9b50a9c197da014e5bf16b0 # via - # -r tools/base/requirements.in + # -r requirements.in # envoy.base.runner coverage==5.5 \ --hash=sha256:004d1880bed2d97151facef49f08e255a20ceb6f9432df75f4eef018fdd5a78c \ @@ -223,7 +223,7 @@ coverage==5.5 \ --hash=sha256:f0b278ce10936db1a37e6954e15a3730bea96a0997c26d7fee88e6c396c2086d \ --hash=sha256:f11642dddbb0253cc8853254301b51390ba0081750a8ac03f20ea8103f0c56b6 # via - # -r tools/base/requirements.in + # -r requirements.in # pytest-cov cryptography==3.4.8 \ --hash=sha256:0a7dcbcd3f1913f664aca35d47c1331fce738d44ec34b7be8b9d332151b0b01e \ @@ -275,7 +275,7 @@ envoy.base.runner==0.0.4 \ envoy.base.utils==0.0.8 \ --hash=sha256:b82e18ab0535207b7136d6980239c9350f7113fa5da7dda781bcb6ad1e05b3ab # via - # -r tools/base/requirements.in + # -r requirements.in # envoy.distribution.distrotest # envoy.github.release # envoy.gpg.sign @@ -284,10 +284,10 @@ envoy.distribution.distrotest==0.0.3 \ # via envoy.distribution.verify envoy.distribution.release==0.0.4 \ --hash=sha256:41037e0488f0593ce5173739fe0cd1b45a4775f5a47738b85d9d04024ca241a2 - # via -r tools/base/requirements.in + # via -r requirements.in envoy.distribution.verify==0.0.2 \ 
--hash=sha256:ae59134085de50203edf51c243dbf3301cbe5550db29f0ec6f9ea1c3b82fee1c - # via -r tools/base/requirements.in + # via -r requirements.in envoy.docker.utils==0.0.2 \ --hash=sha256:a12cb57f0b6e204d646cbf94f927b3a8f5a27ed15f60d0576176584ec16a4b76 # via envoy.distribution.distrotest @@ -304,23 +304,23 @@ envoy.gpg.identity==0.0.2 \ # via envoy.gpg.sign envoy.gpg.sign==0.0.3 \ --hash=sha256:31667931f5d7ff05fd809b89748f277511486311c777652af4cb8889bd641049 - # via -r tools/base/requirements.in -flake8-polyfill==1.0.2 \ - --hash=sha256:12be6a34ee3ab795b19ca73505e7b55826d5f6ad7230d31b18e106400169b9e9 \ - --hash=sha256:e44b087597f6da52ec6393a709e7108b2905317d0c0b744cdca6208e670d8eda - # via pep8-naming + # via -r requirements.in flake8==3.9.2 \ --hash=sha256:07528381786f2a6237b061f6e96610a4167b226cb926e2aa2b6b1d78057c576b \ --hash=sha256:bf8fd333346d844f616e8d47905ef3a3384edae6b4e9beb0c5101e25e3110907 # via - # -r tools/base/requirements.in + # -r requirements.in # flake8-polyfill # pep8-naming +flake8-polyfill==1.0.2 \ + --hash=sha256:12be6a34ee3ab795b19ca73505e7b55826d5f6ad7230d31b18e106400169b9e9 \ + --hash=sha256:e44b087597f6da52ec6393a709e7108b2905317d0c0b744cdca6208e670d8eda + # via pep8-naming frozendict==2.0.6 \ --hash=sha256:3f00de72805cf4c9e81b334f3f04809278b967d2fed84552313a0fcce511beb1 \ --hash=sha256:5d3f75832c35d4df041f0e19c268964cbef29c1eb34cd3517cf883f1c2d089b9 # via - # -r tools/base/requirements.in + # -r requirements.in # envoy.base.runner gidgethub==5.0.1 \ --hash=sha256:3efbd6998600254ec7a2869318bd3ffde38edc3a0d37be0c14bc46b45947b682 \ @@ -335,7 +335,7 @@ gitdb==4.0.7 \ gitpython==3.1.18 \ --hash=sha256:b838a895977b45ab6f0cc926a9045c8d1c44e2b653c1fcc39fe91f42c6e8f05b \ --hash=sha256:fce760879cd2aebd2991b3542876dc5c4a909b30c9d69dfc488e504a8db37ee8 - # via -r tools/base/requirements.in + # via -r requirements.in humanfriendly==9.2 \ --hash=sha256:332da98c24cc150efcc91b5508b19115209272bfdf4b0764a56795932f854271 \ 
--hash=sha256:f7dba53ac7935fd0b4a2fc9a29e316ddd9ea135fb3052d3d0279d10c18ff9c48 @@ -358,7 +358,7 @@ jinja2==3.0.1 \ --hash=sha256:1f06f2da51e7b56b8f238affdd6b4e2c61e39598a378cc49345bc1bd42a978a4 \ --hash=sha256:703f484b47a6af502e743c9122595cc812b0271f661722403114f71a79d0f5a4 # via - # -r tools/base/requirements.in + # -r requirements.in # sphinx markupsafe==2.0.1 \ --hash=sha256:01a9b8ea66f1658938f65b93a85ebe8bc016e6769611be228d797c9d998dd298 \ @@ -471,7 +471,7 @@ packaging==21.0 \ pep8-naming==0.12.1 \ --hash=sha256:4a8daeaeb33cfcde779309fc0c9c0a68a3bbe2ad8a8308b763c5068f86eb9f37 \ --hash=sha256:bb2455947757d162aa4cad55dba4ce029005cd1692f2899a21d51d8630ca7841 - # via -r tools/base/requirements.in + # via -r requirements.in pluggy==1.0.0 \ --hash=sha256:4224373bacce55f955a878bf9cfa763c1e360858e330072059e10bad68531159 \ --hash=sha256:74134bbf457f031a36d68416e1509f34bd5ccc019f0bcc952c7b909d06b37bd3 @@ -495,7 +495,7 @@ pyflakes==2.3.1 \ pygithub==1.55 \ --hash=sha256:1bbfff9372047ff3f21d5cd8e07720f3dbfdaf6462fcaed9d815f528f1ba7283 \ --hash=sha256:2caf0054ea079b71e539741ae56c5a95e073b81fa472ce222e81667381b9601b - # via -r tools/base/requirements.in + # via -r requirements.in pygments==2.10.0 \ --hash=sha256:b8e67fe6af78f492b3c4b3e2970c0624cbf08beb1e493b2c99b9fa1b67a20380 \ --hash=sha256:f398865f7eb6874156579fdf36bc840a03cab64d1cde9e93d68f46a425ec52c6 @@ -534,26 +534,26 @@ pyparsing==2.4.7 \ # via packaging pyreadline==2.1 \ --hash=sha256:4530592fc2e85b25b1a9f79664433da09237c1a270e4d78ea5aa3a2c7229e2d1 - # via -r tools/base/requirements.in + # via -r requirements.in +pytest==6.2.5 \ + --hash=sha256:131b36680866a76e6781d13f101efb86cf674ebb9762eb70d3082b6f29889e89 \ + --hash=sha256:7310f8d27bc79ced999e760ca304d69f6ba6c6649c0b60fb0e04a4a77cacc134 + # via + # -r requirements.in + # pytest-asyncio + # pytest-cov + # pytest-patches pytest-asyncio==0.15.1 \ --hash=sha256:2564ceb9612bbd560d19ca4b41347b54e7835c2f792c504f698e05395ed63f6f \ 
--hash=sha256:3042bcdf1c5d978f6b74d96a151c4cfb9dcece65006198389ccd7e6c60eb1eea - # via -r tools/base/requirements.in + # via -r requirements.in pytest-cov==2.12.1 \ --hash=sha256:261bb9e47e65bd099c89c3edf92972865210c36813f80ede5277dceb77a4a62a \ --hash=sha256:261ceeb8c227b726249b376b8526b600f38667ee314f910353fa318caa01f4d7 - # via -r tools/base/requirements.in + # via -r requirements.in pytest-patches==0.0.3 \ --hash=sha256:6f8cdc8641c708c4812f58ae48d410f373a6fd16cd6cc4dc4d3fb8951df9c92a - # via -r tools/base/requirements.in -pytest==6.2.5 \ - --hash=sha256:131b36680866a76e6781d13f101efb86cf674ebb9762eb70d3082b6f29889e89 \ - --hash=sha256:7310f8d27bc79ced999e760ca304d69f6ba6c6649c0b60fb0e04a4a77cacc134 - # via - # -r tools/base/requirements.in - # pytest-asyncio - # pytest-cov - # pytest-patches + # via -r requirements.in python-gnupg==0.4.7 \ --hash=sha256:2061f56b1942c29b92727bf9aecbd3cea3893acc9cccbdc7eb4604285efe4ac7 \ --hash=sha256:3ff5b1bf5e397de6e1fe41a7c0f403dad4e242ac92b345f440eaecfb72a7ebae @@ -593,7 +593,7 @@ pyyaml==5.4.1 \ --hash=sha256:fdc842473cd33f45ff6bce46aea678a54e3d21f1b61a7750ce3c498eedfe25d6 \ --hash=sha256:fe69978f3f768926cfa37b867e3843918e012cf83f680806599ddce33c2c68b0 # via - # -r tools/base/requirements.in + # -r requirements.in # envoy.base.utils requests==2.26.0 \ --hash=sha256:6c1246513ecd5ecd4528a0906f910e8f0f9c6b8ec72030dc9fd154dc1a6efd24 \ @@ -615,28 +615,28 @@ snowballstemmer==2.1.0 \ --hash=sha256:b51b447bea85f9968c13b650126a888aabd4cb4463fca868ec596826325dedc2 \ --hash=sha256:e997baa4f2e9139951b6f4c631bad912dfd3c792467e2f03d7239464af90e914 # via sphinx -sphinx-copybutton==0.4.0 \ - --hash=sha256:4340d33c169dac6dd82dce2c83333412aa786a42dd01a81a8decac3b130dc8b0 \ - --hash=sha256:8daed13a87afd5013c3a9af3575cc4d5bec052075ccd3db243f895c07a689386 - # via -r tools/base/requirements.in -sphinx-rtd-theme==0.5.2 \ - --hash=sha256:32bd3b5d13dc8186d7a42fc816a23d32e83a4827d7d9882948e7b837c232da5a \ - 
--hash=sha256:4a05bdbe8b1446d77a01e20a23ebc6777c74f43237035e76be89699308987d6f - # via -r tools/base/requirements.in -sphinx-tabs==3.2.0 \ - --hash=sha256:1e1b1846c80137bd81a78e4a69b02664b98b1e1da361beb30600b939dfc75065 \ - --hash=sha256:33137914ed9b276e6a686d7a337310ee77b1dae316fdcbce60476913a152e0a4 - # via -r tools/base/requirements.in sphinx==4.1.2 \ --hash=sha256:3092d929cd807926d846018f2ace47ba2f3b671b309c7a89cd3306e80c826b13 \ --hash=sha256:46d52c6cee13fec44744b8c01ed692c18a640f6910a725cbb938bc36e8d64544 # via - # -r tools/base/requirements.in + # -r requirements.in # sphinx-copybutton # sphinx-rtd-theme # sphinx-tabs # sphinxcontrib-httpdomain # sphinxext-rediraffe +sphinx-copybutton==0.4.0 \ + --hash=sha256:4340d33c169dac6dd82dce2c83333412aa786a42dd01a81a8decac3b130dc8b0 \ + --hash=sha256:8daed13a87afd5013c3a9af3575cc4d5bec052075ccd3db243f895c07a689386 + # via -r requirements.in +sphinx-rtd-theme==0.5.2 \ + --hash=sha256:32bd3b5d13dc8186d7a42fc816a23d32e83a4827d7d9882948e7b837c232da5a \ + --hash=sha256:4a05bdbe8b1446d77a01e20a23ebc6777c74f43237035e76be89699308987d6f + # via -r requirements.in +sphinx-tabs==3.2.0 \ + --hash=sha256:1e1b1846c80137bd81a78e4a69b02664b98b1e1da361beb30600b939dfc75065 \ + --hash=sha256:33137914ed9b276e6a686d7a337310ee77b1dae316fdcbce60476913a152e0a4 + # via -r requirements.in sphinxcontrib-applehelp==1.0.2 \ --hash=sha256:806111e5e962be97c29ec4c1e7fe277bfd19e9652fb1a4392105b43e01af885a \ --hash=sha256:a072735ec80e7675e3f432fcae8610ecf509c5f1869d17e2eecff44389cdbc58 @@ -652,7 +652,7 @@ sphinxcontrib-htmlhelp==2.0.0 \ sphinxcontrib-httpdomain==1.7.0 \ --hash=sha256:1fb5375007d70bf180cdd1c79e741082be7aa2d37ba99efe561e1c2e3f38191e \ --hash=sha256:ac40b4fba58c76b073b03931c7b8ead611066a6aebccafb34dc19694f4eb6335 - # via -r tools/base/requirements.in + # via -r requirements.in sphinxcontrib-jsmath==1.0.1 \ --hash=sha256:2ec2eaebfb78f3f2078e73666b1415417a116cc848b72e5172e596c871103178 \ 
--hash=sha256:a9925e4a4587247ed2191a22df5f6970656cb8ca2bd6284309578f2153e0c4b8 @@ -665,12 +665,12 @@ sphinxcontrib-serializinghtml==1.1.5 \ --hash=sha256:352a9a00ae864471d3a7ead8d7d79f5fc0b57e8b3f95e9867eb9eb28999b92fd \ --hash=sha256:aa5f6de5dfdf809ef505c4895e51ef5c9eac17d0f287933eb49ec495280b6952 # via - # -r tools/base/requirements.in + # -r requirements.in # sphinx sphinxext-rediraffe==0.2.7 \ --hash=sha256:651dcbfae5ffda9ffd534dfb8025f36120e5efb6ea1a33f5420023862b9f725d \ --hash=sha256:9e430a52d4403847f4ffb3a8dd6dfc34a9fe43525305131f52ed899743a5fd8c - # via -r tools/base/requirements.in + # via -r requirements.in toml==0.10.2 \ --hash=sha256:806143ae5bfb6a3c6e736a764057db0e6a0e05e338b5630894a5f779cabb4f9b \ --hash=sha256:b3bda1d108d5dd99f4a20d24d9c348e91c4db7ab1b749200bded2f839ccbe68f @@ -700,7 +700,7 @@ verboselogs==1.7 \ --hash=sha256:d63f23bf568295b95d3530c6864a0b580cec70e7ff974177dead1e4ffbc6ff49 \ --hash=sha256:e33ddedcdfdafcb3a174701150430b11b46ceb64c2a9a26198c76a156568e427 # via - # -r tools/base/requirements.in + # -r requirements.in # envoy.base.runner # envoy.github.abstract # envoy.github.release @@ -710,7 +710,7 @@ wrapt==1.12.1 \ yapf==0.31.0 \ --hash=sha256:408fb9a2b254c302f49db83c59f9aa0b4b0fd0ec25be3a5c51181327922ff63d \ --hash=sha256:e3a234ba8455fe201eaa649cdac872d590089a18b661e39bbac7020978dd9c2e - # via -r tools/base/requirements.in + # via -r requirements.in yarl==1.6.3 \ --hash=sha256:00d7ad91b6583602eb9c1d085a2cf281ada267e9a197e8b7cae487dadbfa293e \ --hash=sha256:0355a701b3998dcd832d0dc47cc5dedf3874f966ac7f870e0f3a6788d802d434 \ @@ -752,9 +752,9 @@ yarl==1.6.3 \ # via aiohttp # The following packages are considered to be unsafe in a requirements file: -setuptools==58.0.3 \ - --hash=sha256:1ceadf3ea9a821ef305505db995f2e21550ea62500900164278c4b23109204f3 \ - --hash=sha256:5e4c36f55012a46c1b3e4b67a8236d1d73856a90fc7b3207d29bedb7d2bac417 +setuptools==58.0.4 \ + --hash=sha256:69cc739bc2662098a68a9bc575cd974a57969e70c1d58ade89d104ab73d79770 \ + 
--hash=sha256:f10059f0152e0b7fb6b2edd77bcb1ecd4c9ed7048a826eb2d79f72fd2e6e237b # via - # -r tools/base/requirements.in + # -r requirements.in # sphinx diff --git a/tools/dependency/requirements.txt b/tools/dependency/requirements.txt index 1d841a10db6dc..f2e21e84a3d6e 100644 --- a/tools/dependency/requirements.txt +++ b/tools/dependency/requirements.txt @@ -63,9 +63,9 @@ colorama==0.4.4 \ --hash=sha256:5941b2b48a20143d2267e95b1c2a7603ce057ee39fd88e7329b0c292aa16869b \ --hash=sha256:9f47eda37229f68eee03b24b9748937c7dc3868f906e8ba69fbcbdd3bc5dc3e2 # via -r tools/dependency/requirements.txt -deprecated==1.2.12 \ - --hash=sha256:08452d69b6b5bc66e8330adde0a4f8642e969b9e1702904d137eeb29c8ffc771 \ - --hash=sha256:6d2de2de7931a968874481ef30208fd4e08da39177d61d3d4ebdf4366e7dbca1 +deprecated==1.2.13 \ + --hash=sha256:64756e3e14c8c5eea9795d93c524551432a0be75629f8f29e67ab8caf076c76d \ + --hash=sha256:43ac5335da90c31c24ba028af536a91d41d53f9e6901ddb021bcc572ce44e38d # via # -r tools/dependency/requirements.txt # pygithub diff --git a/tools/deprecate_version/requirements.txt b/tools/deprecate_version/requirements.txt index e64b21c2feb4c..e3ce651eb5f15 100644 --- a/tools/deprecate_version/requirements.txt +++ b/tools/deprecate_version/requirements.txt @@ -55,9 +55,9 @@ chardet==4.0.0 \ # via # -r tools/deprecate_version/requirements.txt # requests -deprecated==1.2.12 \ - --hash=sha256:08452d69b6b5bc66e8330adde0a4f8642e969b9e1702904d137eeb29c8ffc771 \ - --hash=sha256:6d2de2de7931a968874481ef30208fd4e08da39177d61d3d4ebdf4366e7dbca1 +deprecated==1.2.13 \ + --hash=sha256:64756e3e14c8c5eea9795d93c524551432a0be75629f8f29e67ab8caf076c76d \ + --hash=sha256:43ac5335da90c31c24ba028af536a91d41d53f9e6901ddb021bcc572ce44e38d # via # -r tools/deprecate_version/requirements.txt # pygithub From 80edb9cc48dd9fc2d5be8a7c72e6e82ce9cd6bed Mon Sep 17 00:00:00 2001 From: htuch Date: Mon, 13 Sep 2021 15:02:52 -0400 Subject: [PATCH 027/121] api: remove generated_api_shadow (#18091) Signed-off-by: Harvey 
Tuch Signed-off-by: gayang --- api/STYLE.md | 5 +- api/tools/generate_listeners_test.py | 2 +- api/tools/tap2pcap.py | 2 +- api/tools/tap2pcap_test.py | 2 +- bazel/api_binding.bzl | 9 +- ci/do_ci.sh | 14 +- docs/BUILD | 8 +- docs/root/operations/traffic_tapping.rst | 2 +- generated_api_shadow/BUILD | 268 --- generated_api_shadow/README.md | 6 - generated_api_shadow/bazel/BUILD | 17 - .../bazel/api_build_system.bzl | 199 -- .../bazel/envoy_http_archive.bzl | 22 - generated_api_shadow/bazel/external_deps.bzl | 143 -- .../bazel/external_proto_deps.bzl | 48 - generated_api_shadow/bazel/repositories.bzl | 171 -- .../bazel/repository_locations.bzl | 133 -- .../bazel/repository_locations_utils.bzl | 20 - .../extensions/filters/http/squash/v3/BUILD | 9 - .../filters/http/squash/v3/squash.proto | 60 - .../extensions/filters/http/sxg/v3alpha/BUILD | 12 - .../filters/http/sxg/v3alpha/sxg.proto | 67 - .../filters/network/kafka_broker/v3/BUILD | 9 - .../kafka_broker/v3/kafka_broker.proto | 24 - .../filters/network/kafka_mesh/v3alpha/BUILD | 9 - .../kafka_mesh/v3alpha/kafka_mesh.proto | 58 - .../filters/network/mysql_proxy/v3/BUILD | 9 - .../network/mysql_proxy/v3/mysql_proxy.proto | 29 - .../network/postgres_proxy/v3alpha/BUILD | 9 - .../v3alpha/postgres_proxy.proto | 41 - .../filters/network/rocketmq_proxy/v3/BUILD | 14 - .../rocketmq_proxy/v3/rocketmq_proxy.proto | 34 - .../network/rocketmq_proxy/v3/route.proto | 54 - .../envoy/admin/v2alpha/BUILD | 16 - .../envoy/admin/v2alpha/certs.proto | 62 - .../envoy/admin/v2alpha/clusters.proto | 153 -- .../envoy/admin/v2alpha/config_dump.proto | 291 --- .../envoy/admin/v2alpha/listeners.proto | 31 - .../envoy/admin/v2alpha/memory.proto | 44 - .../envoy/admin/v2alpha/metrics.proto | 29 - .../envoy/admin/v2alpha/mutex_stats.proto | 30 - .../envoy/admin/v2alpha/server_info.proto | 154 -- .../envoy/admin/v2alpha/tap.proto | 25 - generated_api_shadow/envoy/admin/v3/BUILD | 18 - .../envoy/admin/v3/certs.proto | 84 - 
.../envoy/admin/v3/clusters.proto | 177 -- .../envoy/admin/v3/config_dump.proto | 482 ---- .../envoy/admin/v3/init_dump.proto | 31 - .../envoy/admin/v3/listeners.proto | 36 - .../envoy/admin/v3/memory.proto | 47 - .../envoy/admin/v3/metrics.proto | 32 - .../envoy/admin/v3/mutex_stats.proto | 33 - .../envoy/admin/v3/server_info.proto | 205 -- generated_api_shadow/envoy/admin/v3/tap.proto | 28 - generated_api_shadow/envoy/annotations/BUILD | 5 - .../envoy/annotations/deprecation.proto | 33 - .../envoy/annotations/resource.proto | 16 - generated_api_shadow/envoy/api/v2/BUILD | 22 - generated_api_shadow/envoy/api/v2/README.md | 9 - generated_api_shadow/envoy/api/v2/auth/BUILD | 13 - .../envoy/api/v2/auth/cert.proto | 15 - .../envoy/api/v2/auth/common.proto | 327 --- .../envoy/api/v2/auth/secret.proto | 50 - .../envoy/api/v2/auth/tls.proto | 152 -- generated_api_shadow/envoy/api/v2/cds.proto | 43 - .../envoy/api/v2/cluster.proto | 867 ------- .../envoy/api/v2/cluster/BUILD | 13 - .../api/v2/cluster/circuit_breaker.proto | 99 - .../envoy/api/v2/cluster/filter.proto | 30 - .../api/v2/cluster/outlier_detection.proto | 151 -- generated_api_shadow/envoy/api/v2/core/BUILD | 14 - .../envoy/api/v2/core/address.proto | 134 -- .../envoy/api/v2/core/backoff.proto | 35 - .../envoy/api/v2/core/base.proto | 381 --- .../envoy/api/v2/core/config_source.proto | 185 -- .../api/v2/core/event_service_config.proto | 26 - .../envoy/api/v2/core/grpc_method_list.proto | 28 - .../envoy/api/v2/core/grpc_service.proto | 227 -- .../envoy/api/v2/core/health_check.proto | 308 --- .../envoy/api/v2/core/http_uri.proto | 55 - .../envoy/api/v2/core/protocol.proto | 297 --- .../envoy/api/v2/core/socket_option.proto | 55 - .../envoy/api/v2/discovery.proto | 234 -- generated_api_shadow/envoy/api/v2/eds.proto | 45 - .../envoy/api/v2/endpoint.proto | 119 - .../envoy/api/v2/endpoint/BUILD | 12 - .../envoy/api/v2/endpoint/endpoint.proto | 9 - .../api/v2/endpoint/endpoint_components.proto | 148 -- 
.../envoy/api/v2/endpoint/load_report.proto | 157 -- generated_api_shadow/envoy/api/v2/lds.proto | 47 - .../envoy/api/v2/listener.proto | 248 -- .../envoy/api/v2/listener/BUILD | 14 - .../envoy/api/v2/listener/listener.proto | 11 - .../api/v2/listener/listener_components.proto | 287 --- .../envoy/api/v2/listener/quic_config.proto | 35 - .../api/v2/listener/udp_listener_config.proto | 38 - .../envoy/api/v2/ratelimit/BUILD | 9 - .../envoy/api/v2/ratelimit/ratelimit.proto | 69 - generated_api_shadow/envoy/api/v2/rds.proto | 64 - generated_api_shadow/envoy/api/v2/route.proto | 113 - generated_api_shadow/envoy/api/v2/route/BUILD | 16 - .../envoy/api/v2/route/route.proto | 9 - .../envoy/api/v2/route/route_components.proto | 1628 ------------- .../envoy/api/v2/scoped_route.proto | 109 - generated_api_shadow/envoy/api/v2/srds.proto | 50 - generated_api_shadow/envoy/config/README.md | 3 - .../envoy/config/accesslog/v2/BUILD | 12 - .../envoy/config/accesslog/v2/als.proto | 75 - .../envoy/config/accesslog/v2/file.proto | 43 - .../envoy/config/accesslog/v3/BUILD | 17 - .../envoy/config/accesslog/v3/accesslog.proto | 327 --- .../envoy/config/bootstrap/v2/BUILD | 18 - .../envoy/config/bootstrap/v2/bootstrap.proto | 352 --- .../envoy/config/bootstrap/v3/BUILD | 22 - .../envoy/config/bootstrap/v3/bootstrap.proto | 648 ----- .../config/cluster/aggregate/v2alpha/BUILD | 9 - .../cluster/aggregate/v2alpha/cluster.proto | 24 - .../dynamic_forward_proxy/v2alpha/BUILD | 12 - .../v2alpha/cluster.proto | 29 - .../envoy/config/cluster/redis/BUILD | 9 - .../config/cluster/redis/redis_cluster.proto | 81 - .../envoy/config/cluster/v3/BUILD | 19 - .../config/cluster/v3/circuit_breaker.proto | 105 - .../envoy/config/cluster/v3/cluster.proto | 1163 --------- .../envoy/config/cluster/v3/filter.proto | 30 - .../config/cluster/v3/outlier_detection.proto | 157 -- .../dynamic_forward_proxy/v2alpha/BUILD | 12 - .../v2alpha/dns_cache.proto | 85 - .../envoy/config/common/matcher/v3/BUILD | 14 - 
.../config/common/matcher/v3/matcher.proto | 226 -- .../envoy/config/common/tap/v2alpha/BUILD | 12 - .../config/common/tap/v2alpha/common.proto | 39 - .../envoy/config/core/v3/BUILD | 16 - .../envoy/config/core/v3/address.proto | 160 -- .../envoy/config/core/v3/backoff.proto | 36 - .../envoy/config/core/v3/base.proto | 456 ---- .../envoy/config/core/v3/config_source.proto | 216 -- .../config/core/v3/event_service_config.proto | 28 - .../envoy/config/core/v3/extension.proto | 61 - .../config/core/v3/grpc_method_list.proto | 32 - .../envoy/config/core/v3/grpc_service.proto | 296 --- .../envoy/config/core/v3/health_check.proto | 377 --- .../envoy/config/core/v3/http_uri.proto | 56 - .../envoy/config/core/v3/protocol.proto | 494 ---- .../envoy/config/core/v3/proxy_protocol.proto | 25 - .../envoy/config/core/v3/resolver.proto | 41 - .../envoy/config/core/v3/socket_option.proto | 56 - .../core/v3/substitution_format_string.proto | 114 - .../config/core/v3/udp_socket_config.proto | 31 - .../envoy/config/endpoint/v3/BUILD | 16 - .../envoy/config/endpoint/v3/endpoint.proto | 120 - .../endpoint/v3/endpoint_components.proto | 188 -- .../config/endpoint/v3/load_report.proto | 167 -- .../envoy/config/filter/README.md | 4 - .../envoy/config/filter/accesslog/v2/BUILD | 14 - .../filter/accesslog/v2/accesslog.proto | 256 -- .../config/filter/dubbo/router/v2alpha1/BUILD | 9 - .../filter/dubbo/router/v2alpha1/router.proto | 19 - .../envoy/config/filter/fault/v2/BUILD | 13 - .../envoy/config/filter/fault/v2/fault.proto | 87 - .../http/adaptive_concurrency/v2alpha/BUILD | 13 - .../v2alpha/adaptive_concurrency.proto | 94 - .../filter/http/aws_lambda/v2alpha/BUILD | 9 - .../http/aws_lambda/v2alpha/aws_lambda.proto | 50 - .../http/aws_request_signing/v2alpha/BUILD | 9 - .../v2alpha/aws_request_signing.proto | 45 - .../envoy/config/filter/http/buffer/v2/BUILD | 9 - .../config/filter/http/buffer/v2/buffer.proto | 40 - .../config/filter/http/cache/v2alpha/BUILD | 13 - 
.../filter/http/cache/v2alpha/cache.proto | 77 - .../config/filter/http/compressor/v2/BUILD | 12 - .../http/compressor/v2/compressor.proto | 48 - .../envoy/config/filter/http/cors/v2/BUILD | 9 - .../config/filter/http/cors/v2/cors.proto | 20 - .../envoy/config/filter/http/csrf/v2/BUILD | 13 - .../config/filter/http/csrf/v2/csrf.proto | 52 - .../http/dynamic_forward_proxy/v2alpha/BUILD | 12 - .../v2alpha/dynamic_forward_proxy.proto | 61 - .../envoy/config/filter/http/dynamo/v2/BUILD | 9 - .../config/filter/http/dynamo/v2/dynamo.proto | 20 - .../config/filter/http/ext_authz/v2/BUILD | 15 - .../filter/http/ext_authz/v2/ext_authz.proto | 234 -- .../envoy/config/filter/http/fault/v2/BUILD | 14 - .../config/filter/http/fault/v2/fault.proto | 129 - .../filter/http/grpc_http1_bridge/v2/BUILD | 9 - .../http/grpc_http1_bridge/v2/config.proto | 21 - .../grpc_http1_reverse_bridge/v2alpha1/BUILD | 9 - .../v2alpha1/config.proto | 39 - .../filter/http/grpc_stats/v2alpha/BUILD | 12 - .../http/grpc_stats/v2alpha/config.proto | 62 - .../config/filter/http/grpc_web/v2/BUILD | 9 - .../filter/http/grpc_web/v2/grpc_web.proto | 21 - .../envoy/config/filter/http/gzip/v2/BUILD | 12 - .../config/filter/http/gzip/v2/gzip.proto | 96 - .../filter/http/header_to_metadata/v2/BUILD | 9 - .../v2/header_to_metadata.proto | 100 - .../config/filter/http/health_check/v2/BUILD | 13 - .../http/health_check/v2/health_check.proto | 51 - .../config/filter/http/ip_tagging/v2/BUILD | 12 - .../http/ip_tagging/v2/ip_tagging.proto | 57 - .../filter/http/jwt_authn/v2alpha/BUILD | 13 - .../filter/http/jwt_authn/v2alpha/README.md | 66 - .../http/jwt_authn/v2alpha/config.proto | 500 ---- .../envoy/config/filter/http/lua/v2/BUILD | 9 - .../envoy/config/filter/http/lua/v2/lua.proto | 25 - .../config/filter/http/on_demand/v2/BUILD | 9 - .../filter/http/on_demand/v2/on_demand.proto | 20 - .../filter/http/original_src/v2alpha1/BUILD | 9 - .../original_src/v2alpha1/original_src.proto | 27 - 
.../config/filter/http/rate_limit/v2/BUILD | 12 - .../http/rate_limit/v2/rate_limit.proto | 66 - .../envoy/config/filter/http/rbac/v2/BUILD | 12 - .../config/filter/http/rbac/v2/rbac.proto | 38 - .../envoy/config/filter/http/router/v2/BUILD | 12 - .../config/filter/http/router/v2/router.proto | 79 - .../envoy/config/filter/http/squash/v2/BUILD | 9 - .../config/filter/http/squash/v2/squash.proto | 58 - .../config/filter/http/tap/v2alpha/BUILD | 12 - .../config/filter/http/tap/v2alpha/tap.proto | 26 - .../config/filter/http/transcoder/v2/BUILD | 9 - .../http/transcoder/v2/transcoder.proto | 159 -- .../filter/listener/http_inspector/v2/BUILD | 9 - .../http_inspector/v2/http_inspector.proto | 20 - .../filter/listener/original_dst/v2/BUILD | 9 - .../original_dst/v2/original_dst.proto | 20 - .../listener/original_src/v2alpha1/BUILD | 9 - .../original_src/v2alpha1/original_src.proto | 31 - .../filter/listener/proxy_protocol/v2/BUILD | 9 - .../proxy_protocol/v2/proxy_protocol.proto | 20 - .../filter/listener/tls_inspector/v2/BUILD | 9 - .../tls_inspector/v2/tls_inspector.proto | 20 - .../filter/network/client_ssl_auth/v2/BUILD | 12 - .../client_ssl_auth/v2/client_ssl_auth.proto | 46 - .../filter/network/direct_response/v2/BUILD | 12 - .../network/direct_response/v2/config.proto | 24 - .../filter/network/dubbo_proxy/v2alpha1/BUILD | 14 - .../network/dubbo_proxy/v2alpha1/README.md | 1 - .../dubbo_proxy/v2alpha1/dubbo_proxy.proto | 66 - .../network/dubbo_proxy/v2alpha1/route.proto | 105 - .../envoy/config/filter/network/echo/v2/BUILD | 9 - .../config/filter/network/echo/v2/echo.proto | 19 - .../config/filter/network/ext_authz/v2/BUILD | 12 - .../network/ext_authz/v2/ext_authz.proto | 46 - .../network/http_connection_manager/v2/BUILD | 18 - .../v2/http_connection_manager.proto | 679 ------ .../network/kafka_broker/v2alpha1/BUILD | 9 - .../kafka_broker/v2alpha1/kafka_broker.proto | 23 - .../network/local_rate_limit/v2alpha/BUILD | 13 - .../v2alpha/local_rate_limit.proto | 45 - 
.../filter/network/mongo_proxy/v2/BUILD | 12 - .../network/mongo_proxy/v2/mongo_proxy.proto | 41 - .../filter/network/mysql_proxy/v1alpha1/BUILD | 9 - .../mysql_proxy/v1alpha1/mysql_proxy.proto | 28 - .../config/filter/network/rate_limit/v2/BUILD | 13 - .../network/rate_limit/v2/rate_limit.proto | 52 - .../envoy/config/filter/network/rbac/v2/BUILD | 12 - .../config/filter/network/rbac/v2/rbac.proto | 55 - .../filter/network/redis_proxy/v2/BUILD | 13 - .../network/redis_proxy/v2/redis_proxy.proto | 245 -- .../filter/network/sni_cluster/v2/BUILD | 9 - .../network/sni_cluster/v2/sni_cluster.proto | 20 - .../config/filter/network/tcp_proxy/v2/BUILD | 14 - .../network/tcp_proxy/v2/tcp_proxy.proto | 184 -- .../network/thrift_proxy/v2alpha1/BUILD | 13 - .../network/thrift_proxy/v2alpha1/README.md | 1 - .../network/thrift_proxy/v2alpha1/route.proto | 141 -- .../thrift_proxy/v2alpha1/thrift_proxy.proto | 121 - .../network/zookeeper_proxy/v1alpha1/BUILD | 9 - .../v1alpha1/zookeeper_proxy.proto | 40 - .../filter/thrift/rate_limit/v2alpha1/BUILD | 12 - .../rate_limit/v2alpha1/rate_limit.proto | 55 - .../filter/thrift/router/v2alpha1/BUILD | 9 - .../thrift/router/v2alpha1/router.proto | 17 - .../config/filter/udp/udp_proxy/v2alpha/BUILD | 9 - .../udp/udp_proxy/v2alpha/udp_proxy.proto | 37 - .../config/grpc_credential/v2alpha/BUILD | 12 - .../grpc_credential/v2alpha/aws_iam.proto | 31 - .../v2alpha/file_based_metadata.proto | 31 - .../envoy/config/grpc_credential/v3/BUILD | 12 - .../config/grpc_credential/v3/aws_iam.proto | 35 - .../v3/file_based_metadata.proto | 35 - .../config/health_checker/redis/v2/BUILD | 9 - .../health_checker/redis/v2/redis.proto | 22 - .../envoy/config/listener/v2/BUILD | 9 - .../config/listener/v2/api_listener.proto | 30 - .../envoy/config/listener/v3/BUILD | 20 - .../config/listener/v3/api_listener.proto | 33 - .../envoy/config/listener/v3/listener.proto | 318 --- .../listener/v3/listener_components.proto | 361 --- 
.../config/listener/v3/quic_config.proto | 62 - .../listener/v3/udp_listener_config.proto | 52 - .../envoy/config/metrics/v2/BUILD | 13 - .../config/metrics/v2/metrics_service.proto | 24 - .../envoy/config/metrics/v2/stats.proto | 339 --- .../envoy/config/metrics/v3/BUILD | 15 - .../config/metrics/v3/metrics_service.proto | 57 - .../envoy/config/metrics/v3/stats.proto | 409 ---- .../envoy/config/overload/v2alpha/BUILD | 9 - .../config/overload/v2alpha/overload.proto | 80 - .../envoy/config/overload/v3/BUILD | 14 - .../envoy/config/overload/v3/overload.proto | 180 -- .../envoy/config/ratelimit/v2/BUILD | 12 - .../envoy/config/ratelimit/v2/rls.proto | 25 - .../envoy/config/ratelimit/v3/BUILD | 12 - .../envoy/config/ratelimit/v3/rls.proto | 34 - .../envoy/config/rbac/v2/BUILD | 15 - .../envoy/config/rbac/v2/rbac.proto | 240 -- .../envoy/config/rbac/v3/BUILD | 18 - .../envoy/config/rbac/v3/rbac.proto | 306 --- .../resource_monitor/fixed_heap/v2alpha/BUILD | 9 - .../fixed_heap/v2alpha/fixed_heap.proto | 21 - .../injected_resource/v2alpha/BUILD | 9 - .../v2alpha/injected_resource.proto | 22 - .../config/retry/omit_canary_hosts/v2/BUILD | 9 - .../v2/omit_canary_hosts.proto | 19 - .../config/retry/omit_host_metadata/v2/BUILD | 12 - .../v2/omit_host_metadata_config.proto | 28 - .../config/retry/previous_hosts/v2/BUILD | 9 - .../previous_hosts/v2/previous_hosts.proto | 19 - .../config/retry/previous_priorities/BUILD | 9 - .../previous_priorities_config.proto | 56 - .../envoy/config/route/v3/BUILD | 19 - .../envoy/config/route/v3/route.proto | 142 -- .../config/route/v3/route_components.proto | 2106 ----------------- .../envoy/config/route/v3/scoped_route.proto | 120 - .../envoy/config/tap/v3/BUILD | 16 - .../envoy/config/tap/v3/common.proto | 280 --- .../envoy/config/trace/v2/BUILD | 14 - .../envoy/config/trace/v2/datadog.proto | 23 - .../envoy/config/trace/v2/dynamic_ot.proto | 29 - .../envoy/config/trace/v2/http_tracer.proto | 65 - .../envoy/config/trace/v2/lightstep.proto 
| 43 - .../envoy/config/trace/v2/opencensus.proto | 92 - .../envoy/config/trace/v2/service.proto | 21 - .../envoy/config/trace/v2/trace.proto | 15 - .../envoy/config/trace/v2/zipkin.proto | 64 - .../envoy/config/trace/v2alpha/BUILD | 12 - .../envoy/config/trace/v2alpha/xray.proto | 32 - .../envoy/config/trace/v3/BUILD | 16 - .../envoy/config/trace/v3/datadog.proto | 29 - .../envoy/config/trace/v3/dynamic_ot.proto | 36 - .../envoy/config/trace/v3/http_tracer.proto | 60 - .../envoy/config/trace/v3/lightstep.proto | 57 - .../envoy/config/trace/v3/opencensus.proto | 105 - .../envoy/config/trace/v3/service.proto | 25 - .../envoy/config/trace/v3/skywalking.proto | 65 - .../envoy/config/trace/v3/trace.proto | 15 - .../envoy/config/trace/v3/xray.proto | 55 - .../envoy/config/trace/v3/zipkin.proto | 73 - .../transport_socket/alts/v2alpha/BUILD | 9 - .../transport_socket/alts/v2alpha/alts.proto | 29 - .../transport_socket/raw_buffer/v2/BUILD | 9 - .../raw_buffer/v2/raw_buffer.proto | 20 - .../config/transport_socket/tap/v2alpha/BUILD | 13 - .../transport_socket/tap/v2alpha/tap.proto | 32 - .../envoy/data/accesslog/v2/BUILD | 12 - .../envoy/data/accesslog/v2/accesslog.proto | 378 --- .../envoy/data/accesslog/v3/BUILD | 13 - .../envoy/data/accesslog/v3/accesslog.proto | 433 ---- .../envoy/data/cluster/v2alpha/BUILD | 9 - .../v2alpha/outlier_detection_event.proto | 135 -- .../envoy/data/cluster/v3/BUILD | 9 - .../cluster/v3/outlier_detection_event.proto | 145 -- .../envoy/data/core/v2alpha/BUILD | 12 - .../core/v2alpha/health_check_event.proto | 88 - generated_api_shadow/envoy/data/core/v3/BUILD | 12 - .../data/core/v3/health_check_event.proto | 106 - .../envoy/data/dns/v2alpha/BUILD | 12 - .../envoy/data/dns/v2alpha/dns_table.proto | 74 - generated_api_shadow/envoy/data/dns/v3/BUILD | 13 - .../envoy/data/dns/v3/dns_table.proto | 156 -- .../envoy/data/tap/v2alpha/BUILD | 12 - .../envoy/data/tap/v2alpha/common.proto | 34 - .../envoy/data/tap/v2alpha/http.proto | 64 - 
.../envoy/data/tap/v2alpha/transport.proto | 102 - .../envoy/data/tap/v2alpha/wrapper.proto | 36 - generated_api_shadow/envoy/data/tap/v3/BUILD | 13 - .../envoy/data/tap/v3/common.proto | 37 - .../envoy/data/tap/v3/http.proto | 74 - .../envoy/data/tap/v3/transport.proto | 122 - .../envoy/data/tap/v3/wrapper.proto | 40 - .../extensions/access_loggers/file/v3/BUILD | 13 - .../access_loggers/file/v3/file.proto | 62 - .../extensions/access_loggers/grpc/v3/BUILD | 12 - .../access_loggers/grpc/v3/als.proto | 89 - .../open_telemetry/v3alpha/BUILD | 13 - .../open_telemetry/v3alpha/logs_service.proto | 42 - .../extensions/access_loggers/stream/v3/BUILD | 12 - .../access_loggers/stream/v3/stream.proto | 38 - .../extensions/access_loggers/wasm/v3/BUILD | 12 - .../access_loggers/wasm/v3/wasm.proto | 22 - .../cache/simple_http_cache/v3alpha/BUILD | 9 - .../simple_http_cache/v3alpha/config.proto | 16 - .../extensions/clusters/aggregate/v3/BUILD | 9 - .../clusters/aggregate/v3/cluster.proto | 26 - .../clusters/dynamic_forward_proxy/v3/BUILD | 12 - .../dynamic_forward_proxy/v3/cluster.proto | 35 - .../envoy/extensions/clusters/redis/v3/BUILD | 9 - .../clusters/redis/v3/redis_cluster.proto | 85 - .../common/dynamic_forward_proxy/v3/BUILD | 15 - .../dynamic_forward_proxy/v3/dns_cache.proto | 146 -- .../extensions/common/key_value/v3/BUILD | 12 - .../common/key_value/v3/config.proto | 22 - .../envoy/extensions/common/matching/v3/BUILD | 15 - .../matching/v3/extension_matcher.proto | 37 - .../extensions/common/ratelimit/v3/BUILD | 13 - .../common/ratelimit/v3/ratelimit.proto | 103 - .../envoy/extensions/common/tap/v3/BUILD | 12 - .../extensions/common/tap/v3/common.proto | 44 - .../compression/brotli/compressor/v3/BUILD | 9 - .../brotli/compressor/v3/brotli.proto | 54 - .../compression/brotli/decompressor/v3/BUILD | 9 - .../brotli/decompressor/v3/brotli.proto | 25 - .../compression/gzip/compressor/v3/BUILD | 9 - .../compression/gzip/compressor/v3/gzip.proto | 78 - 
.../compression/gzip/decompressor/v3/BUILD | 9 - .../gzip/decompressor/v3/gzip.proto | 29 - .../filters/common/dependency/v3/BUILD | 9 - .../common/dependency/v3/dependency.proto | 59 - .../extensions/filters/common/fault/v3/BUILD | 14 - .../filters/common/fault/v3/fault.proto | 101 - .../filters/common/matcher/action/v3/BUILD | 9 - .../matcher/action/v3/skip_action.proto | 24 - .../http/adaptive_concurrency/v3/BUILD | 13 - .../v3/adaptive_concurrency.proto | 107 - .../http/admission_control/v3alpha/BUILD | 13 - .../v3alpha/admission_control.proto | 103 - .../http/alternate_protocols_cache/v3/BUILD | 12 - .../v3/alternate_protocols_cache.proto | 27 - .../filters/http/aws_lambda/v3/BUILD | 9 - .../http/aws_lambda/v3/aws_lambda.proto | 53 - .../filters/http/aws_request_signing/v3/BUILD | 9 - .../v3/aws_request_signing.proto | 51 - .../http/bandwidth_limit/v3alpha/BUILD | 12 - .../v3alpha/bandwidth_limit.proto | 70 - .../extensions/filters/http/buffer/v3/BUILD | 9 - .../filters/http/buffer/v3/buffer.proto | 45 - .../filters/http/cache/v3alpha/BUILD | 13 - .../filters/http/cache/v3alpha/cache.proto | 82 - .../filters/http/cdn_loop/v3alpha/BUILD | 9 - .../http/cdn_loop/v3alpha/cdn_loop.proto | 36 - .../filters/http/composite/v3/BUILD | 12 - .../filters/http/composite/v3/composite.proto | 37 - .../filters/http/compressor/v3/BUILD | 13 - .../http/compressor/v3/compressor.proto | 125 - .../extensions/filters/http/cors/v3/BUILD | 9 - .../filters/http/cors/v3/cors.proto | 21 - .../extensions/filters/http/csrf/v3/BUILD | 13 - .../filters/http/csrf/v3/csrf.proto | 54 - .../filters/http/decompressor/v3/BUILD | 12 - .../http/decompressor/v3/decompressor.proto | 55 - .../http/dynamic_forward_proxy/v3/BUILD | 12 - .../v3/dynamic_forward_proxy.proto | 64 - .../extensions/filters/http/dynamo/v3/BUILD | 9 - .../filters/http/dynamo/v3/dynamo.proto | 21 - .../filters/http/ext_authz/v3/BUILD | 14 - .../filters/http/ext_authz/v3/ext_authz.proto | 317 --- 
.../filters/http/ext_proc/v3alpha/BUILD | 12 - .../http/ext_proc/v3alpha/ext_proc.proto | 186 -- .../ext_proc/v3alpha/processing_mode.proto | 74 - .../extensions/filters/http/fault/v3/BUILD | 14 - .../filters/http/fault/v3/fault.proto | 150 -- .../filters/http/grpc_http1_bridge/v3/BUILD | 9 - .../http/grpc_http1_bridge/v3/config.proto | 21 - .../http/grpc_http1_reverse_bridge/v3/BUILD | 9 - .../grpc_http1_reverse_bridge/v3/config.proto | 60 - .../http/grpc_json_transcoder/v3/BUILD | 9 - .../grpc_json_transcoder/v3/transcoder.proto | 235 -- .../filters/http/grpc_stats/v3/BUILD | 12 - .../filters/http/grpc_stats/v3/config.proto | 74 - .../extensions/filters/http/grpc_web/v3/BUILD | 9 - .../filters/http/grpc_web/v3/grpc_web.proto | 21 - .../extensions/filters/http/gzip/v3/BUILD | 12 - .../filters/http/gzip/v3/gzip.proto | 81 - .../filters/http/header_to_metadata/v3/BUILD | 12 - .../v3/header_to_metadata.proto | 132 -- .../filters/http/health_check/v3/BUILD | 13 - .../http/health_check/v3/health_check.proto | 52 - .../filters/http/ip_tagging/v3/BUILD | 12 - .../http/ip_tagging/v3/ip_tagging.proto | 61 - .../filters/http/jwt_authn/v3/BUILD | 13 - .../filters/http/jwt_authn/v3/config.proto | 678 ------ .../filters/http/kill_request/v3/BUILD | 12 - .../http/kill_request/v3/kill_request.proto | 36 - .../filters/http/local_ratelimit/v3/BUILD | 14 - .../local_ratelimit/v3/local_rate_limit.proto | 109 - .../extensions/filters/http/lua/v3/BUILD | 12 - .../extensions/filters/http/lua/v3/lua.proto | 65 - .../filters/http/oauth2/v3alpha/BUILD | 15 - .../filters/http/oauth2/v3alpha/oauth.proto | 89 - .../filters/http/on_demand/v3/BUILD | 9 - .../filters/http/on_demand/v3/on_demand.proto | 20 - .../filters/http/original_src/v3/BUILD | 9 - .../http/original_src/v3/original_src.proto | 28 - .../filters/http/ratelimit/v3/BUILD | 12 - .../http/ratelimit/v3/rate_limit.proto | 122 - .../extensions/filters/http/rbac/v3/BUILD | 12 - .../filters/http/rbac/v3/rbac.proto | 49 - 
.../extensions/filters/http/router/v3/BUILD | 12 - .../filters/http/router/v3/router.proto | 91 - .../filters/http/set_metadata/v3/BUILD | 9 - .../http/set_metadata/v3/set_metadata.proto | 30 - .../extensions/filters/http/tap/v3/BUILD | 12 - .../extensions/filters/http/tap/v3/tap.proto | 28 - .../extensions/filters/http/wasm/v3/BUILD | 12 - .../filters/http/wasm/v3/wasm.proto | 21 - .../filters/listener/http_inspector/v3/BUILD | 9 - .../http_inspector/v3/http_inspector.proto | 20 - .../filters/listener/original_dst/v3/BUILD | 9 - .../original_dst/v3/original_dst.proto | 20 - .../filters/listener/original_src/v3/BUILD | 9 - .../original_src/v3/original_src.proto | 32 - .../filters/listener/proxy_protocol/v3/BUILD | 9 - .../proxy_protocol/v3/proxy_protocol.proto | 43 - .../filters/listener/tls_inspector/v3/BUILD | 9 - .../tls_inspector/v3/tls_inspector.proto | 20 - .../filters/network/client_ssl_auth/v3/BUILD | 12 - .../client_ssl_auth/v3/client_ssl_auth.proto | 50 - .../filters/network/connection_limit/v3/BUILD | 12 - .../v3/connection_limit.proto | 41 - .../filters/network/direct_response/v3/BUILD | 12 - .../network/direct_response/v3/config.proto | 25 - .../network/dubbo_proxy/router/v3/BUILD | 9 - .../dubbo_proxy/router/v3/router.proto | 19 - .../filters/network/dubbo_proxy/v3/BUILD | 14 - .../network/dubbo_proxy/v3/dubbo_proxy.proto | 70 - .../network/dubbo_proxy/v3/route.proto | 129 - .../extensions/filters/network/echo/v3/BUILD | 9 - .../filters/network/echo/v3/echo.proto | 20 - .../filters/network/ext_authz/v3/BUILD | 13 - .../network/ext_authz/v3/ext_authz.proto | 64 - .../network/http_connection_manager/v3/BUILD | 20 - .../v3/http_connection_manager.proto | 1018 -------- .../filters/network/local_ratelimit/v3/BUILD | 13 - .../local_ratelimit/v3/local_rate_limit.proto | 46 - .../filters/network/mongo_proxy/v3/BUILD | 12 - .../network/mongo_proxy/v3/mongo_proxy.proto | 48 - .../filters/network/ratelimit/v3/BUILD | 13 - 
.../network/ratelimit/v3/rate_limit.proto | 53 - .../extensions/filters/network/rbac/v3/BUILD | 12 - .../filters/network/rbac/v3/rbac.proto | 64 - .../filters/network/redis_proxy/v3/BUILD | 14 - .../network/redis_proxy/v3/redis_proxy.proto | 324 --- .../filters/network/sni_cluster/v3/BUILD | 9 - .../network/sni_cluster/v3/sni_cluster.proto | 20 - .../sni_dynamic_forward_proxy/v3alpha/BUILD | 12 - .../v3alpha/sni_dynamic_forward_proxy.proto | 36 - .../filters/network/tcp_proxy/v3/BUILD | 16 - .../network/tcp_proxy/v3/tcp_proxy.proto | 179 -- .../thrift_proxy/filters/ratelimit/v3/BUILD | 12 - .../filters/ratelimit/v3/rate_limit.proto | 56 - .../network/thrift_proxy/router/v3/BUILD | 12 - .../thrift_proxy/router/v3/router.proto | 20 - .../filters/network/thrift_proxy/v3/BUILD | 15 - .../network/thrift_proxy/v3/route.proto | 183 -- .../thrift_proxy/v3/thrift_proxy.proto | 141 -- .../extensions/filters/network/wasm/v3/BUILD | 12 - .../filters/network/wasm/v3/wasm.proto | 21 - .../filters/network/zookeeper_proxy/v3/BUILD | 9 - .../zookeeper_proxy/v3/zookeeper_proxy.proto | 41 - .../filters/udp/dns_filter/v3alpha/BUILD | 14 - .../udp/dns_filter/v3alpha/dns_filter.proto | 90 - .../extensions/filters/udp/udp_proxy/v3/BUILD | 12 - .../filters/udp/udp_proxy/v3/udp_proxy.proto | 85 - .../extensions/formatter/metadata/v3/BUILD | 9 - .../formatter/metadata/v3/metadata.proto | 56 - .../formatter/req_without_query/v3/BUILD | 9 - .../v3/req_without_query.proto | 29 - .../extensions/health_checkers/redis/v3/BUILD | 12 - .../health_checkers/redis/v3/redis.proto | 26 - .../header_formatters/preserve_case/v3/BUILD | 9 - .../preserve_case/v3/preserve_case.proto | 19 - .../custom_header/v3/BUILD | 12 - .../custom_header/v3/custom_header.proto | 43 - .../http/original_ip_detection/xff/v3/BUILD | 9 - .../original_ip_detection/xff/v3/xff.proto | 25 - .../allow_listed_routes/v3/BUILD | 9 - .../v3/allow_listed_routes_config.proto | 23 - .../previous_routes/v3/BUILD | 9 - 
.../v3/previous_routes_config.proto | 18 - .../safe_cross_scheme/v3/BUILD | 9 - .../v3/safe_cross_scheme_config.proto | 21 - .../extensions/key_value/file_based/v3/BUILD | 9 - .../key_value/file_based/v3/config.proto | 27 - .../environment_variable/v3/BUILD | 9 - .../environment_variable/v3/input.proto | 20 - .../consistent_hashing/v3/BUILD | 9 - .../v3/consistent_hashing.proto | 37 - .../matching/input_matchers/ip/v3/BUILD | 12 - .../matching/input_matchers/ip/v3/ip.proto | 38 - .../network/socket_interface/v3/BUILD | 9 - .../v3/default_socket_interface.proto | 17 - .../extensions/quic/crypto_stream/v3/BUILD | 9 - .../quic/crypto_stream/v3/crypto_stream.proto | 17 - .../extensions/quic/proof_source/v3/BUILD | 9 - .../quic/proof_source/v3/proof_source.proto | 17 - .../rate_limit_descriptors/expr/v3/BUILD | 12 - .../rate_limit_descriptors/expr/v3/expr.proto | 41 - .../envoy/extensions/request_id/uuid/v3/BUILD | 9 - .../extensions/request_id/uuid/v3/uuid.proto | 48 - .../resource_monitors/fixed_heap/v3/BUILD | 12 - .../fixed_heap/v3/fixed_heap.proto | 25 - .../injected_resource/v3/BUILD | 12 - .../v3/injected_resource.proto | 26 - .../retry/host/omit_canary_hosts/v3/BUILD | 12 - .../v3/omit_canary_hosts.proto | 19 - .../retry/host/omit_host_metadata/v3/BUILD | 12 - .../v3/omit_host_metadata_config.proto | 29 - .../retry/host/previous_hosts/v3/BUILD | 12 - .../previous_hosts/v3/previous_hosts.proto | 19 - .../priority/previous_priorities/v3/BUILD | 9 - .../v3/previous_priorities_config.proto | 57 - .../stat_sinks/graphite_statsd/v3/BUILD | 12 - .../graphite_statsd/v3/graphite_statsd.proto | 44 - .../envoy/extensions/stat_sinks/wasm/v3/BUILD | 12 - .../extensions/stat_sinks/wasm/v3/wasm.proto | 21 - .../transport_sockets/alts/v3/BUILD | 12 - .../transport_sockets/alts/v3/alts.proto | 31 - .../transport_sockets/proxy_protocol/v3/BUILD | 12 - .../v3/upstream_proxy_protocol.proto | 26 - .../transport_sockets/quic/v3/BUILD | 12 - .../quic/v3/quic_transport.proto | 27 - 
.../transport_sockets/raw_buffer/v3/BUILD | 9 - .../raw_buffer/v3/raw_buffer.proto | 20 - .../transport_sockets/s2a/v3alpha/BUILD | 9 - .../transport_sockets/s2a/v3alpha/s2a.proto | 22 - .../transport_sockets/starttls/v3/BUILD | 13 - .../starttls/v3/starttls.proto | 51 - .../extensions/transport_sockets/tap/v3/BUILD | 13 - .../transport_sockets/tap/v3/tap.proto | 32 - .../extensions/transport_sockets/tls/v3/BUILD | 15 - .../transport_sockets/tls/v3/cert.proto | 11 - .../transport_sockets/tls/v3/common.proto | 441 ---- .../transport_sockets/tls/v3/secret.proto | 55 - .../transport_sockets/tls/v3/tls.proto | 302 --- .../tls/v3/tls_spiffe_validator_config.proto | 59 - .../upstreams/http/generic/v3/BUILD | 9 - .../generic/v3/generic_connection_pool.proto | 18 - .../extensions/upstreams/http/http/v3/BUILD | 9 - .../http/http/v3/http_connection_pool.proto | 17 - .../extensions/upstreams/http/tcp/v3/BUILD | 9 - .../http/tcp/v3/tcp_connection_pool.proto | 17 - .../envoy/extensions/upstreams/http/v3/BUILD | 12 - .../http/v3/http_protocol_options.proto | 151 -- .../extensions/upstreams/tcp/generic/v3/BUILD | 9 - .../generic/v3/generic_connection_pool.proto | 18 - .../envoy/extensions/wasm/v3/BUILD | 12 - .../envoy/extensions/wasm/v3/wasm.proto | 165 -- .../watchdog/profile_action/v3alpha/BUILD | 9 - .../v3alpha/profile_action.proto | 31 - generated_api_shadow/envoy/service/README.md | 3 - .../envoy/service/accesslog/v2/BUILD | 14 - .../envoy/service/accesslog/v2/als.proto | 72 - .../envoy/service/accesslog/v3/BUILD | 15 - .../envoy/service/accesslog/v3/als.proto | 87 - .../envoy/service/auth/v2/BUILD | 14 - .../service/auth/v2/attribute_context.proto | 160 -- .../envoy/service/auth/v2/external_auth.proto | 82 - .../envoy/service/auth/v2alpha/BUILD | 10 - .../service/auth/v2alpha/external_auth.proto | 23 - .../envoy/service/auth/v3/BUILD | 16 - .../service/auth/v3/attribute_context.proto | 177 -- .../envoy/service/auth/v3/external_auth.proto | 136 -- 
.../envoy/service/cluster/v3/BUILD | 15 - .../envoy/service/cluster/v3/cds.proto | 43 - .../envoy/service/discovery/v2/BUILD | 16 - .../envoy/service/discovery/v2/ads.proto | 42 - .../envoy/service/discovery/v2/hds.proto | 138 -- .../envoy/service/discovery/v2/rtds.proto | 54 - .../envoy/service/discovery/v2/sds.proto | 41 - .../envoy/service/discovery/v3/BUILD | 15 - .../envoy/service/discovery/v3/ads.proto | 44 - .../service/discovery/v3/discovery.proto | 279 --- .../envoy/service/endpoint/v3/BUILD | 15 - .../envoy/service/endpoint/v3/eds.proto | 45 - .../envoy/service/endpoint/v3/leds.proto | 37 - .../service/event_reporting/v2alpha/BUILD | 13 - .../v2alpha/event_reporting_service.proto | 62 - .../envoy/service/event_reporting/v3/BUILD | 13 - .../v3/event_reporting_service.proto | 69 - .../envoy/service/ext_proc/v3alpha/BUILD | 15 - .../ext_proc/v3alpha/external_processor.proto | 331 --- .../envoy/service/extension/v3/BUILD | 14 - .../extension/v3/config_discovery.proto | 43 - .../envoy/service/health/v3/BUILD | 17 - .../envoy/service/health/v3/hds.proto | 193 -- .../envoy/service/listener/v3/BUILD | 15 - .../envoy/service/listener/v3/lds.proto | 47 - .../envoy/service/load_stats/v2/BUILD | 14 - .../envoy/service/load_stats/v2/lrs.proto | 88 - .../envoy/service/load_stats/v3/BUILD | 15 - .../envoy/service/load_stats/v3/lrs.proto | 102 - .../envoy/service/metrics/v2/BUILD | 14 - .../service/metrics/v2/metrics_service.proto | 44 - .../envoy/service/metrics/v3/BUILD | 15 - .../service/metrics/v3/metrics_service.proto | 53 - .../envoy/service/ratelimit/v2/BUILD | 14 - .../envoy/service/ratelimit/v2/rls.proto | 115 - .../envoy/service/ratelimit/v3/BUILD | 15 - .../envoy/service/ratelimit/v3/rls.proto | 196 -- .../envoy/service/route/v3/BUILD | 15 - .../envoy/service/route/v3/rds.proto | 65 - .../envoy/service/route/v3/srds.proto | 50 - .../envoy/service/runtime/v3/BUILD | 15 - .../envoy/service/runtime/v3/rtds.proto | 58 - .../envoy/service/secret/v3/BUILD | 15 - 
.../envoy/service/secret/v3/sds.proto | 43 - .../envoy/service/status/v2/BUILD | 15 - .../envoy/service/status/v2/csds.proto | 88 - .../envoy/service/status/v3/BUILD | 17 - .../envoy/service/status/v3/csds.proto | 191 -- .../envoy/service/tap/v2alpha/BUILD | 15 - .../envoy/service/tap/v2alpha/common.proto | 205 -- .../envoy/service/tap/v2alpha/tap.proto | 55 - .../envoy/service/tap/v3/BUILD | 15 - .../envoy/service/tap/v3/tap.proto | 64 - .../envoy/service/trace/v2/BUILD | 14 - .../service/trace/v2/trace_service.proto | 46 - .../envoy/service/trace/v3/BUILD | 14 - .../service/trace/v3/trace_service.proto | 55 - generated_api_shadow/envoy/type/BUILD | 9 - .../envoy/type/hash_policy.proto | 27 - generated_api_shadow/envoy/type/http.proto | 23 - generated_api_shadow/envoy/type/http/v3/BUILD | 9 - .../type/http/v3/path_transformation.proto | 57 - .../envoy/type/http_status.proto | 139 -- generated_api_shadow/envoy/type/matcher/BUILD | 13 - .../envoy/type/matcher/metadata.proto | 98 - .../envoy/type/matcher/node.proto | 25 - .../envoy/type/matcher/number.proto | 29 - .../envoy/type/matcher/path.proto | 27 - .../envoy/type/matcher/regex.proto | 78 - .../envoy/type/matcher/string.proto | 79 - .../envoy/type/matcher/struct.proto | 84 - .../envoy/type/matcher/v3/BUILD | 14 - .../envoy/type/matcher/v3/http_inputs.proto | 57 - .../envoy/type/matcher/v3/metadata.proto | 107 - .../envoy/type/matcher/v3/node.proto | 28 - .../envoy/type/matcher/v3/number.proto | 32 - .../envoy/type/matcher/v3/path.proto | 30 - .../envoy/type/matcher/v3/regex.proto | 89 - .../envoy/type/matcher/v3/string.proto | 81 - .../envoy/type/matcher/v3/struct.proto | 90 - .../envoy/type/matcher/v3/value.proto | 71 - .../envoy/type/matcher/value.proto | 64 - .../envoy/type/metadata/v2/BUILD | 9 - .../envoy/type/metadata/v2/metadata.proto | 99 - .../envoy/type/metadata/v3/BUILD | 12 - .../envoy/type/metadata/v3/metadata.proto | 114 - generated_api_shadow/envoy/type/percent.proto | 51 - 
generated_api_shadow/envoy/type/range.proto | 42 - .../envoy/type/semantic_version.proto | 23 - .../envoy/type/token_bucket.proto | 35 - .../envoy/type/tracing/v2/BUILD | 12 - .../envoy/type/tracing/v2/custom_tag.proto | 86 - .../envoy/type/tracing/v3/BUILD | 13 - .../envoy/type/tracing/v3/custom_tag.proto | 101 - generated_api_shadow/envoy/type/v3/BUILD | 12 - .../envoy/type/v3/hash_policy.proto | 31 - generated_api_shadow/envoy/type/v3/http.proto | 23 - .../envoy/type/v3/http_status.proto | 142 -- .../envoy/type/v3/percent.proto | 56 - .../envoy/type/v3/range.proto | 49 - .../envoy/type/v3/ratelimit_unit.proto | 30 - .../envoy/type/v3/semantic_version.proto | 26 - .../envoy/type/v3/token_bucket.proto | 38 - .../envoy/watchdog/v3alpha/BUILD | 9 - .../envoy/watchdog/v3alpha/abort_action.proto | 27 - .../config/subscription_factory_impl.cc | 2 +- source/common/router/config_impl.cc | 1 - source/common/tcp_proxy/tcp_proxy.cc | 68 +- source/common/tcp_proxy/tcp_proxy.h | 15 +- .../filters/http/jwt_authn/matcher.cc | 1 - .../extensions/tracers/zipkin/span_buffer.cc | 2 +- source/server/config_validation/server.cc | 4 +- test/common/config/BUILD | 6 - test/common/config/api_shadow_test.cc | 20 - .../upstream/cluster_manager_impl_test.cc | 5 +- .../tracers/zipkin/span_buffer_test.cc | 2 +- .../http_subset_lb_integration_test.cc | 3 +- test/server/options_impl_test.cc | 4 +- test/server/server_fuzz_test.cc | 3 - test/tools/router_check/router.cc | 6 - test/tools/type_whisperer/api_type_db_test.cc | 10 +- tools/api_boost/README.md | 28 - tools/api_boost/api_boost.py | 202 -- tools/api_boost/api_boost_test.py | 94 - tools/api_boost/testdata/BUILD | 66 - tools/api_boost/testdata/decl_ref_expr.cc | 44 - .../api_boost/testdata/decl_ref_expr.cc.gold | 45 - tools/api_boost/testdata/deprecate.cc | 11 - tools/api_boost/testdata/deprecate.cc.gold | 11 - tools/api_boost/testdata/elaborated_type.cc | 10 - .../testdata/elaborated_type.cc.gold | 10 - 
tools/api_boost/testdata/no_boost_file.cc | 12 - .../api_boost/testdata/no_boost_file.cc.gold | 12 - tools/api_boost/testdata/rename.cc | 7 - tools/api_boost/testdata/rename.cc.gold | 7 - tools/api_boost/testdata/using_decl.cc | 11 - tools/api_boost/testdata/using_decl.cc.gold | 11 - tools/api_boost/testdata/validate.cc | 10 - tools/api_boost/testdata/validate.cc.gold | 10 - .../api_proto_breaking_change_detector/BUILD | 2 +- .../detector_test.py | 3 +- tools/api_proto_plugin/utils.py | 8 +- tools/clang_tools/api_booster/BUILD | 37 - tools/clang_tools/api_booster/main.cc | 598 ----- .../api_booster/proto_cxx_utils.cc | 102 - .../clang_tools/api_booster/proto_cxx_utils.h | 47 - .../api_booster/proto_cxx_utils_test.cc | 72 - tools/code_format/check_format.py | 35 +- tools/dependency/BUILD | 4 +- tools/dependency/exports.py | 2 +- tools/dependency/validate.py | 2 +- tools/docs/generate_api_rst.py | 2 +- tools/proto_format/proto_format.sh | 17 +- tools/proto_format/proto_sync.py | 48 +- tools/protodoc/BUILD | 2 +- tools/protoxform/BUILD | 34 +- tools/protoxform/merge_active_shadow.py | 239 -- tools/protoxform/merge_active_shadow_test.py | 590 ----- tools/protoxform/migrate.py | 276 --- tools/protoxform/protoxform.bzl | 6 +- tools/protoxform/protoxform.py | 21 +- tools/protoxform/protoxform_test.sh | 11 - tools/protoxform/protoxform_test_helper.py | 2 - tools/testdata/protoxform/BUILD | 11 - .../envoy/active_non_terminal/v2/BUILD | 13 - .../v2/active_non_terminal.proto | 13 - ...e_non_terminal.proto.active_or_frozen.gold | 17 - ...ajor_version_candidate.envoy_internal.gold | 22 - ...al.proto.next_major_version_candidate.gold | 22 - .../protoxform/envoy/active_terminal/v2/BUILD | 10 - .../active_terminal/v2/active_terminal.proto | 11 - ...ctive_terminal.proto.active_or_frozen.gold | 14 - ...ajor_version_candidate.envoy_internal.gold | 0 ...al.proto.next_major_version_candidate.gold | 0 .../testdata/protoxform/envoy/frozen/v2/BUILD | 16 - 
.../protoxform/envoy/frozen/v2/frozen.proto | 12 - .../v2/frozen.proto.active_or_frozen.gold | 16 - ...ajor_version_candidate.envoy_internal.gold | 21 - ...en.proto.next_major_version_candidate.gold | 21 - .../v2/frozen_versioned_deprecation.proto | 14 - ...ed_deprecation.proto.active_or_frozen.gold | 19 - ...ajor_version_candidate.envoy_internal.gold | 25 - ...on.proto.next_major_version_candidate.gold | 22 - .../testdata/protoxform/envoy/frozen/v3/BUILD | 13 - .../protoxform/envoy/frozen/v3/frozen.proto | 12 - .../v3/frozen.proto.active_or_frozen.gold | 16 - ...ajor_version_candidate.envoy_internal.gold | 0 ...en.proto.next_major_version_candidate.gold | 0 .../v3/frozen_versioned_deprecation.proto | 12 - ...ed_deprecation.proto.active_or_frozen.gold | 16 - ...ajor_version_candidate.envoy_internal.gold | 0 ...on.proto.next_major_version_candidate.gold | 0 tools/testdata/protoxform/envoy/v2/BUILD | 15 - ...ajor_version_candidate.envoy_internal.gold | 39 - ...ce.proto.next_major_version_candidate.gold | 39 - ...ajor_version_candidate.envoy_internal.gold | 39 - ...es.proto.next_major_version_candidate.gold | 39 - ...ajor_version_candidate.envoy_internal.gold | 25 - ...of.proto.next_major_version_candidate.gold | 25 - ...ajor_version_candidate.envoy_internal.gold | 25 - ...ve.proto.next_major_version_candidate.gold | 25 - ...ajor_version_candidate.envoy_internal.gold | 63 - ...le.proto.next_major_version_candidate.gold | 42 - tools/type_whisperer/BUILD | 6 +- .../file_descriptor_set_text.bzl | 2 +- tools/type_whisperer/proto_cc_source.bzl | 2 +- tools/type_whisperer/type_database.bzl | 2 +- tools/type_whisperer/typedb_gen.py | 12 +- 871 files changed, 81 insertions(+), 53462 deletions(-) delete mode 100644 generated_api_shadow/BUILD delete mode 100644 generated_api_shadow/README.md delete mode 100644 generated_api_shadow/bazel/BUILD delete mode 100644 generated_api_shadow/bazel/api_build_system.bzl delete mode 100644 generated_api_shadow/bazel/envoy_http_archive.bzl 
delete mode 100644 generated_api_shadow/bazel/external_deps.bzl delete mode 100644 generated_api_shadow/bazel/external_proto_deps.bzl delete mode 100644 generated_api_shadow/bazel/repositories.bzl delete mode 100644 generated_api_shadow/bazel/repository_locations.bzl delete mode 100644 generated_api_shadow/bazel/repository_locations_utils.bzl delete mode 100644 generated_api_shadow/contrib/envoy/extensions/filters/http/squash/v3/BUILD delete mode 100644 generated_api_shadow/contrib/envoy/extensions/filters/http/squash/v3/squash.proto delete mode 100644 generated_api_shadow/contrib/envoy/extensions/filters/http/sxg/v3alpha/BUILD delete mode 100644 generated_api_shadow/contrib/envoy/extensions/filters/http/sxg/v3alpha/sxg.proto delete mode 100644 generated_api_shadow/contrib/envoy/extensions/filters/network/kafka_broker/v3/BUILD delete mode 100644 generated_api_shadow/contrib/envoy/extensions/filters/network/kafka_broker/v3/kafka_broker.proto delete mode 100644 generated_api_shadow/contrib/envoy/extensions/filters/network/kafka_mesh/v3alpha/BUILD delete mode 100644 generated_api_shadow/contrib/envoy/extensions/filters/network/kafka_mesh/v3alpha/kafka_mesh.proto delete mode 100644 generated_api_shadow/contrib/envoy/extensions/filters/network/mysql_proxy/v3/BUILD delete mode 100644 generated_api_shadow/contrib/envoy/extensions/filters/network/mysql_proxy/v3/mysql_proxy.proto delete mode 100644 generated_api_shadow/contrib/envoy/extensions/filters/network/postgres_proxy/v3alpha/BUILD delete mode 100644 generated_api_shadow/contrib/envoy/extensions/filters/network/postgres_proxy/v3alpha/postgres_proxy.proto delete mode 100644 generated_api_shadow/contrib/envoy/extensions/filters/network/rocketmq_proxy/v3/BUILD delete mode 100644 generated_api_shadow/contrib/envoy/extensions/filters/network/rocketmq_proxy/v3/rocketmq_proxy.proto delete mode 100644 generated_api_shadow/contrib/envoy/extensions/filters/network/rocketmq_proxy/v3/route.proto delete mode 100644 
generated_api_shadow/envoy/admin/v2alpha/BUILD delete mode 100644 generated_api_shadow/envoy/admin/v2alpha/certs.proto delete mode 100644 generated_api_shadow/envoy/admin/v2alpha/clusters.proto delete mode 100644 generated_api_shadow/envoy/admin/v2alpha/config_dump.proto delete mode 100644 generated_api_shadow/envoy/admin/v2alpha/listeners.proto delete mode 100644 generated_api_shadow/envoy/admin/v2alpha/memory.proto delete mode 100644 generated_api_shadow/envoy/admin/v2alpha/metrics.proto delete mode 100644 generated_api_shadow/envoy/admin/v2alpha/mutex_stats.proto delete mode 100644 generated_api_shadow/envoy/admin/v2alpha/server_info.proto delete mode 100644 generated_api_shadow/envoy/admin/v2alpha/tap.proto delete mode 100644 generated_api_shadow/envoy/admin/v3/BUILD delete mode 100644 generated_api_shadow/envoy/admin/v3/certs.proto delete mode 100644 generated_api_shadow/envoy/admin/v3/clusters.proto delete mode 100644 generated_api_shadow/envoy/admin/v3/config_dump.proto delete mode 100644 generated_api_shadow/envoy/admin/v3/init_dump.proto delete mode 100644 generated_api_shadow/envoy/admin/v3/listeners.proto delete mode 100644 generated_api_shadow/envoy/admin/v3/memory.proto delete mode 100644 generated_api_shadow/envoy/admin/v3/metrics.proto delete mode 100644 generated_api_shadow/envoy/admin/v3/mutex_stats.proto delete mode 100644 generated_api_shadow/envoy/admin/v3/server_info.proto delete mode 100644 generated_api_shadow/envoy/admin/v3/tap.proto delete mode 100644 generated_api_shadow/envoy/annotations/BUILD delete mode 100644 generated_api_shadow/envoy/annotations/deprecation.proto delete mode 100644 generated_api_shadow/envoy/annotations/resource.proto delete mode 100644 generated_api_shadow/envoy/api/v2/BUILD delete mode 100644 generated_api_shadow/envoy/api/v2/README.md delete mode 100644 generated_api_shadow/envoy/api/v2/auth/BUILD delete mode 100644 generated_api_shadow/envoy/api/v2/auth/cert.proto delete mode 100644 
generated_api_shadow/envoy/api/v2/auth/common.proto delete mode 100644 generated_api_shadow/envoy/api/v2/auth/secret.proto delete mode 100644 generated_api_shadow/envoy/api/v2/auth/tls.proto delete mode 100644 generated_api_shadow/envoy/api/v2/cds.proto delete mode 100644 generated_api_shadow/envoy/api/v2/cluster.proto delete mode 100644 generated_api_shadow/envoy/api/v2/cluster/BUILD delete mode 100644 generated_api_shadow/envoy/api/v2/cluster/circuit_breaker.proto delete mode 100644 generated_api_shadow/envoy/api/v2/cluster/filter.proto delete mode 100644 generated_api_shadow/envoy/api/v2/cluster/outlier_detection.proto delete mode 100644 generated_api_shadow/envoy/api/v2/core/BUILD delete mode 100644 generated_api_shadow/envoy/api/v2/core/address.proto delete mode 100644 generated_api_shadow/envoy/api/v2/core/backoff.proto delete mode 100644 generated_api_shadow/envoy/api/v2/core/base.proto delete mode 100644 generated_api_shadow/envoy/api/v2/core/config_source.proto delete mode 100644 generated_api_shadow/envoy/api/v2/core/event_service_config.proto delete mode 100644 generated_api_shadow/envoy/api/v2/core/grpc_method_list.proto delete mode 100644 generated_api_shadow/envoy/api/v2/core/grpc_service.proto delete mode 100644 generated_api_shadow/envoy/api/v2/core/health_check.proto delete mode 100644 generated_api_shadow/envoy/api/v2/core/http_uri.proto delete mode 100644 generated_api_shadow/envoy/api/v2/core/protocol.proto delete mode 100644 generated_api_shadow/envoy/api/v2/core/socket_option.proto delete mode 100644 generated_api_shadow/envoy/api/v2/discovery.proto delete mode 100644 generated_api_shadow/envoy/api/v2/eds.proto delete mode 100644 generated_api_shadow/envoy/api/v2/endpoint.proto delete mode 100644 generated_api_shadow/envoy/api/v2/endpoint/BUILD delete mode 100644 generated_api_shadow/envoy/api/v2/endpoint/endpoint.proto delete mode 100644 generated_api_shadow/envoy/api/v2/endpoint/endpoint_components.proto delete mode 100644 
generated_api_shadow/envoy/api/v2/endpoint/load_report.proto delete mode 100644 generated_api_shadow/envoy/api/v2/lds.proto delete mode 100644 generated_api_shadow/envoy/api/v2/listener.proto delete mode 100644 generated_api_shadow/envoy/api/v2/listener/BUILD delete mode 100644 generated_api_shadow/envoy/api/v2/listener/listener.proto delete mode 100644 generated_api_shadow/envoy/api/v2/listener/listener_components.proto delete mode 100644 generated_api_shadow/envoy/api/v2/listener/quic_config.proto delete mode 100644 generated_api_shadow/envoy/api/v2/listener/udp_listener_config.proto delete mode 100644 generated_api_shadow/envoy/api/v2/ratelimit/BUILD delete mode 100644 generated_api_shadow/envoy/api/v2/ratelimit/ratelimit.proto delete mode 100644 generated_api_shadow/envoy/api/v2/rds.proto delete mode 100644 generated_api_shadow/envoy/api/v2/route.proto delete mode 100644 generated_api_shadow/envoy/api/v2/route/BUILD delete mode 100644 generated_api_shadow/envoy/api/v2/route/route.proto delete mode 100644 generated_api_shadow/envoy/api/v2/route/route_components.proto delete mode 100644 generated_api_shadow/envoy/api/v2/scoped_route.proto delete mode 100644 generated_api_shadow/envoy/api/v2/srds.proto delete mode 100644 generated_api_shadow/envoy/config/README.md delete mode 100644 generated_api_shadow/envoy/config/accesslog/v2/BUILD delete mode 100644 generated_api_shadow/envoy/config/accesslog/v2/als.proto delete mode 100644 generated_api_shadow/envoy/config/accesslog/v2/file.proto delete mode 100644 generated_api_shadow/envoy/config/accesslog/v3/BUILD delete mode 100644 generated_api_shadow/envoy/config/accesslog/v3/accesslog.proto delete mode 100644 generated_api_shadow/envoy/config/bootstrap/v2/BUILD delete mode 100644 generated_api_shadow/envoy/config/bootstrap/v2/bootstrap.proto delete mode 100644 generated_api_shadow/envoy/config/bootstrap/v3/BUILD delete mode 100644 generated_api_shadow/envoy/config/bootstrap/v3/bootstrap.proto delete mode 100644 
generated_api_shadow/envoy/config/cluster/aggregate/v2alpha/BUILD delete mode 100644 generated_api_shadow/envoy/config/cluster/aggregate/v2alpha/cluster.proto delete mode 100644 generated_api_shadow/envoy/config/cluster/dynamic_forward_proxy/v2alpha/BUILD delete mode 100644 generated_api_shadow/envoy/config/cluster/dynamic_forward_proxy/v2alpha/cluster.proto delete mode 100644 generated_api_shadow/envoy/config/cluster/redis/BUILD delete mode 100644 generated_api_shadow/envoy/config/cluster/redis/redis_cluster.proto delete mode 100644 generated_api_shadow/envoy/config/cluster/v3/BUILD delete mode 100644 generated_api_shadow/envoy/config/cluster/v3/circuit_breaker.proto delete mode 100644 generated_api_shadow/envoy/config/cluster/v3/cluster.proto delete mode 100644 generated_api_shadow/envoy/config/cluster/v3/filter.proto delete mode 100644 generated_api_shadow/envoy/config/cluster/v3/outlier_detection.proto delete mode 100644 generated_api_shadow/envoy/config/common/dynamic_forward_proxy/v2alpha/BUILD delete mode 100644 generated_api_shadow/envoy/config/common/dynamic_forward_proxy/v2alpha/dns_cache.proto delete mode 100644 generated_api_shadow/envoy/config/common/matcher/v3/BUILD delete mode 100644 generated_api_shadow/envoy/config/common/matcher/v3/matcher.proto delete mode 100644 generated_api_shadow/envoy/config/common/tap/v2alpha/BUILD delete mode 100644 generated_api_shadow/envoy/config/common/tap/v2alpha/common.proto delete mode 100644 generated_api_shadow/envoy/config/core/v3/BUILD delete mode 100644 generated_api_shadow/envoy/config/core/v3/address.proto delete mode 100644 generated_api_shadow/envoy/config/core/v3/backoff.proto delete mode 100644 generated_api_shadow/envoy/config/core/v3/base.proto delete mode 100644 generated_api_shadow/envoy/config/core/v3/config_source.proto delete mode 100644 generated_api_shadow/envoy/config/core/v3/event_service_config.proto delete mode 100644 generated_api_shadow/envoy/config/core/v3/extension.proto delete mode 
100644 generated_api_shadow/envoy/config/core/v3/grpc_method_list.proto delete mode 100644 generated_api_shadow/envoy/config/core/v3/grpc_service.proto delete mode 100644 generated_api_shadow/envoy/config/core/v3/health_check.proto delete mode 100644 generated_api_shadow/envoy/config/core/v3/http_uri.proto delete mode 100644 generated_api_shadow/envoy/config/core/v3/protocol.proto delete mode 100644 generated_api_shadow/envoy/config/core/v3/proxy_protocol.proto delete mode 100644 generated_api_shadow/envoy/config/core/v3/resolver.proto delete mode 100644 generated_api_shadow/envoy/config/core/v3/socket_option.proto delete mode 100644 generated_api_shadow/envoy/config/core/v3/substitution_format_string.proto delete mode 100644 generated_api_shadow/envoy/config/core/v3/udp_socket_config.proto delete mode 100644 generated_api_shadow/envoy/config/endpoint/v3/BUILD delete mode 100644 generated_api_shadow/envoy/config/endpoint/v3/endpoint.proto delete mode 100644 generated_api_shadow/envoy/config/endpoint/v3/endpoint_components.proto delete mode 100644 generated_api_shadow/envoy/config/endpoint/v3/load_report.proto delete mode 100644 generated_api_shadow/envoy/config/filter/README.md delete mode 100644 generated_api_shadow/envoy/config/filter/accesslog/v2/BUILD delete mode 100644 generated_api_shadow/envoy/config/filter/accesslog/v2/accesslog.proto delete mode 100644 generated_api_shadow/envoy/config/filter/dubbo/router/v2alpha1/BUILD delete mode 100644 generated_api_shadow/envoy/config/filter/dubbo/router/v2alpha1/router.proto delete mode 100644 generated_api_shadow/envoy/config/filter/fault/v2/BUILD delete mode 100644 generated_api_shadow/envoy/config/filter/fault/v2/fault.proto delete mode 100644 generated_api_shadow/envoy/config/filter/http/adaptive_concurrency/v2alpha/BUILD delete mode 100644 generated_api_shadow/envoy/config/filter/http/adaptive_concurrency/v2alpha/adaptive_concurrency.proto delete mode 100644 
generated_api_shadow/envoy/config/filter/http/aws_lambda/v2alpha/BUILD delete mode 100644 generated_api_shadow/envoy/config/filter/http/aws_lambda/v2alpha/aws_lambda.proto delete mode 100644 generated_api_shadow/envoy/config/filter/http/aws_request_signing/v2alpha/BUILD delete mode 100644 generated_api_shadow/envoy/config/filter/http/aws_request_signing/v2alpha/aws_request_signing.proto delete mode 100644 generated_api_shadow/envoy/config/filter/http/buffer/v2/BUILD delete mode 100644 generated_api_shadow/envoy/config/filter/http/buffer/v2/buffer.proto delete mode 100644 generated_api_shadow/envoy/config/filter/http/cache/v2alpha/BUILD delete mode 100644 generated_api_shadow/envoy/config/filter/http/cache/v2alpha/cache.proto delete mode 100644 generated_api_shadow/envoy/config/filter/http/compressor/v2/BUILD delete mode 100644 generated_api_shadow/envoy/config/filter/http/compressor/v2/compressor.proto delete mode 100644 generated_api_shadow/envoy/config/filter/http/cors/v2/BUILD delete mode 100644 generated_api_shadow/envoy/config/filter/http/cors/v2/cors.proto delete mode 100644 generated_api_shadow/envoy/config/filter/http/csrf/v2/BUILD delete mode 100644 generated_api_shadow/envoy/config/filter/http/csrf/v2/csrf.proto delete mode 100644 generated_api_shadow/envoy/config/filter/http/dynamic_forward_proxy/v2alpha/BUILD delete mode 100644 generated_api_shadow/envoy/config/filter/http/dynamic_forward_proxy/v2alpha/dynamic_forward_proxy.proto delete mode 100644 generated_api_shadow/envoy/config/filter/http/dynamo/v2/BUILD delete mode 100644 generated_api_shadow/envoy/config/filter/http/dynamo/v2/dynamo.proto delete mode 100644 generated_api_shadow/envoy/config/filter/http/ext_authz/v2/BUILD delete mode 100644 generated_api_shadow/envoy/config/filter/http/ext_authz/v2/ext_authz.proto delete mode 100644 generated_api_shadow/envoy/config/filter/http/fault/v2/BUILD delete mode 100644 generated_api_shadow/envoy/config/filter/http/fault/v2/fault.proto delete mode 100644 
generated_api_shadow/envoy/config/filter/http/grpc_http1_bridge/v2/BUILD delete mode 100644 generated_api_shadow/envoy/config/filter/http/grpc_http1_bridge/v2/config.proto delete mode 100644 generated_api_shadow/envoy/config/filter/http/grpc_http1_reverse_bridge/v2alpha1/BUILD delete mode 100644 generated_api_shadow/envoy/config/filter/http/grpc_http1_reverse_bridge/v2alpha1/config.proto delete mode 100644 generated_api_shadow/envoy/config/filter/http/grpc_stats/v2alpha/BUILD delete mode 100644 generated_api_shadow/envoy/config/filter/http/grpc_stats/v2alpha/config.proto delete mode 100644 generated_api_shadow/envoy/config/filter/http/grpc_web/v2/BUILD delete mode 100644 generated_api_shadow/envoy/config/filter/http/grpc_web/v2/grpc_web.proto delete mode 100644 generated_api_shadow/envoy/config/filter/http/gzip/v2/BUILD delete mode 100644 generated_api_shadow/envoy/config/filter/http/gzip/v2/gzip.proto delete mode 100644 generated_api_shadow/envoy/config/filter/http/header_to_metadata/v2/BUILD delete mode 100644 generated_api_shadow/envoy/config/filter/http/header_to_metadata/v2/header_to_metadata.proto delete mode 100644 generated_api_shadow/envoy/config/filter/http/health_check/v2/BUILD delete mode 100644 generated_api_shadow/envoy/config/filter/http/health_check/v2/health_check.proto delete mode 100644 generated_api_shadow/envoy/config/filter/http/ip_tagging/v2/BUILD delete mode 100644 generated_api_shadow/envoy/config/filter/http/ip_tagging/v2/ip_tagging.proto delete mode 100644 generated_api_shadow/envoy/config/filter/http/jwt_authn/v2alpha/BUILD delete mode 100644 generated_api_shadow/envoy/config/filter/http/jwt_authn/v2alpha/README.md delete mode 100644 generated_api_shadow/envoy/config/filter/http/jwt_authn/v2alpha/config.proto delete mode 100644 generated_api_shadow/envoy/config/filter/http/lua/v2/BUILD delete mode 100644 generated_api_shadow/envoy/config/filter/http/lua/v2/lua.proto delete mode 100644 
generated_api_shadow/envoy/config/filter/http/on_demand/v2/BUILD delete mode 100644 generated_api_shadow/envoy/config/filter/http/on_demand/v2/on_demand.proto delete mode 100644 generated_api_shadow/envoy/config/filter/http/original_src/v2alpha1/BUILD delete mode 100644 generated_api_shadow/envoy/config/filter/http/original_src/v2alpha1/original_src.proto delete mode 100644 generated_api_shadow/envoy/config/filter/http/rate_limit/v2/BUILD delete mode 100644 generated_api_shadow/envoy/config/filter/http/rate_limit/v2/rate_limit.proto delete mode 100644 generated_api_shadow/envoy/config/filter/http/rbac/v2/BUILD delete mode 100644 generated_api_shadow/envoy/config/filter/http/rbac/v2/rbac.proto delete mode 100644 generated_api_shadow/envoy/config/filter/http/router/v2/BUILD delete mode 100644 generated_api_shadow/envoy/config/filter/http/router/v2/router.proto delete mode 100644 generated_api_shadow/envoy/config/filter/http/squash/v2/BUILD delete mode 100644 generated_api_shadow/envoy/config/filter/http/squash/v2/squash.proto delete mode 100644 generated_api_shadow/envoy/config/filter/http/tap/v2alpha/BUILD delete mode 100644 generated_api_shadow/envoy/config/filter/http/tap/v2alpha/tap.proto delete mode 100644 generated_api_shadow/envoy/config/filter/http/transcoder/v2/BUILD delete mode 100644 generated_api_shadow/envoy/config/filter/http/transcoder/v2/transcoder.proto delete mode 100644 generated_api_shadow/envoy/config/filter/listener/http_inspector/v2/BUILD delete mode 100644 generated_api_shadow/envoy/config/filter/listener/http_inspector/v2/http_inspector.proto delete mode 100644 generated_api_shadow/envoy/config/filter/listener/original_dst/v2/BUILD delete mode 100644 generated_api_shadow/envoy/config/filter/listener/original_dst/v2/original_dst.proto delete mode 100644 generated_api_shadow/envoy/config/filter/listener/original_src/v2alpha1/BUILD delete mode 100644 generated_api_shadow/envoy/config/filter/listener/original_src/v2alpha1/original_src.proto 
delete mode 100644 generated_api_shadow/envoy/config/filter/listener/proxy_protocol/v2/BUILD delete mode 100644 generated_api_shadow/envoy/config/filter/listener/proxy_protocol/v2/proxy_protocol.proto delete mode 100644 generated_api_shadow/envoy/config/filter/listener/tls_inspector/v2/BUILD delete mode 100644 generated_api_shadow/envoy/config/filter/listener/tls_inspector/v2/tls_inspector.proto delete mode 100644 generated_api_shadow/envoy/config/filter/network/client_ssl_auth/v2/BUILD delete mode 100644 generated_api_shadow/envoy/config/filter/network/client_ssl_auth/v2/client_ssl_auth.proto delete mode 100644 generated_api_shadow/envoy/config/filter/network/direct_response/v2/BUILD delete mode 100644 generated_api_shadow/envoy/config/filter/network/direct_response/v2/config.proto delete mode 100644 generated_api_shadow/envoy/config/filter/network/dubbo_proxy/v2alpha1/BUILD delete mode 100644 generated_api_shadow/envoy/config/filter/network/dubbo_proxy/v2alpha1/README.md delete mode 100644 generated_api_shadow/envoy/config/filter/network/dubbo_proxy/v2alpha1/dubbo_proxy.proto delete mode 100644 generated_api_shadow/envoy/config/filter/network/dubbo_proxy/v2alpha1/route.proto delete mode 100644 generated_api_shadow/envoy/config/filter/network/echo/v2/BUILD delete mode 100644 generated_api_shadow/envoy/config/filter/network/echo/v2/echo.proto delete mode 100644 generated_api_shadow/envoy/config/filter/network/ext_authz/v2/BUILD delete mode 100644 generated_api_shadow/envoy/config/filter/network/ext_authz/v2/ext_authz.proto delete mode 100644 generated_api_shadow/envoy/config/filter/network/http_connection_manager/v2/BUILD delete mode 100644 generated_api_shadow/envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.proto delete mode 100644 generated_api_shadow/envoy/config/filter/network/kafka_broker/v2alpha1/BUILD delete mode 100644 generated_api_shadow/envoy/config/filter/network/kafka_broker/v2alpha1/kafka_broker.proto delete mode 100644 
generated_api_shadow/envoy/config/filter/network/local_rate_limit/v2alpha/BUILD delete mode 100644 generated_api_shadow/envoy/config/filter/network/local_rate_limit/v2alpha/local_rate_limit.proto delete mode 100644 generated_api_shadow/envoy/config/filter/network/mongo_proxy/v2/BUILD delete mode 100644 generated_api_shadow/envoy/config/filter/network/mongo_proxy/v2/mongo_proxy.proto delete mode 100644 generated_api_shadow/envoy/config/filter/network/mysql_proxy/v1alpha1/BUILD delete mode 100644 generated_api_shadow/envoy/config/filter/network/mysql_proxy/v1alpha1/mysql_proxy.proto delete mode 100644 generated_api_shadow/envoy/config/filter/network/rate_limit/v2/BUILD delete mode 100644 generated_api_shadow/envoy/config/filter/network/rate_limit/v2/rate_limit.proto delete mode 100644 generated_api_shadow/envoy/config/filter/network/rbac/v2/BUILD delete mode 100644 generated_api_shadow/envoy/config/filter/network/rbac/v2/rbac.proto delete mode 100644 generated_api_shadow/envoy/config/filter/network/redis_proxy/v2/BUILD delete mode 100644 generated_api_shadow/envoy/config/filter/network/redis_proxy/v2/redis_proxy.proto delete mode 100644 generated_api_shadow/envoy/config/filter/network/sni_cluster/v2/BUILD delete mode 100644 generated_api_shadow/envoy/config/filter/network/sni_cluster/v2/sni_cluster.proto delete mode 100644 generated_api_shadow/envoy/config/filter/network/tcp_proxy/v2/BUILD delete mode 100644 generated_api_shadow/envoy/config/filter/network/tcp_proxy/v2/tcp_proxy.proto delete mode 100644 generated_api_shadow/envoy/config/filter/network/thrift_proxy/v2alpha1/BUILD delete mode 100644 generated_api_shadow/envoy/config/filter/network/thrift_proxy/v2alpha1/README.md delete mode 100644 generated_api_shadow/envoy/config/filter/network/thrift_proxy/v2alpha1/route.proto delete mode 100644 generated_api_shadow/envoy/config/filter/network/thrift_proxy/v2alpha1/thrift_proxy.proto delete mode 100644 
generated_api_shadow/envoy/config/filter/network/zookeeper_proxy/v1alpha1/BUILD delete mode 100644 generated_api_shadow/envoy/config/filter/network/zookeeper_proxy/v1alpha1/zookeeper_proxy.proto delete mode 100644 generated_api_shadow/envoy/config/filter/thrift/rate_limit/v2alpha1/BUILD delete mode 100644 generated_api_shadow/envoy/config/filter/thrift/rate_limit/v2alpha1/rate_limit.proto delete mode 100644 generated_api_shadow/envoy/config/filter/thrift/router/v2alpha1/BUILD delete mode 100644 generated_api_shadow/envoy/config/filter/thrift/router/v2alpha1/router.proto delete mode 100644 generated_api_shadow/envoy/config/filter/udp/udp_proxy/v2alpha/BUILD delete mode 100644 generated_api_shadow/envoy/config/filter/udp/udp_proxy/v2alpha/udp_proxy.proto delete mode 100644 generated_api_shadow/envoy/config/grpc_credential/v2alpha/BUILD delete mode 100644 generated_api_shadow/envoy/config/grpc_credential/v2alpha/aws_iam.proto delete mode 100644 generated_api_shadow/envoy/config/grpc_credential/v2alpha/file_based_metadata.proto delete mode 100644 generated_api_shadow/envoy/config/grpc_credential/v3/BUILD delete mode 100644 generated_api_shadow/envoy/config/grpc_credential/v3/aws_iam.proto delete mode 100644 generated_api_shadow/envoy/config/grpc_credential/v3/file_based_metadata.proto delete mode 100644 generated_api_shadow/envoy/config/health_checker/redis/v2/BUILD delete mode 100644 generated_api_shadow/envoy/config/health_checker/redis/v2/redis.proto delete mode 100644 generated_api_shadow/envoy/config/listener/v2/BUILD delete mode 100644 generated_api_shadow/envoy/config/listener/v2/api_listener.proto delete mode 100644 generated_api_shadow/envoy/config/listener/v3/BUILD delete mode 100644 generated_api_shadow/envoy/config/listener/v3/api_listener.proto delete mode 100644 generated_api_shadow/envoy/config/listener/v3/listener.proto delete mode 100644 generated_api_shadow/envoy/config/listener/v3/listener_components.proto delete mode 100644 
generated_api_shadow/envoy/config/listener/v3/quic_config.proto delete mode 100644 generated_api_shadow/envoy/config/listener/v3/udp_listener_config.proto delete mode 100644 generated_api_shadow/envoy/config/metrics/v2/BUILD delete mode 100644 generated_api_shadow/envoy/config/metrics/v2/metrics_service.proto delete mode 100644 generated_api_shadow/envoy/config/metrics/v2/stats.proto delete mode 100644 generated_api_shadow/envoy/config/metrics/v3/BUILD delete mode 100644 generated_api_shadow/envoy/config/metrics/v3/metrics_service.proto delete mode 100644 generated_api_shadow/envoy/config/metrics/v3/stats.proto delete mode 100644 generated_api_shadow/envoy/config/overload/v2alpha/BUILD delete mode 100644 generated_api_shadow/envoy/config/overload/v2alpha/overload.proto delete mode 100644 generated_api_shadow/envoy/config/overload/v3/BUILD delete mode 100644 generated_api_shadow/envoy/config/overload/v3/overload.proto delete mode 100644 generated_api_shadow/envoy/config/ratelimit/v2/BUILD delete mode 100644 generated_api_shadow/envoy/config/ratelimit/v2/rls.proto delete mode 100644 generated_api_shadow/envoy/config/ratelimit/v3/BUILD delete mode 100644 generated_api_shadow/envoy/config/ratelimit/v3/rls.proto delete mode 100644 generated_api_shadow/envoy/config/rbac/v2/BUILD delete mode 100644 generated_api_shadow/envoy/config/rbac/v2/rbac.proto delete mode 100644 generated_api_shadow/envoy/config/rbac/v3/BUILD delete mode 100644 generated_api_shadow/envoy/config/rbac/v3/rbac.proto delete mode 100644 generated_api_shadow/envoy/config/resource_monitor/fixed_heap/v2alpha/BUILD delete mode 100644 generated_api_shadow/envoy/config/resource_monitor/fixed_heap/v2alpha/fixed_heap.proto delete mode 100644 generated_api_shadow/envoy/config/resource_monitor/injected_resource/v2alpha/BUILD delete mode 100644 generated_api_shadow/envoy/config/resource_monitor/injected_resource/v2alpha/injected_resource.proto delete mode 100644 
generated_api_shadow/envoy/config/retry/omit_canary_hosts/v2/BUILD delete mode 100644 generated_api_shadow/envoy/config/retry/omit_canary_hosts/v2/omit_canary_hosts.proto delete mode 100644 generated_api_shadow/envoy/config/retry/omit_host_metadata/v2/BUILD delete mode 100644 generated_api_shadow/envoy/config/retry/omit_host_metadata/v2/omit_host_metadata_config.proto delete mode 100644 generated_api_shadow/envoy/config/retry/previous_hosts/v2/BUILD delete mode 100644 generated_api_shadow/envoy/config/retry/previous_hosts/v2/previous_hosts.proto delete mode 100644 generated_api_shadow/envoy/config/retry/previous_priorities/BUILD delete mode 100644 generated_api_shadow/envoy/config/retry/previous_priorities/previous_priorities_config.proto delete mode 100644 generated_api_shadow/envoy/config/route/v3/BUILD delete mode 100644 generated_api_shadow/envoy/config/route/v3/route.proto delete mode 100644 generated_api_shadow/envoy/config/route/v3/route_components.proto delete mode 100644 generated_api_shadow/envoy/config/route/v3/scoped_route.proto delete mode 100644 generated_api_shadow/envoy/config/tap/v3/BUILD delete mode 100644 generated_api_shadow/envoy/config/tap/v3/common.proto delete mode 100644 generated_api_shadow/envoy/config/trace/v2/BUILD delete mode 100644 generated_api_shadow/envoy/config/trace/v2/datadog.proto delete mode 100644 generated_api_shadow/envoy/config/trace/v2/dynamic_ot.proto delete mode 100644 generated_api_shadow/envoy/config/trace/v2/http_tracer.proto delete mode 100644 generated_api_shadow/envoy/config/trace/v2/lightstep.proto delete mode 100644 generated_api_shadow/envoy/config/trace/v2/opencensus.proto delete mode 100644 generated_api_shadow/envoy/config/trace/v2/service.proto delete mode 100644 generated_api_shadow/envoy/config/trace/v2/trace.proto delete mode 100644 generated_api_shadow/envoy/config/trace/v2/zipkin.proto delete mode 100644 generated_api_shadow/envoy/config/trace/v2alpha/BUILD delete mode 100644 
generated_api_shadow/envoy/config/trace/v2alpha/xray.proto delete mode 100644 generated_api_shadow/envoy/config/trace/v3/BUILD delete mode 100644 generated_api_shadow/envoy/config/trace/v3/datadog.proto delete mode 100644 generated_api_shadow/envoy/config/trace/v3/dynamic_ot.proto delete mode 100644 generated_api_shadow/envoy/config/trace/v3/http_tracer.proto delete mode 100644 generated_api_shadow/envoy/config/trace/v3/lightstep.proto delete mode 100644 generated_api_shadow/envoy/config/trace/v3/opencensus.proto delete mode 100644 generated_api_shadow/envoy/config/trace/v3/service.proto delete mode 100644 generated_api_shadow/envoy/config/trace/v3/skywalking.proto delete mode 100644 generated_api_shadow/envoy/config/trace/v3/trace.proto delete mode 100644 generated_api_shadow/envoy/config/trace/v3/xray.proto delete mode 100644 generated_api_shadow/envoy/config/trace/v3/zipkin.proto delete mode 100644 generated_api_shadow/envoy/config/transport_socket/alts/v2alpha/BUILD delete mode 100644 generated_api_shadow/envoy/config/transport_socket/alts/v2alpha/alts.proto delete mode 100644 generated_api_shadow/envoy/config/transport_socket/raw_buffer/v2/BUILD delete mode 100644 generated_api_shadow/envoy/config/transport_socket/raw_buffer/v2/raw_buffer.proto delete mode 100644 generated_api_shadow/envoy/config/transport_socket/tap/v2alpha/BUILD delete mode 100644 generated_api_shadow/envoy/config/transport_socket/tap/v2alpha/tap.proto delete mode 100644 generated_api_shadow/envoy/data/accesslog/v2/BUILD delete mode 100644 generated_api_shadow/envoy/data/accesslog/v2/accesslog.proto delete mode 100644 generated_api_shadow/envoy/data/accesslog/v3/BUILD delete mode 100644 generated_api_shadow/envoy/data/accesslog/v3/accesslog.proto delete mode 100644 generated_api_shadow/envoy/data/cluster/v2alpha/BUILD delete mode 100644 generated_api_shadow/envoy/data/cluster/v2alpha/outlier_detection_event.proto delete mode 100644 generated_api_shadow/envoy/data/cluster/v3/BUILD delete mode 
100644 generated_api_shadow/envoy/data/cluster/v3/outlier_detection_event.proto delete mode 100644 generated_api_shadow/envoy/data/core/v2alpha/BUILD delete mode 100644 generated_api_shadow/envoy/data/core/v2alpha/health_check_event.proto delete mode 100644 generated_api_shadow/envoy/data/core/v3/BUILD delete mode 100644 generated_api_shadow/envoy/data/core/v3/health_check_event.proto delete mode 100644 generated_api_shadow/envoy/data/dns/v2alpha/BUILD delete mode 100644 generated_api_shadow/envoy/data/dns/v2alpha/dns_table.proto delete mode 100644 generated_api_shadow/envoy/data/dns/v3/BUILD delete mode 100644 generated_api_shadow/envoy/data/dns/v3/dns_table.proto delete mode 100644 generated_api_shadow/envoy/data/tap/v2alpha/BUILD delete mode 100644 generated_api_shadow/envoy/data/tap/v2alpha/common.proto delete mode 100644 generated_api_shadow/envoy/data/tap/v2alpha/http.proto delete mode 100644 generated_api_shadow/envoy/data/tap/v2alpha/transport.proto delete mode 100644 generated_api_shadow/envoy/data/tap/v2alpha/wrapper.proto delete mode 100644 generated_api_shadow/envoy/data/tap/v3/BUILD delete mode 100644 generated_api_shadow/envoy/data/tap/v3/common.proto delete mode 100644 generated_api_shadow/envoy/data/tap/v3/http.proto delete mode 100644 generated_api_shadow/envoy/data/tap/v3/transport.proto delete mode 100644 generated_api_shadow/envoy/data/tap/v3/wrapper.proto delete mode 100644 generated_api_shadow/envoy/extensions/access_loggers/file/v3/BUILD delete mode 100644 generated_api_shadow/envoy/extensions/access_loggers/file/v3/file.proto delete mode 100644 generated_api_shadow/envoy/extensions/access_loggers/grpc/v3/BUILD delete mode 100644 generated_api_shadow/envoy/extensions/access_loggers/grpc/v3/als.proto delete mode 100644 generated_api_shadow/envoy/extensions/access_loggers/open_telemetry/v3alpha/BUILD delete mode 100644 generated_api_shadow/envoy/extensions/access_loggers/open_telemetry/v3alpha/logs_service.proto delete mode 100644 
generated_api_shadow/envoy/extensions/access_loggers/stream/v3/BUILD delete mode 100644 generated_api_shadow/envoy/extensions/access_loggers/stream/v3/stream.proto delete mode 100644 generated_api_shadow/envoy/extensions/access_loggers/wasm/v3/BUILD delete mode 100644 generated_api_shadow/envoy/extensions/access_loggers/wasm/v3/wasm.proto delete mode 100644 generated_api_shadow/envoy/extensions/cache/simple_http_cache/v3alpha/BUILD delete mode 100644 generated_api_shadow/envoy/extensions/cache/simple_http_cache/v3alpha/config.proto delete mode 100644 generated_api_shadow/envoy/extensions/clusters/aggregate/v3/BUILD delete mode 100644 generated_api_shadow/envoy/extensions/clusters/aggregate/v3/cluster.proto delete mode 100644 generated_api_shadow/envoy/extensions/clusters/dynamic_forward_proxy/v3/BUILD delete mode 100644 generated_api_shadow/envoy/extensions/clusters/dynamic_forward_proxy/v3/cluster.proto delete mode 100644 generated_api_shadow/envoy/extensions/clusters/redis/v3/BUILD delete mode 100644 generated_api_shadow/envoy/extensions/clusters/redis/v3/redis_cluster.proto delete mode 100644 generated_api_shadow/envoy/extensions/common/dynamic_forward_proxy/v3/BUILD delete mode 100644 generated_api_shadow/envoy/extensions/common/dynamic_forward_proxy/v3/dns_cache.proto delete mode 100644 generated_api_shadow/envoy/extensions/common/key_value/v3/BUILD delete mode 100644 generated_api_shadow/envoy/extensions/common/key_value/v3/config.proto delete mode 100644 generated_api_shadow/envoy/extensions/common/matching/v3/BUILD delete mode 100644 generated_api_shadow/envoy/extensions/common/matching/v3/extension_matcher.proto delete mode 100644 generated_api_shadow/envoy/extensions/common/ratelimit/v3/BUILD delete mode 100644 generated_api_shadow/envoy/extensions/common/ratelimit/v3/ratelimit.proto delete mode 100644 generated_api_shadow/envoy/extensions/common/tap/v3/BUILD delete mode 100644 generated_api_shadow/envoy/extensions/common/tap/v3/common.proto delete mode 
100644 generated_api_shadow/envoy/extensions/compression/brotli/compressor/v3/BUILD delete mode 100644 generated_api_shadow/envoy/extensions/compression/brotli/compressor/v3/brotli.proto delete mode 100644 generated_api_shadow/envoy/extensions/compression/brotli/decompressor/v3/BUILD delete mode 100644 generated_api_shadow/envoy/extensions/compression/brotli/decompressor/v3/brotli.proto delete mode 100644 generated_api_shadow/envoy/extensions/compression/gzip/compressor/v3/BUILD delete mode 100644 generated_api_shadow/envoy/extensions/compression/gzip/compressor/v3/gzip.proto delete mode 100644 generated_api_shadow/envoy/extensions/compression/gzip/decompressor/v3/BUILD delete mode 100644 generated_api_shadow/envoy/extensions/compression/gzip/decompressor/v3/gzip.proto delete mode 100644 generated_api_shadow/envoy/extensions/filters/common/dependency/v3/BUILD delete mode 100644 generated_api_shadow/envoy/extensions/filters/common/dependency/v3/dependency.proto delete mode 100644 generated_api_shadow/envoy/extensions/filters/common/fault/v3/BUILD delete mode 100644 generated_api_shadow/envoy/extensions/filters/common/fault/v3/fault.proto delete mode 100644 generated_api_shadow/envoy/extensions/filters/common/matcher/action/v3/BUILD delete mode 100644 generated_api_shadow/envoy/extensions/filters/common/matcher/action/v3/skip_action.proto delete mode 100644 generated_api_shadow/envoy/extensions/filters/http/adaptive_concurrency/v3/BUILD delete mode 100644 generated_api_shadow/envoy/extensions/filters/http/adaptive_concurrency/v3/adaptive_concurrency.proto delete mode 100644 generated_api_shadow/envoy/extensions/filters/http/admission_control/v3alpha/BUILD delete mode 100644 generated_api_shadow/envoy/extensions/filters/http/admission_control/v3alpha/admission_control.proto delete mode 100644 generated_api_shadow/envoy/extensions/filters/http/alternate_protocols_cache/v3/BUILD delete mode 100644 
generated_api_shadow/envoy/extensions/filters/http/alternate_protocols_cache/v3/alternate_protocols_cache.proto delete mode 100644 generated_api_shadow/envoy/extensions/filters/http/aws_lambda/v3/BUILD delete mode 100644 generated_api_shadow/envoy/extensions/filters/http/aws_lambda/v3/aws_lambda.proto delete mode 100644 generated_api_shadow/envoy/extensions/filters/http/aws_request_signing/v3/BUILD delete mode 100644 generated_api_shadow/envoy/extensions/filters/http/aws_request_signing/v3/aws_request_signing.proto delete mode 100644 generated_api_shadow/envoy/extensions/filters/http/bandwidth_limit/v3alpha/BUILD delete mode 100644 generated_api_shadow/envoy/extensions/filters/http/bandwidth_limit/v3alpha/bandwidth_limit.proto delete mode 100644 generated_api_shadow/envoy/extensions/filters/http/buffer/v3/BUILD delete mode 100644 generated_api_shadow/envoy/extensions/filters/http/buffer/v3/buffer.proto delete mode 100644 generated_api_shadow/envoy/extensions/filters/http/cache/v3alpha/BUILD delete mode 100644 generated_api_shadow/envoy/extensions/filters/http/cache/v3alpha/cache.proto delete mode 100644 generated_api_shadow/envoy/extensions/filters/http/cdn_loop/v3alpha/BUILD delete mode 100644 generated_api_shadow/envoy/extensions/filters/http/cdn_loop/v3alpha/cdn_loop.proto delete mode 100644 generated_api_shadow/envoy/extensions/filters/http/composite/v3/BUILD delete mode 100644 generated_api_shadow/envoy/extensions/filters/http/composite/v3/composite.proto delete mode 100644 generated_api_shadow/envoy/extensions/filters/http/compressor/v3/BUILD delete mode 100644 generated_api_shadow/envoy/extensions/filters/http/compressor/v3/compressor.proto delete mode 100644 generated_api_shadow/envoy/extensions/filters/http/cors/v3/BUILD delete mode 100644 generated_api_shadow/envoy/extensions/filters/http/cors/v3/cors.proto delete mode 100644 generated_api_shadow/envoy/extensions/filters/http/csrf/v3/BUILD delete mode 100644 
generated_api_shadow/envoy/extensions/filters/http/csrf/v3/csrf.proto delete mode 100644 generated_api_shadow/envoy/extensions/filters/http/decompressor/v3/BUILD delete mode 100644 generated_api_shadow/envoy/extensions/filters/http/decompressor/v3/decompressor.proto delete mode 100644 generated_api_shadow/envoy/extensions/filters/http/dynamic_forward_proxy/v3/BUILD delete mode 100644 generated_api_shadow/envoy/extensions/filters/http/dynamic_forward_proxy/v3/dynamic_forward_proxy.proto delete mode 100644 generated_api_shadow/envoy/extensions/filters/http/dynamo/v3/BUILD delete mode 100644 generated_api_shadow/envoy/extensions/filters/http/dynamo/v3/dynamo.proto delete mode 100644 generated_api_shadow/envoy/extensions/filters/http/ext_authz/v3/BUILD delete mode 100644 generated_api_shadow/envoy/extensions/filters/http/ext_authz/v3/ext_authz.proto delete mode 100644 generated_api_shadow/envoy/extensions/filters/http/ext_proc/v3alpha/BUILD delete mode 100644 generated_api_shadow/envoy/extensions/filters/http/ext_proc/v3alpha/ext_proc.proto delete mode 100644 generated_api_shadow/envoy/extensions/filters/http/ext_proc/v3alpha/processing_mode.proto delete mode 100644 generated_api_shadow/envoy/extensions/filters/http/fault/v3/BUILD delete mode 100644 generated_api_shadow/envoy/extensions/filters/http/fault/v3/fault.proto delete mode 100644 generated_api_shadow/envoy/extensions/filters/http/grpc_http1_bridge/v3/BUILD delete mode 100644 generated_api_shadow/envoy/extensions/filters/http/grpc_http1_bridge/v3/config.proto delete mode 100644 generated_api_shadow/envoy/extensions/filters/http/grpc_http1_reverse_bridge/v3/BUILD delete mode 100644 generated_api_shadow/envoy/extensions/filters/http/grpc_http1_reverse_bridge/v3/config.proto delete mode 100644 generated_api_shadow/envoy/extensions/filters/http/grpc_json_transcoder/v3/BUILD delete mode 100644 generated_api_shadow/envoy/extensions/filters/http/grpc_json_transcoder/v3/transcoder.proto delete mode 100644 
generated_api_shadow/envoy/extensions/filters/http/grpc_stats/v3/BUILD delete mode 100644 generated_api_shadow/envoy/extensions/filters/http/grpc_stats/v3/config.proto delete mode 100644 generated_api_shadow/envoy/extensions/filters/http/grpc_web/v3/BUILD delete mode 100644 generated_api_shadow/envoy/extensions/filters/http/grpc_web/v3/grpc_web.proto delete mode 100644 generated_api_shadow/envoy/extensions/filters/http/gzip/v3/BUILD delete mode 100644 generated_api_shadow/envoy/extensions/filters/http/gzip/v3/gzip.proto delete mode 100644 generated_api_shadow/envoy/extensions/filters/http/header_to_metadata/v3/BUILD delete mode 100644 generated_api_shadow/envoy/extensions/filters/http/header_to_metadata/v3/header_to_metadata.proto delete mode 100644 generated_api_shadow/envoy/extensions/filters/http/health_check/v3/BUILD delete mode 100644 generated_api_shadow/envoy/extensions/filters/http/health_check/v3/health_check.proto delete mode 100644 generated_api_shadow/envoy/extensions/filters/http/ip_tagging/v3/BUILD delete mode 100644 generated_api_shadow/envoy/extensions/filters/http/ip_tagging/v3/ip_tagging.proto delete mode 100644 generated_api_shadow/envoy/extensions/filters/http/jwt_authn/v3/BUILD delete mode 100644 generated_api_shadow/envoy/extensions/filters/http/jwt_authn/v3/config.proto delete mode 100644 generated_api_shadow/envoy/extensions/filters/http/kill_request/v3/BUILD delete mode 100644 generated_api_shadow/envoy/extensions/filters/http/kill_request/v3/kill_request.proto delete mode 100644 generated_api_shadow/envoy/extensions/filters/http/local_ratelimit/v3/BUILD delete mode 100644 generated_api_shadow/envoy/extensions/filters/http/local_ratelimit/v3/local_rate_limit.proto delete mode 100644 generated_api_shadow/envoy/extensions/filters/http/lua/v3/BUILD delete mode 100644 generated_api_shadow/envoy/extensions/filters/http/lua/v3/lua.proto delete mode 100644 generated_api_shadow/envoy/extensions/filters/http/oauth2/v3alpha/BUILD delete mode 100644 
generated_api_shadow/envoy/extensions/filters/http/oauth2/v3alpha/oauth.proto delete mode 100644 generated_api_shadow/envoy/extensions/filters/http/on_demand/v3/BUILD delete mode 100644 generated_api_shadow/envoy/extensions/filters/http/on_demand/v3/on_demand.proto delete mode 100644 generated_api_shadow/envoy/extensions/filters/http/original_src/v3/BUILD delete mode 100644 generated_api_shadow/envoy/extensions/filters/http/original_src/v3/original_src.proto delete mode 100644 generated_api_shadow/envoy/extensions/filters/http/ratelimit/v3/BUILD delete mode 100644 generated_api_shadow/envoy/extensions/filters/http/ratelimit/v3/rate_limit.proto delete mode 100644 generated_api_shadow/envoy/extensions/filters/http/rbac/v3/BUILD delete mode 100644 generated_api_shadow/envoy/extensions/filters/http/rbac/v3/rbac.proto delete mode 100644 generated_api_shadow/envoy/extensions/filters/http/router/v3/BUILD delete mode 100644 generated_api_shadow/envoy/extensions/filters/http/router/v3/router.proto delete mode 100644 generated_api_shadow/envoy/extensions/filters/http/set_metadata/v3/BUILD delete mode 100644 generated_api_shadow/envoy/extensions/filters/http/set_metadata/v3/set_metadata.proto delete mode 100644 generated_api_shadow/envoy/extensions/filters/http/tap/v3/BUILD delete mode 100644 generated_api_shadow/envoy/extensions/filters/http/tap/v3/tap.proto delete mode 100644 generated_api_shadow/envoy/extensions/filters/http/wasm/v3/BUILD delete mode 100644 generated_api_shadow/envoy/extensions/filters/http/wasm/v3/wasm.proto delete mode 100644 generated_api_shadow/envoy/extensions/filters/listener/http_inspector/v3/BUILD delete mode 100644 generated_api_shadow/envoy/extensions/filters/listener/http_inspector/v3/http_inspector.proto delete mode 100644 generated_api_shadow/envoy/extensions/filters/listener/original_dst/v3/BUILD delete mode 100644 generated_api_shadow/envoy/extensions/filters/listener/original_dst/v3/original_dst.proto delete mode 100644 
generated_api_shadow/envoy/extensions/filters/listener/original_src/v3/BUILD delete mode 100644 generated_api_shadow/envoy/extensions/filters/listener/original_src/v3/original_src.proto delete mode 100644 generated_api_shadow/envoy/extensions/filters/listener/proxy_protocol/v3/BUILD delete mode 100644 generated_api_shadow/envoy/extensions/filters/listener/proxy_protocol/v3/proxy_protocol.proto delete mode 100644 generated_api_shadow/envoy/extensions/filters/listener/tls_inspector/v3/BUILD delete mode 100644 generated_api_shadow/envoy/extensions/filters/listener/tls_inspector/v3/tls_inspector.proto delete mode 100644 generated_api_shadow/envoy/extensions/filters/network/client_ssl_auth/v3/BUILD delete mode 100644 generated_api_shadow/envoy/extensions/filters/network/client_ssl_auth/v3/client_ssl_auth.proto delete mode 100644 generated_api_shadow/envoy/extensions/filters/network/connection_limit/v3/BUILD delete mode 100644 generated_api_shadow/envoy/extensions/filters/network/connection_limit/v3/connection_limit.proto delete mode 100644 generated_api_shadow/envoy/extensions/filters/network/direct_response/v3/BUILD delete mode 100644 generated_api_shadow/envoy/extensions/filters/network/direct_response/v3/config.proto delete mode 100644 generated_api_shadow/envoy/extensions/filters/network/dubbo_proxy/router/v3/BUILD delete mode 100644 generated_api_shadow/envoy/extensions/filters/network/dubbo_proxy/router/v3/router.proto delete mode 100644 generated_api_shadow/envoy/extensions/filters/network/dubbo_proxy/v3/BUILD delete mode 100644 generated_api_shadow/envoy/extensions/filters/network/dubbo_proxy/v3/dubbo_proxy.proto delete mode 100644 generated_api_shadow/envoy/extensions/filters/network/dubbo_proxy/v3/route.proto delete mode 100644 generated_api_shadow/envoy/extensions/filters/network/echo/v3/BUILD delete mode 100644 generated_api_shadow/envoy/extensions/filters/network/echo/v3/echo.proto delete mode 100644 
generated_api_shadow/envoy/extensions/filters/network/ext_authz/v3/BUILD delete mode 100644 generated_api_shadow/envoy/extensions/filters/network/ext_authz/v3/ext_authz.proto delete mode 100644 generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v3/BUILD delete mode 100644 generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto delete mode 100644 generated_api_shadow/envoy/extensions/filters/network/local_ratelimit/v3/BUILD delete mode 100644 generated_api_shadow/envoy/extensions/filters/network/local_ratelimit/v3/local_rate_limit.proto delete mode 100644 generated_api_shadow/envoy/extensions/filters/network/mongo_proxy/v3/BUILD delete mode 100644 generated_api_shadow/envoy/extensions/filters/network/mongo_proxy/v3/mongo_proxy.proto delete mode 100644 generated_api_shadow/envoy/extensions/filters/network/ratelimit/v3/BUILD delete mode 100644 generated_api_shadow/envoy/extensions/filters/network/ratelimit/v3/rate_limit.proto delete mode 100644 generated_api_shadow/envoy/extensions/filters/network/rbac/v3/BUILD delete mode 100644 generated_api_shadow/envoy/extensions/filters/network/rbac/v3/rbac.proto delete mode 100644 generated_api_shadow/envoy/extensions/filters/network/redis_proxy/v3/BUILD delete mode 100644 generated_api_shadow/envoy/extensions/filters/network/redis_proxy/v3/redis_proxy.proto delete mode 100644 generated_api_shadow/envoy/extensions/filters/network/sni_cluster/v3/BUILD delete mode 100644 generated_api_shadow/envoy/extensions/filters/network/sni_cluster/v3/sni_cluster.proto delete mode 100644 generated_api_shadow/envoy/extensions/filters/network/sni_dynamic_forward_proxy/v3alpha/BUILD delete mode 100644 generated_api_shadow/envoy/extensions/filters/network/sni_dynamic_forward_proxy/v3alpha/sni_dynamic_forward_proxy.proto delete mode 100644 generated_api_shadow/envoy/extensions/filters/network/tcp_proxy/v3/BUILD delete mode 100644 
generated_api_shadow/envoy/extensions/filters/network/tcp_proxy/v3/tcp_proxy.proto delete mode 100644 generated_api_shadow/envoy/extensions/filters/network/thrift_proxy/filters/ratelimit/v3/BUILD delete mode 100644 generated_api_shadow/envoy/extensions/filters/network/thrift_proxy/filters/ratelimit/v3/rate_limit.proto delete mode 100644 generated_api_shadow/envoy/extensions/filters/network/thrift_proxy/router/v3/BUILD delete mode 100644 generated_api_shadow/envoy/extensions/filters/network/thrift_proxy/router/v3/router.proto delete mode 100644 generated_api_shadow/envoy/extensions/filters/network/thrift_proxy/v3/BUILD delete mode 100644 generated_api_shadow/envoy/extensions/filters/network/thrift_proxy/v3/route.proto delete mode 100644 generated_api_shadow/envoy/extensions/filters/network/thrift_proxy/v3/thrift_proxy.proto delete mode 100644 generated_api_shadow/envoy/extensions/filters/network/wasm/v3/BUILD delete mode 100644 generated_api_shadow/envoy/extensions/filters/network/wasm/v3/wasm.proto delete mode 100644 generated_api_shadow/envoy/extensions/filters/network/zookeeper_proxy/v3/BUILD delete mode 100644 generated_api_shadow/envoy/extensions/filters/network/zookeeper_proxy/v3/zookeeper_proxy.proto delete mode 100644 generated_api_shadow/envoy/extensions/filters/udp/dns_filter/v3alpha/BUILD delete mode 100644 generated_api_shadow/envoy/extensions/filters/udp/dns_filter/v3alpha/dns_filter.proto delete mode 100644 generated_api_shadow/envoy/extensions/filters/udp/udp_proxy/v3/BUILD delete mode 100644 generated_api_shadow/envoy/extensions/filters/udp/udp_proxy/v3/udp_proxy.proto delete mode 100644 generated_api_shadow/envoy/extensions/formatter/metadata/v3/BUILD delete mode 100644 generated_api_shadow/envoy/extensions/formatter/metadata/v3/metadata.proto delete mode 100644 generated_api_shadow/envoy/extensions/formatter/req_without_query/v3/BUILD delete mode 100644 generated_api_shadow/envoy/extensions/formatter/req_without_query/v3/req_without_query.proto 
delete mode 100644 generated_api_shadow/envoy/extensions/health_checkers/redis/v3/BUILD delete mode 100644 generated_api_shadow/envoy/extensions/health_checkers/redis/v3/redis.proto delete mode 100644 generated_api_shadow/envoy/extensions/http/header_formatters/preserve_case/v3/BUILD delete mode 100644 generated_api_shadow/envoy/extensions/http/header_formatters/preserve_case/v3/preserve_case.proto delete mode 100644 generated_api_shadow/envoy/extensions/http/original_ip_detection/custom_header/v3/BUILD delete mode 100644 generated_api_shadow/envoy/extensions/http/original_ip_detection/custom_header/v3/custom_header.proto delete mode 100644 generated_api_shadow/envoy/extensions/http/original_ip_detection/xff/v3/BUILD delete mode 100644 generated_api_shadow/envoy/extensions/http/original_ip_detection/xff/v3/xff.proto delete mode 100644 generated_api_shadow/envoy/extensions/internal_redirect/allow_listed_routes/v3/BUILD delete mode 100644 generated_api_shadow/envoy/extensions/internal_redirect/allow_listed_routes/v3/allow_listed_routes_config.proto delete mode 100644 generated_api_shadow/envoy/extensions/internal_redirect/previous_routes/v3/BUILD delete mode 100644 generated_api_shadow/envoy/extensions/internal_redirect/previous_routes/v3/previous_routes_config.proto delete mode 100644 generated_api_shadow/envoy/extensions/internal_redirect/safe_cross_scheme/v3/BUILD delete mode 100644 generated_api_shadow/envoy/extensions/internal_redirect/safe_cross_scheme/v3/safe_cross_scheme_config.proto delete mode 100644 generated_api_shadow/envoy/extensions/key_value/file_based/v3/BUILD delete mode 100644 generated_api_shadow/envoy/extensions/key_value/file_based/v3/config.proto delete mode 100644 generated_api_shadow/envoy/extensions/matching/common_inputs/environment_variable/v3/BUILD delete mode 100644 generated_api_shadow/envoy/extensions/matching/common_inputs/environment_variable/v3/input.proto delete mode 100644 
generated_api_shadow/envoy/extensions/matching/input_matchers/consistent_hashing/v3/BUILD delete mode 100644 generated_api_shadow/envoy/extensions/matching/input_matchers/consistent_hashing/v3/consistent_hashing.proto delete mode 100644 generated_api_shadow/envoy/extensions/matching/input_matchers/ip/v3/BUILD delete mode 100644 generated_api_shadow/envoy/extensions/matching/input_matchers/ip/v3/ip.proto delete mode 100644 generated_api_shadow/envoy/extensions/network/socket_interface/v3/BUILD delete mode 100644 generated_api_shadow/envoy/extensions/network/socket_interface/v3/default_socket_interface.proto delete mode 100644 generated_api_shadow/envoy/extensions/quic/crypto_stream/v3/BUILD delete mode 100644 generated_api_shadow/envoy/extensions/quic/crypto_stream/v3/crypto_stream.proto delete mode 100644 generated_api_shadow/envoy/extensions/quic/proof_source/v3/BUILD delete mode 100644 generated_api_shadow/envoy/extensions/quic/proof_source/v3/proof_source.proto delete mode 100644 generated_api_shadow/envoy/extensions/rate_limit_descriptors/expr/v3/BUILD delete mode 100644 generated_api_shadow/envoy/extensions/rate_limit_descriptors/expr/v3/expr.proto delete mode 100644 generated_api_shadow/envoy/extensions/request_id/uuid/v3/BUILD delete mode 100644 generated_api_shadow/envoy/extensions/request_id/uuid/v3/uuid.proto delete mode 100644 generated_api_shadow/envoy/extensions/resource_monitors/fixed_heap/v3/BUILD delete mode 100644 generated_api_shadow/envoy/extensions/resource_monitors/fixed_heap/v3/fixed_heap.proto delete mode 100644 generated_api_shadow/envoy/extensions/resource_monitors/injected_resource/v3/BUILD delete mode 100644 generated_api_shadow/envoy/extensions/resource_monitors/injected_resource/v3/injected_resource.proto delete mode 100644 generated_api_shadow/envoy/extensions/retry/host/omit_canary_hosts/v3/BUILD delete mode 100644 generated_api_shadow/envoy/extensions/retry/host/omit_canary_hosts/v3/omit_canary_hosts.proto delete mode 100644 
generated_api_shadow/envoy/extensions/retry/host/omit_host_metadata/v3/BUILD delete mode 100644 generated_api_shadow/envoy/extensions/retry/host/omit_host_metadata/v3/omit_host_metadata_config.proto delete mode 100644 generated_api_shadow/envoy/extensions/retry/host/previous_hosts/v3/BUILD delete mode 100644 generated_api_shadow/envoy/extensions/retry/host/previous_hosts/v3/previous_hosts.proto delete mode 100644 generated_api_shadow/envoy/extensions/retry/priority/previous_priorities/v3/BUILD delete mode 100644 generated_api_shadow/envoy/extensions/retry/priority/previous_priorities/v3/previous_priorities_config.proto delete mode 100644 generated_api_shadow/envoy/extensions/stat_sinks/graphite_statsd/v3/BUILD delete mode 100644 generated_api_shadow/envoy/extensions/stat_sinks/graphite_statsd/v3/graphite_statsd.proto delete mode 100644 generated_api_shadow/envoy/extensions/stat_sinks/wasm/v3/BUILD delete mode 100644 generated_api_shadow/envoy/extensions/stat_sinks/wasm/v3/wasm.proto delete mode 100644 generated_api_shadow/envoy/extensions/transport_sockets/alts/v3/BUILD delete mode 100644 generated_api_shadow/envoy/extensions/transport_sockets/alts/v3/alts.proto delete mode 100644 generated_api_shadow/envoy/extensions/transport_sockets/proxy_protocol/v3/BUILD delete mode 100644 generated_api_shadow/envoy/extensions/transport_sockets/proxy_protocol/v3/upstream_proxy_protocol.proto delete mode 100644 generated_api_shadow/envoy/extensions/transport_sockets/quic/v3/BUILD delete mode 100644 generated_api_shadow/envoy/extensions/transport_sockets/quic/v3/quic_transport.proto delete mode 100644 generated_api_shadow/envoy/extensions/transport_sockets/raw_buffer/v3/BUILD delete mode 100644 generated_api_shadow/envoy/extensions/transport_sockets/raw_buffer/v3/raw_buffer.proto delete mode 100644 generated_api_shadow/envoy/extensions/transport_sockets/s2a/v3alpha/BUILD delete mode 100644 generated_api_shadow/envoy/extensions/transport_sockets/s2a/v3alpha/s2a.proto delete mode 
100644 generated_api_shadow/envoy/extensions/transport_sockets/starttls/v3/BUILD delete mode 100644 generated_api_shadow/envoy/extensions/transport_sockets/starttls/v3/starttls.proto delete mode 100644 generated_api_shadow/envoy/extensions/transport_sockets/tap/v3/BUILD delete mode 100644 generated_api_shadow/envoy/extensions/transport_sockets/tap/v3/tap.proto delete mode 100644 generated_api_shadow/envoy/extensions/transport_sockets/tls/v3/BUILD delete mode 100644 generated_api_shadow/envoy/extensions/transport_sockets/tls/v3/cert.proto delete mode 100644 generated_api_shadow/envoy/extensions/transport_sockets/tls/v3/common.proto delete mode 100644 generated_api_shadow/envoy/extensions/transport_sockets/tls/v3/secret.proto delete mode 100644 generated_api_shadow/envoy/extensions/transport_sockets/tls/v3/tls.proto delete mode 100644 generated_api_shadow/envoy/extensions/transport_sockets/tls/v3/tls_spiffe_validator_config.proto delete mode 100644 generated_api_shadow/envoy/extensions/upstreams/http/generic/v3/BUILD delete mode 100644 generated_api_shadow/envoy/extensions/upstreams/http/generic/v3/generic_connection_pool.proto delete mode 100644 generated_api_shadow/envoy/extensions/upstreams/http/http/v3/BUILD delete mode 100644 generated_api_shadow/envoy/extensions/upstreams/http/http/v3/http_connection_pool.proto delete mode 100644 generated_api_shadow/envoy/extensions/upstreams/http/tcp/v3/BUILD delete mode 100644 generated_api_shadow/envoy/extensions/upstreams/http/tcp/v3/tcp_connection_pool.proto delete mode 100644 generated_api_shadow/envoy/extensions/upstreams/http/v3/BUILD delete mode 100644 generated_api_shadow/envoy/extensions/upstreams/http/v3/http_protocol_options.proto delete mode 100644 generated_api_shadow/envoy/extensions/upstreams/tcp/generic/v3/BUILD delete mode 100644 generated_api_shadow/envoy/extensions/upstreams/tcp/generic/v3/generic_connection_pool.proto delete mode 100644 generated_api_shadow/envoy/extensions/wasm/v3/BUILD delete mode 
100644 generated_api_shadow/envoy/extensions/wasm/v3/wasm.proto delete mode 100644 generated_api_shadow/envoy/extensions/watchdog/profile_action/v3alpha/BUILD delete mode 100644 generated_api_shadow/envoy/extensions/watchdog/profile_action/v3alpha/profile_action.proto delete mode 100644 generated_api_shadow/envoy/service/README.md delete mode 100644 generated_api_shadow/envoy/service/accesslog/v2/BUILD delete mode 100644 generated_api_shadow/envoy/service/accesslog/v2/als.proto delete mode 100644 generated_api_shadow/envoy/service/accesslog/v3/BUILD delete mode 100644 generated_api_shadow/envoy/service/accesslog/v3/als.proto delete mode 100644 generated_api_shadow/envoy/service/auth/v2/BUILD delete mode 100644 generated_api_shadow/envoy/service/auth/v2/attribute_context.proto delete mode 100644 generated_api_shadow/envoy/service/auth/v2/external_auth.proto delete mode 100644 generated_api_shadow/envoy/service/auth/v2alpha/BUILD delete mode 100644 generated_api_shadow/envoy/service/auth/v2alpha/external_auth.proto delete mode 100644 generated_api_shadow/envoy/service/auth/v3/BUILD delete mode 100644 generated_api_shadow/envoy/service/auth/v3/attribute_context.proto delete mode 100644 generated_api_shadow/envoy/service/auth/v3/external_auth.proto delete mode 100644 generated_api_shadow/envoy/service/cluster/v3/BUILD delete mode 100644 generated_api_shadow/envoy/service/cluster/v3/cds.proto delete mode 100644 generated_api_shadow/envoy/service/discovery/v2/BUILD delete mode 100644 generated_api_shadow/envoy/service/discovery/v2/ads.proto delete mode 100644 generated_api_shadow/envoy/service/discovery/v2/hds.proto delete mode 100644 generated_api_shadow/envoy/service/discovery/v2/rtds.proto delete mode 100644 generated_api_shadow/envoy/service/discovery/v2/sds.proto delete mode 100644 generated_api_shadow/envoy/service/discovery/v3/BUILD delete mode 100644 generated_api_shadow/envoy/service/discovery/v3/ads.proto delete mode 100644 
generated_api_shadow/envoy/service/discovery/v3/discovery.proto delete mode 100644 generated_api_shadow/envoy/service/endpoint/v3/BUILD delete mode 100644 generated_api_shadow/envoy/service/endpoint/v3/eds.proto delete mode 100644 generated_api_shadow/envoy/service/endpoint/v3/leds.proto delete mode 100644 generated_api_shadow/envoy/service/event_reporting/v2alpha/BUILD delete mode 100644 generated_api_shadow/envoy/service/event_reporting/v2alpha/event_reporting_service.proto delete mode 100644 generated_api_shadow/envoy/service/event_reporting/v3/BUILD delete mode 100644 generated_api_shadow/envoy/service/event_reporting/v3/event_reporting_service.proto delete mode 100644 generated_api_shadow/envoy/service/ext_proc/v3alpha/BUILD delete mode 100644 generated_api_shadow/envoy/service/ext_proc/v3alpha/external_processor.proto delete mode 100644 generated_api_shadow/envoy/service/extension/v3/BUILD delete mode 100644 generated_api_shadow/envoy/service/extension/v3/config_discovery.proto delete mode 100644 generated_api_shadow/envoy/service/health/v3/BUILD delete mode 100644 generated_api_shadow/envoy/service/health/v3/hds.proto delete mode 100644 generated_api_shadow/envoy/service/listener/v3/BUILD delete mode 100644 generated_api_shadow/envoy/service/listener/v3/lds.proto delete mode 100644 generated_api_shadow/envoy/service/load_stats/v2/BUILD delete mode 100644 generated_api_shadow/envoy/service/load_stats/v2/lrs.proto delete mode 100644 generated_api_shadow/envoy/service/load_stats/v3/BUILD delete mode 100644 generated_api_shadow/envoy/service/load_stats/v3/lrs.proto delete mode 100644 generated_api_shadow/envoy/service/metrics/v2/BUILD delete mode 100644 generated_api_shadow/envoy/service/metrics/v2/metrics_service.proto delete mode 100644 generated_api_shadow/envoy/service/metrics/v3/BUILD delete mode 100644 generated_api_shadow/envoy/service/metrics/v3/metrics_service.proto delete mode 100644 generated_api_shadow/envoy/service/ratelimit/v2/BUILD delete mode 
100644 generated_api_shadow/envoy/service/ratelimit/v2/rls.proto delete mode 100644 generated_api_shadow/envoy/service/ratelimit/v3/BUILD delete mode 100644 generated_api_shadow/envoy/service/ratelimit/v3/rls.proto delete mode 100644 generated_api_shadow/envoy/service/route/v3/BUILD delete mode 100644 generated_api_shadow/envoy/service/route/v3/rds.proto delete mode 100644 generated_api_shadow/envoy/service/route/v3/srds.proto delete mode 100644 generated_api_shadow/envoy/service/runtime/v3/BUILD delete mode 100644 generated_api_shadow/envoy/service/runtime/v3/rtds.proto delete mode 100644 generated_api_shadow/envoy/service/secret/v3/BUILD delete mode 100644 generated_api_shadow/envoy/service/secret/v3/sds.proto delete mode 100644 generated_api_shadow/envoy/service/status/v2/BUILD delete mode 100644 generated_api_shadow/envoy/service/status/v2/csds.proto delete mode 100644 generated_api_shadow/envoy/service/status/v3/BUILD delete mode 100644 generated_api_shadow/envoy/service/status/v3/csds.proto delete mode 100644 generated_api_shadow/envoy/service/tap/v2alpha/BUILD delete mode 100644 generated_api_shadow/envoy/service/tap/v2alpha/common.proto delete mode 100644 generated_api_shadow/envoy/service/tap/v2alpha/tap.proto delete mode 100644 generated_api_shadow/envoy/service/tap/v3/BUILD delete mode 100644 generated_api_shadow/envoy/service/tap/v3/tap.proto delete mode 100644 generated_api_shadow/envoy/service/trace/v2/BUILD delete mode 100644 generated_api_shadow/envoy/service/trace/v2/trace_service.proto delete mode 100644 generated_api_shadow/envoy/service/trace/v3/BUILD delete mode 100644 generated_api_shadow/envoy/service/trace/v3/trace_service.proto delete mode 100644 generated_api_shadow/envoy/type/BUILD delete mode 100644 generated_api_shadow/envoy/type/hash_policy.proto delete mode 100644 generated_api_shadow/envoy/type/http.proto delete mode 100644 generated_api_shadow/envoy/type/http/v3/BUILD delete mode 100644 
generated_api_shadow/envoy/type/http/v3/path_transformation.proto delete mode 100644 generated_api_shadow/envoy/type/http_status.proto delete mode 100644 generated_api_shadow/envoy/type/matcher/BUILD delete mode 100644 generated_api_shadow/envoy/type/matcher/metadata.proto delete mode 100644 generated_api_shadow/envoy/type/matcher/node.proto delete mode 100644 generated_api_shadow/envoy/type/matcher/number.proto delete mode 100644 generated_api_shadow/envoy/type/matcher/path.proto delete mode 100644 generated_api_shadow/envoy/type/matcher/regex.proto delete mode 100644 generated_api_shadow/envoy/type/matcher/string.proto delete mode 100644 generated_api_shadow/envoy/type/matcher/struct.proto delete mode 100644 generated_api_shadow/envoy/type/matcher/v3/BUILD delete mode 100644 generated_api_shadow/envoy/type/matcher/v3/http_inputs.proto delete mode 100644 generated_api_shadow/envoy/type/matcher/v3/metadata.proto delete mode 100644 generated_api_shadow/envoy/type/matcher/v3/node.proto delete mode 100644 generated_api_shadow/envoy/type/matcher/v3/number.proto delete mode 100644 generated_api_shadow/envoy/type/matcher/v3/path.proto delete mode 100644 generated_api_shadow/envoy/type/matcher/v3/regex.proto delete mode 100644 generated_api_shadow/envoy/type/matcher/v3/string.proto delete mode 100644 generated_api_shadow/envoy/type/matcher/v3/struct.proto delete mode 100644 generated_api_shadow/envoy/type/matcher/v3/value.proto delete mode 100644 generated_api_shadow/envoy/type/matcher/value.proto delete mode 100644 generated_api_shadow/envoy/type/metadata/v2/BUILD delete mode 100644 generated_api_shadow/envoy/type/metadata/v2/metadata.proto delete mode 100644 generated_api_shadow/envoy/type/metadata/v3/BUILD delete mode 100644 generated_api_shadow/envoy/type/metadata/v3/metadata.proto delete mode 100644 generated_api_shadow/envoy/type/percent.proto delete mode 100644 generated_api_shadow/envoy/type/range.proto delete mode 100644 
generated_api_shadow/envoy/type/semantic_version.proto delete mode 100644 generated_api_shadow/envoy/type/token_bucket.proto delete mode 100644 generated_api_shadow/envoy/type/tracing/v2/BUILD delete mode 100644 generated_api_shadow/envoy/type/tracing/v2/custom_tag.proto delete mode 100644 generated_api_shadow/envoy/type/tracing/v3/BUILD delete mode 100644 generated_api_shadow/envoy/type/tracing/v3/custom_tag.proto delete mode 100644 generated_api_shadow/envoy/type/v3/BUILD delete mode 100644 generated_api_shadow/envoy/type/v3/hash_policy.proto delete mode 100644 generated_api_shadow/envoy/type/v3/http.proto delete mode 100644 generated_api_shadow/envoy/type/v3/http_status.proto delete mode 100644 generated_api_shadow/envoy/type/v3/percent.proto delete mode 100644 generated_api_shadow/envoy/type/v3/range.proto delete mode 100644 generated_api_shadow/envoy/type/v3/ratelimit_unit.proto delete mode 100644 generated_api_shadow/envoy/type/v3/semantic_version.proto delete mode 100644 generated_api_shadow/envoy/type/v3/token_bucket.proto delete mode 100644 generated_api_shadow/envoy/watchdog/v3alpha/BUILD delete mode 100644 generated_api_shadow/envoy/watchdog/v3alpha/abort_action.proto delete mode 100644 test/common/config/api_shadow_test.cc delete mode 100644 tools/api_boost/README.md delete mode 100755 tools/api_boost/api_boost.py delete mode 100755 tools/api_boost/api_boost_test.py delete mode 100644 tools/api_boost/testdata/BUILD delete mode 100644 tools/api_boost/testdata/decl_ref_expr.cc delete mode 100644 tools/api_boost/testdata/decl_ref_expr.cc.gold delete mode 100644 tools/api_boost/testdata/deprecate.cc delete mode 100644 tools/api_boost/testdata/deprecate.cc.gold delete mode 100644 tools/api_boost/testdata/elaborated_type.cc delete mode 100644 tools/api_boost/testdata/elaborated_type.cc.gold delete mode 100644 tools/api_boost/testdata/no_boost_file.cc delete mode 100644 tools/api_boost/testdata/no_boost_file.cc.gold delete mode 100644 
tools/api_boost/testdata/rename.cc delete mode 100644 tools/api_boost/testdata/rename.cc.gold delete mode 100644 tools/api_boost/testdata/using_decl.cc delete mode 100644 tools/api_boost/testdata/using_decl.cc.gold delete mode 100644 tools/api_boost/testdata/validate.cc delete mode 100644 tools/api_boost/testdata/validate.cc.gold delete mode 100644 tools/clang_tools/api_booster/BUILD delete mode 100644 tools/clang_tools/api_booster/main.cc delete mode 100644 tools/clang_tools/api_booster/proto_cxx_utils.cc delete mode 100644 tools/clang_tools/api_booster/proto_cxx_utils.h delete mode 100644 tools/clang_tools/api_booster/proto_cxx_utils_test.cc delete mode 100644 tools/protoxform/merge_active_shadow.py delete mode 100644 tools/protoxform/merge_active_shadow_test.py delete mode 100644 tools/protoxform/migrate.py delete mode 100644 tools/testdata/protoxform/envoy/active_non_terminal/v2/BUILD delete mode 100644 tools/testdata/protoxform/envoy/active_non_terminal/v2/active_non_terminal.proto delete mode 100644 tools/testdata/protoxform/envoy/active_non_terminal/v2/active_non_terminal.proto.active_or_frozen.gold delete mode 100644 tools/testdata/protoxform/envoy/active_non_terminal/v2/active_non_terminal.proto.next_major_version_candidate.envoy_internal.gold delete mode 100644 tools/testdata/protoxform/envoy/active_non_terminal/v2/active_non_terminal.proto.next_major_version_candidate.gold delete mode 100644 tools/testdata/protoxform/envoy/active_terminal/v2/BUILD delete mode 100644 tools/testdata/protoxform/envoy/active_terminal/v2/active_terminal.proto delete mode 100644 tools/testdata/protoxform/envoy/active_terminal/v2/active_terminal.proto.active_or_frozen.gold delete mode 100644 tools/testdata/protoxform/envoy/active_terminal/v2/active_terminal.proto.next_major_version_candidate.envoy_internal.gold delete mode 100644 tools/testdata/protoxform/envoy/active_terminal/v2/active_terminal.proto.next_major_version_candidate.gold delete mode 100644 
tools/testdata/protoxform/envoy/frozen/v2/BUILD delete mode 100644 tools/testdata/protoxform/envoy/frozen/v2/frozen.proto delete mode 100644 tools/testdata/protoxform/envoy/frozen/v2/frozen.proto.active_or_frozen.gold delete mode 100644 tools/testdata/protoxform/envoy/frozen/v2/frozen.proto.next_major_version_candidate.envoy_internal.gold delete mode 100644 tools/testdata/protoxform/envoy/frozen/v2/frozen.proto.next_major_version_candidate.gold delete mode 100644 tools/testdata/protoxform/envoy/frozen/v2/frozen_versioned_deprecation.proto delete mode 100644 tools/testdata/protoxform/envoy/frozen/v2/frozen_versioned_deprecation.proto.active_or_frozen.gold delete mode 100644 tools/testdata/protoxform/envoy/frozen/v2/frozen_versioned_deprecation.proto.next_major_version_candidate.envoy_internal.gold delete mode 100644 tools/testdata/protoxform/envoy/frozen/v2/frozen_versioned_deprecation.proto.next_major_version_candidate.gold delete mode 100644 tools/testdata/protoxform/envoy/frozen/v3/BUILD delete mode 100644 tools/testdata/protoxform/envoy/frozen/v3/frozen.proto delete mode 100644 tools/testdata/protoxform/envoy/frozen/v3/frozen.proto.active_or_frozen.gold delete mode 100644 tools/testdata/protoxform/envoy/frozen/v3/frozen.proto.next_major_version_candidate.envoy_internal.gold delete mode 100644 tools/testdata/protoxform/envoy/frozen/v3/frozen.proto.next_major_version_candidate.gold delete mode 100644 tools/testdata/protoxform/envoy/frozen/v3/frozen_versioned_deprecation.proto delete mode 100644 tools/testdata/protoxform/envoy/frozen/v3/frozen_versioned_deprecation.proto.active_or_frozen.gold delete mode 100644 tools/testdata/protoxform/envoy/frozen/v3/frozen_versioned_deprecation.proto.next_major_version_candidate.envoy_internal.gold delete mode 100644 tools/testdata/protoxform/envoy/frozen/v3/frozen_versioned_deprecation.proto.next_major_version_candidate.gold delete mode 100644 
tools/testdata/protoxform/envoy/v2/discovery_service.proto.next_major_version_candidate.envoy_internal.gold delete mode 100644 tools/testdata/protoxform/envoy/v2/discovery_service.proto.next_major_version_candidate.gold delete mode 100644 tools/testdata/protoxform/envoy/v2/fully_qualified_names.proto.next_major_version_candidate.envoy_internal.gold delete mode 100644 tools/testdata/protoxform/envoy/v2/fully_qualified_names.proto.next_major_version_candidate.gold delete mode 100644 tools/testdata/protoxform/envoy/v2/oneof.proto.next_major_version_candidate.envoy_internal.gold delete mode 100644 tools/testdata/protoxform/envoy/v2/oneof.proto.next_major_version_candidate.gold delete mode 100644 tools/testdata/protoxform/envoy/v2/package_move.proto.next_major_version_candidate.envoy_internal.gold delete mode 100644 tools/testdata/protoxform/envoy/v2/package_move.proto.next_major_version_candidate.gold delete mode 100644 tools/testdata/protoxform/envoy/v2/sample.proto.next_major_version_candidate.envoy_internal.gold delete mode 100644 tools/testdata/protoxform/envoy/v2/sample.proto.next_major_version_candidate.gold diff --git a/api/STYLE.md b/api/STYLE.md index b185be97c9687..485ab5757c6bd 100644 --- a/api/STYLE.md +++ b/api/STYLE.md @@ -144,9 +144,8 @@ To add an extension config to the API, the steps below should be followed: (`option (udpa.annotations.file_status).package_version_status = ACTIVE;`). This is required to automatically include the config proto in [api/versioning/BUILD](versioning/BUILD). 1. Add a reference to the v3 extension config in (1) in [api/versioning/BUILD](versioning/BUILD) under `active_protos`. -1. Run `./tools/proto_format/proto_format.sh fix`. This should regenerate the `BUILD` file, - reformat `foobar.proto` as needed and also generate the shadow API protos. -1. `git add api/ generated_api_shadow/` to add any new files to your Git index. +1. Run `./tools/proto_format/proto_format.sh fix`. 
This should regenerate the `BUILD` file and + reformat `foobar.proto` as needed. ## API annotations diff --git a/api/tools/generate_listeners_test.py b/api/tools/generate_listeners_test.py index f67ef4bbb5aab..1defb3f666986 100644 --- a/api/tools/generate_listeners_test.py +++ b/api/tools/generate_listeners_test.py @@ -5,7 +5,7 @@ import generate_listeners if __name__ == "__main__": - srcdir = os.path.join(os.getenv("TEST_SRCDIR"), 'envoy_api_canonical') + srcdir = os.path.join(os.getenv("TEST_SRCDIR"), 'envoy_api') generate_listeners.generate_listeners( os.path.join(srcdir, "examples/service_envoy/listeners.pb"), "/dev/stdout", "/dev/stdout", iter([os.path.join(srcdir, "examples/service_envoy/http_connection_manager.pb")])) diff --git a/api/tools/tap2pcap.py b/api/tools/tap2pcap.py index 93a8610399285..bcb13fdf9a093 100644 --- a/api/tools/tap2pcap.py +++ b/api/tools/tap2pcap.py @@ -8,7 +8,7 @@ Usage: -bazel run @envoy_api_canonical//tools:tap2pcap +bazel run @envoy_api//tools:tap2pcap Known issues: - IPv6 PCAP generation has malformed TCP packets. This appears to be a text2pcap diff --git a/api/tools/tap2pcap_test.py b/api/tools/tap2pcap_test.py index fd13cf32ff694..c0151846f5e18 100644 --- a/api/tools/tap2pcap_test.py +++ b/api/tools/tap2pcap_test.py @@ -11,7 +11,7 @@ # a golden output file for the tshark dump. Since we run tap2pcap in a # subshell with a limited environment, the inferred time zone should be UTC. 
if __name__ == '__main__': - srcdir = os.path.join(os.getenv('TEST_SRCDIR'), 'envoy_api_canonical') + srcdir = os.path.join(os.getenv('TEST_SRCDIR'), 'envoy_api') tap_path = os.path.join(srcdir, 'tools/data/tap2pcap_h2_ipv4.pb_text') expected_path = os.path.join(srcdir, 'tools/data/tap2pcap_h2_ipv4.txt') pcap_path = os.path.join(os.getenv('TEST_TMPDIR'), 'generated.pcap') diff --git a/bazel/api_binding.bzl b/bazel/api_binding.bzl index 362e1803a1ef8..8d46d4c1827b8 100644 --- a/bazel/api_binding.bzl +++ b/bazel/api_binding.bzl @@ -24,14 +24,9 @@ _default_envoy_api = repository_rule( def envoy_api_binding(): # Treat the data plane API as an external repo, this simplifies exporting - # the API to https://github.com/envoyproxy/data-plane-api. This is the - # shadow API for Envoy internal use, see #9479. + # the API to https://github.com/envoyproxy/data-plane-api. if "envoy_api" not in native.existing_rules().keys(): - _default_envoy_api(name = "envoy_api", reldir = "generated_api_shadow") - - # We also provide the non-shadowed API for developer use (see #9479). - if "envoy_api_raw" not in native.existing_rules().keys(): - _default_envoy_api(name = "envoy_api_canonical", reldir = "api") + _default_envoy_api(name = "envoy_api", reldir = "api") # TODO(https://github.com/envoyproxy/envoy/issues/7719) need to remove both bindings and use canonical rules native.bind( diff --git a/ci/do_ci.sh b/ci/do_ci.sh index d507d36993da0..7f10e6612ea02 100755 --- a/ci/do_ci.sh +++ b/ci/do_ci.sh @@ -368,14 +368,11 @@ elif [[ "$CI_TARGET" == "bazel.api" ]]; then "${ENVOY_SRCDIR}"/tools/api/validate_structure.py echo "Validate Golang protobuf generation..." "${ENVOY_SRCDIR}"/tools/api/generate_go_protobuf.py - echo "Testing API and API Boosting..." - bazel_with_collection test "${BAZEL_BUILD_OPTIONS[@]}" -c fastbuild @envoy_api_canonical//test/... @envoy_api_canonical//tools/... \ - @envoy_api_canonical//tools:tap2pcap_test @envoy_dev//clang_tools/api_booster/... + echo "Testing API..." 
+ bazel_with_collection test "${BAZEL_BUILD_OPTIONS[@]}" -c fastbuild @envoy_api//test/... @envoy_api//tools/... \ + @envoy_api//tools:tap2pcap_test echo "Building API..." - bazel build "${BAZEL_BUILD_OPTIONS[@]}" -c fastbuild @envoy_api_canonical//envoy/... - echo "Testing API boosting (golden C++ tests)..." - # We use custom BAZEL_BUILD_OPTIONS here; the API booster isn't capable of working with libc++ yet. - BAZEL_BUILD_OPTIONS="${BAZEL_BUILD_OPTIONS[*]}" python3.8 "${ENVOY_SRCDIR}"/tools/api_boost/api_boost_test.py + bazel build "${BAZEL_BUILD_OPTIONS[@]}" -c fastbuild @envoy_api//envoy/... exit 0 elif [[ "$CI_TARGET" == "bazel.api_compat" ]]; then echo "Building buf..." @@ -481,9 +478,6 @@ elif [[ "$CI_TARGET" == "tooling" ]]; then echo "Run protoxform test" BAZEL_BUILD_OPTIONS="${BAZEL_BUILD_OPTIONS[*]}" ./tools/protoxform/protoxform_test.sh - echo "Run merge active shadow test" - bazel test "${BAZEL_BUILD_OPTIONS[@]}" //tools/protoxform:merge_active_shadow_test - echo "check_format_test..." 
"${ENVOY_SRCDIR}"/tools/code_format/check_format_test_helper.sh --log=WARN diff --git a/docs/BUILD b/docs/BUILD index 51f3e2b4a1cd3..ca6cff03defa8 100644 --- a/docs/BUILD +++ b/docs/BUILD @@ -132,8 +132,8 @@ genrule( name = "external_deps_rst", srcs = [ "//bazel:repository_locations.bzl", - "@envoy_api_canonical//bazel:repository_locations.bzl", - "@envoy_api_canonical//bazel:repository_locations_utils.bzl", + "@envoy_api//bazel:repository_locations.bzl", + "@envoy_api//bazel:repository_locations_utils.bzl", ], outs = ["external_deps_rst.tar"], cmd = "$(location //tools/docs:generate_external_deps_rst) $@", @@ -142,8 +142,8 @@ genrule( genquery( name = "v3_proto_srcs", - expression = "labels(srcs, labels(deps, @envoy_api_canonical//:v3_protos))", - scope = ["@envoy_api_canonical//:v3_protos"], + expression = "labels(srcs, labels(deps, @envoy_api//:v3_protos))", + scope = ["@envoy_api//:v3_protos"], ) genrule( diff --git a/docs/root/operations/traffic_tapping.rst b/docs/root/operations/traffic_tapping.rst index 9c95d7fad5e36..164b355f93f73 100644 --- a/docs/root/operations/traffic_tapping.rst +++ b/docs/root/operations/traffic_tapping.rst @@ -113,7 +113,7 @@ analysis with tools such as `Wireshark `_ with the .. code-block:: bash - bazel run @envoy_api_canonical//tools:tap2pcap /some/tap/path_0.pb path_0.pcap + bazel run @envoy_api//tools:tap2pcap /some/tap/path_0.pb path_0.pcap tshark -r path_0.pcap -d "tcp.port==10000,http2" -P 1 0.000000 127.0.0.1 → 127.0.0.1 HTTP2 157 Magic, SETTINGS, WINDOW_UPDATE, HEADERS 2 0.013713 127.0.0.1 → 127.0.0.1 HTTP2 91 SETTINGS, SETTINGS, WINDOW_UPDATE diff --git a/generated_api_shadow/BUILD b/generated_api_shadow/BUILD deleted file mode 100644 index 93f9184a2b400..0000000000000 --- a/generated_api_shadow/BUILD +++ /dev/null @@ -1,268 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@rules_proto//proto:defs.bzl", "proto_library") - -licenses(["notice"]) # Apache 2 - -proto_library( - name = "v2_protos", - visibility = ["//visibility:public"], - deps = [ - "//envoy/admin/v2alpha:pkg", - "//envoy/api/v2:pkg", - "//envoy/api/v2/auth:pkg", - "//envoy/api/v2/cluster:pkg", - "//envoy/api/v2/core:pkg", - "//envoy/api/v2/endpoint:pkg", - "//envoy/api/v2/listener:pkg", - "//envoy/api/v2/ratelimit:pkg", - "//envoy/api/v2/route:pkg", - "//envoy/config/bootstrap/v2:pkg", - "//envoy/config/filter/accesslog/v2:pkg", - "//envoy/config/filter/fault/v2:pkg", - "//envoy/config/filter/network/http_connection_manager/v2:pkg", - "//envoy/config/filter/network/redis_proxy/v2:pkg", - "//envoy/config/filter/network/tcp_proxy/v2:pkg", - "//envoy/config/filter/network/thrift_proxy/v2alpha1:pkg", - "//envoy/config/filter/thrift/router/v2alpha1:pkg", - "//envoy/config/health_checker/redis/v2:pkg", - "//envoy/config/listener/v2:pkg", - "//envoy/config/metrics/v2:pkg", - "//envoy/config/overload/v2alpha:pkg", - "//envoy/config/resource_monitor/fixed_heap/v2alpha:pkg", - "//envoy/config/resource_monitor/injected_resource/v2alpha:pkg", - "//envoy/config/retry/omit_canary_hosts/v2:pkg", - "//envoy/config/retry/previous_hosts/v2:pkg", - "//envoy/config/trace/v2:pkg", - "//envoy/config/trace/v2alpha:pkg", - "//envoy/config/transport_socket/alts/v2alpha:pkg", - "//envoy/data/accesslog/v2:pkg", - "//envoy/data/tap/v2alpha:pkg", - "//envoy/service/accesslog/v2:pkg", - "//envoy/service/auth/v2:pkg", - "//envoy/service/discovery/v2:pkg", - "//envoy/service/load_stats/v2:pkg", - "//envoy/service/metrics/v2:pkg", - "//envoy/service/ratelimit/v2:pkg", - "//envoy/service/status/v2:pkg", - "//envoy/service/tap/v2alpha:pkg", - "//envoy/type:pkg", - "//envoy/type/matcher:pkg", - "//envoy/type/metadata/v2:pkg", - "//envoy/type/tracing/v2:pkg", - ], -) - -proto_library( - name = "v3_protos", - visibility = ["//visibility:public"], - deps = [ - 
"//contrib/envoy/extensions/filters/http/squash/v3:pkg", - "//contrib/envoy/extensions/filters/http/sxg/v3alpha:pkg", - "//contrib/envoy/extensions/filters/network/kafka_broker/v3:pkg", - "//contrib/envoy/extensions/filters/network/kafka_mesh/v3alpha:pkg", - "//contrib/envoy/extensions/filters/network/mysql_proxy/v3:pkg", - "//contrib/envoy/extensions/filters/network/postgres_proxy/v3alpha:pkg", - "//contrib/envoy/extensions/filters/network/rocketmq_proxy/v3:pkg", - "//envoy/admin/v3:pkg", - "//envoy/config/accesslog/v3:pkg", - "//envoy/config/bootstrap/v3:pkg", - "//envoy/config/cluster/v3:pkg", - "//envoy/config/common/matcher/v3:pkg", - "//envoy/config/core/v3:pkg", - "//envoy/config/endpoint/v3:pkg", - "//envoy/config/filter/thrift/router/v2alpha1:pkg", - "//envoy/config/grpc_credential/v3:pkg", - "//envoy/config/health_checker/redis/v2:pkg", - "//envoy/config/listener/v3:pkg", - "//envoy/config/metrics/v3:pkg", - "//envoy/config/overload/v3:pkg", - "//envoy/config/ratelimit/v3:pkg", - "//envoy/config/rbac/v3:pkg", - "//envoy/config/resource_monitor/fixed_heap/v2alpha:pkg", - "//envoy/config/resource_monitor/injected_resource/v2alpha:pkg", - "//envoy/config/retry/omit_canary_hosts/v2:pkg", - "//envoy/config/retry/previous_hosts/v2:pkg", - "//envoy/config/route/v3:pkg", - "//envoy/config/tap/v3:pkg", - "//envoy/config/trace/v3:pkg", - "//envoy/data/accesslog/v3:pkg", - "//envoy/data/cluster/v3:pkg", - "//envoy/data/core/v3:pkg", - "//envoy/data/dns/v3:pkg", - "//envoy/data/tap/v3:pkg", - "//envoy/extensions/access_loggers/file/v3:pkg", - "//envoy/extensions/access_loggers/grpc/v3:pkg", - "//envoy/extensions/access_loggers/open_telemetry/v3alpha:pkg", - "//envoy/extensions/access_loggers/stream/v3:pkg", - "//envoy/extensions/access_loggers/wasm/v3:pkg", - "//envoy/extensions/cache/simple_http_cache/v3alpha:pkg", - "//envoy/extensions/clusters/aggregate/v3:pkg", - "//envoy/extensions/clusters/dynamic_forward_proxy/v3:pkg", - 
"//envoy/extensions/clusters/redis/v3:pkg", - "//envoy/extensions/common/dynamic_forward_proxy/v3:pkg", - "//envoy/extensions/common/key_value/v3:pkg", - "//envoy/extensions/common/matching/v3:pkg", - "//envoy/extensions/common/ratelimit/v3:pkg", - "//envoy/extensions/common/tap/v3:pkg", - "//envoy/extensions/compression/brotli/compressor/v3:pkg", - "//envoy/extensions/compression/brotli/decompressor/v3:pkg", - "//envoy/extensions/compression/gzip/compressor/v3:pkg", - "//envoy/extensions/compression/gzip/decompressor/v3:pkg", - "//envoy/extensions/filters/common/dependency/v3:pkg", - "//envoy/extensions/filters/common/fault/v3:pkg", - "//envoy/extensions/filters/common/matcher/action/v3:pkg", - "//envoy/extensions/filters/http/adaptive_concurrency/v3:pkg", - "//envoy/extensions/filters/http/admission_control/v3alpha:pkg", - "//envoy/extensions/filters/http/alternate_protocols_cache/v3:pkg", - "//envoy/extensions/filters/http/aws_lambda/v3:pkg", - "//envoy/extensions/filters/http/aws_request_signing/v3:pkg", - "//envoy/extensions/filters/http/bandwidth_limit/v3alpha:pkg", - "//envoy/extensions/filters/http/buffer/v3:pkg", - "//envoy/extensions/filters/http/cache/v3alpha:pkg", - "//envoy/extensions/filters/http/cdn_loop/v3alpha:pkg", - "//envoy/extensions/filters/http/composite/v3:pkg", - "//envoy/extensions/filters/http/compressor/v3:pkg", - "//envoy/extensions/filters/http/cors/v3:pkg", - "//envoy/extensions/filters/http/csrf/v3:pkg", - "//envoy/extensions/filters/http/decompressor/v3:pkg", - "//envoy/extensions/filters/http/dynamic_forward_proxy/v3:pkg", - "//envoy/extensions/filters/http/dynamo/v3:pkg", - "//envoy/extensions/filters/http/ext_authz/v3:pkg", - "//envoy/extensions/filters/http/ext_proc/v3alpha:pkg", - "//envoy/extensions/filters/http/fault/v3:pkg", - "//envoy/extensions/filters/http/grpc_http1_bridge/v3:pkg", - "//envoy/extensions/filters/http/grpc_http1_reverse_bridge/v3:pkg", - "//envoy/extensions/filters/http/grpc_json_transcoder/v3:pkg", - 
"//envoy/extensions/filters/http/grpc_stats/v3:pkg", - "//envoy/extensions/filters/http/grpc_web/v3:pkg", - "//envoy/extensions/filters/http/gzip/v3:pkg", - "//envoy/extensions/filters/http/header_to_metadata/v3:pkg", - "//envoy/extensions/filters/http/health_check/v3:pkg", - "//envoy/extensions/filters/http/ip_tagging/v3:pkg", - "//envoy/extensions/filters/http/jwt_authn/v3:pkg", - "//envoy/extensions/filters/http/kill_request/v3:pkg", - "//envoy/extensions/filters/http/local_ratelimit/v3:pkg", - "//envoy/extensions/filters/http/lua/v3:pkg", - "//envoy/extensions/filters/http/oauth2/v3alpha:pkg", - "//envoy/extensions/filters/http/on_demand/v3:pkg", - "//envoy/extensions/filters/http/original_src/v3:pkg", - "//envoy/extensions/filters/http/ratelimit/v3:pkg", - "//envoy/extensions/filters/http/rbac/v3:pkg", - "//envoy/extensions/filters/http/router/v3:pkg", - "//envoy/extensions/filters/http/set_metadata/v3:pkg", - "//envoy/extensions/filters/http/tap/v3:pkg", - "//envoy/extensions/filters/http/wasm/v3:pkg", - "//envoy/extensions/filters/listener/http_inspector/v3:pkg", - "//envoy/extensions/filters/listener/original_dst/v3:pkg", - "//envoy/extensions/filters/listener/original_src/v3:pkg", - "//envoy/extensions/filters/listener/proxy_protocol/v3:pkg", - "//envoy/extensions/filters/listener/tls_inspector/v3:pkg", - "//envoy/extensions/filters/network/client_ssl_auth/v3:pkg", - "//envoy/extensions/filters/network/connection_limit/v3:pkg", - "//envoy/extensions/filters/network/direct_response/v3:pkg", - "//envoy/extensions/filters/network/dubbo_proxy/router/v3:pkg", - "//envoy/extensions/filters/network/dubbo_proxy/v3:pkg", - "//envoy/extensions/filters/network/echo/v3:pkg", - "//envoy/extensions/filters/network/ext_authz/v3:pkg", - "//envoy/extensions/filters/network/http_connection_manager/v3:pkg", - "//envoy/extensions/filters/network/local_ratelimit/v3:pkg", - "//envoy/extensions/filters/network/mongo_proxy/v3:pkg", - 
"//envoy/extensions/filters/network/ratelimit/v3:pkg", - "//envoy/extensions/filters/network/rbac/v3:pkg", - "//envoy/extensions/filters/network/redis_proxy/v3:pkg", - "//envoy/extensions/filters/network/sni_cluster/v3:pkg", - "//envoy/extensions/filters/network/sni_dynamic_forward_proxy/v3alpha:pkg", - "//envoy/extensions/filters/network/tcp_proxy/v3:pkg", - "//envoy/extensions/filters/network/thrift_proxy/filters/ratelimit/v3:pkg", - "//envoy/extensions/filters/network/thrift_proxy/router/v3:pkg", - "//envoy/extensions/filters/network/thrift_proxy/v3:pkg", - "//envoy/extensions/filters/network/wasm/v3:pkg", - "//envoy/extensions/filters/network/zookeeper_proxy/v3:pkg", - "//envoy/extensions/filters/udp/dns_filter/v3alpha:pkg", - "//envoy/extensions/filters/udp/udp_proxy/v3:pkg", - "//envoy/extensions/formatter/metadata/v3:pkg", - "//envoy/extensions/formatter/req_without_query/v3:pkg", - "//envoy/extensions/health_checkers/redis/v3:pkg", - "//envoy/extensions/http/header_formatters/preserve_case/v3:pkg", - "//envoy/extensions/http/original_ip_detection/custom_header/v3:pkg", - "//envoy/extensions/http/original_ip_detection/xff/v3:pkg", - "//envoy/extensions/internal_redirect/allow_listed_routes/v3:pkg", - "//envoy/extensions/internal_redirect/previous_routes/v3:pkg", - "//envoy/extensions/internal_redirect/safe_cross_scheme/v3:pkg", - "//envoy/extensions/key_value/file_based/v3:pkg", - "//envoy/extensions/matching/common_inputs/environment_variable/v3:pkg", - "//envoy/extensions/matching/input_matchers/consistent_hashing/v3:pkg", - "//envoy/extensions/matching/input_matchers/ip/v3:pkg", - "//envoy/extensions/network/socket_interface/v3:pkg", - "//envoy/extensions/quic/crypto_stream/v3:pkg", - "//envoy/extensions/quic/proof_source/v3:pkg", - "//envoy/extensions/rate_limit_descriptors/expr/v3:pkg", - "//envoy/extensions/request_id/uuid/v3:pkg", - "//envoy/extensions/resource_monitors/fixed_heap/v3:pkg", - 
"//envoy/extensions/resource_monitors/injected_resource/v3:pkg", - "//envoy/extensions/retry/host/omit_canary_hosts/v3:pkg", - "//envoy/extensions/retry/host/omit_host_metadata/v3:pkg", - "//envoy/extensions/retry/host/previous_hosts/v3:pkg", - "//envoy/extensions/retry/priority/previous_priorities/v3:pkg", - "//envoy/extensions/stat_sinks/graphite_statsd/v3:pkg", - "//envoy/extensions/stat_sinks/wasm/v3:pkg", - "//envoy/extensions/transport_sockets/alts/v3:pkg", - "//envoy/extensions/transport_sockets/proxy_protocol/v3:pkg", - "//envoy/extensions/transport_sockets/quic/v3:pkg", - "//envoy/extensions/transport_sockets/raw_buffer/v3:pkg", - "//envoy/extensions/transport_sockets/s2a/v3alpha:pkg", - "//envoy/extensions/transport_sockets/starttls/v3:pkg", - "//envoy/extensions/transport_sockets/tap/v3:pkg", - "//envoy/extensions/transport_sockets/tls/v3:pkg", - "//envoy/extensions/upstreams/http/generic/v3:pkg", - "//envoy/extensions/upstreams/http/http/v3:pkg", - "//envoy/extensions/upstreams/http/tcp/v3:pkg", - "//envoy/extensions/upstreams/http/v3:pkg", - "//envoy/extensions/upstreams/tcp/generic/v3:pkg", - "//envoy/extensions/wasm/v3:pkg", - "//envoy/extensions/watchdog/profile_action/v3alpha:pkg", - "//envoy/service/accesslog/v3:pkg", - "//envoy/service/auth/v3:pkg", - "//envoy/service/cluster/v3:pkg", - "//envoy/service/discovery/v3:pkg", - "//envoy/service/endpoint/v3:pkg", - "//envoy/service/event_reporting/v3:pkg", - "//envoy/service/ext_proc/v3alpha:pkg", - "//envoy/service/extension/v3:pkg", - "//envoy/service/health/v3:pkg", - "//envoy/service/listener/v3:pkg", - "//envoy/service/load_stats/v3:pkg", - "//envoy/service/metrics/v3:pkg", - "//envoy/service/ratelimit/v3:pkg", - "//envoy/service/route/v3:pkg", - "//envoy/service/runtime/v3:pkg", - "//envoy/service/secret/v3:pkg", - "//envoy/service/status/v3:pkg", - "//envoy/service/tap/v3:pkg", - "//envoy/service/trace/v3:pkg", - "//envoy/type/http/v3:pkg", - "//envoy/type/matcher/v3:pkg", - 
"//envoy/type/metadata/v3:pkg", - "//envoy/type/tracing/v3:pkg", - "//envoy/type/v3:pkg", - "//envoy/watchdog/v3alpha:pkg", - ], -) - -proto_library( - name = "all_protos", - visibility = ["//visibility:public"], - deps = [ - ":v2_protos", - ":v3_protos", - ], -) - -filegroup( - name = "proto_breaking_change_detector_buf_config", - srcs = [ - "buf.yaml", - ], - visibility = ["//visibility:public"], -) diff --git a/generated_api_shadow/README.md b/generated_api_shadow/README.md deleted file mode 100644 index 04633c218a7c4..0000000000000 --- a/generated_api_shadow/README.md +++ /dev/null @@ -1,6 +0,0 @@ -This directory is for generated Envoy internal artifacts (via `proto_format`). - -Do not hand edit any file under `envoy/`. This shadow API may only be used in -the Envoy source tree. - -The `bazel/` tree is a symlink back to the official API Bazel rules. diff --git a/generated_api_shadow/bazel/BUILD b/generated_api_shadow/bazel/BUILD deleted file mode 100644 index 0e5c8aea75b01..0000000000000 --- a/generated_api_shadow/bazel/BUILD +++ /dev/null @@ -1,17 +0,0 @@ -load("@io_bazel_rules_go//proto:compiler.bzl", "go_proto_compiler") - -licenses(["notice"]) # Apache 2 - -exports_files([ - "repository_locations.bzl", - "repository_locations_utils.bzl", -]) - -go_proto_compiler( - name = "pgv_plugin_go", - options = ["lang=go"], - plugin = "@com_envoyproxy_protoc_gen_validate//:protoc-gen-validate", - suffix = ".pb.validate.go", - valid_archive = False, - visibility = ["//visibility:public"], -) diff --git a/generated_api_shadow/bazel/api_build_system.bzl b/generated_api_shadow/bazel/api_build_system.bzl deleted file mode 100644 index 8a0e0bf71021e..0000000000000 --- a/generated_api_shadow/bazel/api_build_system.bzl +++ /dev/null @@ -1,199 +0,0 @@ -load("@rules_cc//cc:defs.bzl", "cc_test") -load("@com_envoyproxy_protoc_gen_validate//bazel:pgv_proto_library.bzl", "pgv_cc_proto_library") -load("@com_github_grpc_grpc//bazel:cc_grpc_library.bzl", "cc_grpc_library") 
-load("@com_google_protobuf//:protobuf.bzl", _py_proto_library = "py_proto_library") -load("@io_bazel_rules_go//proto:def.bzl", "go_proto_library") -load("@io_bazel_rules_go//go:def.bzl", "go_test") -load("@rules_proto//proto:defs.bzl", "proto_library") -load( - "//bazel:external_proto_deps.bzl", - "EXTERNAL_PROTO_CC_BAZEL_DEP_MAP", - "EXTERNAL_PROTO_GO_BAZEL_DEP_MAP", - "EXTERNAL_PROTO_PY_BAZEL_DEP_MAP", -) - -_PY_PROTO_SUFFIX = "_py_proto" -_CC_PROTO_SUFFIX = "_cc_proto" -_CC_GRPC_SUFFIX = "_cc_grpc" -_GO_PROTO_SUFFIX = "_go_proto" -_GO_IMPORTPATH_PREFIX = "github.com/envoyproxy/go-control-plane/" - -_COMMON_PROTO_DEPS = [ - "@com_google_protobuf//:any_proto", - "@com_google_protobuf//:descriptor_proto", - "@com_google_protobuf//:duration_proto", - "@com_google_protobuf//:empty_proto", - "@com_google_protobuf//:struct_proto", - "@com_google_protobuf//:timestamp_proto", - "@com_google_protobuf//:wrappers_proto", - "@com_google_googleapis//google/api:http_proto", - "@com_google_googleapis//google/api:httpbody_proto", - "@com_google_googleapis//google/api:annotations_proto", - "@com_google_googleapis//google/rpc:status_proto", - "@com_envoyproxy_protoc_gen_validate//validate:validate_proto", -] - -def _proto_mapping(dep, proto_dep_map, proto_suffix): - mapped = proto_dep_map.get(dep) - if mapped == None: - prefix = "@" + Label(dep).workspace_name if not dep.startswith("//") else "" - return prefix + "//" + Label(dep).package + ":" + Label(dep).name + proto_suffix - return mapped - -def _go_proto_mapping(dep): - return _proto_mapping(dep, EXTERNAL_PROTO_GO_BAZEL_DEP_MAP, _GO_PROTO_SUFFIX) - -def _cc_proto_mapping(dep): - return _proto_mapping(dep, EXTERNAL_PROTO_CC_BAZEL_DEP_MAP, _CC_PROTO_SUFFIX) - -def _py_proto_mapping(dep): - return _proto_mapping(dep, EXTERNAL_PROTO_PY_BAZEL_DEP_MAP, _PY_PROTO_SUFFIX) - -# TODO(htuch): Convert this to native py_proto_library once -# https://github.com/bazelbuild/bazel/issues/3935 and/or -# 
https://github.com/bazelbuild/bazel/issues/2626 are resolved. -def _api_py_proto_library(name, srcs = [], deps = []): - _py_proto_library( - name = name + _PY_PROTO_SUFFIX, - srcs = srcs, - default_runtime = "@com_google_protobuf//:protobuf_python", - protoc = "@com_google_protobuf//:protoc", - deps = [_py_proto_mapping(dep) for dep in deps] + [ - "@com_envoyproxy_protoc_gen_validate//validate:validate_py", - "@com_google_googleapis//google/rpc:status_py_proto", - "@com_google_googleapis//google/api:annotations_py_proto", - "@com_google_googleapis//google/api:http_py_proto", - "@com_google_googleapis//google/api:httpbody_py_proto", - ], - visibility = ["//visibility:public"], - ) - -# This defines googleapis py_proto_library. The repository does not provide its definition and requires -# overriding it in the consuming project (see https://github.com/grpc/grpc/issues/19255 for more details). -def py_proto_library(name, deps = [], plugin = None): - srcs = [dep[:-6] + ".proto" if dep.endswith("_proto") else dep for dep in deps] - proto_deps = [] - - # py_proto_library in googleapis specifies *_proto rules in dependencies. - # By rewriting *_proto to *.proto above, the dependencies in *_proto rules are not preserved. - # As a workaround, manually specify the proto dependencies for the imported python rules. - if name == "annotations_py_proto": - proto_deps = proto_deps + [":http_py_proto"] - - # checked.proto depends on syntax.proto, we have to add this dependency manually as well. - if name == "checked_py_proto": - proto_deps = proto_deps + [":syntax_py_proto"] - - # py_proto_library does not support plugin as an argument yet at gRPC v1.25.0: - # https://github.com/grpc/grpc/blob/v1.25.0/bazel/python_rules.bzl#L72. - # plugin should also be passed in here when gRPC version is greater than v1.25.x. 
- _py_proto_library( - name = name, - srcs = srcs, - default_runtime = "@com_google_protobuf//:protobuf_python", - protoc = "@com_google_protobuf//:protoc", - deps = proto_deps + ["@com_google_protobuf//:protobuf_python"], - visibility = ["//visibility:public"], - ) - -def _api_cc_grpc_library(name, proto, deps = []): - cc_grpc_library( - name = name, - srcs = [proto], - deps = deps, - proto_only = False, - grpc_only = True, - visibility = ["//visibility:public"], - ) - -def api_cc_py_proto_library( - name, - visibility = ["//visibility:private"], - srcs = [], - deps = [], - linkstatic = 0, - has_services = 0): - relative_name = ":" + name - proto_library( - name = name, - srcs = srcs, - deps = deps + _COMMON_PROTO_DEPS, - visibility = visibility, - ) - cc_proto_library_name = name + _CC_PROTO_SUFFIX - pgv_cc_proto_library( - name = cc_proto_library_name, - linkstatic = linkstatic, - cc_deps = [_cc_proto_mapping(dep) for dep in deps] + [ - "@com_google_googleapis//google/api:http_cc_proto", - "@com_google_googleapis//google/api:httpbody_cc_proto", - "@com_google_googleapis//google/api:annotations_cc_proto", - "@com_google_googleapis//google/rpc:status_cc_proto", - ], - deps = [relative_name], - visibility = ["//visibility:public"], - ) - _api_py_proto_library(name, srcs, deps) - - # Optionally define gRPC services - if has_services: - # TODO: when Python services are required, add to the below stub generations. 
- cc_grpc_name = name + _CC_GRPC_SUFFIX - cc_proto_deps = [cc_proto_library_name] + [_cc_proto_mapping(dep) for dep in deps] - _api_cc_grpc_library(name = cc_grpc_name, proto = relative_name, deps = cc_proto_deps) - -def api_cc_test(name, **kwargs): - cc_test( - name = name, - **kwargs - ) - -def api_go_test(name, **kwargs): - go_test( - name = name, - **kwargs - ) - -def api_proto_package( - name = "pkg", - srcs = [], - deps = [], - has_services = False, - visibility = ["//visibility:public"]): - if srcs == []: - srcs = native.glob(["*.proto"]) - - name = "pkg" - api_cc_py_proto_library( - name = name, - visibility = visibility, - srcs = srcs, - deps = deps, - has_services = has_services, - ) - - compilers = ["@io_bazel_rules_go//proto:go_proto", "@envoy_api//bazel:pgv_plugin_go"] - if has_services: - compilers = ["@io_bazel_rules_go//proto:go_grpc", "@envoy_api//bazel:pgv_plugin_go"] - - # Because RBAC proro depends on googleapis syntax.proto and checked.proto, - # which share the same go proto library, it causes duplicative dependencies. - # Thus, we use depset().to_list() to remove duplicated depenencies. 
- go_proto_library( - name = name + _GO_PROTO_SUFFIX, - compilers = compilers, - importpath = _GO_IMPORTPATH_PREFIX + native.package_name(), - proto = name, - visibility = ["//visibility:public"], - deps = depset([_go_proto_mapping(dep) for dep in deps] + [ - "@com_envoyproxy_protoc_gen_validate//validate:go_default_library", - "@com_github_golang_protobuf//ptypes:go_default_library_gen", - "@go_googleapis//google/api:annotations_go_proto", - "@go_googleapis//google/rpc:status_go_proto", - "@io_bazel_rules_go//proto/wkt:any_go_proto", - "@io_bazel_rules_go//proto/wkt:duration_go_proto", - "@io_bazel_rules_go//proto/wkt:struct_go_proto", - "@io_bazel_rules_go//proto/wkt:timestamp_go_proto", - "@io_bazel_rules_go//proto/wkt:wrappers_go_proto", - ]).to_list(), - ) diff --git a/generated_api_shadow/bazel/envoy_http_archive.bzl b/generated_api_shadow/bazel/envoy_http_archive.bzl deleted file mode 100644 index 15fd65b2af278..0000000000000 --- a/generated_api_shadow/bazel/envoy_http_archive.bzl +++ /dev/null @@ -1,22 +0,0 @@ -load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive") - -def envoy_http_archive(name, locations, **kwargs): - # `existing_rule_keys` contains the names of repositories that have already - # been defined in the Bazel workspace. By skipping repos with existing keys, - # users can override dependency versions by using standard Bazel repository - # rules in their WORKSPACE files. - existing_rule_keys = native.existing_rules().keys() - if name in existing_rule_keys: - # This repository has already been defined, probably because the user - # wants to override the version. Do nothing. - return - location = locations[name] - - # HTTP tarball at a given URL. Add a BUILD file if requested. 
- http_archive( - name = name, - urls = location["urls"], - sha256 = location["sha256"], - strip_prefix = location.get("strip_prefix", ""), - **kwargs - ) diff --git a/generated_api_shadow/bazel/external_deps.bzl b/generated_api_shadow/bazel/external_deps.bzl deleted file mode 100644 index e8283e4fee106..0000000000000 --- a/generated_api_shadow/bazel/external_deps.bzl +++ /dev/null @@ -1,143 +0,0 @@ -load("@envoy_api//bazel:repository_locations_utils.bzl", "load_repository_locations_spec") - -# Envoy dependencies may be annotated with the following attributes: -DEPENDENCY_ANNOTATIONS = [ - # Attribute specifying CPE (Common Platform Enumeration, see https://nvd.nist.gov/products/cpe) ID - # of the dependency. The ID may be in v2.3 or v2.2 format, although v2.3 is prefferred. See - # https://nvd.nist.gov/products/cpe for CPE format. Use single wildcard '*' for version and vector elements - # i.e. 'cpe:2.3:a:nghttp2:nghttp2:*'. Use "N/A" for dependencies without CPE assigned. - # This attribute is optional for components with use categories listed in the - # USE_CATEGORIES_WITH_CPE_OPTIONAL - "cpe", - - # A list of extensions when 'use_category' contains 'dataplane_ext' or 'observability_ext'. - "extensions", - - # Additional dependencies loaded transitively via this dependency that are not tracked in - # Envoy (see the external dependency at the given version for information). - "implied_untracked_deps", - - # Project metadata. - "project_desc", - "project_name", - "project_url", - - # Reflects the UTC date (YYYY-MM-DD format) for the dependency release. This - # is when the dependency was updated in its repository. For dependencies - # that have releases, this is the date of the release. For dependencies - # without releases or for scenarios where we temporarily need to use a - # commit, this date should be the date of the commit in UTC. - "release_date", - - # List of the categories describing how the dependency is being used. 
This attribute is used - # for automatic tracking of security posture of Envoy's dependencies. - # Possible values are documented in the USE_CATEGORIES list below. - # This attribute is mandatory for each dependecy. - "use_category", - - # The dependency version. This may be either a tagged release (preferred) - # or git SHA (as an exception when no release tagged version is suitable). - "version", -] - -# NOTE: If a dependency use case is either dataplane or controlplane, the other uses are not needed -# to be declared. -USE_CATEGORIES = [ - # This dependency is used in API protos. - "api", - # This dependency is used in build process. - "build", - # This dependency is used to process xDS requests. - "controlplane", - # This dependency is used in processing downstream or upstream requests (core). - "dataplane_core", - # This dependency is used in processing downstream or upstream requests (extensions). - "dataplane_ext", - # This dependecy is used for logging, metrics or tracing (core). It may process unstrusted input. - "observability_core", - # This dependecy is used for logging, metrics or tracing (extensions). It may process unstrusted input. - "observability_ext", - # This dependency does not handle untrusted data and is used for various utility purposes. - "other", - # This dependency is used only in tests. - "test_only", - # Documentation generation - "docs", - # Developer tools (not used in build or docs) - "devtools", -] - -# Components with these use categories are not required to specify the 'cpe'. -USE_CATEGORIES_WITH_CPE_OPTIONAL = ["build", "other", "test_only", "api"] - -def _fail_missing_attribute(attr, key): - fail("The '%s' attribute must be defined for external dependecy " % attr + key) - -# Method for verifying content of the repository location specifications. -# -# We also remove repository metadata attributes so that further consumers, e.g. -# http_archive, are not confused by them. 
-def load_repository_locations(repository_locations_spec): - locations = {} - for key, location in load_repository_locations_spec(repository_locations_spec).items(): - mutable_location = dict(location) - locations[key] = mutable_location - - if "sha256" not in location or len(location["sha256"]) == 0: - _fail_missing_attribute("sha256", key) - - if "project_name" not in location: - _fail_missing_attribute("project_name", key) - - if "project_desc" not in location: - _fail_missing_attribute("project_desc", key) - - if "project_url" not in location: - _fail_missing_attribute("project_url", key) - project_url = location["project_url"] - if not project_url.startswith("https://") and not project_url.startswith("http://"): - fail("project_url must start with https:// or http://: " + project_url) - - if "version" not in location: - _fail_missing_attribute("version", key) - - if "use_category" not in location: - _fail_missing_attribute("use_category", key) - use_category = location["use_category"] - - if "dataplane_ext" in use_category or "observability_ext" in use_category: - if "extensions" not in location: - _fail_missing_attribute("extensions", key) - - if "release_date" not in location: - _fail_missing_attribute("release_date", key) - release_date = location["release_date"] - - # Starlark doesn't have regexes. - if len(release_date) != 10 or release_date[4] != "-" or release_date[7] != "-": - fail("release_date must match YYYY-DD-MM: " + release_date) - - if "cpe" in location: - cpe = location["cpe"] - - # Starlark doesn't have regexes. - cpe_components = len(cpe.split(":")) - - # We allow cpe:2.3:a:foo:*:* and cpe:2.3.:a:foo:bar:* only. 
- cpe_components_valid = (cpe_components == 6) - cpe_matches = (cpe == "N/A" or (cpe.startswith("cpe:2.3:a:") and cpe.endswith(":*") and cpe_components_valid)) - if not cpe_matches: - fail("CPE must match cpe:2.3:a:::*: " + cpe) - elif not [category for category in USE_CATEGORIES_WITH_CPE_OPTIONAL if category in location["use_category"]]: - _fail_missing_attribute("cpe", key) - - for category in location["use_category"]: - if category not in USE_CATEGORIES: - fail("Unknown use_category value '" + category + "' for dependecy " + key) - - # Remove any extra annotations that we add, so that we don't confuse http_archive etc. - for annotation in DEPENDENCY_ANNOTATIONS: - if annotation in mutable_location: - mutable_location.pop(annotation) - - return locations diff --git a/generated_api_shadow/bazel/external_proto_deps.bzl b/generated_api_shadow/bazel/external_proto_deps.bzl deleted file mode 100644 index 6b11495d3c0dc..0000000000000 --- a/generated_api_shadow/bazel/external_proto_deps.bzl +++ /dev/null @@ -1,48 +0,0 @@ -# Any external dependency imported in the api/ .protos requires entries in -# the maps below, to allow the Bazel proto and language specific bindings to be -# inferred from the import directives. -# -# This file needs to be interpreted as both Python 3 and Starlark, so only the -# common subset of Python should be used. - -# This maps from .proto import directive path to the Bazel dependency path for -# external dependencies. Since BUILD files are generated, this is the canonical -# place to define this mapping. 
-EXTERNAL_PROTO_IMPORT_BAZEL_DEP_MAP = { - "google/api/expr/v1alpha1/checked.proto": "@com_google_googleapis//google/api/expr/v1alpha1:checked_proto", - "google/api/expr/v1alpha1/syntax.proto": "@com_google_googleapis//google/api/expr/v1alpha1:syntax_proto", - "io/prometheus/client/metrics.proto": "@prometheus_metrics_model//:client_model", - "opencensus/proto/trace/v1/trace.proto": "@opencensus_proto//opencensus/proto/trace/v1:trace_proto", - "opencensus/proto/trace/v1/trace_config.proto": "@opencensus_proto//opencensus/proto/trace/v1:trace_config_proto", - "opentelemetry/proto/common/v1/common.proto": "@opentelemetry_proto//:common", -} - -# This maps from the Bazel proto_library target to the Go language binding target for external dependencies. -EXTERNAL_PROTO_GO_BAZEL_DEP_MAP = { - "@com_google_googleapis//google/api/expr/v1alpha1:checked_proto": "@com_google_googleapis//google/api/expr/v1alpha1:expr_go_proto", - "@com_google_googleapis//google/api/expr/v1alpha1:syntax_proto": "@com_google_googleapis//google/api/expr/v1alpha1:expr_go_proto", - "@opencensus_proto//opencensus/proto/trace/v1:trace_proto": "@opencensus_proto//opencensus/proto/trace/v1:trace_proto_go", - "@opencensus_proto//opencensus/proto/trace/v1:trace_config_proto": "@opencensus_proto//opencensus/proto/trace/v1:trace_and_config_proto_go", - "@opentelemetry_proto//:logs": "@opentelemetry_proto//:logs_go_proto", - "@opentelemetry_proto//:common": "@opentelemetry_proto//:common_go_proto", -} - -# This maps from the Bazel proto_library target to the C++ language binding target for external dependencies. 
-EXTERNAL_PROTO_CC_BAZEL_DEP_MAP = { - "@com_google_googleapis//google/api/expr/v1alpha1:checked_proto": "@com_google_googleapis//google/api/expr/v1alpha1:checked_cc_proto", - "@com_google_googleapis//google/api/expr/v1alpha1:syntax_proto": "@com_google_googleapis//google/api/expr/v1alpha1:syntax_cc_proto", - "@opencensus_proto//opencensus/proto/trace/v1:trace_proto": "@opencensus_proto//opencensus/proto/trace/v1:trace_proto_cc", - "@opencensus_proto//opencensus/proto/trace/v1:trace_config_proto": "@opencensus_proto//opencensus/proto/trace/v1:trace_config_proto_cc", - "@opentelemetry_proto//:logs": "@opentelemetry_proto//:logs_cc_proto", - "@opentelemetry_proto//:common": "@opentelemetry_proto//:common_cc_proto", -} - -# This maps from the Bazel proto_library target to the Python language binding target for external dependencies. -EXTERNAL_PROTO_PY_BAZEL_DEP_MAP = { - "@com_google_googleapis//google/api/expr/v1alpha1:checked_proto": "@com_google_googleapis//google/api/expr/v1alpha1:checked_py_proto", - "@com_google_googleapis//google/api/expr/v1alpha1:syntax_proto": "@com_google_googleapis//google/api/expr/v1alpha1:syntax_py_proto", - "@opencensus_proto//opencensus/proto/trace/v1:trace_proto": "@opencensus_proto//opencensus/proto/trace/v1:trace_proto_py", - "@opencensus_proto//opencensus/proto/trace/v1:trace_config_proto": "@opencensus_proto//opencensus/proto/trace/v1:trace_config_proto_py", - "@opentelemetry_proto//:logs": "@opentelemetry_proto//:logs_py_proto", - "@opentelemetry_proto//:common": "@opentelemetry_proto//:common_py_proto", -} diff --git a/generated_api_shadow/bazel/repositories.bzl b/generated_api_shadow/bazel/repositories.bzl deleted file mode 100644 index ef92aa45f0064..0000000000000 --- a/generated_api_shadow/bazel/repositories.bzl +++ /dev/null @@ -1,171 +0,0 @@ -load(":envoy_http_archive.bzl", "envoy_http_archive") -load(":external_deps.bzl", "load_repository_locations") -load(":repository_locations.bzl", "REPOSITORY_LOCATIONS_SPEC") - 
-REPOSITORY_LOCATIONS = load_repository_locations(REPOSITORY_LOCATIONS_SPEC) - -# Use this macro to reference any HTTP archive from bazel/repository_locations.bzl. -def external_http_archive(name, **kwargs): - envoy_http_archive( - name, - locations = REPOSITORY_LOCATIONS, - **kwargs - ) - -def api_dependencies(): - external_http_archive( - name = "bazel_skylib", - ) - external_http_archive( - name = "com_envoyproxy_protoc_gen_validate", - ) - external_http_archive( - name = "com_google_googleapis", - ) - external_http_archive( - name = "com_github_bazelbuild_buildtools", - ) - external_http_archive( - name = "com_github_cncf_udpa", - ) - - external_http_archive( - name = "prometheus_metrics_model", - build_file_content = PROMETHEUSMETRICS_BUILD_CONTENT, - ) - external_http_archive( - name = "opencensus_proto", - ) - external_http_archive( - name = "rules_proto", - ) - external_http_archive( - name = "com_github_openzipkin_zipkinapi", - build_file_content = ZIPKINAPI_BUILD_CONTENT, - ) - external_http_archive( - name = "opentelemetry_proto", - build_file_content = OPENTELEMETRY_LOGS_BUILD_CONTENT, - ) - external_http_archive( - name = "com_github_bufbuild_buf", - build_file_content = BUF_BUILD_CONTENT, - tags = ["manual"], - ) - -PROMETHEUSMETRICS_BUILD_CONTENT = """ -load("@envoy_api//bazel:api_build_system.bzl", "api_cc_py_proto_library") -load("@io_bazel_rules_go//proto:def.bzl", "go_proto_library") - -api_cc_py_proto_library( - name = "client_model", - srcs = [ - "io/prometheus/client/metrics.proto", - ], - visibility = ["//visibility:public"], -) - -go_proto_library( - name = "client_model_go_proto", - importpath = "github.com/prometheus/client_model/go", - proto = ":client_model", - visibility = ["//visibility:public"], -) -""" - -OPENCENSUSTRACE_BUILD_CONTENT = """ -load("@envoy_api//bazel:api_build_system.bzl", "api_cc_py_proto_library") -load("@io_bazel_rules_go//proto:def.bzl", "go_proto_library") - -api_cc_py_proto_library( - name = "trace_model", - srcs 
= [ - "trace.proto", - ], - visibility = ["//visibility:public"], -) - -go_proto_library( - name = "trace_model_go_proto", - importpath = "trace_model", - proto = ":trace_model", - visibility = ["//visibility:public"], -) -""" - -ZIPKINAPI_BUILD_CONTENT = """ - -load("@envoy_api//bazel:api_build_system.bzl", "api_cc_py_proto_library") -load("@io_bazel_rules_go//proto:def.bzl", "go_proto_library") - -api_cc_py_proto_library( - name = "zipkin", - srcs = [ - "zipkin-jsonv2.proto", - "zipkin.proto", - ], - visibility = ["//visibility:public"], -) - -go_proto_library( - name = "zipkin_go_proto", - proto = ":zipkin", - visibility = ["//visibility:public"], -) -""" - -OPENTELEMETRY_LOGS_BUILD_CONTENT = """ -load("@envoy_api//bazel:api_build_system.bzl", "api_cc_py_proto_library") -load("@io_bazel_rules_go//proto:def.bzl", "go_proto_library") - -api_cc_py_proto_library( - name = "common", - srcs = [ - "opentelemetry/proto/common/v1/common.proto", - ], - visibility = ["//visibility:public"], -) - -go_proto_library( - name = "common_go_proto", - importpath = "go.opentelemetry.io/proto/otlp/common/v1", - proto = ":common", - visibility = ["//visibility:public"], -) - -# TODO(snowp): Generating one Go package from all of these protos could cause problems in the future, -# but nothing references symbols from collector or resource so we're fine for now. 
-api_cc_py_proto_library( - name = "logs", - srcs = [ - "opentelemetry/proto/collector/logs/v1/logs_service.proto", - "opentelemetry/proto/logs/v1/logs.proto", - "opentelemetry/proto/resource/v1/resource.proto", - ], - deps = [ - "//:common", - ], - visibility = ["//visibility:public"], -) - -go_proto_library( - name = "logs_go_proto", - importpath = "go.opentelemetry.io/proto/otlp/logs/v1", - proto = ":logs", - visibility = ["//visibility:public"], -) -""" - -BUF_BUILD_CONTENT = """ -package( - default_visibility = ["//visibility:public"], -) - -filegroup( - name = "buf", - srcs = [ - "@com_github_bufbuild_buf//:bin/buf", - ], - tags = ["manual"], # buf is downloaded as a linux binary; tagged manual to prevent build for non-linux users -) -""" diff --git a/generated_api_shadow/bazel/repository_locations.bzl b/generated_api_shadow/bazel/repository_locations.bzl deleted file mode 100644 index be1e9c9789e4b..0000000000000 --- a/generated_api_shadow/bazel/repository_locations.bzl +++ /dev/null @@ -1,133 +0,0 @@ -# This should match the schema defined in external_deps.bzl. 
-REPOSITORY_LOCATIONS_SPEC = dict( - bazel_skylib = dict( - project_name = "bazel-skylib", - project_desc = "Common useful functions and rules for Bazel", - project_url = "https://github.com/bazelbuild/bazel-skylib", - version = "1.0.3", - sha256 = "1c531376ac7e5a180e0237938a2536de0c54d93f5c278634818e0efc952dd56c", - release_date = "2020-08-27", - urls = ["https://github.com/bazelbuild/bazel-skylib/releases/download/{version}/bazel-skylib-{version}.tar.gz"], - use_category = ["api"], - ), - com_envoyproxy_protoc_gen_validate = dict( - project_name = "protoc-gen-validate (PGV)", - project_desc = "protoc plugin to generate polyglot message validators", - project_url = "https://github.com/envoyproxy/protoc-gen-validate", - version = "0.6.1", - sha256 = "c695fc5a2e5a1b52904cd8a58ce7a1c3a80f7f50719496fd606e551685c01101", - release_date = "2021-04-26", - strip_prefix = "protoc-gen-validate-{version}", - urls = ["https://github.com/envoyproxy/protoc-gen-validate/archive/v{version}.tar.gz"], - use_category = ["api"], - implied_untracked_deps = [ - "com_github_iancoleman_strcase", - "com_github_lyft_protoc_gen_star", - "com_github_spf13_afero", - "org_golang_google_genproto", - "org_golang_x_text", - ], - ), - com_github_bazelbuild_buildtools = dict( - project_name = "Bazel build tools", - project_desc = "Developer tools for working with Google's bazel buildtool.", - project_url = "https://github.com/bazelbuild/buildtools", - version = "4.0.1", - sha256 = "c28eef4d30ba1a195c6837acf6c75a4034981f5b4002dda3c5aa6e48ce023cf1", - release_date = "2021-03-01", - strip_prefix = "buildtools-{version}", - urls = ["https://github.com/bazelbuild/buildtools/archive/{version}.tar.gz"], - use_category = ["api"], - ), - com_github_cncf_udpa = dict( - project_name = "xDS API", - project_desc = "xDS API Working Group (xDS-WG)", - project_url = "https://github.com/cncf/xds", - # During the UDPA -> xDS migration, we aren't working with releases. 
- version = "dd25fe81a44506ab21ea666fb70b3b1c4bb183ee", - sha256 = "9184235cd31272679e4c7f9232c341d4ea75351ded74d3fbba28b05c290bfa71", - release_date = "2021-07-22", - strip_prefix = "xds-{version}", - urls = ["https://github.com/cncf/xds/archive/{version}.tar.gz"], - use_category = ["api"], - ), - com_github_openzipkin_zipkinapi = dict( - project_name = "Zipkin API", - project_desc = "Zipkin's language independent model and HTTP Api Definitions", - project_url = "https://github.com/openzipkin/zipkin-api", - version = "1.0.0", - sha256 = "6c8ee2014cf0746ba452e5f2c01f038df60e85eb2d910b226f9aa27ddc0e44cf", - release_date = "2020-11-22", - strip_prefix = "zipkin-api-{version}", - urls = ["https://github.com/openzipkin/zipkin-api/archive/{version}.tar.gz"], - use_category = ["api"], - ), - com_google_googleapis = dict( - # TODO(dio): Consider writing a Starlark macro for importing Google API proto. - project_name = "Google APIs", - project_desc = "Public interface definitions of Google APIs", - project_url = "https://github.com/googleapis/googleapis", - version = "82944da21578a53b74e547774cf62ed31a05b841", - sha256 = "a45019af4d3290f02eaeb1ce10990166978c807cb33a9692141a076ba46d1405", - release_date = "2019-12-02", - strip_prefix = "googleapis-{version}", - urls = ["https://github.com/googleapis/googleapis/archive/{version}.tar.gz"], - use_category = ["api"], - ), - opencensus_proto = dict( - project_name = "OpenCensus Proto", - project_desc = "Language Independent Interface Types For OpenCensus", - project_url = "https://github.com/census-instrumentation/opencensus-proto", - version = "0.3.0", - sha256 = "b7e13f0b4259e80c3070b583c2f39e53153085a6918718b1c710caf7037572b0", - release_date = "2020-07-21", - strip_prefix = "opencensus-proto-{version}/src", - urls = ["https://github.com/census-instrumentation/opencensus-proto/archive/v{version}.tar.gz"], - use_category = ["api"], - ), - prometheus_metrics_model = dict( - project_name = "Prometheus client model", - 
project_desc = "Data model artifacts for Prometheus", - project_url = "https://github.com/prometheus/client_model", - version = "147c58e9608a4f9628b53b6cc863325ca746f63a", - sha256 = "f7da30879dcdfae367fa65af1969945c3148cfbfc462b30b7d36f17134675047", - release_date = "2021-06-07", - strip_prefix = "client_model-{version}", - urls = ["https://github.com/prometheus/client_model/archive/{version}.tar.gz"], - use_category = ["api"], - ), - rules_proto = dict( - project_name = "Protobuf Rules for Bazel", - project_desc = "Protocol buffer rules for Bazel", - project_url = "https://github.com/bazelbuild/rules_proto", - version = "f7a30f6f80006b591fa7c437fe5a951eb10bcbcf", - sha256 = "9fc210a34f0f9e7cc31598d109b5d069ef44911a82f507d5a88716db171615a8", - release_date = "2021-02-09", - strip_prefix = "rules_proto-{version}", - urls = ["https://github.com/bazelbuild/rules_proto/archive/{version}.tar.gz"], - use_category = ["api"], - ), - opentelemetry_proto = dict( - project_name = "OpenTelemetry Proto", - project_desc = "Language Independent Interface Types For OpenTelemetry", - project_url = "https://github.com/open-telemetry/opentelemetry-proto", - version = "0.9.0", - sha256 = "9ec38ab51eedbd7601979b0eda962cf37bc8a4dc35fcef604801e463f01dcc00", - release_date = "2021-05-12", - strip_prefix = "opentelemetry-proto-{version}", - urls = ["https://github.com/open-telemetry/opentelemetry-proto/archive/v{version}.tar.gz"], - use_category = ["api"], - ), - com_github_bufbuild_buf = dict( - project_name = "buf", - project_desc = "A new way of working with Protocol Buffers.", # Used for breaking change detection in API protobufs - project_url = "https://buf.build", - version = "0.53.0", - sha256 = "888bb52d358e34a8d6a57ecff426bed896bdf478ad13c78a70a9e1a9a2d75715", - strip_prefix = "buf", - urls = ["https://github.com/bufbuild/buf/releases/download/v{version}/buf-Linux-x86_64.tar.gz"], - release_date = "2021-08-25", - use_category = ["api"], - tags = ["manual"], - ), -) diff --git 
a/generated_api_shadow/bazel/repository_locations_utils.bzl b/generated_api_shadow/bazel/repository_locations_utils.bzl deleted file mode 100644 index 3b984e1bc580a..0000000000000 --- a/generated_api_shadow/bazel/repository_locations_utils.bzl +++ /dev/null @@ -1,20 +0,0 @@ -def _format_version(s, version): - return s.format(version = version, dash_version = version.replace(".", "-"), underscore_version = version.replace(".", "_")) - -# Generate a "repository location specification" from raw repository -# specification. The information should match the format required by -# external_deps.bzl. This function mostly does interpolation of {version} in -# the repository info fields. This code should be capable of running in both -# Python and Starlark. -def load_repository_locations_spec(repository_locations_spec): - locations = {} - for key, location in repository_locations_spec.items(): - mutable_location = dict(location) - locations[key] = mutable_location - - # Fixup with version information. - if "version" in location: - if "strip_prefix" in location: - mutable_location["strip_prefix"] = _format_version(location["strip_prefix"], location["version"]) - mutable_location["urls"] = [_format_version(url, location["version"]) for url in location["urls"]] - return locations diff --git a/generated_api_shadow/contrib/envoy/extensions/filters/http/squash/v3/BUILD b/generated_api_shadow/contrib/envoy/extensions/filters/http/squash/v3/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/contrib/envoy/extensions/filters/http/squash/v3/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/contrib/envoy/extensions/filters/http/squash/v3/squash.proto b/generated_api_shadow/contrib/envoy/extensions/filters/http/squash/v3/squash.proto deleted file mode 100644 index f9bc9cceceb99..0000000000000 --- a/generated_api_shadow/contrib/envoy/extensions/filters/http/squash/v3/squash.proto +++ /dev/null @@ -1,60 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.http.squash.v3; - -import "google/protobuf/duration.proto"; -import "google/protobuf/struct.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.http.squash.v3"; -option java_outer_classname = "SquashProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Squash] -// Squash :ref:`configuration overview `. -// [#extension: envoy.filters.http.squash] - -// [#next-free-field: 6] -message Squash { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.http.squash.v2.Squash"; - - // The name of the cluster that hosts the Squash server. - string cluster = 1 [(validate.rules).string = {min_len: 1}]; - - // When the filter requests the Squash server to create a DebugAttachment, it will use this - // structure as template for the body of the request. It can contain reference to environment - // variables in the form of '{{ ENV_VAR_NAME }}'. These can be used to provide the Squash server - // with more information to find the process to attach the debugger to. For example, in a - // Istio/k8s environment, this will contain information on the pod: - // - // .. 
code-block:: json - // - // { - // "spec": { - // "attachment": { - // "pod": "{{ POD_NAME }}", - // "namespace": "{{ POD_NAMESPACE }}" - // }, - // "match_request": true - // } - // } - // - // (where POD_NAME, POD_NAMESPACE are configured in the pod via the Downward API) - google.protobuf.Struct attachment_template = 2; - - // The timeout for individual requests sent to the Squash cluster. Defaults to 1 second. - google.protobuf.Duration request_timeout = 3; - - // The total timeout Squash will delay a request and wait for it to be attached. Defaults to 60 - // seconds. - google.protobuf.Duration attachment_timeout = 4; - - // Amount of time to poll for the status of the attachment object in the Squash server - // (to check if has been attached). Defaults to 1 second. - google.protobuf.Duration attachment_poll_period = 5; -} diff --git a/generated_api_shadow/contrib/envoy/extensions/filters/http/sxg/v3alpha/BUILD b/generated_api_shadow/contrib/envoy/extensions/filters/http/sxg/v3alpha/BUILD deleted file mode 100644 index 3ca8242f77801..0000000000000 --- a/generated_api_shadow/contrib/envoy/extensions/filters/http/sxg/v3alpha/BUILD +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/extensions/transport_sockets/tls/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/contrib/envoy/extensions/filters/http/sxg/v3alpha/sxg.proto b/generated_api_shadow/contrib/envoy/extensions/filters/http/sxg/v3alpha/sxg.proto deleted file mode 100644 index b9efc278e6de8..0000000000000 --- a/generated_api_shadow/contrib/envoy/extensions/filters/http/sxg/v3alpha/sxg.proto +++ /dev/null @@ -1,67 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.http.sxg.v3alpha; - -import "envoy/extensions/transport_sockets/tls/v3/secret.proto"; - -import "google/protobuf/duration.proto"; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.http.sxg.v3alpha"; -option java_outer_classname = "SxgProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).work_in_progress = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Signed HTTP Exchange Filter] -// SXG :ref:`configuration overview `. -// [#extension: envoy.filters.http.sxg] - -// [#next-free-field: 10] -message SXG { - // The SDS configuration for the public key data for the SSL certificate that will be used to sign the - // SXG response. - transport_sockets.tls.v3.SdsSecretConfig certificate = 1; - - // The SDS configuration for the private key data for the SSL certificate that will be used to sign the - // SXG response. - transport_sockets.tls.v3.SdsSecretConfig private_key = 2; - - // The duration for which the generated SXG package will be valid. Default is 604800s (7 days in seconds). - // Note that in order to account for clock skew, the timestamp will be backdated by a day. 
So, if duration - // is set to 7 days, that will be 7 days from 24 hours ago (6 days from now). Also note that while 6/7 days - // is appropriate for most content, if the downstream service is serving Javascript, or HTML with inline - // Javascript, 1 day (so, with backdated expiry, 2 days, or 172800 seconds) is more appropriate. - google.protobuf.Duration duration = 3; - - // The SXG response payload is Merkle Integrity Content Encoding (MICE) encoded (specification is [here](https://datatracker.ietf.org/doc/html/draft-thomson-http-mice-03)) - // This value indicates the record size in the encoded payload. The default value is 4096. - uint64 mi_record_size = 4; - - // The URI of certificate CBOR file published. Since it is required that the certificate CBOR file - // be served from the same domain as the SXG document, this should be a relative URI. - string cbor_url = 5 [(validate.rules).string = {min_len: 1 prefix: "/"}]; - - // URL to retrieve validity data for signature, a CBOR map. See specification [here](https://tools.ietf.org/html/draft-yasskin-httpbis-origin-signed-exchanges-impl-00#section-3.6) - string validity_url = 6 [(validate.rules).string = {min_len: 1 prefix: "/"}]; - - // Header that will be set if it is determined that the client can accept SXG (typically `accept: application/signed-exchange;v=b3) - // If not set, filter will default to: `x-client-can-accept-sxg` - string client_can_accept_sxg_header = 7 [ - (validate.rules).string = {well_known_regex: HTTP_HEADER_NAME strict: false ignore_empty: true} - ]; - - // Header set by downstream service to signal that the response should be transformed to SXG If not set, - // filter will default to: `x-should-encode-sxg` - string should_encode_sxg_header = 8 [ - (validate.rules).string = {well_known_regex: HTTP_HEADER_NAME strict: false ignore_empty: true} - ]; - - // Headers that will be stripped from the SXG document, by listing a prefix (i.e. 
`x-custom-` will cause - // all headers prefixed by `x-custom-` to be omitted from the SXG document) - repeated string header_prefix_filters = 9 [ - (validate.rules).repeated = {items {string {well_known_regex: HTTP_HEADER_NAME strict: false}}} - ]; -} diff --git a/generated_api_shadow/contrib/envoy/extensions/filters/network/kafka_broker/v3/BUILD b/generated_api_shadow/contrib/envoy/extensions/filters/network/kafka_broker/v3/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/contrib/envoy/extensions/filters/network/kafka_broker/v3/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/contrib/envoy/extensions/filters/network/kafka_broker/v3/kafka_broker.proto b/generated_api_shadow/contrib/envoy/extensions/filters/network/kafka_broker/v3/kafka_broker.proto deleted file mode 100644 index 0fac07427d0c0..0000000000000 --- a/generated_api_shadow/contrib/envoy/extensions/filters/network/kafka_broker/v3/kafka_broker.proto +++ /dev/null @@ -1,24 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.network.kafka_broker.v3; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.network.kafka_broker.v3"; -option java_outer_classname = "KafkaBrokerProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Kafka Broker] -// Kafka Broker :ref:`configuration overview `. 
-// [#extension: envoy.filters.network.kafka_broker] - -message KafkaBroker { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.network.kafka_broker.v2alpha1.KafkaBroker"; - - // The prefix to use when emitting :ref:`statistics `. - string stat_prefix = 1 [(validate.rules).string = {min_len: 1}]; -} diff --git a/generated_api_shadow/contrib/envoy/extensions/filters/network/kafka_mesh/v3alpha/BUILD b/generated_api_shadow/contrib/envoy/extensions/filters/network/kafka_mesh/v3alpha/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/contrib/envoy/extensions/filters/network/kafka_mesh/v3alpha/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/contrib/envoy/extensions/filters/network/kafka_mesh/v3alpha/kafka_mesh.proto b/generated_api_shadow/contrib/envoy/extensions/filters/network/kafka_mesh/v3alpha/kafka_mesh.proto deleted file mode 100644 index 03a6522852ab5..0000000000000 --- a/generated_api_shadow/contrib/envoy/extensions/filters/network/kafka_mesh/v3alpha/kafka_mesh.proto +++ /dev/null @@ -1,58 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.network.kafka_mesh.v3alpha; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.network.kafka_mesh.v3alpha"; -option java_outer_classname = "KafkaMeshProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).work_in_progress = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Kafka Mesh] -// Kafka Mesh :ref:`configuration overview `. 
-// [#extension: envoy.filters.network.kafka_mesh] - -message KafkaMesh { - // Envoy's host that's advertised to clients. - // Has the same meaning as corresponding Kafka broker properties. - // Usually equal to filter chain's listener config, but needs to be reachable by clients - // (so 0.0.0.0 will not work). - string advertised_host = 1 [(validate.rules).string = {min_len: 1}]; - - // Envoy's port that's advertised to clients. - int32 advertised_port = 2 [(validate.rules).int32 = {gt: 0}]; - - // Upstream clusters this filter will connect to. - repeated KafkaClusterDefinition upstream_clusters = 3; - - // Rules that will decide which cluster gets which request. - repeated ForwardingRule forwarding_rules = 4; -} - -message KafkaClusterDefinition { - // Cluster name. - string cluster_name = 1 [(validate.rules).string = {min_len: 1}]; - - // Kafka cluster address. - string bootstrap_servers = 2 [(validate.rules).string = {min_len: 1}]; - - // Default number of partitions present in this cluster. - // This is especially important for clients that do not specify partition in their payloads and depend on this value for hashing. - int32 partition_count = 3 [(validate.rules).int32 = {gt: 0}]; - - // Custom configuration passed to Kafka producer. - map producer_config = 4; -} - -message ForwardingRule { - // Cluster name. - string target_cluster = 1; - - oneof trigger { - // Intended place for future types of forwarding rules. - string topic_prefix = 2; - } -} diff --git a/generated_api_shadow/contrib/envoy/extensions/filters/network/mysql_proxy/v3/BUILD b/generated_api_shadow/contrib/envoy/extensions/filters/network/mysql_proxy/v3/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/contrib/envoy/extensions/filters/network/mysql_proxy/v3/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/contrib/envoy/extensions/filters/network/mysql_proxy/v3/mysql_proxy.proto b/generated_api_shadow/contrib/envoy/extensions/filters/network/mysql_proxy/v3/mysql_proxy.proto deleted file mode 100644 index 9dfdb14d3f11a..0000000000000 --- a/generated_api_shadow/contrib/envoy/extensions/filters/network/mysql_proxy/v3/mysql_proxy.proto +++ /dev/null @@ -1,29 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.network.mysql_proxy.v3; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.network.mysql_proxy.v3"; -option java_outer_classname = "MysqlProxyProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: MySQL proxy] -// MySQL Proxy :ref:`configuration overview `. -// [#extension: envoy.filters.network.mysql_proxy] - -message MySQLProxy { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.network.mysql_proxy.v1alpha1.MySQLProxy"; - - // The human readable prefix to use when emitting :ref:`statistics - // `. - string stat_prefix = 1 [(validate.rules).string = {min_len: 1}]; - - // [#not-implemented-hide:] The optional path to use for writing MySQL access logs. - // If the access log field is empty, access logs will not be written. 
- string access_log = 2; -} diff --git a/generated_api_shadow/contrib/envoy/extensions/filters/network/postgres_proxy/v3alpha/BUILD b/generated_api_shadow/contrib/envoy/extensions/filters/network/postgres_proxy/v3alpha/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/contrib/envoy/extensions/filters/network/postgres_proxy/v3alpha/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/contrib/envoy/extensions/filters/network/postgres_proxy/v3alpha/postgres_proxy.proto b/generated_api_shadow/contrib/envoy/extensions/filters/network/postgres_proxy/v3alpha/postgres_proxy.proto deleted file mode 100644 index 8fe98f269626d..0000000000000 --- a/generated_api_shadow/contrib/envoy/extensions/filters/network/postgres_proxy/v3alpha/postgres_proxy.proto +++ /dev/null @@ -1,41 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.network.postgres_proxy.v3alpha; - -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.network.postgres_proxy.v3alpha"; -option java_outer_classname = "PostgresProxyProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).work_in_progress = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Postgres proxy] -// Postgres Proxy :ref:`configuration overview -// `. -// [#extension: envoy.filters.network.postgres_proxy] - -message PostgresProxy { - // The human readable prefix to use when emitting :ref:`statistics - // `. 
- string stat_prefix = 1 [(validate.rules).string = {min_len: 1}]; - - // Controls whether SQL statements received in Frontend Query messages - // are parsed. Parsing is required to produce Postgres proxy filter - // metadata. Defaults to true. - google.protobuf.BoolValue enable_sql_parsing = 2; - - // Controls whether to terminate SSL session initiated by a client. - // If the value is false, the Postgres proxy filter will not try to - // terminate SSL session, but will pass all the packets to the upstream server. - // If the value is true, the Postgres proxy filter will try to terminate SSL - // session. In order to do that, the filter chain must use :ref:`starttls transport socket - // `. - // If the filter does not manage to terminate the SSL session, it will close the connection from the client. - // Refer to official documentation for details - // `SSL Session Encryption Message Flow `_. - bool terminate_ssl = 3; -} diff --git a/generated_api_shadow/contrib/envoy/extensions/filters/network/rocketmq_proxy/v3/BUILD b/generated_api_shadow/contrib/envoy/extensions/filters/network/rocketmq_proxy/v3/BUILD deleted file mode 100644 index 2f90ace882d93..0000000000000 --- a/generated_api_shadow/contrib/envoy/extensions/filters/network/rocketmq_proxy/v3/BUILD +++ /dev/null @@ -1,14 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/core/v3:pkg", - "//envoy/config/route/v3:pkg", - "//envoy/type/matcher/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/contrib/envoy/extensions/filters/network/rocketmq_proxy/v3/rocketmq_proxy.proto b/generated_api_shadow/contrib/envoy/extensions/filters/network/rocketmq_proxy/v3/rocketmq_proxy.proto deleted file mode 100644 index 12438751fada6..0000000000000 --- a/generated_api_shadow/contrib/envoy/extensions/filters/network/rocketmq_proxy/v3/rocketmq_proxy.proto +++ /dev/null @@ -1,34 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.network.rocketmq_proxy.v3; - -import "contrib/envoy/extensions/filters/network/rocketmq_proxy/v3/route.proto"; - -import "google/protobuf/duration.proto"; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.network.rocketmq_proxy.v3"; -option java_outer_classname = "RocketmqProxyProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: RocketMQ Proxy] -// RocketMQ Proxy :ref:`configuration overview `. -// [#extension: envoy.filters.network.rocketmq_proxy] - -message RocketmqProxy { - // The human readable prefix to use when emitting statistics. - string stat_prefix = 1 [(validate.rules).string = {min_len: 1}]; - - // The route table for the connection manager is specified in this property. - RouteConfiguration route_config = 2; - - // The largest duration transient object expected to live, more than 10s is recommended. 
- google.protobuf.Duration transient_object_life_span = 3; - - // If develop_mode is enabled, this proxy plugin may work without dedicated traffic intercepting - // facility without considering backward compatibility of exiting RocketMQ client SDK. - bool develop_mode = 4; -} diff --git a/generated_api_shadow/contrib/envoy/extensions/filters/network/rocketmq_proxy/v3/route.proto b/generated_api_shadow/contrib/envoy/extensions/filters/network/rocketmq_proxy/v3/route.proto deleted file mode 100644 index 6ec6c71c5627d..0000000000000 --- a/generated_api_shadow/contrib/envoy/extensions/filters/network/rocketmq_proxy/v3/route.proto +++ /dev/null @@ -1,54 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.network.rocketmq_proxy.v3; - -import "envoy/config/core/v3/base.proto"; -import "envoy/config/route/v3/route_components.proto"; -import "envoy/type/matcher/v3/string.proto"; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.network.rocketmq_proxy.v3"; -option java_outer_classname = "RouteProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Rocketmq Proxy Route Configuration] -// Rocketmq Proxy :ref:`configuration overview `. - -message RouteConfiguration { - // The name of the route configuration. - string name = 1; - - // The list of routes that will be matched, in order, against incoming requests. The first route - // that matches will be used. - repeated Route routes = 2; -} - -message Route { - // Route matching parameters. - RouteMatch match = 1 [(validate.rules).message = {required: true}]; - - // Route request to some upstream cluster. - RouteAction route = 2 [(validate.rules).message = {required: true}]; -} - -message RouteMatch { - // The name of the topic. 
- type.matcher.v3.StringMatcher topic = 1 [(validate.rules).message = {required: true}]; - - // Specifies a set of headers that the route should match on. The router will check the request’s - // headers against all the specified headers in the route config. A match will happen if all the - // headers in the route are present in the request with the same values (or based on presence if - // the value field is not in the config). - repeated config.route.v3.HeaderMatcher headers = 2; -} - -message RouteAction { - // Indicates the upstream cluster to which the request should be routed. - string cluster = 1 [(validate.rules).string = {min_len: 1}]; - - // Optional endpoint metadata match criteria used by the subset load balancer. - config.core.v3.Metadata metadata_match = 2; -} diff --git a/generated_api_shadow/envoy/admin/v2alpha/BUILD b/generated_api_shadow/envoy/admin/v2alpha/BUILD deleted file mode 100644 index 6fe8cb995d343..0000000000000 --- a/generated_api_shadow/envoy/admin/v2alpha/BUILD +++ /dev/null @@ -1,16 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/annotations:pkg", - "//envoy/api/v2/core:pkg", - "//envoy/config/bootstrap/v2:pkg", - "//envoy/service/tap/v2alpha:pkg", - "//envoy/type:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/admin/v2alpha/certs.proto b/generated_api_shadow/envoy/admin/v2alpha/certs.proto deleted file mode 100644 index c7b568ca1e58a..0000000000000 --- a/generated_api_shadow/envoy/admin/v2alpha/certs.proto +++ /dev/null @@ -1,62 +0,0 @@ -syntax = "proto3"; - -package envoy.admin.v2alpha; - -import "google/protobuf/timestamp.proto"; - -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.admin.v2alpha"; -option java_outer_classname = "CertsProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Certificates] - -// Proto representation of certificate details. Admin endpoint uses this wrapper for `/certs` to -// display certificate information. See :ref:`/certs ` for more -// information. -message Certificates { - // List of certificates known to an Envoy. - repeated Certificate certificates = 1; -} - -message Certificate { - // Details of CA certificate. - repeated CertificateDetails ca_cert = 1; - - // Details of Certificate Chain - repeated CertificateDetails cert_chain = 2; -} - -// [#next-free-field: 7] -message CertificateDetails { - // Path of the certificate. - string path = 1; - - // Certificate Serial Number. - string serial_number = 2; - - // List of Subject Alternate names. - repeated SubjectAlternateName subject_alt_names = 3; - - // Minimum of days until expiration of certificate and it's chain. - uint64 days_until_expiration = 4; - - // Indicates the time from which the certificate is valid. 
- google.protobuf.Timestamp valid_from = 5; - - // Indicates the time at which the certificate expires. - google.protobuf.Timestamp expiration_time = 6; -} - -message SubjectAlternateName { - // Subject Alternate Name. - oneof name { - string dns = 1; - - string uri = 2; - - string ip_address = 3; - } -} diff --git a/generated_api_shadow/envoy/admin/v2alpha/clusters.proto b/generated_api_shadow/envoy/admin/v2alpha/clusters.proto deleted file mode 100644 index 3b7ec029aa630..0000000000000 --- a/generated_api_shadow/envoy/admin/v2alpha/clusters.proto +++ /dev/null @@ -1,153 +0,0 @@ -syntax = "proto3"; - -package envoy.admin.v2alpha; - -import "envoy/admin/v2alpha/metrics.proto"; -import "envoy/api/v2/core/address.proto"; -import "envoy/api/v2/core/base.proto"; -import "envoy/api/v2/core/health_check.proto"; -import "envoy/type/percent.proto"; - -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.admin.v2alpha"; -option java_outer_classname = "ClustersProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Clusters] - -// Admin endpoint uses this wrapper for `/clusters` to display cluster status information. -// See :ref:`/clusters ` for more information. -message Clusters { - // Mapping from cluster name to each cluster's status. - repeated ClusterStatus cluster_statuses = 1; -} - -// Details an individual cluster's current status. -// [#next-free-field: 6] -message ClusterStatus { - // Name of the cluster. - string name = 1; - - // Denotes whether this cluster was added via API or configured statically. - bool added_via_api = 2; - - // The success rate threshold used in the last interval. - // If - // :ref:`outlier_detection.split_external_local_origin_errors` - // is *false*, all errors: externally and locally generated were used to calculate the threshold. 
- // If - // :ref:`outlier_detection.split_external_local_origin_errors` - // is *true*, only externally generated errors were used to calculate the threshold. - // The threshold is used to eject hosts based on their success rate. See - // :ref:`Cluster outlier detection ` documentation for details. - // - // Note: this field may be omitted in any of the three following cases: - // - // 1. There were not enough hosts with enough request volume to proceed with success rate based - // outlier ejection. - // 2. The threshold is computed to be < 0 because a negative value implies that there was no - // threshold for that interval. - // 3. Outlier detection is not enabled for this cluster. - type.Percent success_rate_ejection_threshold = 3; - - // Mapping from host address to the host's current status. - repeated HostStatus host_statuses = 4; - - // The success rate threshold used in the last interval when only locally originated failures were - // taken into account and externally originated errors were treated as success. - // This field should be interpreted only when - // :ref:`outlier_detection.split_external_local_origin_errors` - // is *true*. The threshold is used to eject hosts based on their success rate. - // See :ref:`Cluster outlier detection ` documentation for - // details. - // - // Note: this field may be omitted in any of the three following cases: - // - // 1. There were not enough hosts with enough request volume to proceed with success rate based - // outlier ejection. - // 2. The threshold is computed to be < 0 because a negative value implies that there was no - // threshold for that interval. - // 3. Outlier detection is not enabled for this cluster. - type.Percent local_origin_success_rate_ejection_threshold = 5; -} - -// Current state of a particular host. -// [#next-free-field: 10] -message HostStatus { - // Address of this host. - api.v2.core.Address address = 1; - - // List of stats specific to this host. 
- repeated SimpleMetric stats = 2; - - // The host's current health status. - HostHealthStatus health_status = 3; - - // Request success rate for this host over the last calculated interval. - // If - // :ref:`outlier_detection.split_external_local_origin_errors` - // is *false*, all errors: externally and locally generated were used in success rate - // calculation. If - // :ref:`outlier_detection.split_external_local_origin_errors` - // is *true*, only externally generated errors were used in success rate calculation. - // See :ref:`Cluster outlier detection ` documentation for - // details. - // - // Note: the message will not be present if host did not have enough request volume to calculate - // success rate or the cluster did not have enough hosts to run through success rate outlier - // ejection. - type.Percent success_rate = 4; - - // The host's weight. If not configured, the value defaults to 1. - uint32 weight = 5; - - // The hostname of the host, if applicable. - string hostname = 6; - - // The host's priority. If not configured, the value defaults to 0 (highest priority). - uint32 priority = 7; - - // Request success rate for this host over the last calculated - // interval when only locally originated errors are taken into account and externally originated - // errors were treated as success. - // This field should be interpreted only when - // :ref:`outlier_detection.split_external_local_origin_errors` - // is *true*. - // See :ref:`Cluster outlier detection ` documentation for - // details. - // - // Note: the message will not be present if host did not have enough request volume to calculate - // success rate or the cluster did not have enough hosts to run through success rate outlier - // ejection. - type.Percent local_origin_success_rate = 8; - - // locality of the host. - api.v2.core.Locality locality = 9; -} - -// Health status for a host. 
-// [#next-free-field: 7] -message HostHealthStatus { - // The host is currently failing active health checks. - bool failed_active_health_check = 1; - - // The host is currently considered an outlier and has been ejected. - bool failed_outlier_check = 2; - - // The host is currently being marked as degraded through active health checking. - bool failed_active_degraded_check = 4; - - // The host has been removed from service discovery, but is being stabilized due to active - // health checking. - bool pending_dynamic_removal = 5; - - // The host has not yet been health checked. - bool pending_active_hc = 6; - - // Health status as reported by EDS. Note: only HEALTHY and UNHEALTHY are currently supported - // here. - // [#comment:TODO(mrice32): pipe through remaining EDS health status possibilities.] - api.v2.core.HealthStatus eds_health_status = 3; -} diff --git a/generated_api_shadow/envoy/admin/v2alpha/config_dump.proto b/generated_api_shadow/envoy/admin/v2alpha/config_dump.proto deleted file mode 100644 index 833c015fb4749..0000000000000 --- a/generated_api_shadow/envoy/admin/v2alpha/config_dump.proto +++ /dev/null @@ -1,291 +0,0 @@ -syntax = "proto3"; - -package envoy.admin.v2alpha; - -import "envoy/config/bootstrap/v2/bootstrap.proto"; - -import "google/protobuf/any.proto"; -import "google/protobuf/timestamp.proto"; - -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.admin.v2alpha"; -option java_outer_classname = "ConfigDumpProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: ConfigDump] - -// The :ref:`/config_dump ` admin endpoint uses this wrapper -// message to maintain and serve arbitrary configuration information from any component in Envoy. -message ConfigDump { - // This list is serialized and dumped in its entirety at the - // :ref:`/config_dump ` endpoint. 
- // - // The following configurations are currently supported and will be dumped in the order given - // below: - // - // * *bootstrap*: :ref:`BootstrapConfigDump ` - // * *clusters*: :ref:`ClustersConfigDump ` - // * *listeners*: :ref:`ListenersConfigDump ` - // * *routes*: :ref:`RoutesConfigDump ` - // - // You can filter output with the resource and mask query parameters. - // See :ref:`/config_dump?resource={} `, - // :ref:`/config_dump?mask={} `, - // or :ref:`/config_dump?resource={},mask={} - // ` for more information. - repeated google.protobuf.Any configs = 1; -} - -message UpdateFailureState { - // What the component configuration would have been if the update had succeeded. - google.protobuf.Any failed_configuration = 1; - - // Time of the latest failed update attempt. - google.protobuf.Timestamp last_update_attempt = 2; - - // Details about the last failed update attempt. - string details = 3; -} - -// This message describes the bootstrap configuration that Envoy was started with. This includes -// any CLI overrides that were merged. Bootstrap configuration information can be used to recreate -// the static portions of an Envoy configuration by reusing the output as the bootstrap -// configuration for another Envoy. -message BootstrapConfigDump { - config.bootstrap.v2.Bootstrap bootstrap = 1; - - // The timestamp when the BootstrapConfig was last updated. - google.protobuf.Timestamp last_updated = 2; -} - -// Envoy's listener manager fills this message with all currently known listeners. Listener -// configuration information can be used to recreate an Envoy configuration by populating all -// listeners as static listeners or by returning them in a LDS response. -message ListenersConfigDump { - // Describes a statically loaded listener. - message StaticListener { - // The listener config. - google.protobuf.Any listener = 1; - - // The timestamp when the Listener was last successfully updated. 
- google.protobuf.Timestamp last_updated = 2; - } - - message DynamicListenerState { - // This is the per-resource version information. This version is currently taken from the - // :ref:`version_info ` field at the time - // that the listener was loaded. In the future, discrete per-listener versions may be supported - // by the API. - string version_info = 1; - - // The listener config. - google.protobuf.Any listener = 2; - - // The timestamp when the Listener was last successfully updated. - google.protobuf.Timestamp last_updated = 3; - } - - // Describes a dynamically loaded listener via the LDS API. - // [#next-free-field: 6] - message DynamicListener { - // The name or unique id of this listener, pulled from the DynamicListenerState config. - string name = 1; - - // The listener state for any active listener by this name. - // These are listeners that are available to service data plane traffic. - DynamicListenerState active_state = 2; - - // The listener state for any warming listener by this name. - // These are listeners that are currently undergoing warming in preparation to service data - // plane traffic. Note that if attempting to recreate an Envoy configuration from a - // configuration dump, the warming listeners should generally be discarded. - DynamicListenerState warming_state = 3; - - // The listener state for any draining listener by this name. - // These are listeners that are currently undergoing draining in preparation to stop servicing - // data plane traffic. Note that if attempting to recreate an Envoy configuration from a - // configuration dump, the draining listeners should generally be discarded. - DynamicListenerState draining_state = 4; - - // Set if the last update failed, cleared after the next successful update. - UpdateFailureState error_state = 5; - } - - // This is the :ref:`version_info ` in the - // last processed LDS discovery response. If there are only static bootstrap listeners, this field - // will be "". 
- string version_info = 1; - - // The statically loaded listener configs. - repeated StaticListener static_listeners = 2; - - // State for any warming, active, or draining listeners. - repeated DynamicListener dynamic_listeners = 3; -} - -// Envoy's cluster manager fills this message with all currently known clusters. Cluster -// configuration information can be used to recreate an Envoy configuration by populating all -// clusters as static clusters or by returning them in a CDS response. -message ClustersConfigDump { - // Describes a statically loaded cluster. - message StaticCluster { - // The cluster config. - google.protobuf.Any cluster = 1; - - // The timestamp when the Cluster was last updated. - google.protobuf.Timestamp last_updated = 2; - } - - // Describes a dynamically loaded cluster via the CDS API. - message DynamicCluster { - // This is the per-resource version information. This version is currently taken from the - // :ref:`version_info ` field at the time - // that the cluster was loaded. In the future, discrete per-cluster versions may be supported by - // the API. - string version_info = 1; - - // The cluster config. - google.protobuf.Any cluster = 2; - - // The timestamp when the Cluster was last updated. - google.protobuf.Timestamp last_updated = 3; - } - - // This is the :ref:`version_info ` in the - // last processed CDS discovery response. If there are only static bootstrap clusters, this field - // will be "". - string version_info = 1; - - // The statically loaded cluster configs. - repeated StaticCluster static_clusters = 2; - - // The dynamically loaded active clusters. These are clusters that are available to service - // data plane traffic. - repeated DynamicCluster dynamic_active_clusters = 3; - - // The dynamically loaded warming clusters. These are clusters that are currently undergoing - // warming in preparation to service data plane traffic. 
Note that if attempting to recreate an - // Envoy configuration from a configuration dump, the warming clusters should generally be - // discarded. - repeated DynamicCluster dynamic_warming_clusters = 4; -} - -// Envoy's RDS implementation fills this message with all currently loaded routes, as described by -// their RouteConfiguration objects. Static routes that are either defined in the bootstrap configuration -// or defined inline while configuring listeners are separated from those configured dynamically via RDS. -// Route configuration information can be used to recreate an Envoy configuration by populating all routes -// as static routes or by returning them in RDS responses. -message RoutesConfigDump { - message StaticRouteConfig { - // The route config. - google.protobuf.Any route_config = 1; - - // The timestamp when the Route was last updated. - google.protobuf.Timestamp last_updated = 2; - } - - message DynamicRouteConfig { - // This is the per-resource version information. This version is currently taken from the - // :ref:`version_info ` field at the time that - // the route configuration was loaded. - string version_info = 1; - - // The route config. - google.protobuf.Any route_config = 2; - - // The timestamp when the Route was last updated. - google.protobuf.Timestamp last_updated = 3; - } - - // The statically loaded route configs. - repeated StaticRouteConfig static_route_configs = 2; - - // The dynamically loaded route configs. - repeated DynamicRouteConfig dynamic_route_configs = 3; -} - -// Envoy's scoped RDS implementation fills this message with all currently loaded route -// configuration scopes (defined via ScopedRouteConfigurationsSet protos). This message lists both -// the scopes defined inline with the higher order object (i.e., the HttpConnectionManager) and the -// dynamically obtained scopes via the SRDS API. 
-message ScopedRoutesConfigDump { - message InlineScopedRouteConfigs { - // The name assigned to the scoped route configurations. - string name = 1; - - // The scoped route configurations. - repeated google.protobuf.Any scoped_route_configs = 2; - - // The timestamp when the scoped route config set was last updated. - google.protobuf.Timestamp last_updated = 3; - } - - message DynamicScopedRouteConfigs { - // The name assigned to the scoped route configurations. - string name = 1; - - // This is the per-resource version information. This version is currently taken from the - // :ref:`version_info ` field at the time that - // the scoped routes configuration was loaded. - string version_info = 2; - - // The scoped route configurations. - repeated google.protobuf.Any scoped_route_configs = 3; - - // The timestamp when the scoped route config set was last updated. - google.protobuf.Timestamp last_updated = 4; - } - - // The statically loaded scoped route configs. - repeated InlineScopedRouteConfigs inline_scoped_route_configs = 1; - - // The dynamically loaded scoped route configs. - repeated DynamicScopedRouteConfigs dynamic_scoped_route_configs = 2; -} - -// Envoys SDS implementation fills this message with all secrets fetched dynamically via SDS. -message SecretsConfigDump { - // DynamicSecret contains secret information fetched via SDS. - message DynamicSecret { - // The name assigned to the secret. - string name = 1; - - // This is the per-resource version information. - string version_info = 2; - - // The timestamp when the secret was last updated. - google.protobuf.Timestamp last_updated = 3; - - // The actual secret information. - // Security sensitive information is redacted (replaced with "[redacted]") for - // private keys and passwords in TLS certificates. - google.protobuf.Any secret = 4; - } - - // StaticSecret specifies statically loaded secret in bootstrap. - message StaticSecret { - // The name assigned to the secret. 
- string name = 1; - - // The timestamp when the secret was last updated. - google.protobuf.Timestamp last_updated = 2; - - // The actual secret information. - // Security sensitive information is redacted (replaced with "[redacted]") for - // private keys and passwords in TLS certificates. - google.protobuf.Any secret = 3; - } - - // The statically loaded secrets. - repeated StaticSecret static_secrets = 1; - - // The dynamically loaded active secrets. These are secrets that are available to service - // clusters or listeners. - repeated DynamicSecret dynamic_active_secrets = 2; - - // The dynamically loaded warming secrets. These are secrets that are currently undergoing - // warming in preparation to service clusters or listeners. - repeated DynamicSecret dynamic_warming_secrets = 3; -} diff --git a/generated_api_shadow/envoy/admin/v2alpha/listeners.proto b/generated_api_shadow/envoy/admin/v2alpha/listeners.proto deleted file mode 100644 index ca7b736521d0d..0000000000000 --- a/generated_api_shadow/envoy/admin/v2alpha/listeners.proto +++ /dev/null @@ -1,31 +0,0 @@ -syntax = "proto3"; - -package envoy.admin.v2alpha; - -import "envoy/api/v2/core/address.proto"; - -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.admin.v2alpha"; -option java_outer_classname = "ListenersProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Listeners] - -// Admin endpoint uses this wrapper for `/listeners` to display listener status information. -// See :ref:`/listeners ` for more information. -message Listeners { - // List of listener statuses. - repeated ListenerStatus listener_statuses = 1; -} - -// Details an individual listener's current status. -message ListenerStatus { - // Name of the listener - string name = 1; - - // The actual local address that the listener is listening on. 
If a listener was configured - // to listen on port 0, then this address has the port that was allocated by the OS. - api.v2.core.Address local_address = 2; -} diff --git a/generated_api_shadow/envoy/admin/v2alpha/memory.proto b/generated_api_shadow/envoy/admin/v2alpha/memory.proto deleted file mode 100644 index 85fd2169d6d70..0000000000000 --- a/generated_api_shadow/envoy/admin/v2alpha/memory.proto +++ /dev/null @@ -1,44 +0,0 @@ -syntax = "proto3"; - -package envoy.admin.v2alpha; - -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.admin.v2alpha"; -option java_outer_classname = "MemoryProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Memory] - -// Proto representation of the internal memory consumption of an Envoy instance. These represent -// values extracted from an internal TCMalloc instance. For more information, see the section of the -// docs entitled ["Generic Tcmalloc Status"](https://gperftools.github.io/gperftools/tcmalloc.html). -// [#next-free-field: 7] -message Memory { - // The number of bytes allocated by the heap for Envoy. This is an alias for - // `generic.current_allocated_bytes`. - uint64 allocated = 1; - - // The number of bytes reserved by the heap but not necessarily allocated. This is an alias for - // `generic.heap_size`. - uint64 heap_size = 2; - - // The number of bytes in free, unmapped pages in the page heap. These bytes always count towards - // virtual memory usage, and depending on the OS, typically do not count towards physical memory - // usage. This is an alias for `tcmalloc.pageheap_unmapped_bytes`. - uint64 pageheap_unmapped = 3; - - // The number of bytes in free, mapped pages in the page heap. These bytes always count towards - // virtual memory usage, and unless the underlying memory is swapped out by the OS, they also - // count towards physical memory usage. 
This is an alias for `tcmalloc.pageheap_free_bytes`. - uint64 pageheap_free = 4; - - // The amount of memory used by the TCMalloc thread caches (for small objects). This is an alias - // for `tcmalloc.current_total_thread_cache_bytes`. - uint64 total_thread_cache = 5; - - // The number of bytes of the physical memory usage by the allocator. This is an alias for - // `generic.total_physical_bytes`. - uint64 total_physical_bytes = 6; -} diff --git a/generated_api_shadow/envoy/admin/v2alpha/metrics.proto b/generated_api_shadow/envoy/admin/v2alpha/metrics.proto deleted file mode 100644 index 15ad219c13e58..0000000000000 --- a/generated_api_shadow/envoy/admin/v2alpha/metrics.proto +++ /dev/null @@ -1,29 +0,0 @@ -syntax = "proto3"; - -package envoy.admin.v2alpha; - -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.admin.v2alpha"; -option java_outer_classname = "MetricsProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Metrics] - -// Proto representation of an Envoy Counter or Gauge value. -message SimpleMetric { - enum Type { - COUNTER = 0; - GAUGE = 1; - } - - // Type of the metric represented. - Type type = 1; - - // Current metric value. - uint64 value = 2; - - // Name of the metric. 
- string name = 3; -} diff --git a/generated_api_shadow/envoy/admin/v2alpha/mutex_stats.proto b/generated_api_shadow/envoy/admin/v2alpha/mutex_stats.proto deleted file mode 100644 index 22c65f3de5a64..0000000000000 --- a/generated_api_shadow/envoy/admin/v2alpha/mutex_stats.proto +++ /dev/null @@ -1,30 +0,0 @@ -syntax = "proto3"; - -package envoy.admin.v2alpha; - -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.admin.v2alpha"; -option java_outer_classname = "MutexStatsProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: MutexStats] - -// Proto representation of the statistics collected upon absl::Mutex contention, if Envoy is run -// under :option:`--enable-mutex-tracing`. For more information, see the `absl::Mutex` -// [docs](https://abseil.io/about/design/mutex#extra-features). -// -// *NB*: The wait cycles below are measured by `absl::base_internal::CycleClock`, and may not -// correspond to core clock frequency. For more information, see the `CycleClock` -// [docs](https://github.com/abseil/abseil-cpp/blob/master/absl/base/internal/cycleclock.h). -message MutexStats { - // The number of individual mutex contentions which have occurred since startup. - uint64 num_contentions = 1; - - // The length of the current contention wait cycle. - uint64 current_wait_cycles = 2; - - // The lifetime total of all contention wait cycles. 
- uint64 lifetime_wait_cycles = 3; -} diff --git a/generated_api_shadow/envoy/admin/v2alpha/server_info.proto b/generated_api_shadow/envoy/admin/v2alpha/server_info.proto deleted file mode 100644 index b9db6bbc1e1fb..0000000000000 --- a/generated_api_shadow/envoy/admin/v2alpha/server_info.proto +++ /dev/null @@ -1,154 +0,0 @@ -syntax = "proto3"; - -package envoy.admin.v2alpha; - -import "google/protobuf/duration.proto"; - -import "envoy/annotations/deprecation.proto"; -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.admin.v2alpha"; -option java_outer_classname = "ServerInfoProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Server State] - -// Proto representation of the value returned by /server_info, containing -// server version/server status information. -// [#next-free-field: 7] -message ServerInfo { - enum State { - // Server is live and serving traffic. - LIVE = 0; - - // Server is draining listeners in response to external health checks failing. - DRAINING = 1; - - // Server has not yet completed cluster manager initialization. - PRE_INITIALIZING = 2; - - // Server is running the cluster manager initialization callbacks (e.g., RDS). - INITIALIZING = 3; - } - - // Server version. - string version = 1; - - // State of the server. - State state = 2; - - // Uptime since current epoch was started. - google.protobuf.Duration uptime_current_epoch = 3; - - // Uptime since the start of the first epoch. - google.protobuf.Duration uptime_all_epochs = 4; - - // Hot restart version. - string hot_restart_version = 5; - - // Command line options the server is currently running with. - CommandLineOptions command_line_options = 6; -} - -// [#next-free-field: 29] -message CommandLineOptions { - enum IpVersion { - v4 = 0; - v6 = 1; - } - - enum Mode { - // Validate configs and then serve traffic normally. - Serve = 0; - - // Validate configs and exit. 
- Validate = 1; - - // Completely load and initialize the config, and then exit without running the listener loop. - InitOnly = 2; - } - - reserved 12; - - // See :option:`--base-id` for details. - uint64 base_id = 1; - - // See :option:`--concurrency` for details. - uint32 concurrency = 2; - - // See :option:`--config-path` for details. - string config_path = 3; - - // See :option:`--config-yaml` for details. - string config_yaml = 4; - - // See :option:`--allow-unknown-static-fields` for details. - bool allow_unknown_static_fields = 5; - - // See :option:`--reject-unknown-dynamic-fields` for details. - bool reject_unknown_dynamic_fields = 26; - - // See :option:`--admin-address-path` for details. - string admin_address_path = 6; - - // See :option:`--local-address-ip-version` for details. - IpVersion local_address_ip_version = 7; - - // See :option:`--log-level` for details. - string log_level = 8; - - // See :option:`--component-log-level` for details. - string component_log_level = 9; - - // See :option:`--log-format` for details. - string log_format = 10; - - // See :option:`--log-format-escaped` for details. - bool log_format_escaped = 27; - - // See :option:`--log-path` for details. - string log_path = 11; - - // See :option:`--service-cluster` for details. - string service_cluster = 13; - - // See :option:`--service-node` for details. - string service_node = 14; - - // See :option:`--service-zone` for details. - string service_zone = 15; - - // See :option:`--file-flush-interval-msec` for details. - google.protobuf.Duration file_flush_interval = 16; - - // See :option:`--drain-time-s` for details. - google.protobuf.Duration drain_time = 17; - - // See :option:`--parent-shutdown-time-s` for details. - google.protobuf.Duration parent_shutdown_time = 18; - - // See :option:`--mode` for details. - Mode mode = 19; - - // max_stats and max_obj_name_len are now unused and have no effect. 
- uint64 max_stats = 20 [deprecated = true, (envoy.annotations.disallowed_by_default) = true]; - - uint64 max_obj_name_len = 21 - [deprecated = true, (envoy.annotations.disallowed_by_default) = true]; - - // See :option:`--disable-hot-restart` for details. - bool disable_hot_restart = 22; - - // See :option:`--enable-mutex-tracing` for details. - bool enable_mutex_tracing = 23; - - // See :option:`--restart-epoch` for details. - uint32 restart_epoch = 24; - - // See :option:`--cpuset-threads` for details. - bool cpuset_threads = 25; - - // See :option:`--disable-extensions` for details. - repeated string disabled_extensions = 28; -} diff --git a/generated_api_shadow/envoy/admin/v2alpha/tap.proto b/generated_api_shadow/envoy/admin/v2alpha/tap.proto deleted file mode 100644 index 6335b4db62841..0000000000000 --- a/generated_api_shadow/envoy/admin/v2alpha/tap.proto +++ /dev/null @@ -1,25 +0,0 @@ -syntax = "proto3"; - -package envoy.admin.v2alpha; - -import "envoy/service/tap/v2alpha/common.proto"; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.admin.v2alpha"; -option java_outer_classname = "TapProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Tap] - -// The /tap admin request body that is used to configure an active tap session. -message TapRequest { - // The opaque configuration ID used to match the configuration to a loaded extension. - // A tap extension configures a similar opaque ID that is used to match. - string config_id = 1 [(validate.rules).string = {min_bytes: 1}]; - - // The tap configuration to load. 
- service.tap.v2alpha.TapConfig tap_config = 2 [(validate.rules).message = {required: true}]; -} diff --git a/generated_api_shadow/envoy/admin/v3/BUILD b/generated_api_shadow/envoy/admin/v3/BUILD deleted file mode 100644 index 38eadcb09feaa..0000000000000 --- a/generated_api_shadow/envoy/admin/v3/BUILD +++ /dev/null @@ -1,18 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/admin/v2alpha:pkg", - "//envoy/annotations:pkg", - "//envoy/config/bootstrap/v3:pkg", - "//envoy/config/cluster/v3:pkg", - "//envoy/config/core/v3:pkg", - "//envoy/config/tap/v3:pkg", - "//envoy/type/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/admin/v3/certs.proto b/generated_api_shadow/envoy/admin/v3/certs.proto deleted file mode 100644 index 5580bb5ef17d1..0000000000000 --- a/generated_api_shadow/envoy/admin/v3/certs.proto +++ /dev/null @@ -1,84 +0,0 @@ -syntax = "proto3"; - -package envoy.admin.v3; - -import "google/protobuf/timestamp.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; - -option java_package = "io.envoyproxy.envoy.admin.v3"; -option java_outer_classname = "CertsProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Certificates] - -// Proto representation of certificate details. Admin endpoint uses this wrapper for `/certs` to -// display certificate information. See :ref:`/certs ` for more -// information. -message Certificates { - option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v2alpha.Certificates"; - - // List of certificates known to an Envoy. 
- repeated Certificate certificates = 1; -} - -message Certificate { - option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v2alpha.Certificate"; - - // Details of CA certificate. - repeated CertificateDetails ca_cert = 1; - - // Details of Certificate Chain - repeated CertificateDetails cert_chain = 2; -} - -// [#next-free-field: 8] -message CertificateDetails { - option (udpa.annotations.versioning).previous_message_type = - "envoy.admin.v2alpha.CertificateDetails"; - - message OcspDetails { - // Indicates the time from which the OCSP response is valid. - google.protobuf.Timestamp valid_from = 1; - - // Indicates the time at which the OCSP response expires. - google.protobuf.Timestamp expiration = 2; - } - - // Path of the certificate. - string path = 1; - - // Certificate Serial Number. - string serial_number = 2; - - // List of Subject Alternate names. - repeated SubjectAlternateName subject_alt_names = 3; - - // Minimum of days until expiration of certificate and it's chain. - uint64 days_until_expiration = 4; - - // Indicates the time from which the certificate is valid. - google.protobuf.Timestamp valid_from = 5; - - // Indicates the time at which the certificate expires. - google.protobuf.Timestamp expiration_time = 6; - - // Details related to the OCSP response associated with this certificate, if any. - OcspDetails ocsp_details = 7; -} - -message SubjectAlternateName { - option (udpa.annotations.versioning).previous_message_type = - "envoy.admin.v2alpha.SubjectAlternateName"; - - // Subject Alternate Name. 
- oneof name { - string dns = 1; - - string uri = 2; - - string ip_address = 3; - } -} diff --git a/generated_api_shadow/envoy/admin/v3/clusters.proto b/generated_api_shadow/envoy/admin/v3/clusters.proto deleted file mode 100644 index 509280f466243..0000000000000 --- a/generated_api_shadow/envoy/admin/v3/clusters.proto +++ /dev/null @@ -1,177 +0,0 @@ -syntax = "proto3"; - -package envoy.admin.v3; - -import "envoy/admin/v3/metrics.proto"; -import "envoy/config/cluster/v3/circuit_breaker.proto"; -import "envoy/config/core/v3/address.proto"; -import "envoy/config/core/v3/base.proto"; -import "envoy/config/core/v3/health_check.proto"; -import "envoy/type/v3/percent.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; - -option java_package = "io.envoyproxy.envoy.admin.v3"; -option java_outer_classname = "ClustersProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Clusters] - -// Admin endpoint uses this wrapper for `/clusters` to display cluster status information. -// See :ref:`/clusters ` for more information. -message Clusters { - option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v2alpha.Clusters"; - - // Mapping from cluster name to each cluster's status. - repeated ClusterStatus cluster_statuses = 1; -} - -// Details an individual cluster's current status. -// [#next-free-field: 8] -message ClusterStatus { - option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v2alpha.ClusterStatus"; - - // Name of the cluster. - string name = 1; - - // Denotes whether this cluster was added via API or configured statically. - bool added_via_api = 2; - - // The success rate threshold used in the last interval. - // If - // :ref:`outlier_detection.split_external_local_origin_errors` - // is *false*, all errors: externally and locally generated were used to calculate the threshold. 
- // If - // :ref:`outlier_detection.split_external_local_origin_errors` - // is *true*, only externally generated errors were used to calculate the threshold. - // The threshold is used to eject hosts based on their success rate. See - // :ref:`Cluster outlier detection ` documentation for details. - // - // Note: this field may be omitted in any of the three following cases: - // - // 1. There were not enough hosts with enough request volume to proceed with success rate based - // outlier ejection. - // 2. The threshold is computed to be < 0 because a negative value implies that there was no - // threshold for that interval. - // 3. Outlier detection is not enabled for this cluster. - type.v3.Percent success_rate_ejection_threshold = 3; - - // Mapping from host address to the host's current status. - repeated HostStatus host_statuses = 4; - - // The success rate threshold used in the last interval when only locally originated failures were - // taken into account and externally originated errors were treated as success. - // This field should be interpreted only when - // :ref:`outlier_detection.split_external_local_origin_errors` - // is *true*. The threshold is used to eject hosts based on their success rate. - // See :ref:`Cluster outlier detection ` documentation for - // details. - // - // Note: this field may be omitted in any of the three following cases: - // - // 1. There were not enough hosts with enough request volume to proceed with success rate based - // outlier ejection. - // 2. The threshold is computed to be < 0 because a negative value implies that there was no - // threshold for that interval. - // 3. Outlier detection is not enabled for this cluster. - type.v3.Percent local_origin_success_rate_ejection_threshold = 5; - - // :ref:`Circuit breaking ` settings of the cluster. - config.cluster.v3.CircuitBreakers circuit_breakers = 6; - - // Observability name of the cluster. 
- string observability_name = 7; -} - -// Current state of a particular host. -// [#next-free-field: 10] -message HostStatus { - option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v2alpha.HostStatus"; - - // Address of this host. - config.core.v3.Address address = 1; - - // List of stats specific to this host. - repeated SimpleMetric stats = 2; - - // The host's current health status. - HostHealthStatus health_status = 3; - - // Request success rate for this host over the last calculated interval. - // If - // :ref:`outlier_detection.split_external_local_origin_errors` - // is *false*, all errors: externally and locally generated were used in success rate - // calculation. If - // :ref:`outlier_detection.split_external_local_origin_errors` - // is *true*, only externally generated errors were used in success rate calculation. - // See :ref:`Cluster outlier detection ` documentation for - // details. - // - // Note: the message will not be present if host did not have enough request volume to calculate - // success rate or the cluster did not have enough hosts to run through success rate outlier - // ejection. - type.v3.Percent success_rate = 4; - - // The host's weight. If not configured, the value defaults to 1. - uint32 weight = 5; - - // The hostname of the host, if applicable. - string hostname = 6; - - // The host's priority. If not configured, the value defaults to 0 (highest priority). - uint32 priority = 7; - - // Request success rate for this host over the last calculated - // interval when only locally originated errors are taken into account and externally originated - // errors were treated as success. - // This field should be interpreted only when - // :ref:`outlier_detection.split_external_local_origin_errors` - // is *true*. - // See :ref:`Cluster outlier detection ` documentation for - // details. 
- // - // Note: the message will not be present if host did not have enough request volume to calculate - // success rate or the cluster did not have enough hosts to run through success rate outlier - // ejection. - type.v3.Percent local_origin_success_rate = 8; - - // locality of the host. - config.core.v3.Locality locality = 9; -} - -// Health status for a host. -// [#next-free-field: 9] -message HostHealthStatus { - option (udpa.annotations.versioning).previous_message_type = - "envoy.admin.v2alpha.HostHealthStatus"; - - // The host is currently failing active health checks. - bool failed_active_health_check = 1; - - // The host is currently considered an outlier and has been ejected. - bool failed_outlier_check = 2; - - // The host is currently being marked as degraded through active health checking. - bool failed_active_degraded_check = 4; - - // The host has been removed from service discovery, but is being stabilized due to active - // health checking. - bool pending_dynamic_removal = 5; - - // The host has not yet been health checked. - bool pending_active_hc = 6; - - // The host should be excluded from panic, spillover, etc. calculations because it was explicitly - // taken out of rotation via protocol signal and is not meant to be routed to. - bool excluded_via_immediate_hc_fail = 7; - - // The host failed active HC due to timeout. - bool active_hc_timeout = 8; - - // Health status as reported by EDS. Note: only HEALTHY and UNHEALTHY are currently supported - // here. - // [#comment:TODO(mrice32): pipe through remaining EDS health status possibilities.] 
- config.core.v3.HealthStatus eds_health_status = 3; -} diff --git a/generated_api_shadow/envoy/admin/v3/config_dump.proto b/generated_api_shadow/envoy/admin/v3/config_dump.proto deleted file mode 100644 index ddafb56b39362..0000000000000 --- a/generated_api_shadow/envoy/admin/v3/config_dump.proto +++ /dev/null @@ -1,482 +0,0 @@ -syntax = "proto3"; - -package envoy.admin.v3; - -import "envoy/config/bootstrap/v3/bootstrap.proto"; - -import "google/protobuf/any.proto"; -import "google/protobuf/timestamp.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; - -option java_package = "io.envoyproxy.envoy.admin.v3"; -option java_outer_classname = "ConfigDumpProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: ConfigDump] - -// Resource status from the view of a xDS client, which tells the synchronization -// status between the xDS client and the xDS server. -enum ClientResourceStatus { - // Resource status is not available/unknown. - UNKNOWN = 0; - - // Client requested this resource but hasn't received any update from management - // server. The client will not fail requests, but will queue them until update - // arrives or the client times out waiting for the resource. - REQUESTED = 1; - - // This resource has been requested by the client but has either not been - // delivered by the server or was previously delivered by the server and then - // subsequently removed from resources provided by the server. For more - // information, please refer to the :ref:`"Knowing When a Requested Resource - // Does Not Exist" ` section. - DOES_NOT_EXIST = 2; - - // Client received this resource and replied with ACK. - ACKED = 3; - - // Client received this resource and replied with NACK. 
- NACKED = 4; -} - -// The :ref:`/config_dump ` admin endpoint uses this wrapper -// message to maintain and serve arbitrary configuration information from any component in Envoy. -message ConfigDump { - option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v2alpha.ConfigDump"; - - // This list is serialized and dumped in its entirety at the - // :ref:`/config_dump ` endpoint. - // - // The following configurations are currently supported and will be dumped in the order given - // below: - // - // * *bootstrap*: :ref:`BootstrapConfigDump ` - // * *clusters*: :ref:`ClustersConfigDump ` - // * *endpoints*: :ref:`EndpointsConfigDump ` - // * *listeners*: :ref:`ListenersConfigDump ` - // * *scoped_routes*: :ref:`ScopedRoutesConfigDump ` - // * *routes*: :ref:`RoutesConfigDump ` - // * *secrets*: :ref:`SecretsConfigDump ` - // - // EDS Configuration will only be dumped by using parameter `?include_eds` - // - // You can filter output with the resource and mask query parameters. - // See :ref:`/config_dump?resource={} `, - // :ref:`/config_dump?mask={} `, - // or :ref:`/config_dump?resource={},mask={} - // ` for more information. - repeated google.protobuf.Any configs = 1; -} - -message UpdateFailureState { - option (udpa.annotations.versioning).previous_message_type = - "envoy.admin.v2alpha.UpdateFailureState"; - - // What the component configuration would have been if the update had succeeded. - // This field may not be populated by xDS clients due to storage overhead. - google.protobuf.Any failed_configuration = 1; - - // Time of the latest failed update attempt. - google.protobuf.Timestamp last_update_attempt = 2; - - // Details about the last failed update attempt. - string details = 3; - - // This is the version of the rejected resource. - // [#not-implemented-hide:] - string version_info = 4; -} - -// This message describes the bootstrap configuration that Envoy was started with. This includes -// any CLI overrides that were merged. 
Bootstrap configuration information can be used to recreate -// the static portions of an Envoy configuration by reusing the output as the bootstrap -// configuration for another Envoy. -message BootstrapConfigDump { - option (udpa.annotations.versioning).previous_message_type = - "envoy.admin.v2alpha.BootstrapConfigDump"; - - config.bootstrap.v3.Bootstrap bootstrap = 1; - - // The timestamp when the BootstrapConfig was last updated. - google.protobuf.Timestamp last_updated = 2; -} - -// Envoy's listener manager fills this message with all currently known listeners. Listener -// configuration information can be used to recreate an Envoy configuration by populating all -// listeners as static listeners or by returning them in a LDS response. -message ListenersConfigDump { - option (udpa.annotations.versioning).previous_message_type = - "envoy.admin.v2alpha.ListenersConfigDump"; - - // Describes a statically loaded listener. - message StaticListener { - option (udpa.annotations.versioning).previous_message_type = - "envoy.admin.v2alpha.ListenersConfigDump.StaticListener"; - - // The listener config. - google.protobuf.Any listener = 1; - - // The timestamp when the Listener was last successfully updated. - google.protobuf.Timestamp last_updated = 2; - } - - message DynamicListenerState { - option (udpa.annotations.versioning).previous_message_type = - "envoy.admin.v2alpha.ListenersConfigDump.DynamicListenerState"; - - // This is the per-resource version information. This version is currently taken from the - // :ref:`version_info ` field at the time - // that the listener was loaded. In the future, discrete per-listener versions may be supported - // by the API. - string version_info = 1; - - // The listener config. - google.protobuf.Any listener = 2; - - // The timestamp when the Listener was last successfully updated. - google.protobuf.Timestamp last_updated = 3; - } - - // Describes a dynamically loaded listener via the LDS API. 
- // [#next-free-field: 7] - message DynamicListener { - option (udpa.annotations.versioning).previous_message_type = - "envoy.admin.v2alpha.ListenersConfigDump.DynamicListener"; - - // The name or unique id of this listener, pulled from the DynamicListenerState config. - string name = 1; - - // The listener state for any active listener by this name. - // These are listeners that are available to service data plane traffic. - DynamicListenerState active_state = 2; - - // The listener state for any warming listener by this name. - // These are listeners that are currently undergoing warming in preparation to service data - // plane traffic. Note that if attempting to recreate an Envoy configuration from a - // configuration dump, the warming listeners should generally be discarded. - DynamicListenerState warming_state = 3; - - // The listener state for any draining listener by this name. - // These are listeners that are currently undergoing draining in preparation to stop servicing - // data plane traffic. Note that if attempting to recreate an Envoy configuration from a - // configuration dump, the draining listeners should generally be discarded. - DynamicListenerState draining_state = 4; - - // Set if the last update failed, cleared after the next successful update. - // The *error_state* field contains the rejected version of this particular - // resource along with the reason and timestamp. For successfully updated or - // acknowledged resource, this field should be empty. - UpdateFailureState error_state = 5; - - // The client status of this resource. - // [#not-implemented-hide:] - ClientResourceStatus client_status = 6; - } - - // This is the :ref:`version_info ` in the - // last processed LDS discovery response. If there are only static bootstrap listeners, this field - // will be "". - string version_info = 1; - - // The statically loaded listener configs. 
- repeated StaticListener static_listeners = 2; - - // State for any warming, active, or draining listeners. - repeated DynamicListener dynamic_listeners = 3; -} - -// Envoy's cluster manager fills this message with all currently known clusters. Cluster -// configuration information can be used to recreate an Envoy configuration by populating all -// clusters as static clusters or by returning them in a CDS response. -message ClustersConfigDump { - option (udpa.annotations.versioning).previous_message_type = - "envoy.admin.v2alpha.ClustersConfigDump"; - - // Describes a statically loaded cluster. - message StaticCluster { - option (udpa.annotations.versioning).previous_message_type = - "envoy.admin.v2alpha.ClustersConfigDump.StaticCluster"; - - // The cluster config. - google.protobuf.Any cluster = 1; - - // The timestamp when the Cluster was last updated. - google.protobuf.Timestamp last_updated = 2; - } - - // Describes a dynamically loaded cluster via the CDS API. - // [#next-free-field: 6] - message DynamicCluster { - option (udpa.annotations.versioning).previous_message_type = - "envoy.admin.v2alpha.ClustersConfigDump.DynamicCluster"; - - // This is the per-resource version information. This version is currently taken from the - // :ref:`version_info ` field at the time - // that the cluster was loaded. In the future, discrete per-cluster versions may be supported by - // the API. - string version_info = 1; - - // The cluster config. - google.protobuf.Any cluster = 2; - - // The timestamp when the Cluster was last updated. - google.protobuf.Timestamp last_updated = 3; - - // Set if the last update failed, cleared after the next successful update. - // The *error_state* field contains the rejected version of this particular - // resource along with the reason and timestamp. For successfully updated or - // acknowledged resource, this field should be empty. 
- // [#not-implemented-hide:] - UpdateFailureState error_state = 4; - - // The client status of this resource. - // [#not-implemented-hide:] - ClientResourceStatus client_status = 5; - } - - // This is the :ref:`version_info ` in the - // last processed CDS discovery response. If there are only static bootstrap clusters, this field - // will be "". - string version_info = 1; - - // The statically loaded cluster configs. - repeated StaticCluster static_clusters = 2; - - // The dynamically loaded active clusters. These are clusters that are available to service - // data plane traffic. - repeated DynamicCluster dynamic_active_clusters = 3; - - // The dynamically loaded warming clusters. These are clusters that are currently undergoing - // warming in preparation to service data plane traffic. Note that if attempting to recreate an - // Envoy configuration from a configuration dump, the warming clusters should generally be - // discarded. - repeated DynamicCluster dynamic_warming_clusters = 4; -} - -// Envoy's RDS implementation fills this message with all currently loaded routes, as described by -// their RouteConfiguration objects. Static routes that are either defined in the bootstrap configuration -// or defined inline while configuring listeners are separated from those configured dynamically via RDS. -// Route configuration information can be used to recreate an Envoy configuration by populating all routes -// as static routes or by returning them in RDS responses. -message RoutesConfigDump { - option (udpa.annotations.versioning).previous_message_type = - "envoy.admin.v2alpha.RoutesConfigDump"; - - message StaticRouteConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.admin.v2alpha.RoutesConfigDump.StaticRouteConfig"; - - // The route config. - google.protobuf.Any route_config = 1; - - // The timestamp when the Route was last updated. 
- google.protobuf.Timestamp last_updated = 2; - } - - // [#next-free-field: 6] - message DynamicRouteConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.admin.v2alpha.RoutesConfigDump.DynamicRouteConfig"; - - // This is the per-resource version information. This version is currently taken from the - // :ref:`version_info ` field at the time that - // the route configuration was loaded. - string version_info = 1; - - // The route config. - google.protobuf.Any route_config = 2; - - // The timestamp when the Route was last updated. - google.protobuf.Timestamp last_updated = 3; - - // Set if the last update failed, cleared after the next successful update. - // The *error_state* field contains the rejected version of this particular - // resource along with the reason and timestamp. For successfully updated or - // acknowledged resource, this field should be empty. - // [#not-implemented-hide:] - UpdateFailureState error_state = 4; - - // The client status of this resource. - // [#not-implemented-hide:] - ClientResourceStatus client_status = 5; - } - - // The statically loaded route configs. - repeated StaticRouteConfig static_route_configs = 2; - - // The dynamically loaded route configs. - repeated DynamicRouteConfig dynamic_route_configs = 3; -} - -// Envoy's scoped RDS implementation fills this message with all currently loaded route -// configuration scopes (defined via ScopedRouteConfigurationsSet protos). This message lists both -// the scopes defined inline with the higher order object (i.e., the HttpConnectionManager) and the -// dynamically obtained scopes via the SRDS API. 
-message ScopedRoutesConfigDump { - option (udpa.annotations.versioning).previous_message_type = - "envoy.admin.v2alpha.ScopedRoutesConfigDump"; - - message InlineScopedRouteConfigs { - option (udpa.annotations.versioning).previous_message_type = - "envoy.admin.v2alpha.ScopedRoutesConfigDump.InlineScopedRouteConfigs"; - - // The name assigned to the scoped route configurations. - string name = 1; - - // The scoped route configurations. - repeated google.protobuf.Any scoped_route_configs = 2; - - // The timestamp when the scoped route config set was last updated. - google.protobuf.Timestamp last_updated = 3; - } - - // [#next-free-field: 7] - message DynamicScopedRouteConfigs { - option (udpa.annotations.versioning).previous_message_type = - "envoy.admin.v2alpha.ScopedRoutesConfigDump.DynamicScopedRouteConfigs"; - - // The name assigned to the scoped route configurations. - string name = 1; - - // This is the per-resource version information. This version is currently taken from the - // :ref:`version_info ` field at the time that - // the scoped routes configuration was loaded. - string version_info = 2; - - // The scoped route configurations. - repeated google.protobuf.Any scoped_route_configs = 3; - - // The timestamp when the scoped route config set was last updated. - google.protobuf.Timestamp last_updated = 4; - - // Set if the last update failed, cleared after the next successful update. - // The *error_state* field contains the rejected version of this particular - // resource along with the reason and timestamp. For successfully updated or - // acknowledged resource, this field should be empty. - // [#not-implemented-hide:] - UpdateFailureState error_state = 5; - - // The client status of this resource. - // [#not-implemented-hide:] - ClientResourceStatus client_status = 6; - } - - // The statically loaded scoped route configs. - repeated InlineScopedRouteConfigs inline_scoped_route_configs = 1; - - // The dynamically loaded scoped route configs. 
- repeated DynamicScopedRouteConfigs dynamic_scoped_route_configs = 2; -} - -// Envoys SDS implementation fills this message with all secrets fetched dynamically via SDS. -message SecretsConfigDump { - option (udpa.annotations.versioning).previous_message_type = - "envoy.admin.v2alpha.SecretsConfigDump"; - - // DynamicSecret contains secret information fetched via SDS. - // [#next-free-field: 7] - message DynamicSecret { - option (udpa.annotations.versioning).previous_message_type = - "envoy.admin.v2alpha.SecretsConfigDump.DynamicSecret"; - - // The name assigned to the secret. - string name = 1; - - // This is the per-resource version information. - string version_info = 2; - - // The timestamp when the secret was last updated. - google.protobuf.Timestamp last_updated = 3; - - // The actual secret information. - // Security sensitive information is redacted (replaced with "[redacted]") for - // private keys and passwords in TLS certificates. - google.protobuf.Any secret = 4; - - // Set if the last update failed, cleared after the next successful update. - // The *error_state* field contains the rejected version of this particular - // resource along with the reason and timestamp. For successfully updated or - // acknowledged resource, this field should be empty. - // [#not-implemented-hide:] - UpdateFailureState error_state = 5; - - // The client status of this resource. - // [#not-implemented-hide:] - ClientResourceStatus client_status = 6; - } - - // StaticSecret specifies statically loaded secret in bootstrap. - message StaticSecret { - option (udpa.annotations.versioning).previous_message_type = - "envoy.admin.v2alpha.SecretsConfigDump.StaticSecret"; - - // The name assigned to the secret. - string name = 1; - - // The timestamp when the secret was last updated. - google.protobuf.Timestamp last_updated = 2; - - // The actual secret information. 
- // Security sensitive information is redacted (replaced with "[redacted]") for - // private keys and passwords in TLS certificates. - google.protobuf.Any secret = 3; - } - - // The statically loaded secrets. - repeated StaticSecret static_secrets = 1; - - // The dynamically loaded active secrets. These are secrets that are available to service - // clusters or listeners. - repeated DynamicSecret dynamic_active_secrets = 2; - - // The dynamically loaded warming secrets. These are secrets that are currently undergoing - // warming in preparation to service clusters or listeners. - repeated DynamicSecret dynamic_warming_secrets = 3; -} - -// Envoy's admin fill this message with all currently known endpoints. Endpoint -// configuration information can be used to recreate an Envoy configuration by populating all -// endpoints as static endpoints or by returning them in an EDS response. -message EndpointsConfigDump { - message StaticEndpointConfig { - // The endpoint config. - google.protobuf.Any endpoint_config = 1; - - // [#not-implemented-hide:] The timestamp when the Endpoint was last updated. - google.protobuf.Timestamp last_updated = 2; - } - - // [#next-free-field: 6] - message DynamicEndpointConfig { - // [#not-implemented-hide:] This is the per-resource version information. This version is currently taken from the - // :ref:`version_info ` field at the time that - // the endpoint configuration was loaded. - string version_info = 1; - - // The endpoint config. - google.protobuf.Any endpoint_config = 2; - - // [#not-implemented-hide:] The timestamp when the Endpoint was last updated. - google.protobuf.Timestamp last_updated = 3; - - // Set if the last update failed, cleared after the next successful update. - // The *error_state* field contains the rejected version of this particular - // resource along with the reason and timestamp. For successfully updated or - // acknowledged resource, this field should be empty. 
- // [#not-implemented-hide:] - UpdateFailureState error_state = 4; - - // The client status of this resource. - // [#not-implemented-hide:] - ClientResourceStatus client_status = 5; - } - - // The statically loaded endpoint configs. - repeated StaticEndpointConfig static_endpoint_configs = 2; - - // The dynamically loaded endpoint configs. - repeated DynamicEndpointConfig dynamic_endpoint_configs = 3; -} diff --git a/generated_api_shadow/envoy/admin/v3/init_dump.proto b/generated_api_shadow/envoy/admin/v3/init_dump.proto deleted file mode 100644 index 0c2eb738c4310..0000000000000 --- a/generated_api_shadow/envoy/admin/v3/init_dump.proto +++ /dev/null @@ -1,31 +0,0 @@ -syntax = "proto3"; - -package envoy.admin.v3; - -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.admin.v3"; -option java_outer_classname = "InitDumpProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: InitDump] - -// Dumps of unready targets of envoy init managers. Envoy's admin fills this message with init managers, -// which provides the information of their unready targets. -// The :ref:`/init_dump ` will dump all unready targets information. -message UnreadyTargetsDumps { - // Message of unready targets information of an init manager. - message UnreadyTargetsDump { - // Name of the init manager. Example: "init_manager_xxx". - string name = 1; - - // Names of unready targets of the init manager. Example: "target_xxx". - repeated string target_names = 2; - } - - // You can choose specific component to dump unready targets with mask query parameter. - // See :ref:`/init_dump?mask={} ` for more information. - // The dumps of unready targets of all init managers. 
- repeated UnreadyTargetsDump unready_targets_dumps = 1; -} diff --git a/generated_api_shadow/envoy/admin/v3/listeners.proto b/generated_api_shadow/envoy/admin/v3/listeners.proto deleted file mode 100644 index 6197a44e4243f..0000000000000 --- a/generated_api_shadow/envoy/admin/v3/listeners.proto +++ /dev/null @@ -1,36 +0,0 @@ -syntax = "proto3"; - -package envoy.admin.v3; - -import "envoy/config/core/v3/address.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; - -option java_package = "io.envoyproxy.envoy.admin.v3"; -option java_outer_classname = "ListenersProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Listeners] - -// Admin endpoint uses this wrapper for `/listeners` to display listener status information. -// See :ref:`/listeners ` for more information. -message Listeners { - option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v2alpha.Listeners"; - - // List of listener statuses. - repeated ListenerStatus listener_statuses = 1; -} - -// Details an individual listener's current status. -message ListenerStatus { - option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v2alpha.ListenerStatus"; - - // Name of the listener - string name = 1; - - // The actual local address that the listener is listening on. If a listener was configured - // to listen on port 0, then this address has the port that was allocated by the OS. 
- config.core.v3.Address local_address = 2; -} diff --git a/generated_api_shadow/envoy/admin/v3/memory.proto b/generated_api_shadow/envoy/admin/v3/memory.proto deleted file mode 100644 index bcf9f271748d8..0000000000000 --- a/generated_api_shadow/envoy/admin/v3/memory.proto +++ /dev/null @@ -1,47 +0,0 @@ -syntax = "proto3"; - -package envoy.admin.v3; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; - -option java_package = "io.envoyproxy.envoy.admin.v3"; -option java_outer_classname = "MemoryProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Memory] - -// Proto representation of the internal memory consumption of an Envoy instance. These represent -// values extracted from an internal TCMalloc instance. For more information, see the section of the -// docs entitled ["Generic Tcmalloc Status"](https://gperftools.github.io/gperftools/tcmalloc.html). -// [#next-free-field: 7] -message Memory { - option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v2alpha.Memory"; - - // The number of bytes allocated by the heap for Envoy. This is an alias for - // `generic.current_allocated_bytes`. - uint64 allocated = 1; - - // The number of bytes reserved by the heap but not necessarily allocated. This is an alias for - // `generic.heap_size`. - uint64 heap_size = 2; - - // The number of bytes in free, unmapped pages in the page heap. These bytes always count towards - // virtual memory usage, and depending on the OS, typically do not count towards physical memory - // usage. This is an alias for `tcmalloc.pageheap_unmapped_bytes`. - uint64 pageheap_unmapped = 3; - - // The number of bytes in free, mapped pages in the page heap. These bytes always count towards - // virtual memory usage, and unless the underlying memory is swapped out by the OS, they also - // count towards physical memory usage. 
This is an alias for `tcmalloc.pageheap_free_bytes`. - uint64 pageheap_free = 4; - - // The amount of memory used by the TCMalloc thread caches (for small objects). This is an alias - // for `tcmalloc.current_total_thread_cache_bytes`. - uint64 total_thread_cache = 5; - - // The number of bytes of the physical memory usage by the allocator. This is an alias for - // `generic.total_physical_bytes`. - uint64 total_physical_bytes = 6; -} diff --git a/generated_api_shadow/envoy/admin/v3/metrics.proto b/generated_api_shadow/envoy/admin/v3/metrics.proto deleted file mode 100644 index 71592ac1e9ecf..0000000000000 --- a/generated_api_shadow/envoy/admin/v3/metrics.proto +++ /dev/null @@ -1,32 +0,0 @@ -syntax = "proto3"; - -package envoy.admin.v3; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; - -option java_package = "io.envoyproxy.envoy.admin.v3"; -option java_outer_classname = "MetricsProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Metrics] - -// Proto representation of an Envoy Counter or Gauge value. -message SimpleMetric { - option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v2alpha.SimpleMetric"; - - enum Type { - COUNTER = 0; - GAUGE = 1; - } - - // Type of the metric represented. - Type type = 1; - - // Current metric value. - uint64 value = 2; - - // Name of the metric. 
- string name = 3; -} diff --git a/generated_api_shadow/envoy/admin/v3/mutex_stats.proto b/generated_api_shadow/envoy/admin/v3/mutex_stats.proto deleted file mode 100644 index 49965d87ae805..0000000000000 --- a/generated_api_shadow/envoy/admin/v3/mutex_stats.proto +++ /dev/null @@ -1,33 +0,0 @@ -syntax = "proto3"; - -package envoy.admin.v3; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; - -option java_package = "io.envoyproxy.envoy.admin.v3"; -option java_outer_classname = "MutexStatsProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: MutexStats] - -// Proto representation of the statistics collected upon absl::Mutex contention, if Envoy is run -// under :option:`--enable-mutex-tracing`. For more information, see the `absl::Mutex` -// [docs](https://abseil.io/about/design/mutex#extra-features). -// -// *NB*: The wait cycles below are measured by `absl::base_internal::CycleClock`, and may not -// correspond to core clock frequency. For more information, see the `CycleClock` -// [docs](https://github.com/abseil/abseil-cpp/blob/master/absl/base/internal/cycleclock.h). -message MutexStats { - option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v2alpha.MutexStats"; - - // The number of individual mutex contentions which have occurred since startup. - uint64 num_contentions = 1; - - // The length of the current contention wait cycle. - uint64 current_wait_cycles = 2; - - // The lifetime total of all contention wait cycles. 
- uint64 lifetime_wait_cycles = 3; -} diff --git a/generated_api_shadow/envoy/admin/v3/server_info.proto b/generated_api_shadow/envoy/admin/v3/server_info.proto deleted file mode 100644 index 7593ade49a62e..0000000000000 --- a/generated_api_shadow/envoy/admin/v3/server_info.proto +++ /dev/null @@ -1,205 +0,0 @@ -syntax = "proto3"; - -package envoy.admin.v3; - -import "envoy/config/core/v3/base.proto"; - -import "google/protobuf/duration.proto"; - -import "envoy/annotations/deprecation.proto"; -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; - -option java_package = "io.envoyproxy.envoy.admin.v3"; -option java_outer_classname = "ServerInfoProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Server State] - -// Proto representation of the value returned by /server_info, containing -// server version/server status information. -// [#next-free-field: 8] -message ServerInfo { - option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v2alpha.ServerInfo"; - - enum State { - // Server is live and serving traffic. - LIVE = 0; - - // Server is draining listeners in response to external health checks failing. - DRAINING = 1; - - // Server has not yet completed cluster manager initialization. - PRE_INITIALIZING = 2; - - // Server is running the cluster manager initialization callbacks (e.g., RDS). - INITIALIZING = 3; - } - - // Server version. - string version = 1; - - // State of the server. - State state = 2; - - // Uptime since current epoch was started. - google.protobuf.Duration uptime_current_epoch = 3; - - // Uptime since the start of the first epoch. - google.protobuf.Duration uptime_all_epochs = 4; - - // Hot restart version. - string hot_restart_version = 5; - - // Command line options the server is currently running with. - CommandLineOptions command_line_options = 6; - - // Populated node identity of this server. 
- config.core.v3.Node node = 7; -} - -// [#next-free-field: 38] -message CommandLineOptions { - option (udpa.annotations.versioning).previous_message_type = - "envoy.admin.v2alpha.CommandLineOptions"; - - enum IpVersion { - v4 = 0; - v6 = 1; - } - - enum Mode { - // Validate configs and then serve traffic normally. - Serve = 0; - - // Validate configs and exit. - Validate = 1; - - // Completely load and initialize the config, and then exit without running the listener loop. - InitOnly = 2; - } - - enum DrainStrategy { - // Gradually discourage connections over the course of the drain period. - Gradual = 0; - - // Discourage all connections for the duration of the drain sequence. - Immediate = 1; - } - - reserved 12, 29; - - reserved "bootstrap_version"; - - // See :option:`--base-id` for details. - uint64 base_id = 1; - - // See :option:`--use-dynamic-base-id` for details. - bool use_dynamic_base_id = 31; - - // See :option:`--base-id-path` for details. - string base_id_path = 32; - - // See :option:`--concurrency` for details. - uint32 concurrency = 2; - - // See :option:`--config-path` for details. - string config_path = 3; - - // See :option:`--config-yaml` for details. - string config_yaml = 4; - - // See :option:`--allow-unknown-static-fields` for details. - bool allow_unknown_static_fields = 5; - - // See :option:`--reject-unknown-dynamic-fields` for details. - bool reject_unknown_dynamic_fields = 26; - - // See :option:`--ignore-unknown-dynamic-fields` for details. - bool ignore_unknown_dynamic_fields = 30; - - // See :option:`--admin-address-path` for details. - string admin_address_path = 6; - - // See :option:`--local-address-ip-version` for details. - IpVersion local_address_ip_version = 7; - - // See :option:`--log-level` for details. - string log_level = 8; - - // See :option:`--component-log-level` for details. - string component_log_level = 9; - - // See :option:`--log-format` for details. 
- string log_format = 10; - - // See :option:`--log-format-escaped` for details. - bool log_format_escaped = 27; - - // See :option:`--log-path` for details. - string log_path = 11; - - // See :option:`--service-cluster` for details. - string service_cluster = 13; - - // See :option:`--service-node` for details. - string service_node = 14; - - // See :option:`--service-zone` for details. - string service_zone = 15; - - // See :option:`--file-flush-interval-msec` for details. - google.protobuf.Duration file_flush_interval = 16; - - // See :option:`--drain-time-s` for details. - google.protobuf.Duration drain_time = 17; - - // See :option:`--drain-strategy` for details. - DrainStrategy drain_strategy = 33; - - // See :option:`--parent-shutdown-time-s` for details. - google.protobuf.Duration parent_shutdown_time = 18; - - // See :option:`--mode` for details. - Mode mode = 19; - - // See :option:`--disable-hot-restart` for details. - bool disable_hot_restart = 22; - - // See :option:`--enable-mutex-tracing` for details. - bool enable_mutex_tracing = 23; - - // See :option:`--restart-epoch` for details. - uint32 restart_epoch = 24; - - // See :option:`--cpuset-threads` for details. - bool cpuset_threads = 25; - - // See :option:`--disable-extensions` for details. - repeated string disabled_extensions = 28; - - // See :option:`--enable-fine-grain-logging` for details. - bool enable_fine_grain_logging = 34; - - // See :option:`--socket-path` for details. - string socket_path = 35; - - // See :option:`--socket-mode` for details. - uint32 socket_mode = 36; - - // See :option:`--enable-core-dump` for details. 
- bool enable_core_dump = 37; - - uint64 hidden_envoy_deprecated_max_stats = 20 [ - deprecated = true, - (envoy.annotations.deprecated_at_minor_version) = "3.0", - (envoy.annotations.disallowed_by_default) = true - ]; - - uint64 hidden_envoy_deprecated_max_obj_name_len = 21 [ - deprecated = true, - (envoy.annotations.deprecated_at_minor_version) = "3.0", - (envoy.annotations.disallowed_by_default) = true - ]; -} diff --git a/generated_api_shadow/envoy/admin/v3/tap.proto b/generated_api_shadow/envoy/admin/v3/tap.proto deleted file mode 100644 index 934170b2deeab..0000000000000 --- a/generated_api_shadow/envoy/admin/v3/tap.proto +++ /dev/null @@ -1,28 +0,0 @@ -syntax = "proto3"; - -package envoy.admin.v3; - -import "envoy/config/tap/v3/common.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.admin.v3"; -option java_outer_classname = "TapProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Tap] - -// The /tap admin request body that is used to configure an active tap session. -message TapRequest { - option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v2alpha.TapRequest"; - - // The opaque configuration ID used to match the configuration to a loaded extension. - // A tap extension configures a similar opaque ID that is used to match. - string config_id = 1 [(validate.rules).string = {min_len: 1}]; - - // The tap configuration to load. 
- config.tap.v3.TapConfig tap_config = 2 [(validate.rules).message = {required: true}]; -} diff --git a/generated_api_shadow/envoy/annotations/BUILD b/generated_api_shadow/envoy/annotations/BUILD deleted file mode 100644 index 5c06e2deae7d8..0000000000000 --- a/generated_api_shadow/envoy/annotations/BUILD +++ /dev/null @@ -1,5 +0,0 @@ -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package() diff --git a/generated_api_shadow/envoy/annotations/deprecation.proto b/generated_api_shadow/envoy/annotations/deprecation.proto deleted file mode 100644 index ce02ab98a8dcd..0000000000000 --- a/generated_api_shadow/envoy/annotations/deprecation.proto +++ /dev/null @@ -1,33 +0,0 @@ -syntax = "proto3"; - -package envoy.annotations; - -import "google/protobuf/descriptor.proto"; - -// [#protodoc-title: Deprecation] -// Adds annotations for deprecated fields and enums to allow tagging proto -// fields as fatal by default and the minor version on which the field was -// deprecated. One Envoy release after deprecation, deprecated fields will be -// disallowed by default, a state which is reversible with -// :ref:`runtime overrides `. - -// Magic number in this file derived from top 28bit of SHA256 digest of -// "envoy.annotation.disallowed_by_default" and "envoy.annotation.deprecated_at_minor_version" -extend google.protobuf.FieldOptions { - bool disallowed_by_default = 189503207; - - // The API major and minor version on which the field was deprecated - // (e.g., "3.5" for major version 3 and minor version 5). 
- string deprecated_at_minor_version = 157299826; -} - -// Magic number in this file derived from top 28bit of SHA256 digest of -// "envoy.annotation.disallowed_by_default_enum" and -// "envoy.annotation.deprecated_at_minor_version_eum" -extend google.protobuf.EnumValueOptions { - bool disallowed_by_default_enum = 70100853; - - // The API major and minor version on which the enum value was deprecated - // (e.g., "3.5" for major version 3 and minor version 5). - string deprecated_at_minor_version_enum = 181198657; -} diff --git a/generated_api_shadow/envoy/annotations/resource.proto b/generated_api_shadow/envoy/annotations/resource.proto deleted file mode 100644 index b9dcf658e5226..0000000000000 --- a/generated_api_shadow/envoy/annotations/resource.proto +++ /dev/null @@ -1,16 +0,0 @@ -syntax = "proto3"; - -package envoy.annotations; - -import "google/protobuf/descriptor.proto"; - -// Magic number in this file derived from top 28bit of SHA256 digest of "envoy.annotation.resource". -extend google.protobuf.ServiceOptions { - ResourceAnnotation resource = 265073217; -} - -message ResourceAnnotation { - // Annotation for xDS services that indicates the fully-qualified Protobuf type for the resource - // type. - string type = 1; -} diff --git a/generated_api_shadow/envoy/api/v2/BUILD b/generated_api_shadow/envoy/api/v2/BUILD deleted file mode 100644 index 0aded6e51b71a..0000000000000 --- a/generated_api_shadow/envoy/api/v2/BUILD +++ /dev/null @@ -1,22 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - has_services = True, - deps = [ - "//envoy/annotations:pkg", - "//envoy/api/v2/auth:pkg", - "//envoy/api/v2/cluster:pkg", - "//envoy/api/v2/core:pkg", - "//envoy/api/v2/endpoint:pkg", - "//envoy/api/v2/listener:pkg", - "//envoy/api/v2/route:pkg", - "//envoy/config/filter/accesslog/v2:pkg", - "//envoy/config/listener/v2:pkg", - "//envoy/type:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/api/v2/README.md b/generated_api_shadow/envoy/api/v2/README.md deleted file mode 100644 index 984be690a103b..0000000000000 --- a/generated_api_shadow/envoy/api/v2/README.md +++ /dev/null @@ -1,9 +0,0 @@ -Protocol buffer definitions for xDS and top-level resource API messages. - -Package group `//envoy/api/v2:friends` enumerates all consumers of the shared -API messages. That includes package envoy.api.v2 itself, which contains several -xDS definitions. Default visibility for all shared definitions should be set to -`//envoy/api/v2:friends`. - -Additionally, packages envoy.api.v2.core and envoy.api.v2.auth are also -consumed throughout the subpackages of `//envoy/api/v2`. diff --git a/generated_api_shadow/envoy/api/v2/auth/BUILD b/generated_api_shadow/envoy/api/v2/auth/BUILD deleted file mode 100644 index aaab1df155473..0000000000000 --- a/generated_api_shadow/envoy/api/v2/auth/BUILD +++ /dev/null @@ -1,13 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/api/v2/core:pkg", - "//envoy/type/matcher:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/api/v2/auth/cert.proto b/generated_api_shadow/envoy/api/v2/auth/cert.proto deleted file mode 100644 index 6a9cbddd25084..0000000000000 --- a/generated_api_shadow/envoy/api/v2/auth/cert.proto +++ /dev/null @@ -1,15 +0,0 @@ -syntax = "proto3"; - -package envoy.api.v2.auth; - -import "udpa/annotations/migrate.proto"; - -import public "envoy/api/v2/auth/common.proto"; -import public "envoy/api/v2/auth/secret.proto"; -import public "envoy/api/v2/auth/tls.proto"; - -option java_package = "io.envoyproxy.envoy.api.v2.auth"; -option java_outer_classname = "CertProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = - "envoy.extensions.transport_sockets.tls.v3"; diff --git a/generated_api_shadow/envoy/api/v2/auth/common.proto b/generated_api_shadow/envoy/api/v2/auth/common.proto deleted file mode 100644 index c8122f4010297..0000000000000 --- a/generated_api_shadow/envoy/api/v2/auth/common.proto +++ /dev/null @@ -1,327 +0,0 @@ -syntax = "proto3"; - -package envoy.api.v2.auth; - -import "envoy/api/v2/core/base.proto"; -import "envoy/type/matcher/string.proto"; - -import "google/protobuf/any.proto"; -import "google/protobuf/struct.proto"; -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/sensitive.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.api.v2.auth"; -option java_outer_classname = "CommonProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = - "envoy.extensions.transport_sockets.tls.v3"; -option (udpa.annotations.file_status).package_version_status 
= FROZEN; - -// [#protodoc-title: Common TLS configuration] - -message TlsParameters { - enum TlsProtocol { - // Envoy will choose the optimal TLS version. - TLS_AUTO = 0; - - // TLS 1.0 - TLSv1_0 = 1; - - // TLS 1.1 - TLSv1_1 = 2; - - // TLS 1.2 - TLSv1_2 = 3; - - // TLS 1.3 - TLSv1_3 = 4; - } - - // Minimum TLS protocol version. By default, it's ``TLSv1_2`` for clients and ``TLSv1_0`` for - // servers. - TlsProtocol tls_minimum_protocol_version = 1 [(validate.rules).enum = {defined_only: true}]; - - // Maximum TLS protocol version. By default, it's ``TLSv1_2`` for clients and ``TLSv1_3`` for - // servers. - TlsProtocol tls_maximum_protocol_version = 2 [(validate.rules).enum = {defined_only: true}]; - - // If specified, the TLS listener will only support the specified `cipher list - // `_ - // when negotiating TLS 1.0-1.2 (this setting has no effect when negotiating TLS 1.3). If not - // specified, the default list will be used. - // - // In non-FIPS builds, the default cipher list is: - // - // .. code-block:: none - // - // [ECDHE-ECDSA-AES128-GCM-SHA256|ECDHE-ECDSA-CHACHA20-POLY1305] - // [ECDHE-RSA-AES128-GCM-SHA256|ECDHE-RSA-CHACHA20-POLY1305] - // ECDHE-ECDSA-AES128-SHA - // ECDHE-RSA-AES128-SHA - // AES128-GCM-SHA256 - // AES128-SHA - // ECDHE-ECDSA-AES256-GCM-SHA384 - // ECDHE-RSA-AES256-GCM-SHA384 - // ECDHE-ECDSA-AES256-SHA - // ECDHE-RSA-AES256-SHA - // AES256-GCM-SHA384 - // AES256-SHA - // - // In builds using :ref:`BoringSSL FIPS `, the default cipher list is: - // - // .. code-block:: none - // - // ECDHE-ECDSA-AES128-GCM-SHA256 - // ECDHE-RSA-AES128-GCM-SHA256 - // ECDHE-ECDSA-AES128-SHA - // ECDHE-RSA-AES128-SHA - // AES128-GCM-SHA256 - // AES128-SHA - // ECDHE-ECDSA-AES256-GCM-SHA384 - // ECDHE-RSA-AES256-GCM-SHA384 - // ECDHE-ECDSA-AES256-SHA - // ECDHE-RSA-AES256-SHA - // AES256-GCM-SHA384 - // AES256-SHA - repeated string cipher_suites = 3; - - // If specified, the TLS connection will only support the specified ECDH - // curves. 
If not specified, the default curves will be used. - // - // In non-FIPS builds, the default curves are: - // - // .. code-block:: none - // - // X25519 - // P-256 - // - // In builds using :ref:`BoringSSL FIPS `, the default curve is: - // - // .. code-block:: none - // - // P-256 - repeated string ecdh_curves = 4; -} - -// BoringSSL private key method configuration. The private key methods are used for external -// (potentially asynchronous) signing and decryption operations. Some use cases for private key -// methods would be TPM support and TLS acceleration. -message PrivateKeyProvider { - // Private key method provider name. The name must match a - // supported private key method provider type. - string provider_name = 1 [(validate.rules).string = {min_bytes: 1}]; - - // Private key method provider specific configuration. - oneof config_type { - google.protobuf.Struct config = 2 [deprecated = true, (udpa.annotations.sensitive) = true]; - - google.protobuf.Any typed_config = 3 [(udpa.annotations.sensitive) = true]; - } -} - -// [#next-free-field: 7] -message TlsCertificate { - // The TLS certificate chain. - core.DataSource certificate_chain = 1; - - // The TLS private key. - core.DataSource private_key = 2 [(udpa.annotations.sensitive) = true]; - - // BoringSSL private key method provider. This is an alternative to :ref:`private_key - // ` field. This can't be - // marked as ``oneof`` due to API compatibility reasons. Setting both :ref:`private_key - // ` and - // :ref:`private_key_provider - // ` fields will result in an - // error. - PrivateKeyProvider private_key_provider = 6; - - // The password to decrypt the TLS private key. If this field is not set, it is assumed that the - // TLS private key is not password encrypted. 
- core.DataSource password = 3 [(udpa.annotations.sensitive) = true]; - - // [#not-implemented-hide:] - core.DataSource ocsp_staple = 4; - - // [#not-implemented-hide:] - repeated core.DataSource signed_certificate_timestamp = 5; -} - -message TlsSessionTicketKeys { - // Keys for encrypting and decrypting TLS session tickets. The - // first key in the array contains the key to encrypt all new sessions created by this context. - // All keys are candidates for decrypting received tickets. This allows for easy rotation of keys - // by, for example, putting the new key first, and the previous key second. - // - // If :ref:`session_ticket_keys ` - // is not specified, the TLS library will still support resuming sessions via tickets, but it will - // use an internally-generated and managed key, so sessions cannot be resumed across hot restarts - // or on different hosts. - // - // Each key must contain exactly 80 bytes of cryptographically-secure random data. For - // example, the output of ``openssl rand 80``. - // - // .. attention:: - // - // Using this feature has serious security considerations and risks. Improper handling of keys - // may result in loss of secrecy in connections, even if ciphers supporting perfect forward - // secrecy are used. See https://www.imperialviolet.org/2013/06/27/botchingpfs.html for some - // discussion. To minimize the risk, you must: - // - // * Keep the session ticket keys at least as secure as your TLS certificate private keys - // * Rotate session ticket keys at least daily, and preferably hourly - // * Always generate keys using a cryptographically-secure random data source - repeated core.DataSource keys = 1 - [(validate.rules).repeated = {min_items: 1}, (udpa.annotations.sensitive) = true]; -} - -// [#next-free-field: 11] -message CertificateValidationContext { - // Peer certificate verification mode. 
- enum TrustChainVerification { - // Perform default certificate verification (e.g., against CA / verification lists) - VERIFY_TRUST_CHAIN = 0; - - // Connections where the certificate fails verification will be permitted. - // For HTTP connections, the result of certificate verification can be used in route matching. ( - // see :ref:`validated ` ). - ACCEPT_UNTRUSTED = 1; - } - - // TLS certificate data containing certificate authority certificates to use in verifying - // a presented peer certificate (e.g. server certificate for clusters or client certificate - // for listeners). If not specified and a peer certificate is presented it will not be - // verified. By default, a client certificate is optional, unless one of the additional - // options (:ref:`require_client_certificate - // `, - // :ref:`verify_certificate_spki - // `, - // :ref:`verify_certificate_hash - // `, or - // :ref:`match_subject_alt_names - // `) is also - // specified. - // - // It can optionally contain certificate revocation lists, in which case Envoy will verify - // that the presented peer certificate has not been revoked by one of the included CRLs. - // - // See :ref:`the TLS overview ` for a list of common - // system CA locations. - core.DataSource trusted_ca = 1; - - // An optional list of base64-encoded SHA-256 hashes. If specified, Envoy will verify that the - // SHA-256 of the DER-encoded Subject Public Key Information (SPKI) of the presented certificate - // matches one of the specified values. - // - // A base64-encoded SHA-256 of the Subject Public Key Information (SPKI) of the certificate - // can be generated with the following command: - // - // .. code-block:: bash - // - // $ openssl x509 -in path/to/client.crt -noout -pubkey - // | openssl pkey -pubin -outform DER - // | openssl dgst -sha256 -binary - // | openssl enc -base64 - // NvqYIYSbgK2vCJpQhObf77vv+bQWtc5ek5RIOwPiC9A= - // - // This is the format used in HTTP Public Key Pinning. 
- // - // When both: - // :ref:`verify_certificate_hash - // ` and - // :ref:`verify_certificate_spki - // ` are specified, - // a hash matching value from either of the lists will result in the certificate being accepted. - // - // .. attention:: - // - // This option is preferred over :ref:`verify_certificate_hash - // `, - // because SPKI is tied to a private key, so it doesn't change when the certificate - // is renewed using the same private key. - repeated string verify_certificate_spki = 3 - [(validate.rules).repeated = {items {string {min_bytes: 44 max_bytes: 44}}}]; - - // An optional list of hex-encoded SHA-256 hashes. If specified, Envoy will verify that - // the SHA-256 of the DER-encoded presented certificate matches one of the specified values. - // - // A hex-encoded SHA-256 of the certificate can be generated with the following command: - // - // .. code-block:: bash - // - // $ openssl x509 -in path/to/client.crt -outform DER | openssl dgst -sha256 | cut -d" " -f2 - // df6ff72fe9116521268f6f2dd4966f51df479883fe7037b39f75916ac3049d1a - // - // A long hex-encoded and colon-separated SHA-256 (a.k.a. "fingerprint") of the certificate - // can be generated with the following command: - // - // .. code-block:: bash - // - // $ openssl x509 -in path/to/client.crt -noout -fingerprint -sha256 | cut -d"=" -f2 - // DF:6F:F7:2F:E9:11:65:21:26:8F:6F:2D:D4:96:6F:51:DF:47:98:83:FE:70:37:B3:9F:75:91:6A:C3:04:9D:1A - // - // Both of those formats are acceptable. - // - // When both: - // :ref:`verify_certificate_hash - // ` and - // :ref:`verify_certificate_spki - // ` are specified, - // a hash matching value from either of the lists will result in the certificate being accepted. - repeated string verify_certificate_hash = 2 - [(validate.rules).repeated = {items {string {min_bytes: 64 max_bytes: 95}}}]; - - // An optional list of Subject Alternative Names. 
If specified, Envoy will verify that the - // Subject Alternative Name of the presented certificate matches one of the specified values. - // - // .. attention:: - // - // Subject Alternative Names are easily spoofable and verifying only them is insecure, - // therefore this option must be used together with :ref:`trusted_ca - // `. - repeated string verify_subject_alt_name = 4 [deprecated = true]; - - // An optional list of Subject Alternative name matchers. Envoy will verify that the - // Subject Alternative Name of the presented certificate matches one of the specified matches. - // - // When a certificate has wildcard DNS SAN entries, to match a specific client, it should be - // configured with exact match type in the :ref:`string matcher `. - // For example if the certificate has "\*.example.com" as DNS SAN entry, to allow only "api.example.com", - // it should be configured as shown below. - // - // .. code-block:: yaml - // - // match_subject_alt_names: - // exact: "api.example.com" - // - // .. attention:: - // - // Subject Alternative Names are easily spoofable and verifying only them is insecure, - // therefore this option must be used together with :ref:`trusted_ca - // `. - repeated type.matcher.StringMatcher match_subject_alt_names = 9; - - // [#not-implemented-hide:] Must present a signed time-stamped OCSP response. - google.protobuf.BoolValue require_ocsp_staple = 5; - - // [#not-implemented-hide:] Must present signed certificate time-stamp. - google.protobuf.BoolValue require_signed_certificate_timestamp = 6; - - // An optional `certificate revocation list - // `_ - // (in PEM format). If specified, Envoy will verify that the presented peer - // certificate has not been revoked by this CRL. If this DataSource contains - // multiple CRLs, all of them will be used. - core.DataSource crl = 7; - - // If specified, Envoy will not reject expired certificates. - bool allow_expired_certificate = 8; - - // Certificate trust chain verification mode. 
- TrustChainVerification trust_chain_verification = 10 - [(validate.rules).enum = {defined_only: true}]; -} diff --git a/generated_api_shadow/envoy/api/v2/auth/secret.proto b/generated_api_shadow/envoy/api/v2/auth/secret.proto deleted file mode 100644 index 3a6d8cf7dcb67..0000000000000 --- a/generated_api_shadow/envoy/api/v2/auth/secret.proto +++ /dev/null @@ -1,50 +0,0 @@ -syntax = "proto3"; - -package envoy.api.v2.auth; - -import "envoy/api/v2/auth/common.proto"; -import "envoy/api/v2/core/base.proto"; -import "envoy/api/v2/core/config_source.proto"; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/sensitive.proto"; -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.api.v2.auth"; -option java_outer_classname = "SecretProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = - "envoy.extensions.transport_sockets.tls.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Secrets configuration] - -message GenericSecret { - // Secret of generic type and is available to filters. - core.DataSource secret = 1 [(udpa.annotations.sensitive) = true]; -} - -message SdsSecretConfig { - // Name (FQDN, UUID, SPKI, SHA256, etc.) by which the secret can be uniquely referred to. - // When both name and config are specified, then secret can be fetched and/or reloaded via - // SDS. When only name is specified, then secret will be loaded from static resources. - string name = 1; - - core.ConfigSource sds_config = 2; -} - -// [#next-free-field: 6] -message Secret { - // Name (FQDN, UUID, SPKI, SHA256, etc.) by which the secret can be uniquely referred to. 
- string name = 1; - - oneof type { - TlsCertificate tls_certificate = 2; - - TlsSessionTicketKeys session_ticket_keys = 3; - - CertificateValidationContext validation_context = 4; - - GenericSecret generic_secret = 5; - } -} diff --git a/generated_api_shadow/envoy/api/v2/auth/tls.proto b/generated_api_shadow/envoy/api/v2/auth/tls.proto deleted file mode 100644 index 201973a2b9de8..0000000000000 --- a/generated_api_shadow/envoy/api/v2/auth/tls.proto +++ /dev/null @@ -1,152 +0,0 @@ -syntax = "proto3"; - -package envoy.api.v2.auth; - -import "envoy/api/v2/auth/common.proto"; -import "envoy/api/v2/auth/secret.proto"; - -import "google/protobuf/duration.proto"; -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.api.v2.auth"; -option java_outer_classname = "TlsProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = - "envoy.extensions.transport_sockets.tls.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: TLS transport socket] -// [#extension: envoy.transport_sockets.tls] -// The TLS contexts below provide the transport socket configuration for upstream/downstream TLS. - -message UpstreamTlsContext { - // Common TLS context settings. - // - // .. attention:: - // - // Server certificate verification is not enabled by default. Configure - // :ref:`trusted_ca` to enable - // verification. - CommonTlsContext common_tls_context = 1; - - // SNI string to use when creating TLS backend connections. - string sni = 2 [(validate.rules).string = {max_bytes: 255}]; - - // If true, server-initiated TLS renegotiation will be allowed. - // - // .. attention:: - // - // TLS renegotiation is considered insecure and shouldn't be used unless absolutely necessary. 
- bool allow_renegotiation = 3; - - // Maximum number of session keys (Pre-Shared Keys for TLSv1.3+, Session IDs and Session Tickets - // for TLSv1.2 and older) to store for the purpose of session resumption. - // - // Defaults to 1, setting this to 0 disables session resumption. - google.protobuf.UInt32Value max_session_keys = 4; -} - -// [#next-free-field: 8] -message DownstreamTlsContext { - // Common TLS context settings. - CommonTlsContext common_tls_context = 1; - - // If specified, Envoy will reject connections without a valid client - // certificate. - google.protobuf.BoolValue require_client_certificate = 2; - - // If specified, Envoy will reject connections without a valid and matching SNI. - // [#not-implemented-hide:] - google.protobuf.BoolValue require_sni = 3; - - oneof session_ticket_keys_type { - // TLS session ticket key settings. - TlsSessionTicketKeys session_ticket_keys = 4; - - // Config for fetching TLS session ticket keys via SDS API. - SdsSecretConfig session_ticket_keys_sds_secret_config = 5; - - // Config for controlling stateless TLS session resumption: setting this to true will cause the TLS - // server to not issue TLS session tickets for the purposes of stateless TLS session resumption. - // If set to false, the TLS server will issue TLS session tickets and encrypt/decrypt them using - // the keys specified through either :ref:`session_ticket_keys ` - // or :ref:`session_ticket_keys_sds_secret_config `. - // If this config is set to false and no keys are explicitly configured, the TLS server will issue - // TLS session tickets and encrypt/decrypt them using an internally-generated and managed key, with the - // implication that sessions cannot be resumed across hot restarts or on different hosts. 
- bool disable_stateless_session_resumption = 7; - } - - // If specified, session_timeout will change maximum lifetime (in seconds) of TLS session - // Currently this value is used as a hint to `TLS session ticket lifetime (for TLSv1.2) - // ` - // only seconds could be specified (fractional seconds are going to be ignored). - google.protobuf.Duration session_timeout = 6 [(validate.rules).duration = { - lt {seconds: 4294967296} - gte {} - }]; -} - -// TLS context shared by both client and server TLS contexts. -// [#next-free-field: 9] -message CommonTlsContext { - message CombinedCertificateValidationContext { - // How to validate peer certificates. - CertificateValidationContext default_validation_context = 1 - [(validate.rules).message = {required: true}]; - - // Config for fetching validation context via SDS API. - SdsSecretConfig validation_context_sds_secret_config = 2 - [(validate.rules).message = {required: true}]; - } - - reserved 5; - - // TLS protocol versions, cipher suites etc. - TlsParameters tls_params = 1; - - // :ref:`Multiple TLS certificates ` can be associated with the - // same context to allow both RSA and ECDSA certificates. - // - // Only a single TLS certificate is supported in client contexts. In server contexts, the first - // RSA certificate is used for clients that only support RSA and the first ECDSA certificate is - // used for clients that support ECDSA. - repeated TlsCertificate tls_certificates = 2; - - // Configs for fetching TLS certificates via SDS API. - repeated SdsSecretConfig tls_certificate_sds_secret_configs = 6 - [(validate.rules).repeated = {max_items: 1}]; - - oneof validation_context_type { - // How to validate peer certificates. - CertificateValidationContext validation_context = 3; - - // Config for fetching validation context via SDS API. - SdsSecretConfig validation_context_sds_secret_config = 7; - - // Combined certificate validation context holds a default CertificateValidationContext - // and SDS config. 
When SDS server returns dynamic CertificateValidationContext, both dynamic - // and default CertificateValidationContext are merged into a new CertificateValidationContext - // for validation. This merge is done by Message::MergeFrom(), so dynamic - // CertificateValidationContext overwrites singular fields in default - // CertificateValidationContext, and concatenates repeated fields to default - // CertificateValidationContext, and logical OR is applied to boolean fields. - CombinedCertificateValidationContext combined_validation_context = 8; - } - - // Supplies the list of ALPN protocols that the listener should expose. In - // practice this is likely to be set to one of two values (see the - // :ref:`codec_type - // ` - // parameter in the HTTP connection manager for more information): - // - // * "h2,http/1.1" If the listener is going to support both HTTP/2 and HTTP/1.1. - // * "http/1.1" If the listener is only going to support HTTP/1.1. - // - // There is no default for this parameter. If empty, Envoy will not expose ALPN. 
- repeated string alpn_protocols = 4; -} diff --git a/generated_api_shadow/envoy/api/v2/cds.proto b/generated_api_shadow/envoy/api/v2/cds.proto deleted file mode 100644 index 0b657a0fa452b..0000000000000 --- a/generated_api_shadow/envoy/api/v2/cds.proto +++ /dev/null @@ -1,43 +0,0 @@ -syntax = "proto3"; - -package envoy.api.v2; - -import "envoy/api/v2/discovery.proto"; - -import "google/api/annotations.proto"; - -import "envoy/annotations/resource.proto"; -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; - -import public "envoy/api/v2/cluster.proto"; - -option java_package = "io.envoyproxy.envoy.api.v2"; -option java_outer_classname = "CdsProto"; -option java_multiple_files = true; -option java_generic_services = true; -option (udpa.annotations.file_migrate).move_to_package = "envoy.service.cluster.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: CDS] - -// Return list of all clusters this proxy will load balance to. -service ClusterDiscoveryService { - option (envoy.annotations.resource).type = "envoy.api.v2.Cluster"; - - rpc StreamClusters(stream DiscoveryRequest) returns (stream DiscoveryResponse) { - } - - rpc DeltaClusters(stream DeltaDiscoveryRequest) returns (stream DeltaDiscoveryResponse) { - } - - rpc FetchClusters(DiscoveryRequest) returns (DiscoveryResponse) { - option (google.api.http).post = "/v2/discovery:clusters"; - option (google.api.http).body = "*"; - } -} - -// [#not-implemented-hide:] Not configuration. Workaround c++ protobuf issue with importing -// services: https://github.com/google/protobuf/issues/4221 and protoxform to upgrade the file. 
-message CdsDummy { -} diff --git a/generated_api_shadow/envoy/api/v2/cluster.proto b/generated_api_shadow/envoy/api/v2/cluster.proto deleted file mode 100644 index fab95f71b7630..0000000000000 --- a/generated_api_shadow/envoy/api/v2/cluster.proto +++ /dev/null @@ -1,867 +0,0 @@ -syntax = "proto3"; - -package envoy.api.v2; - -import "envoy/api/v2/auth/tls.proto"; -import "envoy/api/v2/cluster/circuit_breaker.proto"; -import "envoy/api/v2/cluster/filter.proto"; -import "envoy/api/v2/cluster/outlier_detection.proto"; -import "envoy/api/v2/core/address.proto"; -import "envoy/api/v2/core/base.proto"; -import "envoy/api/v2/core/config_source.proto"; -import "envoy/api/v2/core/health_check.proto"; -import "envoy/api/v2/core/protocol.proto"; -import "envoy/api/v2/endpoint.proto"; -import "envoy/type/percent.proto"; - -import "google/protobuf/any.proto"; -import "google/protobuf/duration.proto"; -import "google/protobuf/struct.proto"; -import "google/protobuf/wrappers.proto"; - -import "envoy/annotations/deprecation.proto"; -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.api.v2"; -option java_outer_classname = "ClusterProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = "envoy.config.cluster.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Cluster configuration] - -// Configuration for a single upstream cluster. -// [#next-free-field: 48] -message Cluster { - // Refer to :ref:`service discovery type ` - // for an explanation on each type. - enum DiscoveryType { - // Refer to the :ref:`static discovery type` - // for an explanation. - STATIC = 0; - - // Refer to the :ref:`strict DNS discovery - // type` - // for an explanation. - STRICT_DNS = 1; - - // Refer to the :ref:`logical DNS discovery - // type` - // for an explanation. 
- LOGICAL_DNS = 2; - - // Refer to the :ref:`service discovery type` - // for an explanation. - EDS = 3; - - // Refer to the :ref:`original destination discovery - // type` - // for an explanation. - ORIGINAL_DST = 4; - } - - // Refer to :ref:`load balancer type ` architecture - // overview section for information on each type. - enum LbPolicy { - // Refer to the :ref:`round robin load balancing - // policy` - // for an explanation. - ROUND_ROBIN = 0; - - // Refer to the :ref:`least request load balancing - // policy` - // for an explanation. - LEAST_REQUEST = 1; - - // Refer to the :ref:`ring hash load balancing - // policy` - // for an explanation. - RING_HASH = 2; - - // Refer to the :ref:`random load balancing - // policy` - // for an explanation. - RANDOM = 3; - - // Refer to the :ref:`original destination load balancing - // policy` - // for an explanation. - // - // .. attention:: - // - // **This load balancing policy is deprecated**. Use CLUSTER_PROVIDED instead. - // - ORIGINAL_DST_LB = 4 [deprecated = true, (envoy.annotations.disallowed_by_default_enum) = true]; - - // Refer to the :ref:`Maglev load balancing policy` - // for an explanation. - MAGLEV = 5; - - // This load balancer type must be specified if the configured cluster provides a cluster - // specific load balancer. Consult the configured cluster's documentation for whether to set - // this option or not. - CLUSTER_PROVIDED = 6; - - // [#not-implemented-hide:] Use the new :ref:`load_balancing_policy - // ` field to determine the LB policy. - // [#next-major-version: In the v3 API, we should consider deprecating the lb_policy field - // and instead using the new load_balancing_policy field as the one and only mechanism for - // configuring this.] - LOAD_BALANCING_POLICY_CONFIG = 7; - } - - // When V4_ONLY is selected, the DNS resolver will only perform a lookup for - // addresses in the IPv4 family. 
If V6_ONLY is selected, the DNS resolver will - // only perform a lookup for addresses in the IPv6 family. If AUTO is - // specified, the DNS resolver will first perform a lookup for addresses in - // the IPv6 family and fallback to a lookup for addresses in the IPv4 family. - // For cluster types other than - // :ref:`STRICT_DNS` and - // :ref:`LOGICAL_DNS`, - // this setting is - // ignored. - enum DnsLookupFamily { - AUTO = 0; - V4_ONLY = 1; - V6_ONLY = 2; - } - - enum ClusterProtocolSelection { - // Cluster can only operate on one of the possible upstream protocols (HTTP1.1, HTTP2). - // If :ref:`http2_protocol_options ` are - // present, HTTP2 will be used, otherwise HTTP1.1 will be used. - USE_CONFIGURED_PROTOCOL = 0; - - // Use HTTP1.1 or HTTP2, depending on which one is used on the downstream connection. - USE_DOWNSTREAM_PROTOCOL = 1; - } - - // TransportSocketMatch specifies what transport socket config will be used - // when the match conditions are satisfied. - message TransportSocketMatch { - // The name of the match, used in stats generation. - string name = 1 [(validate.rules).string = {min_len: 1}]; - - // Optional endpoint metadata match criteria. - // The connection to the endpoint with metadata matching what is set in this field - // will use the transport socket configuration specified here. - // The endpoint's metadata entry in *envoy.transport_socket_match* is used to match - // against the values specified in this field. - google.protobuf.Struct match = 2; - - // The configuration of the transport socket. - core.TransportSocket transport_socket = 3; - } - - // Extended cluster type. - message CustomClusterType { - // The type of the cluster to instantiate. The name must match a supported cluster type. - string name = 1 [(validate.rules).string = {min_bytes: 1}]; - - // Cluster specific configuration which depends on the cluster being instantiated. - // See the supported cluster for further documentation. 
- google.protobuf.Any typed_config = 2; - } - - // Only valid when discovery type is EDS. - message EdsClusterConfig { - // Configuration for the source of EDS updates for this Cluster. - core.ConfigSource eds_config = 1; - - // Optional alternative to cluster name to present to EDS. This does not - // have the same restrictions as cluster name, i.e. it may be arbitrary - // length. - string service_name = 2; - } - - // Optionally divide the endpoints in this cluster into subsets defined by - // endpoint metadata and selected by route and weighted cluster metadata. - // [#next-free-field: 8] - message LbSubsetConfig { - // If NO_FALLBACK is selected, a result - // equivalent to no healthy hosts is reported. If ANY_ENDPOINT is selected, - // any cluster endpoint may be returned (subject to policy, health checks, - // etc). If DEFAULT_SUBSET is selected, load balancing is performed over the - // endpoints matching the values from the default_subset field. - enum LbSubsetFallbackPolicy { - NO_FALLBACK = 0; - ANY_ENDPOINT = 1; - DEFAULT_SUBSET = 2; - } - - // Specifications for subsets. - message LbSubsetSelector { - // Allows to override top level fallback policy per selector. - enum LbSubsetSelectorFallbackPolicy { - // If NOT_DEFINED top level config fallback policy is used instead. - NOT_DEFINED = 0; - - // If NO_FALLBACK is selected, a result equivalent to no healthy hosts is reported. - NO_FALLBACK = 1; - - // If ANY_ENDPOINT is selected, any cluster endpoint may be returned - // (subject to policy, health checks, etc). - ANY_ENDPOINT = 2; - - // If DEFAULT_SUBSET is selected, load balancing is performed over the - // endpoints matching the values from the default_subset field. - DEFAULT_SUBSET = 3; - - // If KEYS_SUBSET is selected, subset selector matching is performed again with metadata - // keys reduced to - // :ref:`fallback_keys_subset`. 
- // It allows for a fallback to a different, less specific selector if some of the keys of - // the selector are considered optional. - KEYS_SUBSET = 4; - } - - // List of keys to match with the weighted cluster metadata. - repeated string keys = 1; - - // The behavior used when no endpoint subset matches the selected route's - // metadata. - LbSubsetSelectorFallbackPolicy fallback_policy = 2 - [(validate.rules).enum = {defined_only: true}]; - - // Subset of - // :ref:`keys` used by - // :ref:`KEYS_SUBSET` - // fallback policy. - // It has to be a non empty list if KEYS_SUBSET fallback policy is selected. - // For any other fallback policy the parameter is not used and should not be set. - // Only values also present in - // :ref:`keys` are allowed, but - // `fallback_keys_subset` cannot be equal to `keys`. - repeated string fallback_keys_subset = 3; - } - - // The behavior used when no endpoint subset matches the selected route's - // metadata. The value defaults to - // :ref:`NO_FALLBACK`. - LbSubsetFallbackPolicy fallback_policy = 1 [(validate.rules).enum = {defined_only: true}]; - - // Specifies the default subset of endpoints used during fallback if - // fallback_policy is - // :ref:`DEFAULT_SUBSET`. - // Each field in default_subset is - // compared to the matching LbEndpoint.Metadata under the *envoy.lb* - // namespace. It is valid for no hosts to match, in which case the behavior - // is the same as a fallback_policy of - // :ref:`NO_FALLBACK`. - google.protobuf.Struct default_subset = 2; - - // For each entry, LbEndpoint.Metadata's - // *envoy.lb* namespace is traversed and a subset is created for each unique - // combination of key and value. For example: - // - // .. 
code-block:: json - // - // { "subset_selectors": [ - // { "keys": [ "version" ] }, - // { "keys": [ "stage", "hardware_type" ] } - // ]} - // - // A subset is matched when the metadata from the selected route and - // weighted cluster contains the same keys and values as the subset's - // metadata. The same host may appear in multiple subsets. - repeated LbSubsetSelector subset_selectors = 3; - - // If true, routing to subsets will take into account the localities and locality weights of the - // endpoints when making the routing decision. - // - // There are some potential pitfalls associated with enabling this feature, as the resulting - // traffic split after applying both a subset match and locality weights might be undesirable. - // - // Consider for example a situation in which you have 50/50 split across two localities X/Y - // which have 100 hosts each without subsetting. If the subset LB results in X having only 1 - // host selected but Y having 100, then a lot more load is being dumped on the single host in X - // than originally anticipated in the load balancing assignment delivered via EDS. - bool locality_weight_aware = 4; - - // When used with locality_weight_aware, scales the weight of each locality by the ratio - // of hosts in the subset vs hosts in the original subset. This aims to even out the load - // going to an individual locality if said locality is disproportionately affected by the - // subset predicate. - bool scale_locality_weight = 5; - - // If true, when a fallback policy is configured and its corresponding subset fails to find - // a host this will cause any host to be selected instead. - // - // This is useful when using the default subset as the fallback policy, given the default - // subset might become empty. With this option enabled, if that happens the LB will attempt - // to select a host from the entire cluster. 
- bool panic_mode_any = 6; - - // If true, metadata specified for a metadata key will be matched against the corresponding - // endpoint metadata if the endpoint metadata matches the value exactly OR it is a list value - // and any of the elements in the list matches the criteria. - bool list_as_any = 7; - } - - // Specific configuration for the LeastRequest load balancing policy. - message LeastRequestLbConfig { - // The number of random healthy hosts from which the host with the fewest active requests will - // be chosen. Defaults to 2 so that we perform two-choice selection if the field is not set. - google.protobuf.UInt32Value choice_count = 1 [(validate.rules).uint32 = {gte: 2}]; - } - - // Specific configuration for the :ref:`RingHash` - // load balancing policy. - message RingHashLbConfig { - // The hash function used to hash hosts onto the ketama ring. - enum HashFunction { - // Use `xxHash `_, this is the default hash function. - XX_HASH = 0; - - // Use `MurmurHash2 `_, this is compatible with - // std:hash in GNU libstdc++ 3.4.20 or above. This is typically the case when compiled - // on Linux and not macOS. - MURMUR_HASH_2 = 1; - } - - reserved 2; - - // Minimum hash ring size. The larger the ring is (that is, the more hashes there are for each - // provided host) the better the request distribution will reflect the desired weights. Defaults - // to 1024 entries, and limited to 8M entries. See also - // :ref:`maximum_ring_size`. - google.protobuf.UInt64Value minimum_ring_size = 1 [(validate.rules).uint64 = {lte: 8388608}]; - - // The hash function used to hash hosts onto the ketama ring. The value defaults to - // :ref:`XX_HASH`. - HashFunction hash_function = 3 [(validate.rules).enum = {defined_only: true}]; - - // Maximum hash ring size. Defaults to 8M entries, and limited to 8M entries, but can be lowered - // to further constrain resource use. See also - // :ref:`minimum_ring_size`. 
- google.protobuf.UInt64Value maximum_ring_size = 4 [(validate.rules).uint64 = {lte: 8388608}]; - } - - // Specific configuration for the - // :ref:`Original Destination ` - // load balancing policy. - message OriginalDstLbConfig { - // When true, :ref:`x-envoy-original-dst-host - // ` can be used to override destination - // address. - // - // .. attention:: - // - // This header isn't sanitized by default, so enabling this feature allows HTTP clients to - // route traffic to arbitrary hosts and/or ports, which may have serious security - // consequences. - // - // .. note:: - // - // If the header appears multiple times only the first value is used. - bool use_http_header = 1; - } - - // Common configuration for all load balancer implementations. - // [#next-free-field: 8] - message CommonLbConfig { - // Configuration for :ref:`zone aware routing - // `. - message ZoneAwareLbConfig { - // Configures percentage of requests that will be considered for zone aware routing - // if zone aware routing is configured. If not specified, the default is 100%. - // * :ref:`runtime values `. - // * :ref:`Zone aware routing support `. - type.Percent routing_enabled = 1; - - // Configures minimum upstream cluster size required for zone aware routing - // If upstream cluster size is less than specified, zone aware routing is not performed - // even if zone aware routing is configured. If not specified, the default is 6. - // * :ref:`runtime values `. - // * :ref:`Zone aware routing support `. - google.protobuf.UInt64Value min_cluster_size = 2; - - // If set to true, Envoy will not consider any hosts when the cluster is in :ref:`panic - // mode`. Instead, the cluster will fail all - // requests as if all hosts are unhealthy. This can help avoid potentially overwhelming a - // failing service. 
- bool fail_traffic_on_panic = 3; - } - - // Configuration for :ref:`locality weighted load balancing - // ` - message LocalityWeightedLbConfig { - } - - // Common Configuration for all consistent hashing load balancers (MaglevLb, RingHashLb, etc.) - message ConsistentHashingLbConfig { - // If set to `true`, the cluster will use hostname instead of the resolved - // address as the key to consistently hash to an upstream host. Only valid for StrictDNS clusters with hostnames which resolve to a single IP address. - bool use_hostname_for_hashing = 1; - } - - // Configures the :ref:`healthy panic threshold `. - // If not specified, the default is 50%. - // To disable panic mode, set to 0%. - // - // .. note:: - // The specified percent will be truncated to the nearest 1%. - type.Percent healthy_panic_threshold = 1; - - oneof locality_config_specifier { - ZoneAwareLbConfig zone_aware_lb_config = 2; - - LocalityWeightedLbConfig locality_weighted_lb_config = 3; - } - - // If set, all health check/weight/metadata updates that happen within this duration will be - // merged and delivered in one shot when the duration expires. The start of the duration is when - // the first update happens. This is useful for big clusters, with potentially noisy deploys - // that might trigger excessive CPU usage due to a constant stream of healthcheck state changes - // or metadata updates. The first set of updates to be seen apply immediately (e.g.: a new - // cluster). Please always keep in mind that the use of sandbox technologies may change this - // behavior. - // - // If this is not set, we default to a merge window of 1000ms. To disable it, set the merge - // window to 0. - // - // Note: merging does not apply to cluster membership changes (e.g.: adds/removes); this is - // because merging those updates isn't currently safe. See - // https://github.com/envoyproxy/envoy/pull/3941. 
- google.protobuf.Duration update_merge_window = 4; - - // If set to true, Envoy will not consider new hosts when computing load balancing weights until - // they have been health checked for the first time. This will have no effect unless - // active health checking is also configured. - // - // Ignoring a host means that for any load balancing calculations that adjust weights based - // on the ratio of eligible hosts and total hosts (priority spillover, locality weighting and - // panic mode) Envoy will exclude these hosts in the denominator. - // - // For example, with hosts in two priorities P0 and P1, where P0 looks like - // {healthy, unhealthy (new), unhealthy (new)} - // and where P1 looks like - // {healthy, healthy} - // all traffic will still hit P0, as 1 / (3 - 2) = 1. - // - // Enabling this will allow scaling up the number of hosts for a given cluster without entering - // panic mode or triggering priority spillover, assuming the hosts pass the first health check. - // - // If panic mode is triggered, new hosts are still eligible for traffic; they simply do not - // contribute to the calculation when deciding whether panic mode is enabled or not. - bool ignore_new_hosts_until_first_hc = 5; - - // If set to `true`, the cluster manager will drain all existing - // connections to upstream hosts whenever hosts are added or removed from the cluster. - bool close_connections_on_host_set_change = 6; - - // Common Configuration for all consistent hashing load balancers (MaglevLb, RingHashLb, etc.) - ConsistentHashingLbConfig consistent_hashing_lb_config = 7; - } - - message RefreshRate { - // Specifies the base interval between refreshes. This parameter is required and must be greater - // than zero and less than - // :ref:`max_interval `. - google.protobuf.Duration base_interval = 1 [(validate.rules).duration = { - required: true - gt {nanos: 1000000} - }]; - - // Specifies the maximum interval between refreshes. 
This parameter is optional, but must be - // greater than or equal to the - // :ref:`base_interval ` if set. The default - // is 10 times the :ref:`base_interval `. - google.protobuf.Duration max_interval = 2 [(validate.rules).duration = {gt {nanos: 1000000}}]; - } - - reserved 12, 15; - - // Configuration to use different transport sockets for different endpoints. - // The entry of *envoy.transport_socket_match* in the - // :ref:`LbEndpoint.Metadata ` - // is used to match against the transport sockets as they appear in the list. The first - // :ref:`match ` is used. - // For example, with the following match - // - // .. code-block:: yaml - // - // transport_socket_matches: - // - name: "enableMTLS" - // match: - // acceptMTLS: true - // transport_socket: - // name: envoy.transport_sockets.tls - // config: { ... } # tls socket configuration - // - name: "defaultToPlaintext" - // match: {} - // transport_socket: - // name: envoy.transport_sockets.raw_buffer - // - // Connections to the endpoints whose metadata value under *envoy.transport_socket_match* - // having "acceptMTLS"/"true" key/value pair use the "enableMTLS" socket configuration. - // - // If a :ref:`socket match ` with empty match - // criteria is provided, that always match any endpoint. For example, the "defaultToPlaintext" - // socket match in case above. - // - // If an endpoint metadata's value under *envoy.transport_socket_match* does not match any - // *TransportSocketMatch*, socket configuration fallbacks to use the *tls_context* or - // *transport_socket* specified in this cluster. - // - // This field allows gradual and flexible transport socket configuration changes. - // - // The metadata of endpoints in EDS can indicate transport socket capabilities. For example, - // an endpoint's metadata can have two key value pairs as "acceptMTLS": "true", - // "acceptPlaintext": "true". 
While some other endpoints, only accepting plaintext traffic - // has "acceptPlaintext": "true" metadata information. - // - // Then the xDS server can configure the CDS to a client, Envoy A, to send mutual TLS - // traffic for endpoints with "acceptMTLS": "true", by adding a corresponding - // *TransportSocketMatch* in this field. Other client Envoys receive CDS without - // *transport_socket_match* set, and still send plain text traffic to the same cluster. - // - // [#comment:TODO(incfly): add a detailed architecture doc on intended usage.] - repeated TransportSocketMatch transport_socket_matches = 43; - - // Supplies the name of the cluster which must be unique across all clusters. - // The cluster name is used when emitting - // :ref:`statistics ` if :ref:`alt_stat_name - // ` is not provided. - // Any ``:`` in the cluster name will be converted to ``_`` when emitting statistics. - string name = 1 [(validate.rules).string = {min_bytes: 1}]; - - // An optional alternative to the cluster name to be used while emitting stats. - // Any ``:`` in the name will be converted to ``_`` when emitting statistics. This should not be - // confused with :ref:`Router Filter Header - // `. - string alt_stat_name = 28; - - oneof cluster_discovery_type { - // The :ref:`service discovery type ` - // to use for resolving the cluster. - DiscoveryType type = 2 [(validate.rules).enum = {defined_only: true}]; - - // The custom cluster type. - CustomClusterType cluster_type = 38; - } - - // Configuration to use for EDS updates for the Cluster. - EdsClusterConfig eds_cluster_config = 3; - - // The timeout for new network connections to hosts in the cluster. - google.protobuf.Duration connect_timeout = 4 [(validate.rules).duration = {gt {}}]; - - // Soft limit on size of the cluster’s connections read and write buffers. If - // unspecified, an implementation defined default is applied (1MiB). 
- google.protobuf.UInt32Value per_connection_buffer_limit_bytes = 5; - - // The :ref:`load balancer type ` to use - // when picking a host in the cluster. - LbPolicy lb_policy = 6 [(validate.rules).enum = {defined_only: true}]; - - // If the service discovery type is - // :ref:`STATIC`, - // :ref:`STRICT_DNS` - // or :ref:`LOGICAL_DNS`, - // then hosts is required. - // - // .. attention:: - // - // **This field is deprecated**. Set the - // :ref:`load_assignment` field instead. - // - repeated core.Address hosts = 7 [deprecated = true]; - - // Setting this is required for specifying members of - // :ref:`STATIC`, - // :ref:`STRICT_DNS` - // or :ref:`LOGICAL_DNS` clusters. - // This field supersedes the *hosts* field in the v2 API. - // - // .. attention:: - // - // Setting this allows non-EDS cluster types to contain embedded EDS equivalent - // :ref:`endpoint assignments`. - // - ClusterLoadAssignment load_assignment = 33; - - // Optional :ref:`active health checking ` - // configuration for the cluster. If no - // configuration is specified no health checking will be done and all cluster - // members will be considered healthy at all times. - repeated core.HealthCheck health_checks = 8; - - // Optional maximum requests for a single upstream connection. This parameter - // is respected by both the HTTP/1.1 and HTTP/2 connection pool - // implementations. If not specified, there is no limit. Setting this - // parameter to 1 will effectively disable keep alive. - google.protobuf.UInt32Value max_requests_per_connection = 9; - - // Optional :ref:`circuit breaking ` for the cluster. - cluster.CircuitBreakers circuit_breakers = 10; - - // The TLS configuration for connections to the upstream cluster. - // - // .. attention:: - // - // **This field is deprecated**. Use `transport_socket` with name `tls` instead. If both are - // set, `transport_socket` takes priority. 
- auth.UpstreamTlsContext tls_context = 11 - [deprecated = true, (envoy.annotations.disallowed_by_default) = true]; - - // HTTP protocol options that are applied only to upstream HTTP connections. - // These options apply to all HTTP versions. - core.UpstreamHttpProtocolOptions upstream_http_protocol_options = 46; - - // Additional options when handling HTTP requests upstream. These options will be applicable to - // both HTTP1 and HTTP2 requests. - core.HttpProtocolOptions common_http_protocol_options = 29; - - // Additional options when handling HTTP1 requests. - core.Http1ProtocolOptions http_protocol_options = 13; - - // Even if default HTTP2 protocol options are desired, this field must be - // set so that Envoy will assume that the upstream supports HTTP/2 when - // making new HTTP connection pool connections. Currently, Envoy only - // supports prior knowledge for upstream connections. Even if TLS is used - // with ALPN, `http2_protocol_options` must be specified. As an aside this allows HTTP/2 - // connections to happen over plain text. - core.Http2ProtocolOptions http2_protocol_options = 14; - - // The extension_protocol_options field is used to provide extension-specific protocol options - // for upstream connections. The key should match the extension filter name, such as - // "envoy.filters.network.thrift_proxy". See the extension's documentation for details on - // specific options. - map extension_protocol_options = 35 - [deprecated = true, (envoy.annotations.disallowed_by_default) = true]; - - // The extension_protocol_options field is used to provide extension-specific protocol options - // for upstream connections. The key should match the extension filter name, such as - // "envoy.filters.network.thrift_proxy". See the extension's documentation for details on - // specific options. 
- map typed_extension_protocol_options = 36; - - // If the DNS refresh rate is specified and the cluster type is either - // :ref:`STRICT_DNS`, - // or :ref:`LOGICAL_DNS`, - // this value is used as the cluster’s DNS refresh - // rate. The value configured must be at least 1ms. If this setting is not specified, the - // value defaults to 5000ms. For cluster types other than - // :ref:`STRICT_DNS` - // and :ref:`LOGICAL_DNS` - // this setting is ignored. - google.protobuf.Duration dns_refresh_rate = 16 - [(validate.rules).duration = {gt {nanos: 1000000}}]; - - // If the DNS failure refresh rate is specified and the cluster type is either - // :ref:`STRICT_DNS`, - // or :ref:`LOGICAL_DNS`, - // this is used as the cluster’s DNS refresh rate when requests are failing. If this setting is - // not specified, the failure refresh rate defaults to the DNS refresh rate. For cluster types - // other than :ref:`STRICT_DNS` and - // :ref:`LOGICAL_DNS` this setting is - // ignored. - RefreshRate dns_failure_refresh_rate = 44; - - // Optional configuration for setting cluster's DNS refresh rate. If the value is set to true, - // cluster's DNS refresh rate will be set to resource record's TTL which comes from DNS - // resolution. - bool respect_dns_ttl = 39; - - // The DNS IP address resolution policy. If this setting is not specified, the - // value defaults to - // :ref:`AUTO`. - DnsLookupFamily dns_lookup_family = 17 [(validate.rules).enum = {defined_only: true}]; - - // If DNS resolvers are specified and the cluster type is either - // :ref:`STRICT_DNS`, - // or :ref:`LOGICAL_DNS`, - // this value is used to specify the cluster’s dns resolvers. - // If this setting is not specified, the value defaults to the default - // resolver, which uses /etc/resolv.conf for configuration. For cluster types - // other than - // :ref:`STRICT_DNS` - // and :ref:`LOGICAL_DNS` - // this setting is ignored. 
- // Setting this value causes failure if the - // ``envoy.restart_features.use_apple_api_for_dns_lookups`` runtime value is true during - // server startup. Apple's API only allows overriding DNS resolvers via system settings. - repeated core.Address dns_resolvers = 18; - - // [#next-major-version: Reconcile DNS options in a single message.] - // Always use TCP queries instead of UDP queries for DNS lookups. - // Setting this value causes failure if the - // ``envoy.restart_features.use_apple_api_for_dns_lookups`` runtime value is true during - // server startup. Apple' API only uses UDP for DNS resolution. - bool use_tcp_for_dns_lookups = 45; - - // If specified, outlier detection will be enabled for this upstream cluster. - // Each of the configuration values can be overridden via - // :ref:`runtime values `. - cluster.OutlierDetection outlier_detection = 19; - - // The interval for removing stale hosts from a cluster type - // :ref:`ORIGINAL_DST`. - // Hosts are considered stale if they have not been used - // as upstream destinations during this interval. New hosts are added - // to original destination clusters on demand as new connections are - // redirected to Envoy, causing the number of hosts in the cluster to - // grow over time. Hosts that are not stale (they are actively used as - // destinations) are kept in the cluster, which allows connections to - // them remain open, saving the latency that would otherwise be spent - // on opening new connections. If this setting is not specified, the - // value defaults to 5000ms. For cluster types other than - // :ref:`ORIGINAL_DST` - // this setting is ignored. - google.protobuf.Duration cleanup_interval = 20 [(validate.rules).duration = {gt {}}]; - - // Optional configuration used to bind newly established upstream connections. - // This overrides any bind_config specified in the bootstrap proto. - // If the address and port are empty, no bind will be performed. 
- core.BindConfig upstream_bind_config = 21; - - // Configuration for load balancing subsetting. - LbSubsetConfig lb_subset_config = 22; - - // Optional configuration for the load balancing algorithm selected by - // LbPolicy. Currently only - // :ref:`RING_HASH` and - // :ref:`LEAST_REQUEST` - // has additional configuration options. - // Specifying ring_hash_lb_config or least_request_lb_config without setting the corresponding - // LbPolicy will generate an error at runtime. - oneof lb_config { - // Optional configuration for the Ring Hash load balancing policy. - RingHashLbConfig ring_hash_lb_config = 23; - - // Optional configuration for the Original Destination load balancing policy. - OriginalDstLbConfig original_dst_lb_config = 34; - - // Optional configuration for the LeastRequest load balancing policy. - LeastRequestLbConfig least_request_lb_config = 37; - } - - // Common configuration for all load balancer implementations. - CommonLbConfig common_lb_config = 27; - - // Optional custom transport socket implementation to use for upstream connections. - // To setup TLS, set a transport socket with name `tls` and - // :ref:`UpstreamTlsContexts ` in the `typed_config`. - // If no transport socket configuration is specified, new connections - // will be set up with plaintext. - core.TransportSocket transport_socket = 24; - - // The Metadata field can be used to provide additional information about the - // cluster. It can be used for stats, logging, and varying filter behavior. - // Fields should use reverse DNS notation to denote which entity within Envoy - // will need the information. For instance, if the metadata is intended for - // the Router filter, the filter name should be specified as *envoy.filters.http.router*. - core.Metadata metadata = 25; - - // Determines how Envoy selects the protocol used to speak to upstream hosts. - ClusterProtocolSelection protocol_selection = 26; - - // Optional options for upstream connections. 
- UpstreamConnectionOptions upstream_connection_options = 30; - - // If an upstream host becomes unhealthy (as determined by the configured health checks - // or outlier detection), immediately close all connections to the failed host. - // - // .. note:: - // - // This is currently only supported for connections created by tcp_proxy. - // - // .. note:: - // - // The current implementation of this feature closes all connections immediately when - // the unhealthy status is detected. If there are a large number of connections open - // to an upstream host that becomes unhealthy, Envoy may spend a substantial amount of - // time exclusively closing these connections, and not processing any other traffic. - bool close_connections_on_host_health_failure = 31; - - // If set to true, Envoy will ignore the health value of a host when processing its removal - // from service discovery. This means that if active health checking is used, Envoy will *not* - // wait for the endpoint to go unhealthy before removing it. - bool drain_connections_on_host_removal = 32 - [(udpa.annotations.field_migrate).rename = "ignore_health_on_host_removal"]; - - // An (optional) network filter chain, listed in the order the filters should be applied. - // The chain will be applied to all outgoing connections that Envoy makes to the upstream - // servers of this cluster. - repeated cluster.Filter filters = 40; - - // [#not-implemented-hide:] New mechanism for LB policy configuration. Used only if the - // :ref:`lb_policy` field has the value - // :ref:`LOAD_BALANCING_POLICY_CONFIG`. - LoadBalancingPolicy load_balancing_policy = 41; - - // [#not-implemented-hide:] - // If present, tells the client where to send load reports via LRS. If not present, the - // client will fall back to a client-side default, which may be either (a) don't send any - // load reports or (b) send load reports for all clusters to a single default server - // (which may be configured in the bootstrap file). 
- // - // Note that if multiple clusters point to the same LRS server, the client may choose to - // create a separate stream for each cluster or it may choose to coalesce the data for - // multiple clusters onto a single stream. Either way, the client must make sure to send - // the data for any given cluster on no more than one stream. - // - // [#next-major-version: In the v3 API, we should consider restructuring this somehow, - // maybe by allowing LRS to go on the ADS stream, or maybe by moving some of the negotiation - // from the LRS stream here.] - core.ConfigSource lrs_server = 42; - - // If track_timeout_budgets is true, the :ref:`timeout budget histograms - // ` will be published for each - // request. These show what percentage of a request's per try and global timeout was used. A value - // of 0 would indicate that none of the timeout was used or that the timeout was infinite. A value - // of 100 would indicate that the request took the entirety of the timeout given to it. - bool track_timeout_budgets = 47; -} - -// [#not-implemented-hide:] Extensible load balancing policy configuration. -// -// Every LB policy defined via this mechanism will be identified via a unique name using reverse -// DNS notation. If the policy needs configuration parameters, it must define a message for its -// own configuration, which will be stored in the config field. The name of the policy will tell -// clients which type of message they should expect to see in the config field. -// -// Note that there are cases where it is useful to be able to independently select LB policies -// for choosing a locality and for choosing an endpoint within that locality. For example, a -// given deployment may always use the same policy to choose the locality, but for choosing the -// endpoint within the locality, some clusters may use weighted-round-robin, while others may -// use some sort of session-based balancing. 
-// -// This can be accomplished via hierarchical LB policies, where the parent LB policy creates a -// child LB policy for each locality. For each request, the parent chooses the locality and then -// delegates to the child policy for that locality to choose the endpoint within the locality. -// -// To facilitate this, the config message for the top-level LB policy may include a field of -// type LoadBalancingPolicy that specifies the child policy. -message LoadBalancingPolicy { - message Policy { - // Required. The name of the LB policy. - string name = 1; - - // Optional config for the LB policy. - // No more than one of these two fields may be populated. - google.protobuf.Struct config = 2 [deprecated = true]; - - google.protobuf.Any typed_config = 3; - } - - // Each client will iterate over the list in order and stop at the first policy that it - // supports. This provides a mechanism for starting to use new LB policies that are not yet - // supported by all clients. - repeated Policy policies = 1; -} - -// An extensible structure containing the address Envoy should bind to when -// establishing upstream connections. -message UpstreamBindConfig { - // The address Envoy should bind to when establishing upstream connections. - core.Address source_address = 1; -} - -message UpstreamConnectionOptions { - // If set then set SO_KEEPALIVE on the socket to enable TCP Keepalives. - core.TcpKeepalive tcp_keepalive = 1; -} diff --git a/generated_api_shadow/envoy/api/v2/cluster/BUILD b/generated_api_shadow/envoy/api/v2/cluster/BUILD deleted file mode 100644 index 2ffbc958786b3..0000000000000 --- a/generated_api_shadow/envoy/api/v2/cluster/BUILD +++ /dev/null @@ -1,13 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/api/v2/core:pkg", - "//envoy/type:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/api/v2/cluster/circuit_breaker.proto b/generated_api_shadow/envoy/api/v2/cluster/circuit_breaker.proto deleted file mode 100644 index 510619b264296..0000000000000 --- a/generated_api_shadow/envoy/api/v2/cluster/circuit_breaker.proto +++ /dev/null @@ -1,99 +0,0 @@ -syntax = "proto3"; - -package envoy.api.v2.cluster; - -import "envoy/api/v2/core/base.proto"; -import "envoy/type/percent.proto"; - -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.api.v2.cluster"; -option java_outer_classname = "CircuitBreakerProto"; -option java_multiple_files = true; -option csharp_namespace = "Envoy.Api.V2.ClusterNS"; -option ruby_package = "Envoy.Api.V2.ClusterNS"; -option (udpa.annotations.file_migrate).move_to_package = "envoy.config.cluster.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Circuit breakers] - -// :ref:`Circuit breaking` settings can be -// specified individually for each defined priority. -message CircuitBreakers { - // A Thresholds defines CircuitBreaker settings for a - // :ref:`RoutingPriority`. - // [#next-free-field: 9] - message Thresholds { - message RetryBudget { - // Specifies the limit on concurrent retries as a percentage of the sum of active requests and - // active pending requests. For example, if there are 100 active requests and the - // budget_percent is set to 25, there may be 25 active retries. - // - // This parameter is optional. Defaults to 20%. - type.Percent budget_percent = 1; - - // Specifies the minimum retry concurrency allowed for the retry budget. 
The limit on the - // number of active retries may never go below this number. - // - // This parameter is optional. Defaults to 3. - google.protobuf.UInt32Value min_retry_concurrency = 2; - } - - // The :ref:`RoutingPriority` - // the specified CircuitBreaker settings apply to. - core.RoutingPriority priority = 1 [(validate.rules).enum = {defined_only: true}]; - - // The maximum number of connections that Envoy will make to the upstream - // cluster. If not specified, the default is 1024. - google.protobuf.UInt32Value max_connections = 2; - - // The maximum number of pending requests that Envoy will allow to the - // upstream cluster. If not specified, the default is 1024. - google.protobuf.UInt32Value max_pending_requests = 3; - - // The maximum number of parallel requests that Envoy will make to the - // upstream cluster. If not specified, the default is 1024. - google.protobuf.UInt32Value max_requests = 4; - - // The maximum number of parallel retries that Envoy will allow to the - // upstream cluster. If not specified, the default is 3. - google.protobuf.UInt32Value max_retries = 5; - - // Specifies a limit on concurrent retries in relation to the number of active requests. This - // parameter is optional. - // - // .. note:: - // - // If this field is set, the retry budget will override any configured retry circuit - // breaker. - RetryBudget retry_budget = 8; - - // If track_remaining is true, then stats will be published that expose - // the number of resources remaining until the circuit breakers open. If - // not specified, the default is false. - // - // .. note:: - // - // If a retry budget is used in lieu of the max_retries circuit breaker, - // the remaining retry resources remaining will not be tracked. - bool track_remaining = 6; - - // The maximum number of connection pools per cluster that Envoy will concurrently support at - // once. If not specified, the default is unlimited. 
Set this for clusters which create a - // large number of connection pools. See - // :ref:`Circuit Breaking ` for - // more details. - google.protobuf.UInt32Value max_connection_pools = 7; - } - - // If multiple :ref:`Thresholds` - // are defined with the same :ref:`RoutingPriority`, - // the first one in the list is used. If no Thresholds is defined for a given - // :ref:`RoutingPriority`, the default values - // are used. - repeated Thresholds thresholds = 1; -} diff --git a/generated_api_shadow/envoy/api/v2/cluster/filter.proto b/generated_api_shadow/envoy/api/v2/cluster/filter.proto deleted file mode 100644 index b87ad79d8f352..0000000000000 --- a/generated_api_shadow/envoy/api/v2/cluster/filter.proto +++ /dev/null @@ -1,30 +0,0 @@ -syntax = "proto3"; - -package envoy.api.v2.cluster; - -import "google/protobuf/any.proto"; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.api.v2.cluster"; -option java_outer_classname = "FilterProto"; -option java_multiple_files = true; -option csharp_namespace = "Envoy.Api.V2.ClusterNS"; -option ruby_package = "Envoy.Api.V2.ClusterNS"; -option (udpa.annotations.file_migrate).move_to_package = "envoy.config.cluster.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Upstream filters] -// Upstream filters apply to the connections to the upstream cluster hosts. - -message Filter { - // The name of the filter to instantiate. The name must match a - // :ref:`supported filter `. - string name = 1 [(validate.rules).string = {min_bytes: 1}]; - - // Filter specific configuration which depends on the filter being - // instantiated. See the supported filters for further documentation. 
- google.protobuf.Any typed_config = 2; -} diff --git a/generated_api_shadow/envoy/api/v2/cluster/outlier_detection.proto b/generated_api_shadow/envoy/api/v2/cluster/outlier_detection.proto deleted file mode 100644 index 6cf35e41ff153..0000000000000 --- a/generated_api_shadow/envoy/api/v2/cluster/outlier_detection.proto +++ /dev/null @@ -1,151 +0,0 @@ -syntax = "proto3"; - -package envoy.api.v2.cluster; - -import "google/protobuf/duration.proto"; -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.api.v2.cluster"; -option java_outer_classname = "OutlierDetectionProto"; -option java_multiple_files = true; -option csharp_namespace = "Envoy.Api.V2.ClusterNS"; -option ruby_package = "Envoy.Api.V2.ClusterNS"; -option (udpa.annotations.file_migrate).move_to_package = "envoy.config.cluster.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Outlier detection] - -// See the :ref:`architecture overview ` for -// more information on outlier detection. -// [#next-free-field: 21] -message OutlierDetection { - // The number of consecutive 5xx responses or local origin errors that are mapped - // to 5xx error codes before a consecutive 5xx ejection - // occurs. Defaults to 5. - google.protobuf.UInt32Value consecutive_5xx = 1; - - // The time interval between ejection analysis sweeps. This can result in - // both new ejections as well as hosts being returned to service. Defaults - // to 10000ms or 10s. - google.protobuf.Duration interval = 2 [(validate.rules).duration = {gt {}}]; - - // The base time that a host is ejected for. The real time is equal to the - // base time multiplied by the number of times the host has been ejected. - // Defaults to 30000ms or 30s. 
- google.protobuf.Duration base_ejection_time = 3 [(validate.rules).duration = {gt {}}]; - - // The maximum % of an upstream cluster that can be ejected due to outlier - // detection. Defaults to 10% but will eject at least one host regardless of the value. - google.protobuf.UInt32Value max_ejection_percent = 4 [(validate.rules).uint32 = {lte: 100}]; - - // The % chance that a host will be actually ejected when an outlier status - // is detected through consecutive 5xx. This setting can be used to disable - // ejection or to ramp it up slowly. Defaults to 100. - google.protobuf.UInt32Value enforcing_consecutive_5xx = 5 [(validate.rules).uint32 = {lte: 100}]; - - // The % chance that a host will be actually ejected when an outlier status - // is detected through success rate statistics. This setting can be used to - // disable ejection or to ramp it up slowly. Defaults to 100. - google.protobuf.UInt32Value enforcing_success_rate = 6 [(validate.rules).uint32 = {lte: 100}]; - - // The number of hosts in a cluster that must have enough request volume to - // detect success rate outliers. If the number of hosts is less than this - // setting, outlier detection via success rate statistics is not performed - // for any host in the cluster. Defaults to 5. - google.protobuf.UInt32Value success_rate_minimum_hosts = 7; - - // The minimum number of total requests that must be collected in one - // interval (as defined by the interval duration above) to include this host - // in success rate based outlier detection. If the volume is lower than this - // setting, outlier detection via success rate statistics is not performed - // for that host. Defaults to 100. - google.protobuf.UInt32Value success_rate_request_volume = 8; - - // This factor is used to determine the ejection threshold for success rate - // outlier ejection. 
The ejection threshold is the difference between the - // mean success rate, and the product of this factor and the standard - // deviation of the mean success rate: mean - (stdev * - // success_rate_stdev_factor). This factor is divided by a thousand to get a - // double. That is, if the desired factor is 1.9, the runtime value should - // be 1900. Defaults to 1900. - google.protobuf.UInt32Value success_rate_stdev_factor = 9; - - // The number of consecutive gateway failures (502, 503, 504 status codes) - // before a consecutive gateway failure ejection occurs. Defaults to 5. - google.protobuf.UInt32Value consecutive_gateway_failure = 10; - - // The % chance that a host will be actually ejected when an outlier status - // is detected through consecutive gateway failures. This setting can be - // used to disable ejection or to ramp it up slowly. Defaults to 0. - google.protobuf.UInt32Value enforcing_consecutive_gateway_failure = 11 - [(validate.rules).uint32 = {lte: 100}]; - - // Determines whether to distinguish local origin failures from external errors. If set to true - // the following configuration parameters are taken into account: - // :ref:`consecutive_local_origin_failure`, - // :ref:`enforcing_consecutive_local_origin_failure` - // and - // :ref:`enforcing_local_origin_success_rate`. - // Defaults to false. - bool split_external_local_origin_errors = 12; - - // The number of consecutive locally originated failures before ejection - // occurs. Defaults to 5. Parameter takes effect only when - // :ref:`split_external_local_origin_errors` - // is set to true. - google.protobuf.UInt32Value consecutive_local_origin_failure = 13; - - // The % chance that a host will be actually ejected when an outlier status - // is detected through consecutive locally originated failures. This setting can be - // used to disable ejection or to ramp it up slowly. Defaults to 100. 
- // Parameter takes effect only when - // :ref:`split_external_local_origin_errors` - // is set to true. - google.protobuf.UInt32Value enforcing_consecutive_local_origin_failure = 14 - [(validate.rules).uint32 = {lte: 100}]; - - // The % chance that a host will be actually ejected when an outlier status - // is detected through success rate statistics for locally originated errors. - // This setting can be used to disable ejection or to ramp it up slowly. Defaults to 100. - // Parameter takes effect only when - // :ref:`split_external_local_origin_errors` - // is set to true. - google.protobuf.UInt32Value enforcing_local_origin_success_rate = 15 - [(validate.rules).uint32 = {lte: 100}]; - - // The failure percentage to use when determining failure percentage-based outlier detection. If - // the failure percentage of a given host is greater than or equal to this value, it will be - // ejected. Defaults to 85. - google.protobuf.UInt32Value failure_percentage_threshold = 16 - [(validate.rules).uint32 = {lte: 100}]; - - // The % chance that a host will be actually ejected when an outlier status is detected through - // failure percentage statistics. This setting can be used to disable ejection or to ramp it up - // slowly. Defaults to 0. - // - // [#next-major-version: setting this without setting failure_percentage_threshold should be - // invalid in v4.] - google.protobuf.UInt32Value enforcing_failure_percentage = 17 - [(validate.rules).uint32 = {lte: 100}]; - - // The % chance that a host will be actually ejected when an outlier status is detected through - // local-origin failure percentage statistics. This setting can be used to disable ejection or to - // ramp it up slowly. Defaults to 0. - google.protobuf.UInt32Value enforcing_failure_percentage_local_origin = 18 - [(validate.rules).uint32 = {lte: 100}]; - - // The minimum number of hosts in a cluster in order to perform failure percentage-based ejection. 
- // If the total number of hosts in the cluster is less than this value, failure percentage-based - // ejection will not be performed. Defaults to 5. - google.protobuf.UInt32Value failure_percentage_minimum_hosts = 19; - - // The minimum number of total requests that must be collected in one interval (as defined by the - // interval duration above) to perform failure percentage-based ejection for this host. If the - // volume is lower than this setting, failure percentage-based ejection will not be performed for - // this host. Defaults to 50. - google.protobuf.UInt32Value failure_percentage_request_volume = 20; -} diff --git a/generated_api_shadow/envoy/api/v2/core/BUILD b/generated_api_shadow/envoy/api/v2/core/BUILD deleted file mode 100644 index 8475a4ba83760..0000000000000 --- a/generated_api_shadow/envoy/api/v2/core/BUILD +++ /dev/null @@ -1,14 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/annotations:pkg", - "//envoy/type:pkg", - "//envoy/type/matcher:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/api/v2/core/address.proto b/generated_api_shadow/envoy/api/v2/core/address.proto deleted file mode 100644 index fdcb4e7d94f9a..0000000000000 --- a/generated_api_shadow/envoy/api/v2/core/address.proto +++ /dev/null @@ -1,134 +0,0 @@ -syntax = "proto3"; - -package envoy.api.v2.core; - -import "envoy/api/v2/core/socket_option.proto"; - -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.api.v2.core"; -option java_outer_classname = "AddressProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = "envoy.config.core.v3"; -option 
(udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Network addresses] - -message Pipe { - // Unix Domain Socket path. On Linux, paths starting with '@' will use the - // abstract namespace. The starting '@' is replaced by a null byte by Envoy. - // Paths starting with '@' will result in an error in environments other than - // Linux. - string path = 1 [(validate.rules).string = {min_bytes: 1}]; - - // The mode for the Pipe. Not applicable for abstract sockets. - uint32 mode = 2 [(validate.rules).uint32 = {lte: 511}]; -} - -// [#next-free-field: 7] -message SocketAddress { - enum Protocol { - TCP = 0; - UDP = 1; - } - - Protocol protocol = 1 [(validate.rules).enum = {defined_only: true}]; - - // The address for this socket. :ref:`Listeners ` will bind - // to the address. An empty address is not allowed. Specify ``0.0.0.0`` or ``::`` - // to bind to any address. [#comment:TODO(zuercher) reinstate when implemented: - // It is possible to distinguish a Listener address via the prefix/suffix matching - // in :ref:`FilterChainMatch `.] When used - // within an upstream :ref:`BindConfig `, the address - // controls the source address of outbound connections. For :ref:`clusters - // `, the cluster type determines whether the - // address must be an IP (*STATIC* or *EDS* clusters) or a hostname resolved by DNS - // (*STRICT_DNS* or *LOGICAL_DNS* clusters). Address resolution can be customized - // via :ref:`resolver_name `. - string address = 2 [(validate.rules).string = {min_bytes: 1}]; - - oneof port_specifier { - option (validate.required) = true; - - uint32 port_value = 3 [(validate.rules).uint32 = {lte: 65535}]; - - // This is only valid if :ref:`resolver_name - // ` is specified below and the - // named resolver is capable of named port resolution. - string named_port = 4; - } - - // The name of the custom resolver. This must have been registered with Envoy. If - // this is empty, a context dependent default applies. 
If the address is a concrete - // IP address, no resolution will occur. If address is a hostname this - // should be set for resolution other than DNS. Specifying a custom resolver with - // *STRICT_DNS* or *LOGICAL_DNS* will generate an error at runtime. - string resolver_name = 5; - - // When binding to an IPv6 address above, this enables `IPv4 compatibility - // `_. Binding to ``::`` will - // allow both IPv4 and IPv6 connections, with peer IPv4 addresses mapped into - // IPv6 space as ``::FFFF:``. - bool ipv4_compat = 6; -} - -message TcpKeepalive { - // Maximum number of keepalive probes to send without response before deciding - // the connection is dead. Default is to use the OS level configuration (unless - // overridden, Linux defaults to 9.) - google.protobuf.UInt32Value keepalive_probes = 1; - - // The number of seconds a connection needs to be idle before keep-alive probes - // start being sent. Default is to use the OS level configuration (unless - // overridden, Linux defaults to 7200s (i.e., 2 hours.) - google.protobuf.UInt32Value keepalive_time = 2; - - // The number of seconds between keep-alive probes. Default is to use the OS - // level configuration (unless overridden, Linux defaults to 75s.) - google.protobuf.UInt32Value keepalive_interval = 3; -} - -message BindConfig { - // The address to bind to when creating a socket. - SocketAddress source_address = 1 [(validate.rules).message = {required: true}]; - - // Whether to set the *IP_FREEBIND* option when creating the socket. When this - // flag is set to true, allows the :ref:`source_address - // ` to be an IP address - // that is not configured on the system running Envoy. When this flag is set - // to false, the option *IP_FREEBIND* is disabled on the socket. When this - // flag is not set (default), the socket is not modified, i.e. the option is - // neither enabled nor disabled. 
- google.protobuf.BoolValue freebind = 2; - - // Additional socket options that may not be present in Envoy source code or - // precompiled binaries. - repeated SocketOption socket_options = 3; -} - -// Addresses specify either a logical or physical address and port, which are -// used to tell Envoy where to bind/listen, connect to upstream and find -// management servers. -message Address { - oneof address { - option (validate.required) = true; - - SocketAddress socket_address = 1; - - Pipe pipe = 2; - } -} - -// CidrRange specifies an IP Address and a prefix length to construct -// the subnet mask for a `CIDR `_ range. -message CidrRange { - // IPv4 or IPv6 address, e.g. ``192.0.0.0`` or ``2001:db8::``. - string address_prefix = 1 [(validate.rules).string = {min_bytes: 1}]; - - // Length of prefix, e.g. 0, 32. Defaults to 0 when unset. - google.protobuf.UInt32Value prefix_len = 2 [(validate.rules).uint32 = {lte: 128}]; -} diff --git a/generated_api_shadow/envoy/api/v2/core/backoff.proto b/generated_api_shadow/envoy/api/v2/core/backoff.proto deleted file mode 100644 index e45c71e39be8f..0000000000000 --- a/generated_api_shadow/envoy/api/v2/core/backoff.proto +++ /dev/null @@ -1,35 +0,0 @@ -syntax = "proto3"; - -package envoy.api.v2.core; - -import "google/protobuf/duration.proto"; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.api.v2.core"; -option java_outer_classname = "BackoffProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = "envoy.config.core.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Backoff Strategy] - -// Configuration defining a jittered exponential back off strategy. -message BackoffStrategy { - // The base interval to be used for the next back off computation. 
It should - // be greater than zero and less than or equal to :ref:`max_interval - // `. - google.protobuf.Duration base_interval = 1 [(validate.rules).duration = { - required: true - gte {nanos: 1000000} - }]; - - // Specifies the maximum interval between retries. This parameter is optional, - // but must be greater than or equal to the :ref:`base_interval - // ` if set. The default - // is 10 times the :ref:`base_interval - // `. - google.protobuf.Duration max_interval = 2 [(validate.rules).duration = {gt {}}]; -} diff --git a/generated_api_shadow/envoy/api/v2/core/base.proto b/generated_api_shadow/envoy/api/v2/core/base.proto deleted file mode 100644 index 32cd90b4ee1b4..0000000000000 --- a/generated_api_shadow/envoy/api/v2/core/base.proto +++ /dev/null @@ -1,381 +0,0 @@ -syntax = "proto3"; - -package envoy.api.v2.core; - -import "envoy/api/v2/core/address.proto"; -import "envoy/api/v2/core/backoff.proto"; -import "envoy/api/v2/core/http_uri.proto"; -import "envoy/type/percent.proto"; -import "envoy/type/semantic_version.proto"; - -import "google/protobuf/any.proto"; -import "google/protobuf/struct.proto"; -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -import public "envoy/api/v2/core/socket_option.proto"; - -option java_package = "io.envoyproxy.envoy.api.v2.core"; -option java_outer_classname = "BaseProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = "envoy.config.core.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Common types] - -// Envoy supports :ref:`upstream priority routing -// ` both at the route and the virtual -// cluster level. The current priority implementation uses different connection -// pool and circuit breaking settings for each priority level. 
This means that -// even for HTTP/2 requests, two physical connections will be used to an -// upstream host. In the future Envoy will likely support true HTTP/2 priority -// over a single upstream connection. -enum RoutingPriority { - DEFAULT = 0; - HIGH = 1; -} - -// HTTP request method. -enum RequestMethod { - METHOD_UNSPECIFIED = 0; - GET = 1; - HEAD = 2; - POST = 3; - PUT = 4; - DELETE = 5; - CONNECT = 6; - OPTIONS = 7; - TRACE = 8; - PATCH = 9; -} - -// Identifies the direction of the traffic relative to the local Envoy. -enum TrafficDirection { - // Default option is unspecified. - UNSPECIFIED = 0; - - // The transport is used for incoming traffic. - INBOUND = 1; - - // The transport is used for outgoing traffic. - OUTBOUND = 2; -} - -// Identifies location of where either Envoy runs or where upstream hosts run. -message Locality { - // Region this :ref:`zone ` belongs to. - string region = 1; - - // Defines the local service zone where Envoy is running. Though optional, it - // should be set if discovery service routing is used and the discovery - // service exposes :ref:`zone data `, - // either in this message or via :option:`--service-zone`. The meaning of zone - // is context dependent, e.g. `Availability Zone (AZ) - // `_ - // on AWS, `Zone `_ on - // GCP, etc. - string zone = 2; - - // When used for locality of upstream hosts, this field further splits zone - // into smaller chunks of sub-zones so they can be load balanced - // independently. - string sub_zone = 3; -} - -// BuildVersion combines SemVer version of extension with free-form build information -// (i.e. 'alpha', 'private-build') as a set of strings. -message BuildVersion { - // SemVer version of extension. - type.SemanticVersion version = 1; - - // Free-form build information. - // Envoy defines several well known keys in the source/common/version/version.h file - google.protobuf.Struct metadata = 2; -} - -// Version and identification for an Envoy extension. 
-// [#next-free-field: 6] -message Extension { - // This is the name of the Envoy filter as specified in the Envoy - // configuration, e.g. envoy.filters.http.router, com.acme.widget. - string name = 1; - - // Category of the extension. - // Extension category names use reverse DNS notation. For instance "envoy.filters.listener" - // for Envoy's built-in listener filters or "com.acme.filters.http" for HTTP filters from - // acme.com vendor. - // [#comment:TODO(yanavlasov): Link to the doc with existing envoy category names.] - string category = 2; - - // [#not-implemented-hide:] Type descriptor of extension configuration proto. - // [#comment:TODO(yanavlasov): Link to the doc with existing configuration protos.] - // [#comment:TODO(yanavlasov): Add tests when PR #9391 lands.] - string type_descriptor = 3; - - // The version is a property of the extension and maintained independently - // of other extensions and the Envoy API. - // This field is not set when extension did not provide version information. - BuildVersion version = 4; - - // Indicates that the extension is present but was disabled via dynamic configuration. - bool disabled = 5; -} - -// Identifies a specific Envoy instance. The node identifier is presented to the -// management server, which may use this identifier to distinguish per Envoy -// configuration for serving. -// [#next-free-field: 12] -message Node { - // An opaque node identifier for the Envoy node. This also provides the local - // service node name. It should be set if any of the following features are - // used: :ref:`statsd `, :ref:`CDS - // `, and :ref:`HTTP tracing - // `, either in this message or via - // :option:`--service-node`. - string id = 1; - - // Defines the local service cluster name where Envoy is running. 
Though - // optional, it should be set if any of the following features are used: - // :ref:`statsd `, :ref:`health check cluster - // verification - // `, - // :ref:`runtime override directory `, - // :ref:`user agent addition - // `, - // :ref:`HTTP global rate limiting `, - // :ref:`CDS `, and :ref:`HTTP tracing - // `, either in this message or via - // :option:`--service-cluster`. - string cluster = 2; - - // Opaque metadata extending the node identifier. Envoy will pass this - // directly to the management server. - google.protobuf.Struct metadata = 3; - - // Locality specifying where the Envoy instance is running. - Locality locality = 4; - - // This is motivated by informing a management server during canary which - // version of Envoy is being tested in a heterogeneous fleet. This will be set - // by Envoy in management server RPCs. - // This field is deprecated in favor of the user_agent_name and user_agent_version values. - string build_version = 5 [deprecated = true]; - - // Free-form string that identifies the entity requesting config. - // E.g. "envoy" or "grpc" - string user_agent_name = 6; - - oneof user_agent_version_type { - // Free-form string that identifies the version of the entity requesting config. - // E.g. "1.12.2" or "abcd1234", or "SpecialEnvoyBuild" - string user_agent_version = 7; - - // Structured version of the entity requesting config. - BuildVersion user_agent_build_version = 8; - } - - // List of extensions and their versions supported by the node. - repeated Extension extensions = 9; - - // Client feature support list. These are well known features described - // in the Envoy API repository for a given major version of an API. Client features - // use reverse DNS naming scheme, for example `com.acme.feature`. - // See :ref:`the list of features ` that xDS client may - // support. 
- repeated string client_features = 10; - - // Known listening ports on the node as a generic hint to the management server - // for filtering :ref:`listeners ` to be returned. For example, - // if there is a listener bound to port 80, the list can optionally contain the - // SocketAddress `(0.0.0.0,80)`. The field is optional and just a hint. - repeated Address listening_addresses = 11; -} - -// Metadata provides additional inputs to filters based on matched listeners, -// filter chains, routes and endpoints. It is structured as a map, usually from -// filter name (in reverse DNS format) to metadata specific to the filter. Metadata -// key-values for a filter are merged as connection and request handling occurs, -// with later values for the same key overriding earlier values. -// -// An example use of metadata is providing additional values to -// http_connection_manager in the envoy.http_connection_manager.access_log -// namespace. -// -// Another example use of metadata is to per service config info in cluster metadata, which may get -// consumed by multiple filters. -// -// For load balancing, Metadata provides a means to subset cluster endpoints. -// Endpoints have a Metadata object associated and routes contain a Metadata -// object to match against. There are some well defined metadata used today for -// this purpose: -// -// * ``{"envoy.lb": {"canary": }}`` This indicates the canary status of an -// endpoint and is also used during header processing -// (x-envoy-upstream-canary) and for stats purposes. -// [#next-major-version: move to type/metadata/v2] -message Metadata { - // Key is the reverse DNS filter name, e.g. com.acme.widget. The envoy.* - // namespace is reserved for Envoy's built-in filters. - map filter_metadata = 1; -} - -// Runtime derived uint32 with a default when not specified. -message RuntimeUInt32 { - // Default value if runtime value is not available. - uint32 default_value = 2; - - // Runtime key to get value for comparison. 
This value is used if defined. - string runtime_key = 3 [(validate.rules).string = {min_bytes: 1}]; -} - -// Runtime derived double with a default when not specified. -message RuntimeDouble { - // Default value if runtime value is not available. - double default_value = 1; - - // Runtime key to get value for comparison. This value is used if defined. - string runtime_key = 2 [(validate.rules).string = {min_bytes: 1}]; -} - -// Runtime derived bool with a default when not specified. -message RuntimeFeatureFlag { - // Default value if runtime value is not available. - google.protobuf.BoolValue default_value = 1 [(validate.rules).message = {required: true}]; - - // Runtime key to get value for comparison. This value is used if defined. The boolean value must - // be represented via its - // `canonical JSON encoding `_. - string runtime_key = 2 [(validate.rules).string = {min_bytes: 1}]; -} - -// Header name/value pair. -message HeaderValue { - // Header name. - string key = 1 - [(validate.rules).string = - {min_bytes: 1 max_bytes: 16384 well_known_regex: HTTP_HEADER_NAME strict: false}]; - - // Header value. - // - // The same :ref:`format specifier ` as used for - // :ref:`HTTP access logging ` applies here, however - // unknown header values are replaced with the empty string instead of `-`. - string value = 2 [ - (validate.rules).string = {max_bytes: 16384 well_known_regex: HTTP_HEADER_VALUE strict: false} - ]; -} - -// Header name/value pair plus option to control append behavior. -message HeaderValueOption { - // Header name/value pair that this option applies to. - HeaderValue header = 1 [(validate.rules).message = {required: true}]; - - // Should the value be appended? If true (default), the value is appended to - // existing values. - google.protobuf.BoolValue append = 2; -} - -// Wrapper for a set of headers. -message HeaderMap { - repeated HeaderValue headers = 1; -} - -// Data source consisting of either a file or an inline value. 
-message DataSource { - oneof specifier { - option (validate.required) = true; - - // Local filesystem data source. - string filename = 1 [(validate.rules).string = {min_bytes: 1}]; - - // Bytes inlined in the configuration. - bytes inline_bytes = 2 [(validate.rules).bytes = {min_len: 1}]; - - // String inlined in the configuration. - string inline_string = 3 [(validate.rules).string = {min_bytes: 1}]; - } -} - -// The message specifies the retry policy of remote data source when fetching fails. -message RetryPolicy { - // Specifies parameters that control :ref:`retry backoff strategy `. - // This parameter is optional, in which case the default base interval is 1000 milliseconds. The - // default maximum interval is 10 times the base interval. - BackoffStrategy retry_back_off = 1; - - // Specifies the allowed number of retries. This parameter is optional and - // defaults to 1. - google.protobuf.UInt32Value num_retries = 2; -} - -// The message specifies how to fetch data from remote and how to verify it. -message RemoteDataSource { - // The HTTP URI to fetch the remote data. - HttpUri http_uri = 1 [(validate.rules).message = {required: true}]; - - // SHA256 string for verifying data. - string sha256 = 2 [(validate.rules).string = {min_bytes: 1}]; - - // Retry policy for fetching remote data. - RetryPolicy retry_policy = 3; -} - -// Async data source which support async data fetch. -message AsyncDataSource { - oneof specifier { - option (validate.required) = true; - - // Local async data source. - DataSource local = 1; - - // Remote async data source. - RemoteDataSource remote = 2; - } -} - -// Configuration for transport socket in :ref:`listeners ` and -// :ref:`clusters `. If the configuration is -// empty, a default transport socket implementation and configuration will be -// chosen based on the platform and existence of tls_context. -message TransportSocket { - // The name of the transport socket to instantiate. 
The name must match a supported transport - // socket implementation. - string name = 1 [(validate.rules).string = {min_bytes: 1}]; - - // Implementation specific configuration which depends on the implementation being instantiated. - // See the supported transport socket implementations for further documentation. - oneof config_type { - google.protobuf.Struct config = 2 [deprecated = true]; - - google.protobuf.Any typed_config = 3; - } -} - -// Runtime derived FractionalPercent with defaults for when the numerator or denominator is not -// specified via a runtime key. -// -// .. note:: -// -// Parsing of the runtime key's data is implemented such that it may be represented as a -// :ref:`FractionalPercent ` proto represented as JSON/YAML -// and may also be represented as an integer with the assumption that the value is an integral -// percentage out of 100. For instance, a runtime key lookup returning the value "42" would parse -// as a `FractionalPercent` whose numerator is 42 and denominator is HUNDRED. -message RuntimeFractionalPercent { - // Default value if the runtime value's for the numerator/denominator keys are not available. - type.FractionalPercent default_value = 1 [(validate.rules).message = {required: true}]; - - // Runtime key for a YAML representation of a FractionalPercent. - string runtime_key = 2; -} - -// Identifies a specific ControlPlane instance that Envoy is connected to. -message ControlPlane { - // An opaque control plane identifier that uniquely identifies an instance - // of control plane. This can be used to identify which control plane instance, - // the Envoy is connected to. 
- string identifier = 1; -} diff --git a/generated_api_shadow/envoy/api/v2/core/config_source.proto b/generated_api_shadow/envoy/api/v2/core/config_source.proto deleted file mode 100644 index 6cf44dbe9bbd2..0000000000000 --- a/generated_api_shadow/envoy/api/v2/core/config_source.proto +++ /dev/null @@ -1,185 +0,0 @@ -syntax = "proto3"; - -package envoy.api.v2.core; - -import "envoy/api/v2/core/grpc_service.proto"; - -import "google/protobuf/duration.proto"; -import "google/protobuf/wrappers.proto"; - -import "envoy/annotations/deprecation.proto"; -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.api.v2.core"; -option java_outer_classname = "ConfigSourceProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = "envoy.config.core.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Configuration sources] - -// xDS API version. This is used to describe both resource and transport -// protocol versions (in distinct configuration fields). -enum ApiVersion { - // When not specified, we assume v2, to ease migration to Envoy's stable API - // versioning. If a client does not support v2 (e.g. due to deprecation), this - // is an invalid value. - AUTO = 0 [deprecated = true]; - - // Use xDS v2 API. - V2 = 1 [deprecated = true]; - - // Use xDS v3 API. - V3 = 2; -} - -// API configuration source. This identifies the API type and cluster that Envoy -// will use to fetch an xDS API. -// [#next-free-field: 9] -message ApiConfigSource { - // APIs may be fetched via either REST or gRPC. - enum ApiType { - // Ideally this would be 'reserved 0' but one can't reserve the default - // value. Instead we throw an exception if this is ever used. - UNSUPPORTED_REST_LEGACY = 0 - [deprecated = true, (envoy.annotations.disallowed_by_default_enum) = true]; - - // REST-JSON v2 API. 
The `canonical JSON encoding - // `_ for - // the v2 protos is used. - REST = 1; - - // gRPC v2 API. - GRPC = 2; - - // Using the delta xDS gRPC service, i.e. DeltaDiscovery{Request,Response} - // rather than Discovery{Request,Response}. Rather than sending Envoy the entire state - // with every update, the xDS server only sends what has changed since the last update. - DELTA_GRPC = 3; - } - - // API type (gRPC, REST, delta gRPC) - ApiType api_type = 1 [(validate.rules).enum = {defined_only: true}]; - - // API version for xDS transport protocol. This describes the xDS gRPC/REST - // endpoint and version of [Delta]DiscoveryRequest/Response used on the wire. - ApiVersion transport_api_version = 8 [(validate.rules).enum = {defined_only: true}]; - - // Cluster names should be used only with REST. If > 1 - // cluster is defined, clusters will be cycled through if any kind of failure - // occurs. - // - // .. note:: - // - // The cluster with name ``cluster_name`` must be statically defined and its - // type must not be ``EDS``. - repeated string cluster_names = 2; - - // Multiple gRPC services be provided for GRPC. If > 1 cluster is defined, - // services will be cycled through if any kind of failure occurs. - repeated GrpcService grpc_services = 4; - - // For REST APIs, the delay between successive polls. - google.protobuf.Duration refresh_delay = 3; - - // For REST APIs, the request timeout. If not set, a default value of 1s will be used. - google.protobuf.Duration request_timeout = 5 [(validate.rules).duration = {gt {}}]; - - // For GRPC APIs, the rate limit settings. If present, discovery requests made by Envoy will be - // rate limited. - RateLimitSettings rate_limit_settings = 6; - - // Skip the node identifier in subsequent discovery requests for streaming gRPC config types. - bool set_node_on_first_message_only = 7; -} - -// Aggregated Discovery Service (ADS) options. 
This is currently empty, but when -// set in :ref:`ConfigSource ` can be used to -// specify that ADS is to be used. -message AggregatedConfigSource { -} - -// [#not-implemented-hide:] -// Self-referencing config source options. This is currently empty, but when -// set in :ref:`ConfigSource ` can be used to -// specify that other data can be obtained from the same server. -message SelfConfigSource { - // API version for xDS transport protocol. This describes the xDS gRPC/REST - // endpoint and version of [Delta]DiscoveryRequest/Response used on the wire. - ApiVersion transport_api_version = 1 [(validate.rules).enum = {defined_only: true}]; -} - -// Rate Limit settings to be applied for discovery requests made by Envoy. -message RateLimitSettings { - // Maximum number of tokens to be used for rate limiting discovery request calls. If not set, a - // default value of 100 will be used. - google.protobuf.UInt32Value max_tokens = 1; - - // Rate at which tokens will be filled per second. If not set, a default fill rate of 10 tokens - // per second will be used. - google.protobuf.DoubleValue fill_rate = 2 [(validate.rules).double = {gt: 0.0}]; -} - -// Configuration for :ref:`listeners `, :ref:`clusters -// `, :ref:`routes -// `, :ref:`endpoints -// ` etc. may either be sourced from the -// filesystem or from an xDS API source. Filesystem configs are watched with -// inotify for updates. -// [#next-free-field: 7] -message ConfigSource { - oneof config_source_specifier { - option (validate.required) = true; - - // Path on the filesystem to source and watch for configuration updates. - // When sourcing configuration for :ref:`secret `, - // the certificate and key files are also watched for updates. - // - // .. note:: - // - // The path to the source must exist at config load time. - // - // .. note:: - // - // Envoy will only watch the file path for *moves.* This is because in general only moves - // are atomic. 
The same method of swapping files as is demonstrated in the - // :ref:`runtime documentation ` can be used here also. - string path = 1; - - // API configuration source. - ApiConfigSource api_config_source = 2; - - // When set, ADS will be used to fetch resources. The ADS API configuration - // source in the bootstrap configuration is used. - AggregatedConfigSource ads = 3; - - // [#not-implemented-hide:] - // When set, the client will access the resources from the same server it got the - // ConfigSource from, although not necessarily from the same stream. This is similar to the - // :ref:`ads` field, except that the client may use a - // different stream to the same server. As a result, this field can be used for things - // like LRS that cannot be sent on an ADS stream. It can also be used to link from (e.g.) - // LDS to RDS on the same server without requiring the management server to know its name - // or required credentials. - // [#next-major-version: In xDS v3, consider replacing the ads field with this one, since - // this field can implicitly mean to use the same stream in the case where the ConfigSource - // is provided via ADS and the specified data can also be obtained via ADS.] - SelfConfigSource self = 5; - } - - // When this timeout is specified, Envoy will wait no longer than the specified time for first - // config response on this xDS subscription during the :ref:`initialization process - // `. After reaching the timeout, Envoy will move to the next - // initialization phase, even if the first config is not delivered yet. The timer is activated - // when the xDS API subscription starts, and is disarmed on first config update or on error. 0 - // means no timeout - Envoy will wait indefinitely for the first xDS config (unless another - // timeout applies). The default is 15s. - google.protobuf.Duration initial_fetch_timeout = 4; - - // API version for xDS resources. 
This implies the type URLs that the client - // will request for resources and the resource type that the client will in - // turn expect to be delivered. - ApiVersion resource_api_version = 6 [(validate.rules).enum = {defined_only: true}]; -} diff --git a/generated_api_shadow/envoy/api/v2/core/event_service_config.proto b/generated_api_shadow/envoy/api/v2/core/event_service_config.proto deleted file mode 100644 index f822f8c6b630d..0000000000000 --- a/generated_api_shadow/envoy/api/v2/core/event_service_config.proto +++ /dev/null @@ -1,26 +0,0 @@ -syntax = "proto3"; - -package envoy.api.v2.core; - -import "envoy/api/v2/core/grpc_service.proto"; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.api.v2.core"; -option java_outer_classname = "EventServiceConfigProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = "envoy.config.core.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#not-implemented-hide:] -// Configuration of the event reporting service endpoint. -message EventServiceConfig { - oneof config_source_specifier { - option (validate.required) = true; - - // Specifies the gRPC service that hosts the event reporting service. 
- GrpcService grpc_service = 1; - } -} diff --git a/generated_api_shadow/envoy/api/v2/core/grpc_method_list.proto b/generated_api_shadow/envoy/api/v2/core/grpc_method_list.proto deleted file mode 100644 index 3d646484b359d..0000000000000 --- a/generated_api_shadow/envoy/api/v2/core/grpc_method_list.proto +++ /dev/null @@ -1,28 +0,0 @@ -syntax = "proto3"; - -package envoy.api.v2.core; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.api.v2.core"; -option java_outer_classname = "GrpcMethodListProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = "envoy.config.core.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: gRPC method list] - -// A list of gRPC methods which can be used as an allowlist, for example. -message GrpcMethodList { - message Service { - // The name of the gRPC service. - string name = 1 [(validate.rules).string = {min_bytes: 1}]; - - // The names of the gRPC methods in this service. 
- repeated string method_names = 2 [(validate.rules).repeated = {min_items: 1}]; - } - - repeated Service services = 1; -} diff --git a/generated_api_shadow/envoy/api/v2/core/grpc_service.proto b/generated_api_shadow/envoy/api/v2/core/grpc_service.proto deleted file mode 100644 index dd789644e1d71..0000000000000 --- a/generated_api_shadow/envoy/api/v2/core/grpc_service.proto +++ /dev/null @@ -1,227 +0,0 @@ -syntax = "proto3"; - -package envoy.api.v2.core; - -import "envoy/api/v2/core/base.proto"; - -import "google/protobuf/any.proto"; -import "google/protobuf/duration.proto"; -import "google/protobuf/empty.proto"; -import "google/protobuf/struct.proto"; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/sensitive.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.api.v2.core"; -option java_outer_classname = "GrpcServiceProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = "envoy.config.core.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: gRPC services] - -// gRPC service configuration. This is used by :ref:`ApiConfigSource -// ` and filter configurations. -// [#next-free-field: 6] -message GrpcService { - message EnvoyGrpc { - // The name of the upstream gRPC cluster. SSL credentials will be supplied - // in the :ref:`Cluster ` :ref:`transport_socket - // `. - string cluster_name = 1 [(validate.rules).string = {min_bytes: 1}]; - } - - // [#next-free-field: 7] - message GoogleGrpc { - // See https://grpc.io/grpc/cpp/structgrpc_1_1_ssl_credentials_options.html. - message SslCredentials { - // PEM encoded server root certificates. - DataSource root_certs = 1; - - // PEM encoded client private key. - DataSource private_key = 2 [(udpa.annotations.sensitive) = true]; - - // PEM encoded client certificate chain. 
- DataSource cert_chain = 3; - } - - // Local channel credentials. Only UDS is supported for now. - // See https://github.com/grpc/grpc/pull/15909. - message GoogleLocalCredentials { - } - - // See https://grpc.io/docs/guides/auth.html#credential-types to understand Channel and Call - // credential types. - message ChannelCredentials { - oneof credential_specifier { - option (validate.required) = true; - - SslCredentials ssl_credentials = 1; - - // https://grpc.io/grpc/cpp/namespacegrpc.html#a6beb3ac70ff94bd2ebbd89b8f21d1f61 - google.protobuf.Empty google_default = 2; - - GoogleLocalCredentials local_credentials = 3; - } - } - - // [#next-free-field: 8] - message CallCredentials { - message ServiceAccountJWTAccessCredentials { - string json_key = 1; - - uint64 token_lifetime_seconds = 2; - } - - message GoogleIAMCredentials { - string authorization_token = 1; - - string authority_selector = 2; - } - - message MetadataCredentialsFromPlugin { - string name = 1; - - oneof config_type { - google.protobuf.Struct config = 2 [deprecated = true]; - - google.protobuf.Any typed_config = 3; - } - } - - // Security token service configuration that allows Google gRPC to - // fetch security token from an OAuth 2.0 authorization server. - // See https://tools.ietf.org/html/draft-ietf-oauth-token-exchange-16 and - // https://github.com/grpc/grpc/pull/19587. - // [#next-free-field: 10] - message StsService { - // URI of the token exchange service that handles token exchange requests. - // [#comment:TODO(asraa): Add URI validation when implemented. Tracked by - // https://github.com/envoyproxy/protoc-gen-validate/issues/303] - string token_exchange_service_uri = 1; - - // Location of the target service or resource where the client - // intends to use the requested security token. - string resource = 2; - - // Logical name of the target service where the client intends to - // use the requested security token. 
- string audience = 3; - - // The desired scope of the requested security token in the - // context of the service or resource where the token will be used. - string scope = 4; - - // Type of the requested security token. - string requested_token_type = 5; - - // The path of subject token, a security token that represents the - // identity of the party on behalf of whom the request is being made. - string subject_token_path = 6 [(validate.rules).string = {min_bytes: 1}]; - - // Type of the subject token. - string subject_token_type = 7 [(validate.rules).string = {min_bytes: 1}]; - - // The path of actor token, a security token that represents the identity - // of the acting party. The acting party is authorized to use the - // requested security token and act on behalf of the subject. - string actor_token_path = 8; - - // Type of the actor token. - string actor_token_type = 9; - } - - oneof credential_specifier { - option (validate.required) = true; - - // Access token credentials. - // https://grpc.io/grpc/cpp/namespacegrpc.html#ad3a80da696ffdaea943f0f858d7a360d. - string access_token = 1; - - // Google Compute Engine credentials. - // https://grpc.io/grpc/cpp/namespacegrpc.html#a6beb3ac70ff94bd2ebbd89b8f21d1f61 - google.protobuf.Empty google_compute_engine = 2; - - // Google refresh token credentials. - // https://grpc.io/grpc/cpp/namespacegrpc.html#a96901c997b91bc6513b08491e0dca37c. - string google_refresh_token = 3; - - // Service Account JWT Access credentials. - // https://grpc.io/grpc/cpp/namespacegrpc.html#a92a9f959d6102461f66ee973d8e9d3aa. - ServiceAccountJWTAccessCredentials service_account_jwt_access = 4; - - // Google IAM credentials. - // https://grpc.io/grpc/cpp/namespacegrpc.html#a9fc1fc101b41e680d47028166e76f9d0. - GoogleIAMCredentials google_iam = 5; - - // Custom authenticator credentials. - // https://grpc.io/grpc/cpp/namespacegrpc.html#a823c6a4b19ffc71fb33e90154ee2ad07. 
- // https://grpc.io/docs/guides/auth.html#extending-grpc-to-support-other-authentication-mechanisms. - MetadataCredentialsFromPlugin from_plugin = 6; - - // Custom security token service which implements OAuth 2.0 token exchange. - // https://tools.ietf.org/html/draft-ietf-oauth-token-exchange-16 - // See https://github.com/grpc/grpc/pull/19587. - StsService sts_service = 7; - } - } - - // The target URI when using the `Google C++ gRPC client - // `_. SSL credentials will be supplied in - // :ref:`channel_credentials `. - string target_uri = 1 [(validate.rules).string = {min_bytes: 1}]; - - ChannelCredentials channel_credentials = 2; - - // A set of call credentials that can be composed with `channel credentials - // `_. - repeated CallCredentials call_credentials = 3; - - // The human readable prefix to use when emitting statistics for the gRPC - // service. - // - // .. csv-table:: - // :header: Name, Type, Description - // :widths: 1, 1, 2 - // - // streams_total, Counter, Total number of streams opened - // streams_closed_, Counter, Total streams closed with - string stat_prefix = 4 [(validate.rules).string = {min_bytes: 1}]; - - // The name of the Google gRPC credentials factory to use. This must have been registered with - // Envoy. If this is empty, a default credentials factory will be used that sets up channel - // credentials based on other configuration parameters. - string credentials_factory_name = 5; - - // Additional configuration for site-specific customizations of the Google - // gRPC library. - google.protobuf.Struct config = 6; - } - - reserved 4; - - oneof target_specifier { - option (validate.required) = true; - - // Envoy's in-built gRPC client. - // See the :ref:`gRPC services overview ` - // documentation for discussion on gRPC client selection. - EnvoyGrpc envoy_grpc = 1; - - // `Google C++ gRPC client `_ - // See the :ref:`gRPC services overview ` - // documentation for discussion on gRPC client selection. 
- GoogleGrpc google_grpc = 2; - } - - // The timeout for the gRPC request. This is the timeout for a specific - // request. - google.protobuf.Duration timeout = 3; - - // Additional metadata to include in streams initiated to the GrpcService. - // This can be used for scenarios in which additional ad hoc authorization - // headers (e.g. ``x-foo-bar: baz-key``) are to be injected. - repeated HeaderValue initial_metadata = 5; -} diff --git a/generated_api_shadow/envoy/api/v2/core/health_check.proto b/generated_api_shadow/envoy/api/v2/core/health_check.proto deleted file mode 100644 index bc4ae3e5c8666..0000000000000 --- a/generated_api_shadow/envoy/api/v2/core/health_check.proto +++ /dev/null @@ -1,308 +0,0 @@ -syntax = "proto3"; - -package envoy.api.v2.core; - -import "envoy/api/v2/core/base.proto"; -import "envoy/api/v2/core/event_service_config.proto"; -import "envoy/type/http.proto"; -import "envoy/type/matcher/string.proto"; -import "envoy/type/range.proto"; - -import "google/protobuf/any.proto"; -import "google/protobuf/duration.proto"; -import "google/protobuf/struct.proto"; -import "google/protobuf/wrappers.proto"; - -import "envoy/annotations/deprecation.proto"; -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.api.v2.core"; -option java_outer_classname = "HealthCheckProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = "envoy.config.core.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Health check] -// * Health checking :ref:`architecture overview `. -// * If health checking is configured for a cluster, additional statistics are emitted. They are -// documented :ref:`here `. - -// Endpoint health status. -enum HealthStatus { - // The health status is not known. This is interpreted by Envoy as *HEALTHY*. - UNKNOWN = 0; - - // Healthy. 
- HEALTHY = 1; - - // Unhealthy. - UNHEALTHY = 2; - - // Connection draining in progress. E.g., - // ``_ - // or - // ``_. - // This is interpreted by Envoy as *UNHEALTHY*. - DRAINING = 3; - - // Health check timed out. This is part of HDS and is interpreted by Envoy as - // *UNHEALTHY*. - TIMEOUT = 4; - - // Degraded. - DEGRADED = 5; -} - -// [#next-free-field: 23] -message HealthCheck { - // Describes the encoding of the payload bytes in the payload. - message Payload { - oneof payload { - option (validate.required) = true; - - // Hex encoded payload. E.g., "000000FF". - string text = 1 [(validate.rules).string = {min_bytes: 1}]; - - // [#not-implemented-hide:] Binary payload. - bytes binary = 2; - } - } - - // [#next-free-field: 12] - message HttpHealthCheck { - // The value of the host header in the HTTP health check request. If - // left empty (default value), the name of the cluster this health check is associated - // with will be used. The host header can be customized for a specific endpoint by setting the - // :ref:`hostname ` field. - string host = 1; - - // Specifies the HTTP path that will be requested during health checking. For example - // */healthcheck*. - string path = 2 [(validate.rules).string = {min_bytes: 1}]; - - // [#not-implemented-hide:] HTTP specific payload. - Payload send = 3; - - // [#not-implemented-hide:] HTTP specific response. - Payload receive = 4; - - // An optional service name parameter which is used to validate the identity of - // the health checked cluster. See the :ref:`architecture overview - // ` for more information. - // - // .. attention:: - // - // This field has been deprecated in favor of `service_name_matcher` for better flexibility - // over matching with service-cluster name. - string service_name = 5 [deprecated = true]; - - // Specifies a list of HTTP headers that should be added to each request that is sent to the - // health checked cluster. 
For more information, including details on header value syntax, see - // the documentation on :ref:`custom request headers - // `. - repeated HeaderValueOption request_headers_to_add = 6 - [(validate.rules).repeated = {max_items: 1000}]; - - // Specifies a list of HTTP headers that should be removed from each request that is sent to the - // health checked cluster. - repeated string request_headers_to_remove = 8; - - // If set, health checks will be made using http/2. - // Deprecated, use :ref:`codec_client_type - // ` instead. - bool use_http2 = 7 [deprecated = true, (envoy.annotations.disallowed_by_default) = true]; - - // Specifies a list of HTTP response statuses considered healthy. If provided, replaces default - // 200-only policy - 200 must be included explicitly as needed. Ranges follow half-open - // semantics of :ref:`Int64Range `. The start and end of each - // range are required. Only statuses in the range [100, 600) are allowed. - repeated type.Int64Range expected_statuses = 9; - - // Use specified application protocol for health checks. - type.CodecClientType codec_client_type = 10 [(validate.rules).enum = {defined_only: true}]; - - // An optional service name parameter which is used to validate the identity of - // the health checked cluster using a :ref:`StringMatcher - // `. See the :ref:`architecture overview - // ` for more information. - type.matcher.StringMatcher service_name_matcher = 11; - } - - message TcpHealthCheck { - // Empty payloads imply a connect-only health check. - Payload send = 1; - - // When checking the response, “fuzzy” matching is performed such that each - // binary block must be found, and in the order specified, but not - // necessarily contiguous. - repeated Payload receive = 2; - } - - message RedisHealthCheck { - // If set, optionally perform ``EXISTS `` instead of ``PING``. A return value - // from Redis of 0 (does not exist) is considered a passing healthcheck. A return value other - // than 0 is considered a failure. 
This allows the user to mark a Redis instance for maintenance - // by setting the specified key to any value and waiting for traffic to drain. - string key = 1; - } - - // `grpc.health.v1.Health - // `_-based - // healthcheck. See `gRPC doc `_ - // for details. - message GrpcHealthCheck { - // An optional service name parameter which will be sent to gRPC service in - // `grpc.health.v1.HealthCheckRequest - // `_. - // message. See `gRPC health-checking overview - // `_ for more information. - string service_name = 1; - - // The value of the :authority header in the gRPC health check request. If - // left empty (default value), the name of the cluster this health check is associated - // with will be used. The authority header can be customized for a specific endpoint by setting - // the :ref:`hostname ` field. - string authority = 2; - } - - // Custom health check. - message CustomHealthCheck { - // The registered name of the custom health checker. - string name = 1 [(validate.rules).string = {min_bytes: 1}]; - - // A custom health checker specific configuration which depends on the custom health checker - // being instantiated. See :api:`envoy/config/health_checker` for reference. - oneof config_type { - google.protobuf.Struct config = 2 [deprecated = true]; - - google.protobuf.Any typed_config = 3; - } - } - - // Health checks occur over the transport socket specified for the cluster. This implies that if a - // cluster is using a TLS-enabled transport socket, the health check will also occur over TLS. - // - // This allows overriding the cluster TLS settings, just for health check connections. - message TlsOptions { - // Specifies the ALPN protocols for health check connections. This is useful if the - // corresponding upstream is using ALPN-based :ref:`FilterChainMatch - // ` along with different protocols for health checks - // versus data connections. If empty, no ALPN protocols will be set on health check connections. 
- repeated string alpn_protocols = 1; - } - - reserved 10; - - // The time to wait for a health check response. If the timeout is reached the - // health check attempt will be considered a failure. - google.protobuf.Duration timeout = 1 [(validate.rules).duration = { - required: true - gt {} - }]; - - // The interval between health checks. - google.protobuf.Duration interval = 2 [(validate.rules).duration = { - required: true - gt {} - }]; - - // An optional jitter amount in milliseconds. If specified, Envoy will start health - // checking after for a random time in ms between 0 and initial_jitter. This only - // applies to the first health check. - google.protobuf.Duration initial_jitter = 20; - - // An optional jitter amount in milliseconds. If specified, during every - // interval Envoy will add interval_jitter to the wait time. - google.protobuf.Duration interval_jitter = 3; - - // An optional jitter amount as a percentage of interval_ms. If specified, - // during every interval Envoy will add interval_ms * - // interval_jitter_percent / 100 to the wait time. - // - // If interval_jitter_ms and interval_jitter_percent are both set, both of - // them will be used to increase the wait time. - uint32 interval_jitter_percent = 18; - - // The number of unhealthy health checks required before a host is marked - // unhealthy. Note that for *http* health checking if a host responds with 503 - // this threshold is ignored and the host is considered unhealthy immediately. - google.protobuf.UInt32Value unhealthy_threshold = 4 [(validate.rules).message = {required: true}]; - - // The number of healthy health checks required before a host is marked - // healthy. Note that during startup, only a single successful health check is - // required to mark a host healthy. - google.protobuf.UInt32Value healthy_threshold = 5 [(validate.rules).message = {required: true}]; - - // [#not-implemented-hide:] Non-serving port for health checking. 
- google.protobuf.UInt32Value alt_port = 6; - - // Reuse health check connection between health checks. Default is true. - google.protobuf.BoolValue reuse_connection = 7; - - oneof health_checker { - option (validate.required) = true; - - // HTTP health check. - HttpHealthCheck http_health_check = 8; - - // TCP health check. - TcpHealthCheck tcp_health_check = 9; - - // gRPC health check. - GrpcHealthCheck grpc_health_check = 11; - - // Custom health check. - CustomHealthCheck custom_health_check = 13; - } - - // The "no traffic interval" is a special health check interval that is used when a cluster has - // never had traffic routed to it. This lower interval allows cluster information to be kept up to - // date, without sending a potentially large amount of active health checking traffic for no - // reason. Once a cluster has been used for traffic routing, Envoy will shift back to using the - // standard health check interval that is defined. Note that this interval takes precedence over - // any other. - // - // The default value for "no traffic interval" is 60 seconds. - google.protobuf.Duration no_traffic_interval = 12 [(validate.rules).duration = {gt {}}]; - - // The "unhealthy interval" is a health check interval that is used for hosts that are marked as - // unhealthy. As soon as the host is marked as healthy, Envoy will shift back to using the - // standard health check interval that is defined. - // - // The default value for "unhealthy interval" is the same as "interval". - google.protobuf.Duration unhealthy_interval = 14 [(validate.rules).duration = {gt {}}]; - - // The "unhealthy edge interval" is a special health check interval that is used for the first - // health check right after a host is marked as unhealthy. For subsequent health checks - // Envoy will shift back to using either "unhealthy interval" if present or the standard health - // check interval that is defined. 
- // - // The default value for "unhealthy edge interval" is the same as "unhealthy interval". - google.protobuf.Duration unhealthy_edge_interval = 15 [(validate.rules).duration = {gt {}}]; - - // The "healthy edge interval" is a special health check interval that is used for the first - // health check right after a host is marked as healthy. For subsequent health checks - // Envoy will shift back to using the standard health check interval that is defined. - // - // The default value for "healthy edge interval" is the same as the default interval. - google.protobuf.Duration healthy_edge_interval = 16 [(validate.rules).duration = {gt {}}]; - - // Specifies the path to the :ref:`health check event log `. - // If empty, no event log will be written. - string event_log_path = 17; - - // [#not-implemented-hide:] - // The gRPC service for the health check event service. - // If empty, health check events won't be sent to a remote endpoint. - EventServiceConfig event_service = 22; - - // If set to true, health check failure events will always be logged. If set to false, only the - // initial health check failure event will be logged. - // The default value is false. - bool always_log_health_check_failures = 19; - - // This allows overriding the cluster TLS settings, just for health check connections. 
- TlsOptions tls_options = 21; -} diff --git a/generated_api_shadow/envoy/api/v2/core/http_uri.proto b/generated_api_shadow/envoy/api/v2/core/http_uri.proto deleted file mode 100644 index cd1a0660e330a..0000000000000 --- a/generated_api_shadow/envoy/api/v2/core/http_uri.proto +++ /dev/null @@ -1,55 +0,0 @@ -syntax = "proto3"; - -package envoy.api.v2.core; - -import "google/protobuf/duration.proto"; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.api.v2.core"; -option java_outer_classname = "HttpUriProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = "envoy.config.core.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: HTTP Service URI ] - -// Envoy external URI descriptor -message HttpUri { - // The HTTP server URI. It should be a full FQDN with protocol, host and path. - // - // Example: - // - // .. code-block:: yaml - // - // uri: https://www.googleapis.com/oauth2/v1/certs - // - string uri = 1 [(validate.rules).string = {min_bytes: 1}]; - - // Specify how `uri` is to be fetched. Today, this requires an explicit - // cluster, but in the future we may support dynamic cluster creation or - // inline DNS resolution. See `issue - // `_. - oneof http_upstream_type { - option (validate.required) = true; - - // A cluster is created in the Envoy "cluster_manager" config - // section. This field specifies the cluster name. - // - // Example: - // - // .. code-block:: yaml - // - // cluster: jwks_cluster - // - string cluster = 2 [(validate.rules).string = {min_bytes: 1}]; - } - - // Sets the maximum duration in milliseconds that a response can take to arrive upon request. 
- google.protobuf.Duration timeout = 3 [(validate.rules).duration = { - required: true - gte {} - }]; -} diff --git a/generated_api_shadow/envoy/api/v2/core/protocol.proto b/generated_api_shadow/envoy/api/v2/core/protocol.proto deleted file mode 100644 index ae1a86424cf07..0000000000000 --- a/generated_api_shadow/envoy/api/v2/core/protocol.proto +++ /dev/null @@ -1,297 +0,0 @@ -syntax = "proto3"; - -package envoy.api.v2.core; - -import "google/protobuf/duration.proto"; -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.api.v2.core"; -option java_outer_classname = "ProtocolProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = "envoy.config.core.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Protocol options] - -// [#not-implemented-hide:] -message TcpProtocolOptions { -} - -message UpstreamHttpProtocolOptions { - // Set transport socket `SNI `_ for new - // upstream connections based on the downstream HTTP host/authority header, as seen by the - // :ref:`router filter `. - bool auto_sni = 1; - - // Automatic validate upstream presented certificate for new upstream connections based on the - // downstream HTTP host/authority header, as seen by the - // :ref:`router filter `. - // This field is intended to set with `auto_sni` field. - bool auto_san_validation = 2; -} - -// [#next-free-field: 6] -message HttpProtocolOptions { - // Action to take when Envoy receives client request with header names containing underscore - // characters. - // Underscore character is allowed in header names by the RFC-7230 and this behavior is implemented - // as a security measure due to systems that treat '_' and '-' as interchangeable. Envoy by default allows client request headers with underscore - // characters. 
- enum HeadersWithUnderscoresAction { - // Allow headers with underscores. This is the default behavior. - ALLOW = 0; - - // Reject client request. HTTP/1 requests are rejected with the 400 status. HTTP/2 requests - // end with the stream reset. The "httpN.requests_rejected_with_underscores_in_headers" counter - // is incremented for each rejected request. - REJECT_REQUEST = 1; - - // Drop the header with name containing underscores. The header is dropped before the filter chain is - // invoked and as such filters will not see dropped headers. The - // "httpN.dropped_headers_with_underscores" is incremented for each dropped header. - DROP_HEADER = 2; - } - - // The idle timeout for connections. The idle timeout is defined as the - // period in which there are no active requests. When the - // idle timeout is reached the connection will be closed. If the connection is an HTTP/2 - // downstream connection a drain sequence will occur prior to closing the connection, see - // :ref:`drain_timeout - // `. - // Note that request based timeouts mean that HTTP/2 PINGs will not keep the connection alive. - // If not specified, this defaults to 1 hour. To disable idle timeouts explicitly set this to 0. - // - // .. warning:: - // Disabling this timeout has a highly likelihood of yielding connection leaks due to lost TCP - // FIN packets, etc. - google.protobuf.Duration idle_timeout = 1; - - // The maximum duration of a connection. The duration is defined as a period since a connection - // was established. If not set, there is no max duration. When max_connection_duration is reached - // the connection will be closed. Drain sequence will occur prior to closing the connection if - // if's applicable. See :ref:`drain_timeout - // `. - // Note: not implemented for upstream connections. - google.protobuf.Duration max_connection_duration = 3; - - // The maximum number of headers. If unconfigured, the default - // maximum number of request headers allowed is 100. 
Requests that exceed this limit will receive - // a 431 response for HTTP/1.x and cause a stream reset for HTTP/2. - google.protobuf.UInt32Value max_headers_count = 2 [(validate.rules).uint32 = {gte: 1}]; - - // Total duration to keep alive an HTTP request/response stream. If the time limit is reached the stream will be - // reset independent of any other timeouts. If not specified, this value is not set. - google.protobuf.Duration max_stream_duration = 4; - - // Action to take when a client request with a header name containing underscore characters is received. - // If this setting is not specified, the value defaults to ALLOW. - // Note: upstream responses are not affected by this setting. - HeadersWithUnderscoresAction headers_with_underscores_action = 5; -} - -// [#next-free-field: 6] -message Http1ProtocolOptions { - message HeaderKeyFormat { - message ProperCaseWords { - } - - oneof header_format { - option (validate.required) = true; - - // Formats the header by proper casing words: the first character and any character following - // a special character will be capitalized if it's an alpha character. For example, - // "content-type" becomes "Content-Type", and "foo$b#$are" becomes "Foo$B#$Are". - // Note that while this results in most headers following conventional casing, certain headers - // are not covered. For example, the "TE" header will be formatted as "Te". - ProperCaseWords proper_case_words = 1; - } - } - - // Handle HTTP requests with absolute URLs in the requests. These requests - // are generally sent by clients to forward/explicit proxies. This allows clients to configure - // envoy as their HTTP proxy. In Unix, for example, this is typically done by setting the - // *http_proxy* environment variable. - google.protobuf.BoolValue allow_absolute_url = 1; - - // Handle incoming HTTP/1.0 and HTTP 0.9 requests. - // This is off by default, and not fully standards compliant. 
There is support for pre-HTTP/1.1 - // style connect logic, dechunking, and handling lack of client host iff - // *default_host_for_http_10* is configured. - bool accept_http_10 = 2; - - // A default host for HTTP/1.0 requests. This is highly suggested if *accept_http_10* is true as - // Envoy does not otherwise support HTTP/1.0 without a Host header. - // This is a no-op if *accept_http_10* is not true. - string default_host_for_http_10 = 3; - - // Describes how the keys for response headers should be formatted. By default, all header keys - // are lower cased. - HeaderKeyFormat header_key_format = 4; - - // Enables trailers for HTTP/1. By default the HTTP/1 codec drops proxied trailers. - // - // .. attention:: - // - // Note that this only happens when Envoy is chunk encoding which occurs when: - // - The request is HTTP/1.1. - // - Is neither a HEAD only request nor a HTTP Upgrade. - // - Not a response to a HEAD request. - // - The content length header is not present. - bool enable_trailers = 5; -} - -// [#next-free-field: 14] -message Http2ProtocolOptions { - // Defines a parameter to be sent in the SETTINGS frame. - // See `RFC7540, sec. 6.5.1 `_ for details. - message SettingsParameter { - // The 16 bit parameter identifier. - google.protobuf.UInt32Value identifier = 1 [ - (validate.rules).uint32 = {lte: 65536 gte: 1}, - (validate.rules).message = {required: true} - ]; - - // The 32 bit parameter value. - google.protobuf.UInt32Value value = 2 [(validate.rules).message = {required: true}]; - } - - // `Maximum table size `_ - // (in octets) that the encoder is permitted to use for the dynamic HPACK table. Valid values - // range from 0 to 4294967295 (2^32 - 1) and defaults to 4096. 0 effectively disables header - // compression. - google.protobuf.UInt32Value hpack_table_size = 1; - - // `Maximum concurrent streams `_ - // allowed for peer on one HTTP/2 connection. Valid values range from 1 to 2147483647 (2^31 - 1) - // and defaults to 2147483647. 
- // - // For upstream connections, this also limits how many streams Envoy will initiate concurrently - // on a single connection. If the limit is reached, Envoy may queue requests or establish - // additional connections (as allowed per circuit breaker limits). - google.protobuf.UInt32Value max_concurrent_streams = 2 - [(validate.rules).uint32 = {lte: 2147483647 gte: 1}]; - - // `Initial stream-level flow-control window - // `_ size. Valid values range from 65535 - // (2^16 - 1, HTTP/2 default) to 2147483647 (2^31 - 1, HTTP/2 maximum) and defaults to 268435456 - // (256 * 1024 * 1024). - // - // NOTE: 65535 is the initial window size from HTTP/2 spec. We only support increasing the default - // window size now, so it's also the minimum. - // - // This field also acts as a soft limit on the number of bytes Envoy will buffer per-stream in the - // HTTP/2 codec buffers. Once the buffer reaches this pointer, watermark callbacks will fire to - // stop the flow of data to the codec buffers. - google.protobuf.UInt32Value initial_stream_window_size = 3 - [(validate.rules).uint32 = {lte: 2147483647 gte: 65535}]; - - // Similar to *initial_stream_window_size*, but for connection-level flow-control - // window. Currently, this has the same minimum/maximum/default as *initial_stream_window_size*. - google.protobuf.UInt32Value initial_connection_window_size = 4 - [(validate.rules).uint32 = {lte: 2147483647 gte: 65535}]; - - // Allows proxying Websocket and other upgrades over H2 connect. - bool allow_connect = 5; - - // [#not-implemented-hide:] Hiding until envoy has full metadata support. - // Still under implementation. DO NOT USE. - // - // Allows metadata. See [metadata - // docs](https://github.com/envoyproxy/envoy/blob/main/source/docs/h2_metadata.md) for more - // information. - bool allow_metadata = 6; - - // Limit the number of pending outbound downstream frames of all types (frames that are waiting to - // be written into the socket). 
Exceeding this limit triggers flood mitigation and connection is - // terminated. The ``http2.outbound_flood`` stat tracks the number of terminated connections due - // to flood mitigation. The default limit is 10000. - // [#comment:TODO: implement same limits for upstream outbound frames as well.] - google.protobuf.UInt32Value max_outbound_frames = 7 [(validate.rules).uint32 = {gte: 1}]; - - // Limit the number of pending outbound downstream frames of types PING, SETTINGS and RST_STREAM, - // preventing high memory utilization when receiving continuous stream of these frames. Exceeding - // this limit triggers flood mitigation and connection is terminated. The - // ``http2.outbound_control_flood`` stat tracks the number of terminated connections due to flood - // mitigation. The default limit is 1000. - // [#comment:TODO: implement same limits for upstream outbound frames as well.] - google.protobuf.UInt32Value max_outbound_control_frames = 8 [(validate.rules).uint32 = {gte: 1}]; - - // Limit the number of consecutive inbound frames of types HEADERS, CONTINUATION and DATA with an - // empty payload and no end stream flag. Those frames have no legitimate use and are abusive, but - // might be a result of a broken HTTP/2 implementation. The `http2.inbound_empty_frames_flood`` - // stat tracks the number of connections terminated due to flood mitigation. - // Setting this to 0 will terminate connection upon receiving first frame with an empty payload - // and no end stream flag. The default limit is 1. - // [#comment:TODO: implement same limits for upstream inbound frames as well.] - google.protobuf.UInt32Value max_consecutive_inbound_frames_with_empty_payload = 9; - - // Limit the number of inbound PRIORITY frames allowed per each opened stream. 
If the number - // of PRIORITY frames received over the lifetime of connection exceeds the value calculated - // using this formula:: - // - // max_inbound_priority_frames_per_stream * (1 + inbound_streams) - // - // the connection is terminated. The ``http2.inbound_priority_frames_flood`` stat tracks - // the number of connections terminated due to flood mitigation. The default limit is 100. - // [#comment:TODO: implement same limits for upstream inbound frames as well.] - google.protobuf.UInt32Value max_inbound_priority_frames_per_stream = 10; - - // Limit the number of inbound WINDOW_UPDATE frames allowed per DATA frame sent. If the number - // of WINDOW_UPDATE frames received over the lifetime of connection exceeds the value calculated - // using this formula:: - // - // 1 + 2 * (inbound_streams + - // max_inbound_window_update_frames_per_data_frame_sent * outbound_data_frames) - // - // the connection is terminated. The ``http2.inbound_priority_frames_flood`` stat tracks - // the number of connections terminated due to flood mitigation. The default limit is 10. - // Setting this to 1 should be enough to support HTTP/2 implementations with basic flow control, - // but more complex implementations that try to estimate available bandwidth require at least 2. - // [#comment:TODO: implement same limits for upstream inbound frames as well.] - google.protobuf.UInt32Value max_inbound_window_update_frames_per_data_frame_sent = 11 - [(validate.rules).uint32 = {gte: 1}]; - - // Allows invalid HTTP messaging and headers. When this option is disabled (default), then - // the whole HTTP/2 connection is terminated upon receiving invalid HEADERS frame. However, - // when this option is enabled, only the offending stream is terminated. - // - // See `RFC7540, sec. 8.1 `_ for details. - bool stream_error_on_invalid_http_messaging = 12; - - // [#not-implemented-hide:] - // Specifies SETTINGS frame parameters to be sent to the peer, with two exceptions: - // - // 1. 
SETTINGS_ENABLE_PUSH (0x2) is not configurable as HTTP/2 server push is not supported by - // Envoy. - // - // 2. SETTINGS_ENABLE_CONNECT_PROTOCOL (0x8) is only configurable through the named field - // 'allow_connect'. - // - // Note that custom parameters specified through this field can not also be set in the - // corresponding named parameters: - // - // .. code-block:: text - // - // ID Field Name - // ---------------- - // 0x1 hpack_table_size - // 0x3 max_concurrent_streams - // 0x4 initial_stream_window_size - // - // Collisions will trigger config validation failure on load/update. Likewise, inconsistencies - // between custom parameters with the same identifier will trigger a failure. - // - // See `IANA HTTP/2 Settings - // `_ for - // standardized identifiers. - repeated SettingsParameter custom_settings_parameters = 13; -} - -// [#not-implemented-hide:] -message GrpcProtocolOptions { - Http2ProtocolOptions http2_protocol_options = 1; -} diff --git a/generated_api_shadow/envoy/api/v2/core/socket_option.proto b/generated_api_shadow/envoy/api/v2/core/socket_option.proto deleted file mode 100644 index 39678ad1b8bc6..0000000000000 --- a/generated_api_shadow/envoy/api/v2/core/socket_option.proto +++ /dev/null @@ -1,55 +0,0 @@ -syntax = "proto3"; - -package envoy.api.v2.core; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.api.v2.core"; -option java_outer_classname = "SocketOptionProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = "envoy.config.core.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Socket Option ] - -// Generic socket option message. This would be used to set socket options that -// might not exist in upstream kernels or precompiled Envoy binaries. 
-// [#next-free-field: 7] -message SocketOption { - enum SocketState { - // Socket options are applied after socket creation but before binding the socket to a port - STATE_PREBIND = 0; - - // Socket options are applied after binding the socket to a port but before calling listen() - STATE_BOUND = 1; - - // Socket options are applied after calling listen() - STATE_LISTENING = 2; - } - - // An optional name to give this socket option for debugging, etc. - // Uniqueness is not required and no special meaning is assumed. - string description = 1; - - // Corresponding to the level value passed to setsockopt, such as IPPROTO_TCP - int64 level = 2; - - // The numeric name as passed to setsockopt - int64 name = 3; - - oneof value { - option (validate.required) = true; - - // Because many sockopts take an int value. - int64 int_value = 4; - - // Otherwise it's a byte buffer. - bytes buf_value = 5; - } - - // The state in which the option will be applied. When used in BindConfig - // STATE_PREBIND is currently the only valid value. 
- SocketState state = 6 [(validate.rules).enum = {defined_only: true}]; -} diff --git a/generated_api_shadow/envoy/api/v2/discovery.proto b/generated_api_shadow/envoy/api/v2/discovery.proto deleted file mode 100644 index da2690f867fc3..0000000000000 --- a/generated_api_shadow/envoy/api/v2/discovery.proto +++ /dev/null @@ -1,234 +0,0 @@ -syntax = "proto3"; - -package envoy.api.v2; - -import "envoy/api/v2/core/base.proto"; - -import "google/protobuf/any.proto"; -import "google/rpc/status.proto"; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.api.v2"; -option java_outer_classname = "DiscoveryProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = "envoy.service.discovery.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Common discovery API components] - -// A DiscoveryRequest requests a set of versioned resources of the same type for -// a given Envoy node on some API. -// [#next-free-field: 7] -message DiscoveryRequest { - // The version_info provided in the request messages will be the version_info - // received with the most recent successfully processed response or empty on - // the first request. It is expected that no new request is sent after a - // response is received until the Envoy instance is ready to ACK/NACK the new - // configuration. ACK/NACK takes place by returning the new API config version - // as applied or the previous API config version respectively. Each type_url - // (see below) has an independent version associated with it. - string version_info = 1; - - // The node making the request. - core.Node node = 2; - - // List of resources to subscribe to, e.g. list of cluster names or a route - // configuration name. If this is empty, all resources for the API are - // returned. 
LDS/CDS may have empty resource_names, which will cause all - // resources for the Envoy instance to be returned. The LDS and CDS responses - // will then imply a number of resources that need to be fetched via EDS/RDS, - // which will be explicitly enumerated in resource_names. - repeated string resource_names = 3; - - // Type of the resource that is being requested, e.g. - // "type.googleapis.com/envoy.api.v2.ClusterLoadAssignment". This is implicit - // in requests made via singleton xDS APIs such as CDS, LDS, etc. but is - // required for ADS. - string type_url = 4; - - // nonce corresponding to DiscoveryResponse being ACK/NACKed. See above - // discussion on version_info and the DiscoveryResponse nonce comment. This - // may be empty only if 1) this is a non-persistent-stream xDS such as HTTP, - // or 2) the client has not yet accepted an update in this xDS stream (unlike - // delta, where it is populated only for new explicit ACKs). - string response_nonce = 5; - - // This is populated when the previous :ref:`DiscoveryResponse ` - // failed to update configuration. The *message* field in *error_details* provides the Envoy - // internal exception related to the failure. It is only intended for consumption during manual - // debugging, the string provided is not guaranteed to be stable across Envoy versions. - google.rpc.Status error_detail = 6; -} - -// [#next-free-field: 7] -message DiscoveryResponse { - // The version of the response data. - string version_info = 1; - - // The response resources. These resources are typed and depend on the API being called. - repeated google.protobuf.Any resources = 2; - - // [#not-implemented-hide:] - // Canary is used to support two Envoy command line flags: - // - // * --terminate-on-canary-transition-failure. When set, Envoy is able to - // terminate if it detects that configuration is stuck at canary. Consider - // this example sequence of updates: - // - Management server applies a canary config successfully. 
- // - Management server rolls back to a production config. - // - Envoy rejects the new production config. - // Since there is no sensible way to continue receiving configuration - // updates, Envoy will then terminate and apply production config from a - // clean slate. - // * --dry-run-canary. When set, a canary response will never be applied, only - // validated via a dry run. - bool canary = 3; - - // Type URL for resources. Identifies the xDS API when muxing over ADS. - // Must be consistent with the type_url in the 'resources' repeated Any (if non-empty). - string type_url = 4; - - // For gRPC based subscriptions, the nonce provides a way to explicitly ack a - // specific DiscoveryResponse in a following DiscoveryRequest. Additional - // messages may have been sent by Envoy to the management server for the - // previous version on the stream prior to this DiscoveryResponse, that were - // unprocessed at response send time. The nonce allows the management server - // to ignore any further DiscoveryRequests for the previous version until a - // DiscoveryRequest bearing the nonce. The nonce is optional and is not - // required for non-stream based xDS implementations. - string nonce = 5; - - // [#not-implemented-hide:] - // The control plane instance that sent the response. - core.ControlPlane control_plane = 6; -} - -// DeltaDiscoveryRequest and DeltaDiscoveryResponse are used in a new gRPC -// endpoint for Delta xDS. -// -// With Delta xDS, the DeltaDiscoveryResponses do not need to include a full -// snapshot of the tracked resources. Instead, DeltaDiscoveryResponses are a -// diff to the state of a xDS client. -// In Delta XDS there are per-resource versions, which allow tracking state at -// the resource granularity. -// An xDS Delta session is always in the context of a gRPC bidirectional -// stream. This allows the xDS server to keep track of the state of xDS clients -// connected to it. 
-// -// In Delta xDS the nonce field is required and used to pair -// DeltaDiscoveryResponse to a DeltaDiscoveryRequest ACK or NACK. -// Optionally, a response message level system_version_info is present for -// debugging purposes only. -// -// DeltaDiscoveryRequest plays two independent roles. Any DeltaDiscoveryRequest -// can be either or both of: [1] informing the server of what resources the -// client has gained/lost interest in (using resource_names_subscribe and -// resource_names_unsubscribe), or [2] (N)ACKing an earlier resource update from -// the server (using response_nonce, with presence of error_detail making it a NACK). -// Additionally, the first message (for a given type_url) of a reconnected gRPC stream -// has a third role: informing the server of the resources (and their versions) -// that the client already possesses, using the initial_resource_versions field. -// -// As with state-of-the-world, when multiple resource types are multiplexed (ADS), -// all requests/acknowledgments/updates are logically walled off by type_url: -// a Cluster ACK exists in a completely separate world from a prior Route NACK. -// In particular, initial_resource_versions being sent at the "start" of every -// gRPC stream actually entails a message for each type_url, each with its own -// initial_resource_versions. -// [#next-free-field: 8] -message DeltaDiscoveryRequest { - // The node making the request. - core.Node node = 1; - - // Type of the resource that is being requested, e.g. - // "type.googleapis.com/envoy.api.v2.ClusterLoadAssignment". - string type_url = 2; - - // DeltaDiscoveryRequests allow the client to add or remove individual - // resources to the set of tracked resources in the context of a stream. - // All resource names in the resource_names_subscribe list are added to the - // set of tracked resources and all resource names in the resource_names_unsubscribe - // list are removed from the set of tracked resources. 
- // - // *Unlike* state-of-the-world xDS, an empty resource_names_subscribe or - // resource_names_unsubscribe list simply means that no resources are to be - // added or removed to the resource list. - // *Like* state-of-the-world xDS, the server must send updates for all tracked - // resources, but can also send updates for resources the client has not subscribed to. - // - // NOTE: the server must respond with all resources listed in resource_names_subscribe, - // even if it believes the client has the most recent version of them. The reason: - // the client may have dropped them, but then regained interest before it had a chance - // to send the unsubscribe message. See DeltaSubscriptionStateTest.RemoveThenAdd. - // - // These two fields can be set in any DeltaDiscoveryRequest, including ACKs - // and initial_resource_versions. - // - // A list of Resource names to add to the list of tracked resources. - repeated string resource_names_subscribe = 3; - - // A list of Resource names to remove from the list of tracked resources. - repeated string resource_names_unsubscribe = 4; - - // Informs the server of the versions of the resources the xDS client knows of, to enable the - // client to continue the same logical xDS session even in the face of gRPC stream reconnection. - // It will not be populated: [1] in the very first stream of a session, since the client will - // not yet have any resources, [2] in any message after the first in a stream (for a given - // type_url), since the server will already be correctly tracking the client's state. - // (In ADS, the first message *of each type_url* of a reconnected stream populates this map.) - // The map's keys are names of xDS resources known to the xDS client. - // The map's values are opaque resource versions. 
- map initial_resource_versions = 5; - - // When the DeltaDiscoveryRequest is a ACK or NACK message in response - // to a previous DeltaDiscoveryResponse, the response_nonce must be the - // nonce in the DeltaDiscoveryResponse. - // Otherwise (unlike in DiscoveryRequest) response_nonce must be omitted. - string response_nonce = 6; - - // This is populated when the previous :ref:`DiscoveryResponse ` - // failed to update configuration. The *message* field in *error_details* - // provides the Envoy internal exception related to the failure. - google.rpc.Status error_detail = 7; -} - -// [#next-free-field: 7] -message DeltaDiscoveryResponse { - // The version of the response data (used for debugging). - string system_version_info = 1; - - // The response resources. These are typed resources, whose types must match - // the type_url field. - repeated Resource resources = 2; - - // field id 3 IS available! - - // Type URL for resources. Identifies the xDS API when muxing over ADS. - // Must be consistent with the type_url in the Any within 'resources' if 'resources' is non-empty. - string type_url = 4; - - // Resources names of resources that have be deleted and to be removed from the xDS Client. - // Removed resources for missing resources can be ignored. - repeated string removed_resources = 6; - - // The nonce provides a way for DeltaDiscoveryRequests to uniquely - // reference a DeltaDiscoveryResponse when (N)ACKing. The nonce is required. - string nonce = 5; -} - -message Resource { - // The resource's name, to distinguish it from others of the same type of resource. - string name = 3; - - // The aliases are a list of other names that this resource can go by. - repeated string aliases = 4; - - // The resource level version. It allows xDS to track the state of individual - // resources. - string version = 1; - - // The resource being tracked. 
- google.protobuf.Any resource = 2; -} diff --git a/generated_api_shadow/envoy/api/v2/eds.proto b/generated_api_shadow/envoy/api/v2/eds.proto deleted file mode 100644 index d757f17fc2f37..0000000000000 --- a/generated_api_shadow/envoy/api/v2/eds.proto +++ /dev/null @@ -1,45 +0,0 @@ -syntax = "proto3"; - -package envoy.api.v2; - -import "envoy/api/v2/discovery.proto"; - -import "google/api/annotations.proto"; - -import "envoy/annotations/resource.proto"; -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; - -import public "envoy/api/v2/endpoint.proto"; - -option java_package = "io.envoyproxy.envoy.api.v2"; -option java_outer_classname = "EdsProto"; -option java_multiple_files = true; -option java_generic_services = true; -option (udpa.annotations.file_migrate).move_to_package = "envoy.service.endpoint.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: EDS] -// Endpoint discovery :ref:`architecture overview ` - -service EndpointDiscoveryService { - option (envoy.annotations.resource).type = "envoy.api.v2.ClusterLoadAssignment"; - - // The resource_names field in DiscoveryRequest specifies a list of clusters - // to subscribe to updates for. - rpc StreamEndpoints(stream DiscoveryRequest) returns (stream DiscoveryResponse) { - } - - rpc DeltaEndpoints(stream DeltaDiscoveryRequest) returns (stream DeltaDiscoveryResponse) { - } - - rpc FetchEndpoints(DiscoveryRequest) returns (DiscoveryResponse) { - option (google.api.http).post = "/v2/discovery:endpoints"; - option (google.api.http).body = "*"; - } -} - -// [#not-implemented-hide:] Not configuration. Workaround c++ protobuf issue with importing -// services: https://github.com/google/protobuf/issues/4221 and protoxform to upgrade the file. 
-message EdsDummy { -} diff --git a/generated_api_shadow/envoy/api/v2/endpoint.proto b/generated_api_shadow/envoy/api/v2/endpoint.proto deleted file mode 100644 index 70bac3c6c4f6c..0000000000000 --- a/generated_api_shadow/envoy/api/v2/endpoint.proto +++ /dev/null @@ -1,119 +0,0 @@ -syntax = "proto3"; - -package envoy.api.v2; - -import "envoy/api/v2/endpoint/endpoint_components.proto"; -import "envoy/type/percent.proto"; - -import "google/protobuf/duration.proto"; -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.api.v2"; -option java_outer_classname = "EndpointProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = "envoy.config.endpoint.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Endpoint configuration] -// Endpoint discovery :ref:`architecture overview ` - -// Each route from RDS will map to a single cluster or traffic split across -// clusters using weights expressed in the RDS WeightedCluster. -// -// With EDS, each cluster is treated independently from a LB perspective, with -// LB taking place between the Localities within a cluster and at a finer -// granularity between the hosts within a locality. The percentage of traffic -// for each endpoint is determined by both its load_balancing_weight, and the -// load_balancing_weight of its locality. First, a locality will be selected, -// then an endpoint within that locality will be chose based on its weight. -// [#next-free-field: 6] -message ClusterLoadAssignment { - // Load balancing policy settings. - // [#next-free-field: 6] - message Policy { - // [#not-implemented-hide:] - message DropOverload { - // Identifier for the policy specifying the drop. 
- string category = 1 [(validate.rules).string = {min_bytes: 1}]; - - // Percentage of traffic that should be dropped for the category. - type.FractionalPercent drop_percentage = 2; - } - - reserved 1; - - // Action to trim the overall incoming traffic to protect the upstream - // hosts. This action allows protection in case the hosts are unable to - // recover from an outage, or unable to autoscale or unable to handle - // incoming traffic volume for any reason. - // - // At the client each category is applied one after the other to generate - // the 'actual' drop percentage on all outgoing traffic. For example: - // - // .. code-block:: json - // - // { "drop_overloads": [ - // { "category": "throttle", "drop_percentage": 60 } - // { "category": "lb", "drop_percentage": 50 } - // ]} - // - // The actual drop percentages applied to the traffic at the clients will be - // "throttle"_drop = 60% - // "lb"_drop = 20% // 50% of the remaining 'actual' load, which is 40%. - // actual_outgoing_load = 20% // remaining after applying all categories. - // [#not-implemented-hide:] - repeated DropOverload drop_overloads = 2; - - // Priority levels and localities are considered overprovisioned with this - // factor (in percentage). This means that we don't consider a priority - // level or locality unhealthy until the percentage of healthy hosts - // multiplied by the overprovisioning factor drops below 100. - // With the default value 140(1.4), Envoy doesn't consider a priority level - // or a locality unhealthy until their percentage of healthy hosts drops - // below 72%. For example: - // - // .. code-block:: json - // - // { "overprovisioning_factor": 100 } - // - // Read more at :ref:`priority levels ` and - // :ref:`localities `. - google.protobuf.UInt32Value overprovisioning_factor = 3 [(validate.rules).uint32 = {gt: 0}]; - - // The max time until which the endpoints from this assignment can be used. 
- // If no new assignments are received before this time expires the endpoints - // are considered stale and should be marked unhealthy. - // Defaults to 0 which means endpoints never go stale. - google.protobuf.Duration endpoint_stale_after = 4 [(validate.rules).duration = {gt {}}]; - - // The flag to disable overprovisioning. If it is set to true, - // :ref:`overprovisioning factor - // ` will be ignored - // and Envoy will not perform graceful failover between priority levels or - // localities as endpoints become unhealthy. Otherwise Envoy will perform - // graceful failover as :ref:`overprovisioning factor - // ` suggests. - // [#not-implemented-hide:] - bool disable_overprovisioning = 5 [deprecated = true]; - } - - // Name of the cluster. This will be the :ref:`service_name - // ` value if specified - // in the cluster :ref:`EdsClusterConfig - // `. - string cluster_name = 1 [(validate.rules).string = {min_bytes: 1}]; - - // List of endpoints to load balance to. - repeated endpoint.LocalityLbEndpoints endpoints = 2; - - // Map of named endpoints that can be referenced in LocalityLbEndpoints. - // [#not-implemented-hide:] - map named_endpoints = 5; - - // Load balancing policy settings. - Policy policy = 4; -} diff --git a/generated_api_shadow/envoy/api/v2/endpoint/BUILD b/generated_api_shadow/envoy/api/v2/endpoint/BUILD deleted file mode 100644 index 83bc0ab960e74..0000000000000 --- a/generated_api_shadow/envoy/api/v2/endpoint/BUILD +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/api/v2/core:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/api/v2/endpoint/endpoint.proto b/generated_api_shadow/envoy/api/v2/endpoint/endpoint.proto deleted file mode 100644 index 247c9ae265a56..0000000000000 --- a/generated_api_shadow/envoy/api/v2/endpoint/endpoint.proto +++ /dev/null @@ -1,9 +0,0 @@ -syntax = "proto3"; - -package envoy.api.v2.endpoint; - -import public "envoy/api/v2/endpoint/endpoint_components.proto"; - -option java_package = "io.envoyproxy.envoy.api.v2.endpoint"; -option java_outer_classname = "EndpointProto"; -option java_multiple_files = true; diff --git a/generated_api_shadow/envoy/api/v2/endpoint/endpoint_components.proto b/generated_api_shadow/envoy/api/v2/endpoint/endpoint_components.proto deleted file mode 100644 index 78d45e2e08d06..0000000000000 --- a/generated_api_shadow/envoy/api/v2/endpoint/endpoint_components.proto +++ /dev/null @@ -1,148 +0,0 @@ -syntax = "proto3"; - -package envoy.api.v2.endpoint; - -import "envoy/api/v2/core/address.proto"; -import "envoy/api/v2/core/base.proto"; -import "envoy/api/v2/core/health_check.proto"; - -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.api.v2.endpoint"; -option java_outer_classname = "EndpointComponentsProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = "envoy.config.endpoint.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Endpoints] - -// Upstream host identifier. -message Endpoint { - // The optional health check configuration. - message HealthCheckConfig { - // Optional alternative health check port value. 
- // - // By default the health check address port of an upstream host is the same - // as the host's serving address port. This provides an alternative health - // check port. Setting this with a non-zero value allows an upstream host - // to have different health check address port. - uint32 port_value = 1 [(validate.rules).uint32 = {lte: 65535}]; - - // By default, the host header for L7 health checks is controlled by cluster level configuration - // (see: :ref:`host ` and - // :ref:`authority `). Setting this - // to a non-empty value allows overriding the cluster level configuration for a specific - // endpoint. - string hostname = 2; - } - - // The upstream host address. - // - // .. attention:: - // - // The form of host address depends on the given cluster type. For STATIC or EDS, - // it is expected to be a direct IP address (or something resolvable by the - // specified :ref:`resolver ` - // in the Address). For LOGICAL or STRICT DNS, it is expected to be hostname, - // and will be resolved via DNS. - core.Address address = 1; - - // The optional health check configuration is used as configuration for the - // health checker to contact the health checked host. - // - // .. attention:: - // - // This takes into effect only for upstream clusters with - // :ref:`active health checking ` enabled. - HealthCheckConfig health_check_config = 2; - - // The hostname associated with this endpoint. This hostname is not used for routing or address - // resolution. If provided, it will be associated with the endpoint, and can be used for features - // that require a hostname, like - // :ref:`auto_host_rewrite `. - string hostname = 3; -} - -// An Endpoint that Envoy can route traffic to. -// [#next-free-field: 6] -message LbEndpoint { - // Upstream host identifier or a named reference. - oneof host_identifier { - Endpoint endpoint = 1; - - // [#not-implemented-hide:] - string endpoint_name = 5; - } - - // Optional health status when known and supplied by EDS server. 
- core.HealthStatus health_status = 2; - - // The endpoint metadata specifies values that may be used by the load - // balancer to select endpoints in a cluster for a given request. The filter - // name should be specified as *envoy.lb*. An example boolean key-value pair - // is *canary*, providing the optional canary status of the upstream host. - // This may be matched against in a route's - // :ref:`RouteAction ` metadata_match field - // to subset the endpoints considered in cluster load balancing. - core.Metadata metadata = 3; - - // The optional load balancing weight of the upstream host; at least 1. - // Envoy uses the load balancing weight in some of the built in load - // balancers. The load balancing weight for an endpoint is divided by the sum - // of the weights of all endpoints in the endpoint's locality to produce a - // percentage of traffic for the endpoint. This percentage is then further - // weighted by the endpoint's locality's load balancing weight from - // LocalityLbEndpoints. If unspecified, each host is presumed to have equal - // weight in a locality. The sum of the weights of all endpoints in the - // endpoint's locality must not exceed uint32_t maximal value (4294967295). - google.protobuf.UInt32Value load_balancing_weight = 4 [(validate.rules).uint32 = {gte: 1}]; -} - -// A group of endpoints belonging to a Locality. -// One can have multiple LocalityLbEndpoints for a locality, but this is -// generally only done if the different groups need to have different load -// balancing weights or different priorities. -// [#next-free-field: 7] -message LocalityLbEndpoints { - // Identifies location of where the upstream hosts run. - core.Locality locality = 1; - - // The group of endpoints belonging to the locality specified. - repeated LbEndpoint lb_endpoints = 2; - - // Optional: Per priority/region/zone/sub_zone weight; at least 1. 
The load - // balancing weight for a locality is divided by the sum of the weights of all - // localities at the same priority level to produce the effective percentage - // of traffic for the locality. The sum of the weights of all localities at - // the same priority level must not exceed uint32_t maximal value (4294967295). - // - // Locality weights are only considered when :ref:`locality weighted load - // balancing ` is - // configured. These weights are ignored otherwise. If no weights are - // specified when locality weighted load balancing is enabled, the locality is - // assigned no load. - google.protobuf.UInt32Value load_balancing_weight = 3 [(validate.rules).uint32 = {gte: 1}]; - - // Optional: the priority for this LocalityLbEndpoints. If unspecified this will - // default to the highest priority (0). - // - // Under usual circumstances, Envoy will only select endpoints for the highest - // priority (0). In the event all endpoints for a particular priority are - // unavailable/unhealthy, Envoy will fail over to selecting endpoints for the - // next highest priority group. - // - // Priorities should range from 0 (highest) to N (lowest) without skipping. - uint32 priority = 5 [(validate.rules).uint32 = {lte: 128}]; - - // Optional: Per locality proximity value which indicates how close this - // locality is from the source locality. This value only provides ordering - // information (lower the value, closer it is to the source locality). - // This will be consumed by load balancing schemes that need proximity order - // to determine where to route the requests. 
- // [#not-implemented-hide:] - google.protobuf.UInt32Value proximity = 6; -} diff --git a/generated_api_shadow/envoy/api/v2/endpoint/load_report.proto b/generated_api_shadow/envoy/api/v2/endpoint/load_report.proto deleted file mode 100644 index 928aed6102df8..0000000000000 --- a/generated_api_shadow/envoy/api/v2/endpoint/load_report.proto +++ /dev/null @@ -1,157 +0,0 @@ -syntax = "proto3"; - -package envoy.api.v2.endpoint; - -import "envoy/api/v2/core/address.proto"; -import "envoy/api/v2/core/base.proto"; - -import "google/protobuf/duration.proto"; -import "google/protobuf/struct.proto"; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.api.v2.endpoint"; -option java_outer_classname = "LoadReportProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = "envoy.config.endpoint.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// These are stats Envoy reports to GLB every so often. Report frequency is -// defined by -// :ref:`LoadStatsResponse.load_reporting_interval`. -// Stats per upstream region/zone and optionally per subzone. -// [#not-implemented-hide:] Not configuration. TBD how to doc proto APIs. -// [#next-free-field: 9] -message UpstreamLocalityStats { - // Name of zone, region and optionally endpoint group these metrics were - // collected from. Zone and region names could be empty if unknown. - core.Locality locality = 1; - - // The total number of requests successfully completed by the endpoints in the - // locality. - uint64 total_successful_requests = 2; - - // The total number of unfinished requests - uint64 total_requests_in_progress = 3; - - // The total number of requests that failed due to errors at the endpoint, - // aggregated over all endpoints in the locality. 
- uint64 total_error_requests = 4; - - // The total number of requests that were issued by this Envoy since - // the last report. This information is aggregated over all the - // upstream endpoints in the locality. - uint64 total_issued_requests = 8; - - // Stats for multi-dimensional load balancing. - repeated EndpointLoadMetricStats load_metric_stats = 5; - - // Endpoint granularity stats information for this locality. This information - // is populated if the Server requests it by setting - // :ref:`LoadStatsResponse.report_endpoint_granularity`. - repeated UpstreamEndpointStats upstream_endpoint_stats = 7; - - // [#not-implemented-hide:] The priority of the endpoint group these metrics - // were collected from. - uint32 priority = 6; -} - -// [#not-implemented-hide:] Not configuration. TBD how to doc proto APIs. -// [#next-free-field: 8] -message UpstreamEndpointStats { - // Upstream host address. - core.Address address = 1; - - // Opaque and implementation dependent metadata of the - // endpoint. Envoy will pass this directly to the management server. - google.protobuf.Struct metadata = 6; - - // The total number of requests successfully completed by the endpoints in the - // locality. These include non-5xx responses for HTTP, where errors - // originate at the client and the endpoint responded successfully. For gRPC, - // the grpc-status values are those not covered by total_error_requests below. - uint64 total_successful_requests = 2; - - // The total number of unfinished requests for this endpoint. - uint64 total_requests_in_progress = 3; - - // The total number of requests that failed due to errors at the endpoint. - // For HTTP these are responses with 5xx status codes and for gRPC the - // grpc-status values: - // - // - DeadlineExceeded - // - Unimplemented - // - Internal - // - Unavailable - // - Unknown - // - DataLoss - uint64 total_error_requests = 4; - - // The total number of requests that were issued to this endpoint - // since the last report. 
A single TCP connection, HTTP or gRPC - // request or stream is counted as one request. - uint64 total_issued_requests = 7; - - // Stats for multi-dimensional load balancing. - repeated EndpointLoadMetricStats load_metric_stats = 5; -} - -// [#not-implemented-hide:] Not configuration. TBD how to doc proto APIs. -message EndpointLoadMetricStats { - // Name of the metric; may be empty. - string metric_name = 1; - - // Number of calls that finished and included this metric. - uint64 num_requests_finished_with_metric = 2; - - // Sum of metric values across all calls that finished with this metric for - // load_reporting_interval. - double total_metric_value = 3; -} - -// Per cluster load stats. Envoy reports these stats a management server in a -// :ref:`LoadStatsRequest` -// [#not-implemented-hide:] Not configuration. TBD how to doc proto APIs. -// Next ID: 7 -// [#next-free-field: 7] -message ClusterStats { - message DroppedRequests { - // Identifier for the policy specifying the drop. - string category = 1 [(validate.rules).string = {min_bytes: 1}]; - - // Total number of deliberately dropped requests for the category. - uint64 dropped_count = 2; - } - - // The name of the cluster. - string cluster_name = 1 [(validate.rules).string = {min_bytes: 1}]; - - // The eds_cluster_config service_name of the cluster. - // It's possible that two clusters send the same service_name to EDS, - // in that case, the management server is supposed to do aggregation on the load reports. - string cluster_service_name = 6; - - // Need at least one. - repeated UpstreamLocalityStats upstream_locality_stats = 2 - [(validate.rules).repeated = {min_items: 1}]; - - // Cluster-level stats such as total_successful_requests may be computed by - // summing upstream_locality_stats. In addition, below there are additional - // cluster-wide stats. - // - // The total number of dropped requests. This covers requests - // deliberately dropped by the drop_overload policy and circuit breaking. 
- uint64 total_dropped_requests = 3; - - // Information about deliberately dropped requests for each category specified - // in the DropOverload policy. - repeated DroppedRequests dropped_requests = 5; - - // Period over which the actual load report occurred. This will be guaranteed to include every - // request reported. Due to system load and delays between the *LoadStatsRequest* sent from Envoy - // and the *LoadStatsResponse* message sent from the management server, this may be longer than - // the requested load reporting interval in the *LoadStatsResponse*. - google.protobuf.Duration load_report_interval = 4; -} diff --git a/generated_api_shadow/envoy/api/v2/lds.proto b/generated_api_shadow/envoy/api/v2/lds.proto deleted file mode 100644 index 01d9949777dd8..0000000000000 --- a/generated_api_shadow/envoy/api/v2/lds.proto +++ /dev/null @@ -1,47 +0,0 @@ -syntax = "proto3"; - -package envoy.api.v2; - -import "envoy/api/v2/discovery.proto"; - -import "google/api/annotations.proto"; - -import "envoy/annotations/resource.proto"; -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; - -import public "envoy/api/v2/listener.proto"; - -option java_package = "io.envoyproxy.envoy.api.v2"; -option java_outer_classname = "LdsProto"; -option java_multiple_files = true; -option java_generic_services = true; -option (udpa.annotations.file_migrate).move_to_package = "envoy.service.listener.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Listener] -// Listener :ref:`configuration overview ` - -// The Envoy instance initiates an RPC at startup to discover a list of -// listeners. Updates are delivered via streaming from the LDS server and -// consist of a complete update of all listeners. Existing connections will be -// allowed to drain from listeners that are no longer present. 
-service ListenerDiscoveryService { - option (envoy.annotations.resource).type = "envoy.api.v2.Listener"; - - rpc DeltaListeners(stream DeltaDiscoveryRequest) returns (stream DeltaDiscoveryResponse) { - } - - rpc StreamListeners(stream DiscoveryRequest) returns (stream DiscoveryResponse) { - } - - rpc FetchListeners(DiscoveryRequest) returns (DiscoveryResponse) { - option (google.api.http).post = "/v2/discovery:listeners"; - option (google.api.http).body = "*"; - } -} - -// [#not-implemented-hide:] Not configuration. Workaround c++ protobuf issue with importing -// services: https://github.com/google/protobuf/issues/4221 and protoxform to upgrade the file. -message LdsDummy { -} diff --git a/generated_api_shadow/envoy/api/v2/listener.proto b/generated_api_shadow/envoy/api/v2/listener.proto deleted file mode 100644 index 1fdd202de42a9..0000000000000 --- a/generated_api_shadow/envoy/api/v2/listener.proto +++ /dev/null @@ -1,248 +0,0 @@ -syntax = "proto3"; - -package envoy.api.v2; - -import "envoy/api/v2/core/address.proto"; -import "envoy/api/v2/core/base.proto"; -import "envoy/api/v2/core/socket_option.proto"; -import "envoy/api/v2/listener/listener_components.proto"; -import "envoy/api/v2/listener/udp_listener_config.proto"; -import "envoy/config/filter/accesslog/v2/accesslog.proto"; -import "envoy/config/listener/v2/api_listener.proto"; - -import "google/protobuf/duration.proto"; -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.api.v2"; -option java_outer_classname = "ListenerProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = "envoy.config.listener.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Listener configuration] -// Listener :ref:`configuration overview ` - -// [#next-free-field: 23] -message 
Listener { - enum DrainType { - // Drain in response to calling /healthcheck/fail admin endpoint (along with the health check - // filter), listener removal/modification, and hot restart. - DEFAULT = 0; - - // Drain in response to listener removal/modification and hot restart. This setting does not - // include /healthcheck/fail. This setting may be desirable if Envoy is hosting both ingress - // and egress listeners. - MODIFY_ONLY = 1; - } - - // [#not-implemented-hide:] - message DeprecatedV1 { - // Whether the listener should bind to the port. A listener that doesn't - // bind can only receive connections redirected from other listeners that - // set use_original_dst parameter to true. Default is true. - // - // This is deprecated in v2, all Listeners will bind to their port. An - // additional filter chain must be created for every original destination - // port this listener may redirect to in v2, with the original port - // specified in the FilterChainMatch destination_port field. - // - // [#comment:TODO(PiotrSikora): Remove this once verified that we no longer need it.] - google.protobuf.BoolValue bind_to_port = 1; - } - - // Configuration for listener connection balancing. - message ConnectionBalanceConfig { - // A connection balancer implementation that does exact balancing. This means that a lock is - // held during balancing so that connection counts are nearly exactly balanced between worker - // threads. This is "nearly" exact in the sense that a connection might close in parallel thus - // making the counts incorrect, but this should be rectified on the next accept. This balancer - // sacrifices accept throughput for accuracy and should be used when there are a small number of - // connections that rarely cycle (e.g., service mesh gRPC egress). - message ExactBalance { - } - - oneof balance_type { - option (validate.required) = true; - - // If specified, the listener will use the exact connection balancer. 
- ExactBalance exact_balance = 1; - } - } - - reserved 14; - - // The unique name by which this listener is known. If no name is provided, - // Envoy will allocate an internal UUID for the listener. If the listener is to be dynamically - // updated or removed via :ref:`LDS ` a unique name must be provided. - string name = 1; - - // The address that the listener should listen on. In general, the address must be unique, though - // that is governed by the bind rules of the OS. E.g., multiple listeners can listen on port 0 on - // Linux as the actual port will be allocated by the OS. - core.Address address = 2 [(validate.rules).message = {required: true}]; - - // A list of filter chains to consider for this listener. The - // :ref:`FilterChain ` with the most specific - // :ref:`FilterChainMatch ` criteria is used on a - // connection. - // - // Example using SNI for filter chain selection can be found in the - // :ref:`FAQ entry `. - repeated listener.FilterChain filter_chains = 3; - - // If a connection is redirected using *iptables*, the port on which the proxy - // receives it might be different from the original destination address. When this flag is set to - // true, the listener hands off redirected connections to the listener associated with the - // original destination address. If there is no listener associated with the original destination - // address, the connection is handled by the listener that receives it. Defaults to false. - // - // .. attention:: - // - // This field is deprecated. Use :ref:`an original_dst ` - // :ref:`listener filter ` instead. - // - // Note that hand off to another listener is *NOT* performed without this flag. Once - // :ref:`FilterChainMatch ` is implemented this flag - // will be removed, as filter chain matching can be used to select a filter chain based on the - // restored destination address. 
- google.protobuf.BoolValue use_original_dst = 4 [deprecated = true]; - - // Soft limit on size of the listener’s new connection read and write buffers. - // If unspecified, an implementation defined default is applied (1MiB). - google.protobuf.UInt32Value per_connection_buffer_limit_bytes = 5; - - // Listener metadata. - core.Metadata metadata = 6; - - // [#not-implemented-hide:] - DeprecatedV1 deprecated_v1 = 7; - - // The type of draining to perform at a listener-wide level. - DrainType drain_type = 8; - - // Listener filters have the opportunity to manipulate and augment the connection metadata that - // is used in connection filter chain matching, for example. These filters are run before any in - // :ref:`filter_chains `. Order matters as the - // filters are processed sequentially right after a socket has been accepted by the listener, and - // before a connection is created. - // UDP Listener filters can be specified when the protocol in the listener socket address in - // :ref:`protocol ` is :ref:`UDP - // `. - // UDP listeners currently support a single filter. - repeated listener.ListenerFilter listener_filters = 9; - - // The timeout to wait for all listener filters to complete operation. If the timeout is reached, - // the accepted socket is closed without a connection being created unless - // `continue_on_listener_filters_timeout` is set to true. Specify 0 to disable the - // timeout. If not specified, a default timeout of 15s is used. - google.protobuf.Duration listener_filters_timeout = 15; - - // Whether a connection should be created when listener filters timeout. Default is false. - // - // .. attention:: - // - // Some listener filters, such as :ref:`Proxy Protocol filter - // `, should not be used with this option. It will cause - // unexpected behavior when a connection is created. - bool continue_on_listener_filters_timeout = 17; - - // Whether the listener should be set as a transparent socket. 
- // When this flag is set to true, connections can be redirected to the listener using an - // *iptables* *TPROXY* target, in which case the original source and destination addresses and - // ports are preserved on accepted connections. This flag should be used in combination with - // :ref:`an original_dst ` :ref:`listener filter - // ` to mark the connections' local addresses as - // "restored." This can be used to hand off each redirected connection to another listener - // associated with the connection's destination address. Direct connections to the socket without - // using *TPROXY* cannot be distinguished from connections redirected using *TPROXY* and are - // therefore treated as if they were redirected. - // When this flag is set to false, the listener's socket is explicitly reset as non-transparent. - // Setting this flag requires Envoy to run with the *CAP_NET_ADMIN* capability. - // When this flag is not set (default), the socket is not modified, i.e. the transparent option - // is neither set nor reset. - google.protobuf.BoolValue transparent = 10; - - // Whether the listener should set the *IP_FREEBIND* socket option. When this - // flag is set to true, listeners can be bound to an IP address that is not - // configured on the system running Envoy. When this flag is set to false, the - // option *IP_FREEBIND* is disabled on the socket. When this flag is not set - // (default), the socket is not modified, i.e. the option is neither enabled - // nor disabled. - google.protobuf.BoolValue freebind = 11; - - // Additional socket options that may not be present in Envoy source code or - // precompiled binaries. - repeated core.SocketOption socket_options = 13; - - // Whether the listener should accept TCP Fast Open (TFO) connections. - // When this flag is set to a value greater than 0, the option TCP_FASTOPEN is enabled on - // the socket, with a queue length of the specified size - // (see `details in RFC7413 `_). 
- // When this flag is set to 0, the option TCP_FASTOPEN is disabled on the socket. - // When this flag is not set (default), the socket is not modified, - // i.e. the option is neither enabled nor disabled. - // - // On Linux, the net.ipv4.tcp_fastopen kernel parameter must include flag 0x2 to enable - // TCP_FASTOPEN. - // See `ip-sysctl.txt `_. - // - // On macOS, only values of 0, 1, and unset are valid; other values may result in an error. - // To set the queue length on macOS, set the net.inet.tcp.fastopen_backlog kernel parameter. - google.protobuf.UInt32Value tcp_fast_open_queue_length = 12; - - // Specifies the intended direction of the traffic relative to the local Envoy. - // This property is required on Windows for listeners using the original destination filter, - // see :ref:`Original Destination `. - core.TrafficDirection traffic_direction = 16; - - // If the protocol in the listener socket address in :ref:`protocol - // ` is :ref:`UDP - // `, this field specifies the actual udp - // listener to create, i.e. :ref:`udp_listener_name - // ` = "raw_udp_listener" for - // creating a packet-oriented UDP listener. If not present, treat it as "raw_udp_listener". - listener.UdpListenerConfig udp_listener_config = 18; - - // Used to represent an API listener, which is used in non-proxy clients. The type of API - // exposed to the non-proxy application depends on the type of API listener. - // When this field is set, no other field except for :ref:`name` - // should be set. - // - // .. note:: - // - // Currently only one ApiListener can be installed; and it can only be done via bootstrap config, - // not LDS. 
- // - // [#next-major-version: In the v3 API, instead of this messy approach where the socket - // listener fields are directly in the top-level Listener message and the API listener types - // are in the ApiListener message, the socket listener messages should be in their own message, - // and the top-level Listener should essentially be a oneof that selects between the - // socket listener and the various types of API listener. That way, a given Listener message - // can structurally only contain the fields of the relevant type.] - config.listener.v2.ApiListener api_listener = 19; - - // The listener's connection balancer configuration, currently only applicable to TCP listeners. - // If no configuration is specified, Envoy will not attempt to balance active connections between - // worker threads. - ConnectionBalanceConfig connection_balance_config = 20; - - // When this flag is set to true, listeners set the *SO_REUSEPORT* socket option and - // create one socket for each worker thread. This makes inbound connections - // distribute among worker threads roughly evenly in cases where there are a high number - // of connections. When this flag is set to false, all worker threads share one socket. - // - // Before Linux v4.19-rc1, new TCP connections may be rejected during hot restart - // (see `3rd paragraph in 'soreuseport' commit message - // `_). - // This issue was fixed by `tcp: Avoid TCP syncookie rejected by SO_REUSEPORT socket - // `_. - bool reuse_port = 21; - - // Configuration for :ref:`access logs ` - // emitted by this listener. - repeated config.filter.accesslog.v2.AccessLog access_log = 22; -} diff --git a/generated_api_shadow/envoy/api/v2/listener/BUILD b/generated_api_shadow/envoy/api/v2/listener/BUILD deleted file mode 100644 index ea23dff77c22e..0000000000000 --- a/generated_api_shadow/envoy/api/v2/listener/BUILD +++ /dev/null @@ -1,14 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/api/v2/auth:pkg", - "//envoy/api/v2/core:pkg", - "//envoy/type:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/api/v2/listener/listener.proto b/generated_api_shadow/envoy/api/v2/listener/listener.proto deleted file mode 100644 index 273b29cb5dd30..0000000000000 --- a/generated_api_shadow/envoy/api/v2/listener/listener.proto +++ /dev/null @@ -1,11 +0,0 @@ -syntax = "proto3"; - -package envoy.api.v2.listener; - -import public "envoy/api/v2/listener/listener_components.proto"; - -option java_package = "io.envoyproxy.envoy.api.v2.listener"; -option java_outer_classname = "ListenerProto"; -option java_multiple_files = true; -option csharp_namespace = "Envoy.Api.V2.ListenerNS"; -option ruby_package = "Envoy.Api.V2.ListenerNS"; diff --git a/generated_api_shadow/envoy/api/v2/listener/listener_components.proto b/generated_api_shadow/envoy/api/v2/listener/listener_components.proto deleted file mode 100644 index 08738962c5eee..0000000000000 --- a/generated_api_shadow/envoy/api/v2/listener/listener_components.proto +++ /dev/null @@ -1,287 +0,0 @@ -syntax = "proto3"; - -package envoy.api.v2.listener; - -import "envoy/api/v2/auth/tls.proto"; -import "envoy/api/v2/core/address.proto"; -import "envoy/api/v2/core/base.proto"; -import "envoy/type/range.proto"; - -import "google/protobuf/any.proto"; -import "google/protobuf/struct.proto"; -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.api.v2.listener"; -option java_outer_classname = "ListenerComponentsProto"; -option java_multiple_files = true; -option csharp_namespace = "Envoy.Api.V2.ListenerNS"; -option ruby_package = "Envoy.Api.V2.ListenerNS"; -option 
(udpa.annotations.file_migrate).move_to_package = "envoy.config.listener.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Listener components] -// Listener :ref:`configuration overview ` - -message Filter { - reserved 3; - - // The name of the filter to instantiate. The name must match a - // :ref:`supported filter `. - string name = 1 [(validate.rules).string = {min_bytes: 1}]; - - // Filter specific configuration which depends on the filter being - // instantiated. See the supported filters for further documentation. - oneof config_type { - google.protobuf.Struct config = 2 [deprecated = true]; - - google.protobuf.Any typed_config = 4; - } -} - -// Specifies the match criteria for selecting a specific filter chain for a -// listener. -// -// In order for a filter chain to be selected, *ALL* of its criteria must be -// fulfilled by the incoming connection, properties of which are set by the -// networking stack and/or listener filters. -// -// The following order applies: -// -// 1. Destination port. -// 2. Destination IP address. -// 3. Server name (e.g. SNI for TLS protocol), -// 4. Transport protocol. -// 5. Application protocols (e.g. ALPN for TLS protocol). -// 6. Source type (e.g. any, local or external network). -// 7. Source IP address. -// 8. Source port. -// -// For criteria that allow ranges or wildcards, the most specific value in any -// of the configured filter chains that matches the incoming connection is going -// to be used (e.g. for SNI ``www.example.com`` the most specific match would be -// ``www.example.com``, then ``*.example.com``, then ``*.com``, then any filter -// chain without ``server_names`` requirements). -// -// [#comment: Implemented rules are kept in the preference order, with deprecated fields -// listed at the end, because that's how we want to list them in the docs. 
-// -// [#comment:TODO(PiotrSikora): Add support for configurable precedence of the rules] -// [#next-free-field: 13] -message FilterChainMatch { - enum ConnectionSourceType { - // Any connection source matches. - ANY = 0; - - // Match a connection originating from the same host. - LOCAL = 1 [(udpa.annotations.enum_value_migrate).rename = "SAME_IP_OR_LOOPBACK"]; - - // Match a connection originating from a different host. - EXTERNAL = 2; - } - - reserved 1; - - // Optional destination port to consider when use_original_dst is set on the - // listener in determining a filter chain match. - google.protobuf.UInt32Value destination_port = 8 [(validate.rules).uint32 = {lte: 65535 gte: 1}]; - - // If non-empty, an IP address and prefix length to match addresses when the - // listener is bound to 0.0.0.0/:: or when use_original_dst is specified. - repeated core.CidrRange prefix_ranges = 3; - - // If non-empty, an IP address and suffix length to match addresses when the - // listener is bound to 0.0.0.0/:: or when use_original_dst is specified. - // [#not-implemented-hide:] - string address_suffix = 4; - - // [#not-implemented-hide:] - google.protobuf.UInt32Value suffix_len = 5; - - // Specifies the connection source IP match type. Can be any, local or external network. - ConnectionSourceType source_type = 12 [(validate.rules).enum = {defined_only: true}]; - - // The criteria is satisfied if the source IP address of the downstream - // connection is contained in at least one of the specified subnets. If the - // parameter is not specified or the list is empty, the source IP address is - // ignored. - repeated core.CidrRange source_prefix_ranges = 6; - - // The criteria is satisfied if the source port of the downstream connection - // is contained in at least one of the specified ports. If the parameter is - // not specified, the source port is ignored. 
- repeated uint32 source_ports = 7 - [(validate.rules).repeated = {items {uint32 {lte: 65535 gte: 1}}}]; - - // If non-empty, a list of server names (e.g. SNI for TLS protocol) to consider when determining - // a filter chain match. Those values will be compared against the server names of a new - // connection, when detected by one of the listener filters. - // - // The server name will be matched against all wildcard domains, i.e. ``www.example.com`` - // will be first matched against ``www.example.com``, then ``*.example.com``, then ``*.com``. - // - // Note that partial wildcards are not supported, and values like ``*w.example.com`` are invalid. - // - // .. attention:: - // - // See the :ref:`FAQ entry ` on how to configure SNI for more - // information. - repeated string server_names = 11; - - // If non-empty, a transport protocol to consider when determining a filter chain match. - // This value will be compared against the transport protocol of a new connection, when - // it's detected by one of the listener filters. - // - // Suggested values include: - // - // * ``raw_buffer`` - default, used when no transport protocol is detected, - // * ``tls`` - set by :ref:`envoy.filters.listener.tls_inspector ` - // when TLS protocol is detected. - string transport_protocol = 9; - - // If non-empty, a list of application protocols (e.g. ALPN for TLS protocol) to consider when - // determining a filter chain match. Those values will be compared against the application - // protocols of a new connection, when detected by one of the listener filters. - // - // Suggested values include: - // - // * ``http/1.1`` - set by :ref:`envoy.filters.listener.tls_inspector - // `, - // * ``h2`` - set by :ref:`envoy.filters.listener.tls_inspector ` - // - // .. attention:: - // - // Currently, only :ref:`TLS Inspector ` provides - // application protocol detection based on the requested - // `ALPN `_ values. 
- // - // However, the use of ALPN is pretty much limited to the HTTP/2 traffic on the Internet, - // and matching on values other than ``h2`` is going to lead to a lot of false negatives, - // unless all connecting clients are known to use ALPN. - repeated string application_protocols = 10; -} - -// A filter chain wraps a set of match criteria, an option TLS context, a set of filters, and -// various other parameters. -// [#next-free-field: 8] -message FilterChain { - // The criteria to use when matching a connection to this filter chain. - FilterChainMatch filter_chain_match = 1; - - // The TLS context for this filter chain. - // - // .. attention:: - // - // **This field is deprecated**. Use `transport_socket` with name `tls` instead. If both are - // set, `transport_socket` takes priority. - auth.DownstreamTlsContext tls_context = 2 [deprecated = true]; - - // A list of individual network filters that make up the filter chain for - // connections established with the listener. Order matters as the filters are - // processed sequentially as connection events happen. Note: If the filter - // list is empty, the connection will close by default. - repeated Filter filters = 3; - - // Whether the listener should expect a PROXY protocol V1 header on new - // connections. If this option is enabled, the listener will assume that that - // remote address of the connection is the one specified in the header. Some - // load balancers including the AWS ELB support this option. If the option is - // absent or set to false, Envoy will use the physical peer address of the - // connection as the remote address. - google.protobuf.BoolValue use_proxy_proto = 4; - - // [#not-implemented-hide:] filter chain metadata. - core.Metadata metadata = 5; - - // Optional custom transport socket implementation to use for downstream connections. - // To setup TLS, set a transport socket with name `tls` and - // :ref:`DownstreamTlsContext ` in the `typed_config`. 
- // If no transport socket configuration is specified, new connections - // will be set up with plaintext. - core.TransportSocket transport_socket = 6; - - // [#not-implemented-hide:] The unique name (or empty) by which this filter chain is known. If no - // name is provided, Envoy will allocate an internal UUID for the filter chain. If the filter - // chain is to be dynamically updated or removed via FCDS a unique name must be provided. - string name = 7; -} - -// Listener filter chain match configuration. This is a recursive structure which allows complex -// nested match configurations to be built using various logical operators. -// -// Examples: -// -// * Matches if the destination port is 3306. -// -// .. code-block:: yaml -// -// destination_port_range: -// start: 3306 -// end: 3307 -// -// * Matches if the destination port is 3306 or 15000. -// -// .. code-block:: yaml -// -// or_match: -// rules: -// - destination_port_range: -// start: 3306 -// end: 3307 -// - destination_port_range: -// start: 15000 -// end: 15001 -// -// [#next-free-field: 6] -message ListenerFilterChainMatchPredicate { - // A set of match configurations used for logical operations. - message MatchSet { - // The list of rules that make up the set. - repeated ListenerFilterChainMatchPredicate rules = 1 - [(validate.rules).repeated = {min_items: 2}]; - } - - oneof rule { - option (validate.required) = true; - - // A set that describes a logical OR. If any member of the set matches, the match configuration - // matches. - MatchSet or_match = 1; - - // A set that describes a logical AND. If all members of the set match, the match configuration - // matches. - MatchSet and_match = 2; - - // A negation match. The match configuration will match if the negated match condition matches. - ListenerFilterChainMatchPredicate not_match = 3; - - // The match configuration will always match. - bool any_match = 4 [(validate.rules).bool = {const: true}]; - - // Match destination port. 
Particularly, the match evaluation must use the recovered local port if - // the owning listener filter is after :ref:`an original_dst listener filter `. - type.Int32Range destination_port_range = 5; - } -} - -message ListenerFilter { - // The name of the filter to instantiate. The name must match a - // :ref:`supported filter `. - string name = 1 [(validate.rules).string = {min_bytes: 1}]; - - // Filter specific configuration which depends on the filter being instantiated. - // See the supported filters for further documentation. - oneof config_type { - google.protobuf.Struct config = 2 [deprecated = true]; - - google.protobuf.Any typed_config = 3; - } - - // Optional match predicate used to disable the filter. The filter is enabled when this field is empty. - // See :ref:`ListenerFilterChainMatchPredicate ` - // for further examples. - ListenerFilterChainMatchPredicate filter_disabled = 4; -} diff --git a/generated_api_shadow/envoy/api/v2/listener/quic_config.proto b/generated_api_shadow/envoy/api/v2/listener/quic_config.proto deleted file mode 100644 index 2a4616bb09c99..0000000000000 --- a/generated_api_shadow/envoy/api/v2/listener/quic_config.proto +++ /dev/null @@ -1,35 +0,0 @@ -syntax = "proto3"; - -package envoy.api.v2.listener; - -import "google/protobuf/duration.proto"; -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.api.v2.listener"; -option java_outer_classname = "QuicConfigProto"; -option java_multiple_files = true; -option csharp_namespace = "Envoy.Api.V2.ListenerNS"; -option ruby_package = "Envoy.Api.V2.ListenerNS"; -option (udpa.annotations.file_migrate).move_to_package = "envoy.config.listener.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: QUIC listener Config] - -// Configuration specific to the QUIC protocol. 
-// Next id: 4 -message QuicProtocolOptions { - // Maximum number of streams that the client can negotiate per connection. 100 - // if not specified. - google.protobuf.UInt32Value max_concurrent_streams = 1; - - // Maximum number of milliseconds that connection will be alive when there is - // no network activity. 300000ms if not specified. - google.protobuf.Duration idle_timeout = 2; - - // Connection timeout in milliseconds before the crypto handshake is finished. - // 20000ms if not specified. - google.protobuf.Duration crypto_handshake_timeout = 3; -} diff --git a/generated_api_shadow/envoy/api/v2/listener/udp_listener_config.proto b/generated_api_shadow/envoy/api/v2/listener/udp_listener_config.proto deleted file mode 100644 index d4d29531f3aaa..0000000000000 --- a/generated_api_shadow/envoy/api/v2/listener/udp_listener_config.proto +++ /dev/null @@ -1,38 +0,0 @@ -syntax = "proto3"; - -package envoy.api.v2.listener; - -import "google/protobuf/any.proto"; -import "google/protobuf/struct.proto"; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.api.v2.listener"; -option java_outer_classname = "UdpListenerConfigProto"; -option java_multiple_files = true; -option csharp_namespace = "Envoy.Api.V2.ListenerNS"; -option ruby_package = "Envoy.Api.V2.ListenerNS"; -option (udpa.annotations.file_migrate).move_to_package = "envoy.config.listener.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: UDP Listener Config] -// Listener :ref:`configuration overview ` - -message UdpListenerConfig { - // Used to look up UDP listener factory, matches "raw_udp_listener" or - // "quic_listener" to create a specific udp listener. - // If not specified, treat as "raw_udp_listener". - string udp_listener_name = 1; - - // Used to create a specific listener factory. To some factory, e.g. - // "raw_udp_listener", config is not needed. 
- oneof config_type { - google.protobuf.Struct config = 2 [deprecated = true]; - - google.protobuf.Any typed_config = 3; - } -} - -message ActiveRawUdpListenerConfig { -} diff --git a/generated_api_shadow/envoy/api/v2/ratelimit/BUILD b/generated_api_shadow/envoy/api/v2/ratelimit/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/api/v2/ratelimit/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/api/v2/ratelimit/ratelimit.proto b/generated_api_shadow/envoy/api/v2/ratelimit/ratelimit.proto deleted file mode 100644 index 5ac72c69a6fbb..0000000000000 --- a/generated_api_shadow/envoy/api/v2/ratelimit/ratelimit.proto +++ /dev/null @@ -1,69 +0,0 @@ -syntax = "proto3"; - -package envoy.api.v2.ratelimit; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.api.v2.ratelimit"; -option java_outer_classname = "RatelimitProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.common.ratelimit.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Common rate limit components] - -// A RateLimitDescriptor is a list of hierarchical entries that are used by the service to -// determine the final rate limit key and overall allowed limit. Here are some examples of how -// they might be used for the domain "envoy". -// -// .. code-block:: cpp -// -// ["authenticated": "false"], ["remote_address": "10.0.0.1"] -// -// What it does: Limits all unauthenticated traffic for the IP address 10.0.0.1. 
The -// configuration supplies a default limit for the *remote_address* key. If there is a desire to -// raise the limit for 10.0.0.1 or block it entirely it can be specified directly in the -// configuration. -// -// .. code-block:: cpp -// -// ["authenticated": "false"], ["path": "/foo/bar"] -// -// What it does: Limits all unauthenticated traffic globally for a specific path (or prefix if -// configured that way in the service). -// -// .. code-block:: cpp -// -// ["authenticated": "false"], ["path": "/foo/bar"], ["remote_address": "10.0.0.1"] -// -// What it does: Limits unauthenticated traffic to a specific path for a specific IP address. -// Like (1) we can raise/block specific IP addresses if we want with an override configuration. -// -// .. code-block:: cpp -// -// ["authenticated": "true"], ["client_id": "foo"] -// -// What it does: Limits all traffic for an authenticated client "foo" -// -// .. code-block:: cpp -// -// ["authenticated": "true"], ["client_id": "foo"], ["path": "/foo/bar"] -// -// What it does: Limits traffic to a specific path for an authenticated client "foo" -// -// The idea behind the API is that (1)/(2)/(3) and (4)/(5) can be sent in 1 request if desired. -// This enables building complex application scenarios with a generic backend. -message RateLimitDescriptor { - message Entry { - // Descriptor key. - string key = 1 [(validate.rules).string = {min_bytes: 1}]; - - // Descriptor value. - string value = 2 [(validate.rules).string = {min_bytes: 1}]; - } - - // Descriptor entries. 
- repeated Entry entries = 1 [(validate.rules).repeated = {min_items: 1}]; -} diff --git a/generated_api_shadow/envoy/api/v2/rds.proto b/generated_api_shadow/envoy/api/v2/rds.proto deleted file mode 100644 index faa5fdcf31942..0000000000000 --- a/generated_api_shadow/envoy/api/v2/rds.proto +++ /dev/null @@ -1,64 +0,0 @@ -syntax = "proto3"; - -package envoy.api.v2; - -import "envoy/api/v2/discovery.proto"; - -import "google/api/annotations.proto"; - -import "envoy/annotations/resource.proto"; -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; - -import public "envoy/api/v2/route.proto"; - -option java_package = "io.envoyproxy.envoy.api.v2"; -option java_outer_classname = "RdsProto"; -option java_multiple_files = true; -option java_generic_services = true; -option (udpa.annotations.file_migrate).move_to_package = "envoy.service.route.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: RDS] - -// The resource_names field in DiscoveryRequest specifies a route configuration. -// This allows an Envoy configuration with multiple HTTP listeners (and -// associated HTTP connection manager filters) to use different route -// configurations. Each listener will bind its HTTP connection manager filter to -// a route table via this identifier. -service RouteDiscoveryService { - option (envoy.annotations.resource).type = "envoy.api.v2.RouteConfiguration"; - - rpc StreamRoutes(stream DiscoveryRequest) returns (stream DiscoveryResponse) { - } - - rpc DeltaRoutes(stream DeltaDiscoveryRequest) returns (stream DeltaDiscoveryResponse) { - } - - rpc FetchRoutes(DiscoveryRequest) returns (DiscoveryResponse) { - option (google.api.http).post = "/v2/discovery:routes"; - option (google.api.http).body = "*"; - } -} - -// Virtual Host Discovery Service (VHDS) is used to dynamically update the list of virtual hosts for -// a given RouteConfiguration. 
If VHDS is configured a virtual host list update will be triggered -// during the processing of an HTTP request if a route for the request cannot be resolved. The -// :ref:`resource_names_subscribe ` -// field contains a list of virtual host names or aliases to track. The contents of an alias would -// be the contents of a *host* or *authority* header used to make an http request. An xDS server -// will match an alias to a virtual host based on the content of :ref:`domains' -// ` field. The *resource_names_unsubscribe* field -// contains a list of virtual host names that have been :ref:`unsubscribed -// ` from the routing table associated with the RouteConfiguration. -service VirtualHostDiscoveryService { - option (envoy.annotations.resource).type = "envoy.api.v2.route.VirtualHost"; - - rpc DeltaVirtualHosts(stream DeltaDiscoveryRequest) returns (stream DeltaDiscoveryResponse) { - } -} - -// [#not-implemented-hide:] Not configuration. Workaround c++ protobuf issue with importing -// services: https://github.com/google/protobuf/issues/4221 and protoxform to upgrade the file. 
-message RdsDummy { -} diff --git a/generated_api_shadow/envoy/api/v2/route.proto b/generated_api_shadow/envoy/api/v2/route.proto deleted file mode 100644 index 549f134a7f439..0000000000000 --- a/generated_api_shadow/envoy/api/v2/route.proto +++ /dev/null @@ -1,113 +0,0 @@ -syntax = "proto3"; - -package envoy.api.v2; - -import "envoy/api/v2/core/base.proto"; -import "envoy/api/v2/core/config_source.proto"; -import "envoy/api/v2/route/route_components.proto"; - -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.api.v2"; -option java_outer_classname = "RouteProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = "envoy.config.route.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: HTTP route configuration] -// * Routing :ref:`architecture overview ` -// * HTTP :ref:`router filter ` - -// [#next-free-field: 11] -message RouteConfiguration { - // The name of the route configuration. For example, it might match - // :ref:`route_config_name - // ` in - // :ref:`envoy_api_msg_config.filter.network.http_connection_manager.v2.Rds`. - string name = 1; - - // An array of virtual hosts that make up the route table. - repeated route.VirtualHost virtual_hosts = 2; - - // An array of virtual hosts will be dynamically loaded via the VHDS API. - // Both *virtual_hosts* and *vhds* fields will be used when present. *virtual_hosts* can be used - // for a base routing table or for infrequently changing virtual hosts. *vhds* is used for - // on-demand discovery of virtual hosts. The contents of these two fields will be merged to - // generate a routing table for a given RouteConfiguration, with *vhds* derived configuration - // taking precedence. 
- Vhds vhds = 9; - - // Optionally specifies a list of HTTP headers that the connection manager - // will consider to be internal only. If they are found on external requests they will be cleaned - // prior to filter invocation. See :ref:`config_http_conn_man_headers_x-envoy-internal` for more - // information. - repeated string internal_only_headers = 3 [ - (validate.rules).repeated = {items {string {well_known_regex: HTTP_HEADER_NAME strict: false}}} - ]; - - // Specifies a list of HTTP headers that should be added to each response that - // the connection manager encodes. Headers specified at this level are applied - // after headers from any enclosed :ref:`envoy_api_msg_route.VirtualHost` or - // :ref:`envoy_api_msg_route.RouteAction`. For more information, including details on - // header value syntax, see the documentation on :ref:`custom request headers - // `. - repeated core.HeaderValueOption response_headers_to_add = 4 - [(validate.rules).repeated = {max_items: 1000}]; - - // Specifies a list of HTTP headers that should be removed from each response - // that the connection manager encodes. - repeated string response_headers_to_remove = 5 [ - (validate.rules).repeated = {items {string {well_known_regex: HTTP_HEADER_NAME strict: false}}} - ]; - - // Specifies a list of HTTP headers that should be added to each request - // routed by the HTTP connection manager. Headers specified at this level are - // applied after headers from any enclosed :ref:`envoy_api_msg_route.VirtualHost` or - // :ref:`envoy_api_msg_route.RouteAction`. For more information, including details on - // header value syntax, see the documentation on :ref:`custom request headers - // `. - repeated core.HeaderValueOption request_headers_to_add = 6 - [(validate.rules).repeated = {max_items: 1000}]; - - // Specifies a list of HTTP headers that should be removed from each request - // routed by the HTTP connection manager. 
- repeated string request_headers_to_remove = 8 [ - (validate.rules).repeated = {items {string {well_known_regex: HTTP_HEADER_NAME strict: false}}} - ]; - - // By default, headers that should be added/removed are evaluated from most to least specific: - // - // * route level - // * virtual host level - // * connection manager level - // - // To allow setting overrides at the route or virtual host level, this order can be reversed - // by setting this option to true. Defaults to false. - // - // [#next-major-version: In the v3 API, this will default to true.] - bool most_specific_header_mutations_wins = 10; - - // An optional boolean that specifies whether the clusters that the route - // table refers to will be validated by the cluster manager. If set to true - // and a route refers to a non-existent cluster, the route table will not - // load. If set to false and a route refers to a non-existent cluster, the - // route table will load and the router filter will return a 404 if the route - // is selected at runtime. This setting defaults to true if the route table - // is statically defined via the :ref:`route_config - // ` - // option. This setting default to false if the route table is loaded dynamically via the - // :ref:`rds - // ` - // option. Users may wish to override the default behavior in certain cases (for example when - // using CDS with a static route table). - google.protobuf.BoolValue validate_clusters = 7; -} - -message Vhds { - // Configuration source specifier for VHDS. - core.ConfigSource config_source = 1 [(validate.rules).message = {required: true}]; -} diff --git a/generated_api_shadow/envoy/api/v2/route/BUILD b/generated_api_shadow/envoy/api/v2/route/BUILD deleted file mode 100644 index 3d4e6acfeac17..0000000000000 --- a/generated_api_shadow/envoy/api/v2/route/BUILD +++ /dev/null @@ -1,16 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/annotations:pkg", - "//envoy/api/v2/core:pkg", - "//envoy/type:pkg", - "//envoy/type/matcher:pkg", - "//envoy/type/tracing/v2:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/api/v2/route/route.proto b/generated_api_shadow/envoy/api/v2/route/route.proto deleted file mode 100644 index ec13e9e5c801b..0000000000000 --- a/generated_api_shadow/envoy/api/v2/route/route.proto +++ /dev/null @@ -1,9 +0,0 @@ -syntax = "proto3"; - -package envoy.api.v2.route; - -import public "envoy/api/v2/route/route_components.proto"; - -option java_package = "io.envoyproxy.envoy.api.v2.route"; -option java_outer_classname = "RouteProto"; -option java_multiple_files = true; diff --git a/generated_api_shadow/envoy/api/v2/route/route_components.proto b/generated_api_shadow/envoy/api/v2/route/route_components.proto deleted file mode 100644 index d73fbb8674c90..0000000000000 --- a/generated_api_shadow/envoy/api/v2/route/route_components.proto +++ /dev/null @@ -1,1628 +0,0 @@ -syntax = "proto3"; - -package envoy.api.v2.route; - -import "envoy/api/v2/core/base.proto"; -import "envoy/type/matcher/regex.proto"; -import "envoy/type/matcher/string.proto"; -import "envoy/type/percent.proto"; -import "envoy/type/range.proto"; -import "envoy/type/tracing/v2/custom_tag.proto"; - -import "google/protobuf/any.proto"; -import "google/protobuf/duration.proto"; -import "google/protobuf/struct.proto"; -import "google/protobuf/wrappers.proto"; - -import "envoy/annotations/deprecation.proto"; -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.api.v2.route"; -option java_outer_classname = "RouteComponentsProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package 
= "envoy.config.route.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: HTTP route components] -// * Routing :ref:`architecture overview ` -// * HTTP :ref:`router filter ` - -// The top level element in the routing configuration is a virtual host. Each virtual host has -// a logical name as well as a set of domains that get routed to it based on the incoming request's -// host header. This allows a single listener to service multiple top level domain path trees. Once -// a virtual host is selected based on the domain, the routes are processed in order to see which -// upstream cluster to route to or whether to perform a redirect. -// [#next-free-field: 21] -message VirtualHost { - enum TlsRequirementType { - // No TLS requirement for the virtual host. - NONE = 0; - - // External requests must use TLS. If a request is external and it is not - // using TLS, a 301 redirect will be sent telling the client to use HTTPS. - EXTERNAL_ONLY = 1; - - // All requests must use TLS. If a request is not using TLS, a 301 redirect - // will be sent telling the client to use HTTPS. - ALL = 2; - } - - reserved 9; - - // The logical name of the virtual host. This is used when emitting certain - // statistics but is not relevant for routing. - string name = 1 [(validate.rules).string = {min_bytes: 1}]; - - // A list of domains (host/authority header) that will be matched to this - // virtual host. Wildcard hosts are supported in the suffix or prefix form. - // - // Domain search order: - // 1. Exact domain names: ``www.foo.com``. - // 2. Suffix domain wildcards: ``*.foo.com`` or ``*-bar.foo.com``. - // 3. Prefix domain wildcards: ``foo.*`` or ``foo-*``. - // 4. Special wildcard ``*`` matching any domain. - // - // .. note:: - // - // The wildcard will not match the empty string. - // e.g. ``*-bar.foo.com`` will match ``baz-bar.foo.com`` but not ``-bar.foo.com``. - // The longest wildcards match first. 
- // Only a single virtual host in the entire route configuration can match on ``*``. A domain - // must be unique across all virtual hosts or the config will fail to load. - // - // Domains cannot contain control characters. This is validated by the well_known_regex HTTP_HEADER_VALUE. - repeated string domains = 2 [(validate.rules).repeated = { - min_items: 1 - items {string {well_known_regex: HTTP_HEADER_VALUE strict: false}} - }]; - - // The list of routes that will be matched, in order, for incoming requests. - // The first route that matches will be used. - repeated Route routes = 3; - - // Specifies the type of TLS enforcement the virtual host expects. If this option is not - // specified, there is no TLS requirement for the virtual host. - TlsRequirementType require_tls = 4 [(validate.rules).enum = {defined_only: true}]; - - // A list of virtual clusters defined for this virtual host. Virtual clusters - // are used for additional statistics gathering. - repeated VirtualCluster virtual_clusters = 5; - - // Specifies a set of rate limit configurations that will be applied to the - // virtual host. - repeated RateLimit rate_limits = 6; - - // Specifies a list of HTTP headers that should be added to each request - // handled by this virtual host. Headers specified at this level are applied - // after headers from enclosed :ref:`envoy_api_msg_route.Route` and before headers from the - // enclosing :ref:`envoy_api_msg_RouteConfiguration`. For more information, including - // details on header value syntax, see the documentation on :ref:`custom request headers - // `. - repeated core.HeaderValueOption request_headers_to_add = 7 - [(validate.rules).repeated = {max_items: 1000}]; - - // Specifies a list of HTTP headers that should be removed from each request - // handled by this virtual host. - repeated string request_headers_to_remove = 13; - - // Specifies a list of HTTP headers that should be added to each response - // handled by this virtual host. 
Headers specified at this level are applied - // after headers from enclosed :ref:`envoy_api_msg_route.Route` and before headers from the - // enclosing :ref:`envoy_api_msg_RouteConfiguration`. For more information, including - // details on header value syntax, see the documentation on :ref:`custom request headers - // `. - repeated core.HeaderValueOption response_headers_to_add = 10 - [(validate.rules).repeated = {max_items: 1000}]; - - // Specifies a list of HTTP headers that should be removed from each response - // handled by this virtual host. - repeated string response_headers_to_remove = 11; - - // Indicates that the virtual host has a CORS policy. - CorsPolicy cors = 8; - - // The per_filter_config field can be used to provide virtual host-specific - // configurations for filters. The key should match the filter name, such as - // *envoy.filters.http.buffer* for the HTTP buffer filter. Use of this field is filter - // specific; see the :ref:`HTTP filter documentation ` - // for if and how it is utilized. - map per_filter_config = 12 [deprecated = true]; - - // The per_filter_config field can be used to provide virtual host-specific - // configurations for filters. The key should match the filter name, such as - // *envoy.filters.http.buffer* for the HTTP buffer filter. Use of this field is filter - // specific; see the :ref:`HTTP filter documentation ` - // for if and how it is utilized. - map typed_per_filter_config = 15; - - // Decides whether the :ref:`x-envoy-attempt-count - // ` header should be included - // in the upstream request. Setting this option will cause it to override any existing header - // value, so in the case of two Envoys on the request path with this option enabled, the upstream - // will see the attempt count as perceived by the second Envoy. Defaults to false. - // This header is unaffected by the - // :ref:`suppress_envoy_headers - // ` flag. - // - // [#next-major-version: rename to include_attempt_count_in_request.] 
- bool include_request_attempt_count = 14; - - // Decides whether the :ref:`x-envoy-attempt-count - // ` header should be included - // in the downstream response. Setting this option will cause the router to override any existing header - // value, so in the case of two Envoys on the request path with this option enabled, the downstream - // will see the attempt count as perceived by the Envoy closest upstream from itself. Defaults to false. - // This header is unaffected by the - // :ref:`suppress_envoy_headers - // ` flag. - bool include_attempt_count_in_response = 19; - - // Indicates the retry policy for all routes in this virtual host. Note that setting a - // route level entry will take precedence over this config and it'll be treated - // independently (e.g.: values are not inherited). - RetryPolicy retry_policy = 16; - - // [#not-implemented-hide:] - // Specifies the configuration for retry policy extension. Note that setting a route level entry - // will take precedence over this config and it'll be treated independently (e.g.: values are not - // inherited). :ref:`Retry policy ` should not be - // set if this field is used. - google.protobuf.Any retry_policy_typed_config = 20; - - // Indicates the hedge policy for all routes in this virtual host. Note that setting a - // route level entry will take precedence over this config and it'll be treated - // independently (e.g.: values are not inherited). - HedgePolicy hedge_policy = 17; - - // The maximum bytes which will be buffered for retries and shadowing. - // If set and a route-specific limit is not set, the bytes actually buffered will be the minimum - // value of this and the listener per_connection_buffer_limit_bytes. - google.protobuf.UInt32Value per_request_buffer_limit_bytes = 18; -} - -// A filter-defined action type. 
-message FilterAction { - google.protobuf.Any action = 1; -} - -// A route is both a specification of how to match a request as well as an indication of what to do -// next (e.g., redirect, forward, rewrite, etc.). -// -// .. attention:: -// -// Envoy supports routing on HTTP method via :ref:`header matching -// `. -// [#next-free-field: 18] -message Route { - reserved 6; - - // Name for the route. - string name = 14; - - // Route matching parameters. - RouteMatch match = 1 [(validate.rules).message = {required: true}]; - - oneof action { - option (validate.required) = true; - - // Route request to some upstream cluster. - RouteAction route = 2; - - // Return a redirect. - RedirectAction redirect = 3; - - // Return an arbitrary HTTP response directly, without proxying. - DirectResponseAction direct_response = 7; - - // [#not-implemented-hide:] - // If true, a filter will define the action (e.g., it could dynamically generate the - // RouteAction). - FilterAction filter_action = 17; - } - - // The Metadata field can be used to provide additional information - // about the route. It can be used for configuration, stats, and logging. - // The metadata should go under the filter namespace that will need it. - // For instance, if the metadata is intended for the Router filter, - // the filter name should be specified as *envoy.filters.http.router*. - core.Metadata metadata = 4; - - // Decorator for the matched route. - Decorator decorator = 5; - - // The per_filter_config field can be used to provide route-specific - // configurations for filters. The key should match the filter name, such as - // *envoy.filters.http.buffer* for the HTTP buffer filter. Use of this field is filter - // specific; see the :ref:`HTTP filter documentation ` for - // if and how it is utilized. - map per_filter_config = 8 [deprecated = true]; - - // The typed_per_filter_config field can be used to provide route-specific - // configurations for filters. 
The key should match the filter name, such as - // *envoy.filters.http.buffer* for the HTTP buffer filter. Use of this field is filter - // specific; see the :ref:`HTTP filter documentation ` for - // if and how it is utilized. - map typed_per_filter_config = 13; - - // Specifies a set of headers that will be added to requests matching this - // route. Headers specified at this level are applied before headers from the - // enclosing :ref:`envoy_api_msg_route.VirtualHost` and - // :ref:`envoy_api_msg_RouteConfiguration`. For more information, including details on - // header value syntax, see the documentation on :ref:`custom request headers - // `. - repeated core.HeaderValueOption request_headers_to_add = 9 - [(validate.rules).repeated = {max_items: 1000}]; - - // Specifies a list of HTTP headers that should be removed from each request - // matching this route. - repeated string request_headers_to_remove = 12; - - // Specifies a set of headers that will be added to responses to requests - // matching this route. Headers specified at this level are applied before - // headers from the enclosing :ref:`envoy_api_msg_route.VirtualHost` and - // :ref:`envoy_api_msg_RouteConfiguration`. For more information, including - // details on header value syntax, see the documentation on - // :ref:`custom request headers `. - repeated core.HeaderValueOption response_headers_to_add = 10 - [(validate.rules).repeated = {max_items: 1000}]; - - // Specifies a list of HTTP headers that should be removed from each response - // to requests matching this route. - repeated string response_headers_to_remove = 11; - - // Presence of the object defines whether the connection manager's tracing configuration - // is overridden by this route specific instance. - Tracing tracing = 15; - - // The maximum bytes which will be buffered for retries and shadowing. - // If set, the bytes actually buffered will be the minimum value of this and the - // listener per_connection_buffer_limit_bytes. 
- google.protobuf.UInt32Value per_request_buffer_limit_bytes = 16; -} - -// Compared to the :ref:`cluster ` field that specifies a -// single upstream cluster as the target of a request, the :ref:`weighted_clusters -// ` option allows for specification of -// multiple upstream clusters along with weights that indicate the percentage of -// traffic to be forwarded to each cluster. The router selects an upstream cluster based on the -// weights. -message WeightedCluster { - // [#next-free-field: 11] - message ClusterWeight { - reserved 7; - - // Name of the upstream cluster. The cluster must exist in the - // :ref:`cluster manager configuration `. - string name = 1 [(validate.rules).string = {min_bytes: 1}]; - - // An integer between 0 and :ref:`total_weight - // `. When a request matches the route, - // the choice of an upstream cluster is determined by its weight. The sum of weights across all - // entries in the clusters array must add up to the total_weight, which defaults to 100. - google.protobuf.UInt32Value weight = 2; - - // Optional endpoint metadata match criteria used by the subset load balancer. Only endpoints in - // the upstream cluster with metadata matching what is set in this field will be considered for - // load balancing. Note that this will be merged with what's provided in - // :ref:`RouteAction.metadata_match `, with - // values here taking precedence. The filter name should be specified as *envoy.lb*. - core.Metadata metadata_match = 3; - - // Specifies a list of headers to be added to requests when this cluster is selected - // through the enclosing :ref:`envoy_api_msg_route.RouteAction`. - // Headers specified at this level are applied before headers from the enclosing - // :ref:`envoy_api_msg_route.Route`, :ref:`envoy_api_msg_route.VirtualHost`, and - // :ref:`envoy_api_msg_RouteConfiguration`. For more information, including details on - // header value syntax, see the documentation on :ref:`custom request headers - // `. 
- repeated core.HeaderValueOption request_headers_to_add = 4 - [(validate.rules).repeated = {max_items: 1000}]; - - // Specifies a list of HTTP headers that should be removed from each request when - // this cluster is selected through the enclosing :ref:`envoy_api_msg_route.RouteAction`. - repeated string request_headers_to_remove = 9; - - // Specifies a list of headers to be added to responses when this cluster is selected - // through the enclosing :ref:`envoy_api_msg_route.RouteAction`. - // Headers specified at this level are applied before headers from the enclosing - // :ref:`envoy_api_msg_route.Route`, :ref:`envoy_api_msg_route.VirtualHost`, and - // :ref:`envoy_api_msg_RouteConfiguration`. For more information, including details on - // header value syntax, see the documentation on :ref:`custom request headers - // `. - repeated core.HeaderValueOption response_headers_to_add = 5 - [(validate.rules).repeated = {max_items: 1000}]; - - // Specifies a list of headers to be removed from responses when this cluster is selected - // through the enclosing :ref:`envoy_api_msg_route.RouteAction`. - repeated string response_headers_to_remove = 6; - - // The per_filter_config field can be used to provide weighted cluster-specific - // configurations for filters. The key should match the filter name, such as - // *envoy.filters.http.buffer* for the HTTP buffer filter. Use of this field is filter - // specific; see the :ref:`HTTP filter documentation ` - // for if and how it is utilized. - map per_filter_config = 8 [deprecated = true]; - - // The per_filter_config field can be used to provide weighted cluster-specific - // configurations for filters. The key should match the filter name, such as - // *envoy.filters.http.buffer* for the HTTP buffer filter. Use of this field is filter - // specific; see the :ref:`HTTP filter documentation ` - // for if and how it is utilized. 
- map typed_per_filter_config = 10; - } - - // Specifies one or more upstream clusters associated with the route. - repeated ClusterWeight clusters = 1 [(validate.rules).repeated = {min_items: 1}]; - - // Specifies the total weight across all clusters. The sum of all cluster weights must equal this - // value, which must be greater than 0. Defaults to 100. - google.protobuf.UInt32Value total_weight = 3 [(validate.rules).uint32 = {gte: 1}]; - - // Specifies the runtime key prefix that should be used to construct the - // runtime keys associated with each cluster. When the *runtime_key_prefix* is - // specified, the router will look for weights associated with each upstream - // cluster under the key *runtime_key_prefix* + "." + *cluster[i].name* where - // *cluster[i]* denotes an entry in the clusters array field. If the runtime - // key for the cluster does not exist, the value specified in the - // configuration file will be used as the default weight. See the :ref:`runtime documentation - // ` for how key names map to the underlying implementation. - string runtime_key_prefix = 2; -} - -// [#next-free-field: 12] -message RouteMatch { - message GrpcRouteMatchOptions { - } - - message TlsContextMatchOptions { - // If specified, the route will match against whether or not a certificate is presented. - // If not specified, certificate presentation status (true or false) will not be considered when route matching. - google.protobuf.BoolValue presented = 1; - - // If specified, the route will match against whether or not a certificate is validated. - // If not specified, certificate validation status (true or false) will not be considered when route matching. - google.protobuf.BoolValue validated = 2; - } - - reserved 5; - - oneof path_specifier { - option (validate.required) = true; - - // If specified, the route is a prefix rule meaning that the prefix must - // match the beginning of the *:path* header. 
- string prefix = 1; - - // If specified, the route is an exact path rule meaning that the path must - // exactly match the *:path* header once the query string is removed. - string path = 2; - - // If specified, the route is a regular expression rule meaning that the - // regex must match the *:path* header once the query string is removed. The entire path - // (without the query string) must match the regex. The rule will not match if only a - // subsequence of the *:path* header matches the regex. The regex grammar is defined `here - // `_. - // - // Examples: - // - // * The regex ``/b[io]t`` matches the path */bit* - // * The regex ``/b[io]t`` matches the path */bot* - // * The regex ``/b[io]t`` does not match the path */bite* - // * The regex ``/b[io]t`` does not match the path */bit/bot* - // - // .. attention:: - // This field has been deprecated in favor of `safe_regex` as it is not safe for use with - // untrusted input in all cases. - string regex = 3 [ - deprecated = true, - (validate.rules).string = {max_bytes: 1024}, - (envoy.annotations.disallowed_by_default) = true - ]; - - // If specified, the route is a regular expression rule meaning that the - // regex must match the *:path* header once the query string is removed. The entire path - // (without the query string) must match the regex. The rule will not match if only a - // subsequence of the *:path* header matches the regex. - // - // [#next-major-version: In the v3 API we should redo how path specification works such - // that we utilize StringMatcher, and additionally have consistent options around whether we - // strip query strings, do a case sensitive match, etc. In the interim it will be too disruptive - // to deprecate the existing options. We should even consider whether we want to do away with - // path_specifier entirely and just rely on a set of header matchers which can already match - // on :path, etc. 
The issue with that is it is unclear how to generically deal with query string - // stripping. This needs more thought.] - type.matcher.RegexMatcher safe_regex = 10 [(validate.rules).message = {required: true}]; - } - - // Indicates that prefix/path matching should be case sensitive. The default - // is true. - google.protobuf.BoolValue case_sensitive = 4; - - // Indicates that the route should additionally match on a runtime key. Every time the route - // is considered for a match, it must also fall under the percentage of matches indicated by - // this field. For some fraction N/D, a random number in the range [0,D) is selected. If the - // number is <= the value of the numerator N, or if the key is not present, the default - // value, the router continues to evaluate the remaining match criteria. A runtime_fraction - // route configuration can be used to roll out route changes in a gradual manner without full - // code/config deploys. Refer to the :ref:`traffic shifting - // ` docs for additional documentation. - // - // .. note:: - // - // Parsing this field is implemented such that the runtime key's data may be represented - // as a FractionalPercent proto represented as JSON/YAML and may also be represented as an - // integer with the assumption that the value is an integral percentage out of 100. For - // instance, a runtime key lookup returning the value "42" would parse as a FractionalPercent - // whose numerator is 42 and denominator is HUNDRED. This preserves legacy semantics. - core.RuntimeFractionalPercent runtime_fraction = 9; - - // Specifies a set of headers that the route should match on. The router will - // check the request’s headers against all the specified headers in the route - // config. A match will happen if all the headers in the route are present in - // the request with the same values (or based on presence if the value field - // is not in the config). 
- repeated HeaderMatcher headers = 6; - - // Specifies a set of URL query parameters on which the route should - // match. The router will check the query string from the *path* header - // against all the specified query parameters. If the number of specified - // query parameters is nonzero, they all must match the *path* header's - // query string for a match to occur. - repeated QueryParameterMatcher query_parameters = 7; - - // If specified, only gRPC requests will be matched. The router will check - // that the content-type header has a application/grpc or one of the various - // application/grpc+ values. - GrpcRouteMatchOptions grpc = 8; - - // If specified, the client tls context will be matched against the defined - // match options. - // - // [#next-major-version: unify with RBAC] - TlsContextMatchOptions tls_context = 11; -} - -// [#next-free-field: 12] -message CorsPolicy { - // Specifies the origins that will be allowed to do CORS requests. - // - // An origin is allowed if either allow_origin or allow_origin_regex match. - // - // .. attention:: - // This field has been deprecated in favor of `allow_origin_string_match`. - repeated string allow_origin = 1 - [deprecated = true, (envoy.annotations.disallowed_by_default) = true]; - - // Specifies regex patterns that match allowed origins. - // - // An origin is allowed if either allow_origin or allow_origin_regex match. - // - // .. attention:: - // This field has been deprecated in favor of `allow_origin_string_match` as it is not safe for - // use with untrusted input in all cases. - repeated string allow_origin_regex = 8 - [deprecated = true, (validate.rules).repeated = {items {string {max_bytes: 1024}}}]; - - // Specifies string patterns that match allowed origins. An origin is allowed if any of the - // string matchers match. - repeated type.matcher.StringMatcher allow_origin_string_match = 11; - - // Specifies the content for the *access-control-allow-methods* header. 
- string allow_methods = 2; - - // Specifies the content for the *access-control-allow-headers* header. - string allow_headers = 3; - - // Specifies the content for the *access-control-expose-headers* header. - string expose_headers = 4; - - // Specifies the content for the *access-control-max-age* header. - string max_age = 5; - - // Specifies whether the resource allows credentials. - google.protobuf.BoolValue allow_credentials = 6; - - oneof enabled_specifier { - // Specifies if the CORS filter is enabled. Defaults to true. Only effective on route. - // - // .. attention:: - // - // **This field is deprecated**. Set the - // :ref:`filter_enabled` field instead. - google.protobuf.BoolValue enabled = 7 - [deprecated = true, (envoy.annotations.disallowed_by_default) = true]; - - // Specifies the % of requests for which the CORS filter is enabled. - // - // If neither ``enabled``, ``filter_enabled``, nor ``shadow_enabled`` are specified, the CORS - // filter will be enabled for 100% of the requests. - // - // If :ref:`runtime_key ` is - // specified, Envoy will lookup the runtime key to get the percentage of requests to filter. - core.RuntimeFractionalPercent filter_enabled = 9; - } - - // Specifies the % of requests for which the CORS policies will be evaluated and tracked, but not - // enforced. - // - // This field is intended to be used when ``filter_enabled`` and ``enabled`` are off. One of those - // fields have to explicitly disable the filter in order for this setting to take effect. - // - // If :ref:`runtime_key ` is specified, - // Envoy will lookup the runtime key to get the percentage of requests for which it will evaluate - // and track the request's *Origin* to determine if it's valid but will not enforce any policies. - core.RuntimeFractionalPercent shadow_enabled = 10; -} - -// [#next-free-field: 34] -message RouteAction { - enum ClusterNotFoundResponseCode { - // HTTP status code - 503 Service Unavailable. 
- SERVICE_UNAVAILABLE = 0; - - // HTTP status code - 404 Not Found. - NOT_FOUND = 1; - } - - // Configures :ref:`internal redirect ` behavior. - enum InternalRedirectAction { - PASS_THROUGH_INTERNAL_REDIRECT = 0; - HANDLE_INTERNAL_REDIRECT = 1; - } - - // The router is capable of shadowing traffic from one cluster to another. The current - // implementation is "fire and forget," meaning Envoy will not wait for the shadow cluster to - // respond before returning the response from the primary cluster. All normal statistics are - // collected for the shadow cluster making this feature useful for testing. - // - // During shadowing, the host/authority header is altered such that *-shadow* is appended. This is - // useful for logging. For example, *cluster1* becomes *cluster1-shadow*. - // - // .. note:: - // - // Shadowing will not be triggered if the primary cluster does not exist. - message RequestMirrorPolicy { - // Specifies the cluster that requests will be mirrored to. The cluster must - // exist in the cluster manager configuration. - string cluster = 1 [(validate.rules).string = {min_bytes: 1}]; - - // If not specified, all requests to the target cluster will be mirrored. If - // specified, Envoy will lookup the runtime key to get the % of requests to - // mirror. Valid values are from 0 to 10000, allowing for increments of - // 0.01% of requests to be mirrored. If the runtime key is specified in the - // configuration but not present in runtime, 0 is the default and thus 0% of - // requests will be mirrored. - // - // .. attention:: - // - // **This field is deprecated**. Set the - // :ref:`runtime_fraction - // ` - // field instead. Mirroring occurs if both this and - // ` - // are not set. - string runtime_key = 2 [deprecated = true, (envoy.annotations.disallowed_by_default) = true]; - - // If not specified, all requests to the target cluster will be mirrored. 
- // - // If specified, this field takes precedence over the `runtime_key` field and requests must also - // fall under the percentage of matches indicated by this field. - // - // For some fraction N/D, a random number in the range [0,D) is selected. If the - // number is <= the value of the numerator N, or if the key is not present, the default - // value, the request will be mirrored. - core.RuntimeFractionalPercent runtime_fraction = 3; - - // Determines if the trace span should be sampled. Defaults to true. - google.protobuf.BoolValue trace_sampled = 4; - } - - // Specifies the route's hashing policy if the upstream cluster uses a hashing :ref:`load balancer - // `. - // [#next-free-field: 7] - message HashPolicy { - message Header { - // The name of the request header that will be used to obtain the hash - // key. If the request header is not present, no hash will be produced. - string header_name = 1 [ - (validate.rules).string = {min_bytes: 1 well_known_regex: HTTP_HEADER_NAME strict: false} - ]; - } - - // Envoy supports two types of cookie affinity: - // - // 1. Passive. Envoy takes a cookie that's present in the cookies header and - // hashes on its value. - // - // 2. Generated. Envoy generates and sets a cookie with an expiration (TTL) - // on the first request from the client in its response to the client, - // based on the endpoint the request gets sent to. The client then - // presents this on the next and all subsequent requests. The hash of - // this is sufficient to ensure these requests get sent to the same - // endpoint. The cookie is generated by hashing the source and - // destination ports and addresses so that multiple independent HTTP2 - // streams on the same connection will independently receive the same - // cookie, even if they arrive at the Envoy simultaneously. - message Cookie { - // The name of the cookie that will be used to obtain the hash key. 
If the - // cookie is not present and ttl below is not set, no hash will be - // produced. - string name = 1 [(validate.rules).string = {min_bytes: 1}]; - - // If specified, a cookie with the TTL will be generated if the cookie is - // not present. If the TTL is present and zero, the generated cookie will - // be a session cookie. - google.protobuf.Duration ttl = 2; - - // The name of the path for the cookie. If no path is specified here, no path - // will be set for the cookie. - string path = 3; - } - - message ConnectionProperties { - // Hash on source IP address. - bool source_ip = 1; - } - - message QueryParameter { - // The name of the URL query parameter that will be used to obtain the hash - // key. If the parameter is not present, no hash will be produced. Query - // parameter names are case-sensitive. - string name = 1 [(validate.rules).string = {min_bytes: 1}]; - } - - message FilterState { - // The name of the Object in the per-request filterState, which is an - // Envoy::Http::Hashable object. If there is no data associated with the key, - // or the stored object is not Envoy::Http::Hashable, no hash will be produced. - string key = 1 [(validate.rules).string = {min_bytes: 1}]; - } - - oneof policy_specifier { - option (validate.required) = true; - - // Header hash policy. - Header header = 1; - - // Cookie hash policy. - Cookie cookie = 2; - - // Connection properties hash policy. - ConnectionProperties connection_properties = 3; - - // Query parameter hash policy. - QueryParameter query_parameter = 5; - - // Filter state hash policy. - FilterState filter_state = 6; - } - - // The flag that short-circuits the hash computing. This field provides a - // 'fallback' style of configuration: "if a terminal policy doesn't work, - // fallback to rest of the policy list", it saves time when the terminal - // policy works. - // - // If true, and there is already a hash computed, ignore rest of the - // list of hash polices. 
- // For example, if the following hash methods are configured: - // - // ========= ======== - // specifier terminal - // ========= ======== - // Header A true - // Header B false - // Header C false - // ========= ======== - // - // The generateHash process ends if policy "header A" generates a hash, as - // it's a terminal policy. - bool terminal = 4; - } - - // Allows enabling and disabling upgrades on a per-route basis. - // This overrides any enabled/disabled upgrade filter chain specified in the - // HttpConnectionManager - // :ref:`upgrade_configs - // ` - // but does not affect any custom filter chain specified there. - message UpgradeConfig { - // The case-insensitive name of this upgrade, e.g. "websocket". - // For each upgrade type present in upgrade_configs, requests with - // Upgrade: [upgrade_type] will be proxied upstream. - string upgrade_type = 1 - [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}]; - - // Determines if upgrades are available on this route. Defaults to true. - google.protobuf.BoolValue enabled = 2; - } - - reserved 12, 18, 19, 16, 22, 21; - - oneof cluster_specifier { - option (validate.required) = true; - - // Indicates the upstream cluster to which the request should be routed - // to. - string cluster = 1 [(validate.rules).string = {min_bytes: 1}]; - - // Envoy will determine the cluster to route to by reading the value of the - // HTTP header named by cluster_header from the request headers. If the - // header is not found or the referenced cluster does not exist, Envoy will - // return a 404 response. - // - // .. attention:: - // - // Internally, Envoy always uses the HTTP/2 *:authority* header to represent the HTTP/1 - // *Host* header. Thus, if attempting to match on *Host*, match on *:authority* instead. - // - // .. note:: - // - // If the header appears multiple times only the first value is used. 
- string cluster_header = 2 - [(validate.rules).string = {min_bytes: 1 well_known_regex: HTTP_HEADER_NAME strict: false}]; - - // Multiple upstream clusters can be specified for a given route. The - // request is routed to one of the upstream clusters based on weights - // assigned to each cluster. See - // :ref:`traffic splitting ` - // for additional documentation. - WeightedCluster weighted_clusters = 3; - } - - // The HTTP status code to use when configured cluster is not found. - // The default response code is 503 Service Unavailable. - ClusterNotFoundResponseCode cluster_not_found_response_code = 20 - [(validate.rules).enum = {defined_only: true}]; - - // Optional endpoint metadata match criteria used by the subset load balancer. Only endpoints - // in the upstream cluster with metadata matching what's set in this field will be considered - // for load balancing. If using :ref:`weighted_clusters - // `, metadata will be merged, with values - // provided there taking precedence. The filter name should be specified as *envoy.lb*. - core.Metadata metadata_match = 4; - - // Indicates that during forwarding, the matched prefix (or path) should be - // swapped with this value. This option allows application URLs to be rooted - // at a different path from those exposed at the reverse proxy layer. The router filter will - // place the original path before rewrite into the :ref:`x-envoy-original-path - // ` header. - // - // Only one of *prefix_rewrite* or - // :ref:`regex_rewrite ` - // may be specified. - // - // .. attention:: - // - // Pay careful attention to the use of trailing slashes in the - // :ref:`route's match ` prefix value. - // Stripping a prefix from a path requires multiple Routes to handle all cases. For example, - // rewriting */prefix* to */* and */prefix/etc* to */etc* cannot be done in a single - // :ref:`Route `, as shown by the below config entries: - // - // .. 
code-block:: yaml - // - // - match: - // prefix: "/prefix/" - // route: - // prefix_rewrite: "/" - // - match: - // prefix: "/prefix" - // route: - // prefix_rewrite: "/" - // - // Having above entries in the config, requests to */prefix* will be stripped to */*, while - // requests to */prefix/etc* will be stripped to */etc*. - string prefix_rewrite = 5 - [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}]; - - // Indicates that during forwarding, portions of the path that match the - // pattern should be rewritten, even allowing the substitution of capture - // groups from the pattern into the new path as specified by the rewrite - // substitution string. This is useful to allow application paths to be - // rewritten in a way that is aware of segments with variable content like - // identifiers. The router filter will place the original path as it was - // before the rewrite into the :ref:`x-envoy-original-path - // ` header. - // - // Only one of :ref:`prefix_rewrite ` - // or *regex_rewrite* may be specified. - // - // Examples using Google's `RE2 `_ engine: - // - // * The path pattern ``^/service/([^/]+)(/.*)$`` paired with a substitution - // string of ``\2/instance/\1`` would transform ``/service/foo/v1/api`` - // into ``/v1/api/instance/foo``. - // - // * The pattern ``one`` paired with a substitution string of ``two`` would - // transform ``/xxx/one/yyy/one/zzz`` into ``/xxx/two/yyy/two/zzz``. - // - // * The pattern ``^(.*?)one(.*)$`` paired with a substitution string of - // ``\1two\2`` would replace only the first occurrence of ``one``, - // transforming path ``/xxx/one/yyy/one/zzz`` into ``/xxx/two/yyy/one/zzz``. - // - // * The pattern ``(?i)/xxx/`` paired with a substitution string of ``/yyy/`` - // would do a case-insensitive match and transform path ``/aaa/XxX/bbb`` to - // ``/aaa/yyy/bbb``. 
- type.matcher.RegexMatchAndSubstitute regex_rewrite = 32; - - oneof host_rewrite_specifier { - // Indicates that during forwarding, the host header will be swapped with - // this value. - string host_rewrite = 6 [ - (validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}, - (udpa.annotations.field_migrate).rename = "host_rewrite_literal" - ]; - - // Indicates that during forwarding, the host header will be swapped with - // the hostname of the upstream host chosen by the cluster manager. This - // option is applicable only when the destination cluster for a route is of - // type *strict_dns* or *logical_dns*. Setting this to true with other cluster - // types has no effect. - google.protobuf.BoolValue auto_host_rewrite = 7; - - // Indicates that during forwarding, the host header will be swapped with the content of given - // downstream or :ref:`custom ` header. - // If header value is empty, host header is left intact. - // - // .. attention:: - // - // Pay attention to the potential security implications of using this option. Provided header - // must come from trusted source. - // - // .. note:: - // - // If the header appears multiple times only the first value is used. - string auto_host_rewrite_header = 29 [ - (validate.rules).string = {well_known_regex: HTTP_HEADER_NAME strict: false}, - (udpa.annotations.field_migrate).rename = "host_rewrite_header" - ]; - } - - // Specifies the upstream timeout for the route. If not specified, the default is 15s. This - // spans between the point at which the entire downstream request (i.e. end-of-stream) has been - // processed and when the upstream response has been completely processed. A value of 0 will - // disable the route's timeout. - // - // .. note:: - // - // This timeout includes all retries. See also - // :ref:`config_http_filters_router_x-envoy-upstream-rq-timeout-ms`, - // :ref:`config_http_filters_router_x-envoy-upstream-rq-per-try-timeout-ms`, and the - // :ref:`retry overview `. 
- google.protobuf.Duration timeout = 8; - - // Specifies the idle timeout for the route. If not specified, there is no per-route idle timeout, - // although the connection manager wide :ref:`stream_idle_timeout - // ` - // will still apply. A value of 0 will completely disable the route's idle timeout, even if a - // connection manager stream idle timeout is configured. - // - // The idle timeout is distinct to :ref:`timeout - // `, which provides an upper bound - // on the upstream response time; :ref:`idle_timeout - // ` instead bounds the amount - // of time the request's stream may be idle. - // - // After header decoding, the idle timeout will apply on downstream and - // upstream request events. Each time an encode/decode event for headers or - // data is processed for the stream, the timer will be reset. If the timeout - // fires, the stream is terminated with a 408 Request Timeout error code if no - // upstream response header has been received, otherwise a stream reset - // occurs. - google.protobuf.Duration idle_timeout = 24; - - // Indicates that the route has a retry policy. Note that if this is set, - // it'll take precedence over the virtual host level retry policy entirely - // (e.g.: policies are not merged, most internal one becomes the enforced policy). - RetryPolicy retry_policy = 9; - - // [#not-implemented-hide:] - // Specifies the configuration for retry policy extension. Note that if this is set, it'll take - // precedence over the virtual host level retry policy entirely (e.g.: policies are not merged, - // most internal one becomes the enforced policy). :ref:`Retry policy ` - // should not be set if this field is used. - google.protobuf.Any retry_policy_typed_config = 33; - - // Indicates that the route has a request mirroring policy. - // - // .. attention:: - // This field has been deprecated in favor of `request_mirror_policies` which supports one or - // more mirroring policies. 
- RequestMirrorPolicy request_mirror_policy = 10 [deprecated = true]; - - // Indicates that the route has request mirroring policies. - repeated RequestMirrorPolicy request_mirror_policies = 30; - - // Optionally specifies the :ref:`routing priority `. - core.RoutingPriority priority = 11 [(validate.rules).enum = {defined_only: true}]; - - // Specifies a set of rate limit configurations that could be applied to the - // route. - repeated RateLimit rate_limits = 13; - - // Specifies if the rate limit filter should include the virtual host rate - // limits. By default, if the route configured rate limits, the virtual host - // :ref:`rate_limits ` are not applied to the - // request. - google.protobuf.BoolValue include_vh_rate_limits = 14; - - // Specifies a list of hash policies to use for ring hash load balancing. Each - // hash policy is evaluated individually and the combined result is used to - // route the request. The method of combination is deterministic such that - // identical lists of hash policies will produce the same hash. Since a hash - // policy examines specific parts of a request, it can fail to produce a hash - // (i.e. if the hashed header is not present). If (and only if) all configured - // hash policies fail to generate a hash, no hash will be produced for - // the route. In this case, the behavior is the same as if no hash policies - // were specified (i.e. the ring hash load balancer will choose a random - // backend). If a hash policy has the "terminal" attribute set to true, and - // there is already a hash generated, the hash is returned immediately, - // ignoring the rest of the hash policy list. - repeated HashPolicy hash_policy = 15; - - // Indicates that the route has a CORS policy. - CorsPolicy cors = 17; - - // If present, and the request is a gRPC request, use the - // `grpc-timeout header `_, - // or its default value (infinity) instead of - // :ref:`timeout `, but limit the applied timeout - // to the maximum value specified here. 
If configured as 0, the maximum allowed timeout for - // gRPC requests is infinity. If not configured at all, the `grpc-timeout` header is not used - // and gRPC requests time out like any other requests using - // :ref:`timeout ` or its default. - // This can be used to prevent unexpected upstream request timeouts due to potentially long - // time gaps between gRPC request and response in gRPC streaming mode. - // - // .. note:: - // - // If a timeout is specified using :ref:`config_http_filters_router_x-envoy-upstream-rq-timeout-ms`, it takes - // precedence over `grpc-timeout header `_, when - // both are present. See also - // :ref:`config_http_filters_router_x-envoy-upstream-rq-timeout-ms`, - // :ref:`config_http_filters_router_x-envoy-upstream-rq-per-try-timeout-ms`, and the - // :ref:`retry overview `. - google.protobuf.Duration max_grpc_timeout = 23; - - // If present, Envoy will adjust the timeout provided by the `grpc-timeout` header by subtracting - // the provided duration from the header. This is useful in allowing Envoy to set its global - // timeout to be less than that of the deadline imposed by the calling client, which makes it more - // likely that Envoy will handle the timeout instead of having the call canceled by the client. - // The offset will only be applied if the provided grpc_timeout is greater than the offset. This - // ensures that the offset will only ever decrease the timeout and never set it to 0 (meaning - // infinity). 
- google.protobuf.Duration grpc_timeout_offset = 28; - - repeated UpgradeConfig upgrade_configs = 25; - - InternalRedirectAction internal_redirect_action = 26; - - // An internal redirect is handled, iff the number of previous internal redirects that a - // downstream request has encountered is lower than this value, and - // :ref:`internal_redirect_action ` - // is set to :ref:`HANDLE_INTERNAL_REDIRECT - // ` - // In the case where a downstream request is bounced among multiple routes by internal redirect, - // the first route that hits this threshold, or has - // :ref:`internal_redirect_action ` - // set to - // :ref:`PASS_THROUGH_INTERNAL_REDIRECT - // ` - // will pass the redirect back to downstream. - // - // If not specified, at most one redirect will be followed. - google.protobuf.UInt32Value max_internal_redirects = 31; - - // Indicates that the route has a hedge policy. Note that if this is set, - // it'll take precedence over the virtual host level hedge policy entirely - // (e.g.: policies are not merged, most internal one becomes the enforced policy). - HedgePolicy hedge_policy = 27; -} - -// HTTP retry :ref:`architecture overview `. -// [#next-free-field: 11] -message RetryPolicy { - message RetryPriority { - string name = 1 [(validate.rules).string = {min_bytes: 1}]; - - oneof config_type { - google.protobuf.Struct config = 2 [deprecated = true]; - - google.protobuf.Any typed_config = 3; - } - } - - message RetryHostPredicate { - string name = 1 [(validate.rules).string = {min_bytes: 1}]; - - oneof config_type { - google.protobuf.Struct config = 2 [deprecated = true]; - - google.protobuf.Any typed_config = 3; - } - } - - message RetryBackOff { - // Specifies the base interval between retries. This parameter is required and must be greater - // than zero. Values less than 1 ms are rounded up to 1 ms. - // See :ref:`config_http_filters_router_x-envoy-max-retries` for a discussion of Envoy's - // back-off algorithm. 
- google.protobuf.Duration base_interval = 1 [(validate.rules).duration = { - required: true - gt {} - }]; - - // Specifies the maximum interval between retries. This parameter is optional, but must be - // greater than or equal to the `base_interval` if set. The default is 10 times the - // `base_interval`. See :ref:`config_http_filters_router_x-envoy-max-retries` for a discussion - // of Envoy's back-off algorithm. - google.protobuf.Duration max_interval = 2 [(validate.rules).duration = {gt {}}]; - } - - // Specifies the conditions under which retry takes place. These are the same - // conditions documented for :ref:`config_http_filters_router_x-envoy-retry-on` and - // :ref:`config_http_filters_router_x-envoy-retry-grpc-on`. - string retry_on = 1; - - // Specifies the allowed number of retries. This parameter is optional and - // defaults to 1. These are the same conditions documented for - // :ref:`config_http_filters_router_x-envoy-max-retries`. - google.protobuf.UInt32Value num_retries = 2; - - // Specifies a non-zero upstream timeout per retry attempt. This parameter is optional. The - // same conditions documented for - // :ref:`config_http_filters_router_x-envoy-upstream-rq-per-try-timeout-ms` apply. - // - // .. note:: - // - // If left unspecified, Envoy will use the global - // :ref:`route timeout ` for the request. - // Consequently, when using a :ref:`5xx ` based - // retry policy, a request that times out will not be retried as the total timeout budget - // would have been exhausted. - google.protobuf.Duration per_try_timeout = 3; - - // Specifies an implementation of a RetryPriority which is used to determine the - // distribution of load across priorities used for retries. Refer to - // :ref:`retry plugin configuration ` for more details. - RetryPriority retry_priority = 4; - - // Specifies a collection of RetryHostPredicates that will be consulted when selecting a host - // for retries. 
If any of the predicates reject the host, host selection will be reattempted. - // Refer to :ref:`retry plugin configuration ` for more - // details. - repeated RetryHostPredicate retry_host_predicate = 5; - - // The maximum number of times host selection will be reattempted before giving up, at which - // point the host that was last selected will be routed to. If unspecified, this will default to - // retrying once. - int64 host_selection_retry_max_attempts = 6; - - // HTTP status codes that should trigger a retry in addition to those specified by retry_on. - repeated uint32 retriable_status_codes = 7; - - // Specifies parameters that control retry back off. This parameter is optional, in which case the - // default base interval is 25 milliseconds or, if set, the current value of the - // `upstream.base_retry_backoff_ms` runtime parameter. The default maximum interval is 10 times - // the base interval. The documentation for :ref:`config_http_filters_router_x-envoy-max-retries` - // describes Envoy's back-off algorithm. - RetryBackOff retry_back_off = 8; - - // HTTP response headers that trigger a retry if present in the response. A retry will be - // triggered if any of the header matches match the upstream response headers. - // The field is only consulted if 'retriable-headers' retry policy is active. - repeated HeaderMatcher retriable_headers = 9; - - // HTTP headers which must be present in the request for retries to be attempted. - repeated HeaderMatcher retriable_request_headers = 10; -} - -// HTTP request hedging :ref:`architecture overview `. -message HedgePolicy { - // Specifies the number of initial requests that should be sent upstream. - // Must be at least 1. - // Defaults to 1. - // [#not-implemented-hide:] - google.protobuf.UInt32Value initial_requests = 1 [(validate.rules).uint32 = {gte: 1}]; - - // Specifies a probability that an additional upstream request should be sent - // on top of what is specified by initial_requests. - // Defaults to 0. 
- // [#not-implemented-hide:] - type.FractionalPercent additional_request_chance = 2; - - // Indicates that a hedged request should be sent when the per-try timeout is hit. - // This means that a retry will be issued without resetting the original request, leaving multiple upstream requests in flight. - // The first request to complete successfully will be the one returned to the caller. - // - // * At any time, a successful response (i.e. not triggering any of the retry-on conditions) would be returned to the client. - // * Before per-try timeout, an error response (per retry-on conditions) would be retried immediately or returned ot the client - // if there are no more retries left. - // * After per-try timeout, an error response would be discarded, as a retry in the form of a hedged request is already in progress. - // - // Note: For this to have effect, you must have a :ref:`RetryPolicy ` that retries at least - // one error code and specifies a maximum number of retries. - // - // Defaults to false. - bool hedge_on_per_try_timeout = 3; -} - -// [#next-free-field: 9] -message RedirectAction { - enum RedirectResponseCode { - // Moved Permanently HTTP Status Code - 301. - MOVED_PERMANENTLY = 0; - - // Found HTTP Status Code - 302. - FOUND = 1; - - // See Other HTTP Status Code - 303. - SEE_OTHER = 2; - - // Temporary Redirect HTTP Status Code - 307. - TEMPORARY_REDIRECT = 3; - - // Permanent Redirect HTTP Status Code - 308. - PERMANENT_REDIRECT = 4; - } - - // When the scheme redirection take place, the following rules apply: - // 1. If the source URI scheme is `http` and the port is explicitly - // set to `:80`, the port will be removed after the redirection - // 2. If the source URI scheme is `https` and the port is explicitly - // set to `:443`, the port will be removed after the redirection - oneof scheme_rewrite_specifier { - // The scheme portion of the URL will be swapped with "https". 
- bool https_redirect = 4; - - // The scheme portion of the URL will be swapped with this value. - string scheme_redirect = 7; - } - - // The host portion of the URL will be swapped with this value. - string host_redirect = 1 - [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}]; - - // The port value of the URL will be swapped with this value. - uint32 port_redirect = 8; - - oneof path_rewrite_specifier { - // The path portion of the URL will be swapped with this value. - // Please note that query string in path_redirect will override the - // request's query string and will not be stripped. - // - // For example, let's say we have the following routes: - // - // - match: { path: "/old-path-1" } - // redirect: { path_redirect: "/new-path-1" } - // - match: { path: "/old-path-2" } - // redirect: { path_redirect: "/new-path-2", strip-query: "true" } - // - match: { path: "/old-path-3" } - // redirect: { path_redirect: "/new-path-3?foo=1", strip_query: "true" } - // - // 1. if request uri is "/old-path-1?bar=1", users will be redirected to "/new-path-1?bar=1" - // 2. if request uri is "/old-path-2?bar=1", users will be redirected to "/new-path-2" - // 3. if request uri is "/old-path-3?bar=1", users will be redirected to "/new-path-3?foo=1" - string path_redirect = 2 - [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}]; - - // Indicates that during redirection, the matched prefix (or path) - // should be swapped with this value. This option allows redirect URLs be dynamically created - // based on the request. - // - // .. attention:: - // - // Pay attention to the use of trailing slashes as mentioned in - // :ref:`RouteAction's prefix_rewrite `. - string prefix_rewrite = 5 - [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}]; - } - - // The HTTP status code to use in the redirect response. The default response - // code is MOVED_PERMANENTLY (301). 
- RedirectResponseCode response_code = 3 [(validate.rules).enum = {defined_only: true}]; - - // Indicates that during redirection, the query portion of the URL will - // be removed. Default value is false. - bool strip_query = 6; -} - -message DirectResponseAction { - // Specifies the HTTP response status to be returned. - uint32 status = 1 [(validate.rules).uint32 = {lt: 600 gte: 100}]; - - // Specifies the content of the response body. If this setting is omitted, - // no body is included in the generated response. - // - // .. note:: - // - // Headers can be specified using *response_headers_to_add* in the enclosing - // :ref:`envoy_api_msg_route.Route`, :ref:`envoy_api_msg_RouteConfiguration` or - // :ref:`envoy_api_msg_route.VirtualHost`. - core.DataSource body = 2; -} - -message Decorator { - // The operation name associated with the request matched to this route. If tracing is - // enabled, this information will be used as the span name reported for this request. - // - // .. note:: - // - // For ingress (inbound) requests, or egress (outbound) responses, this value may be overridden - // by the :ref:`x-envoy-decorator-operation - // ` header. - string operation = 1 [(validate.rules).string = {min_bytes: 1}]; - - // Whether the decorated details should be propagated to the other party. The default is true. - google.protobuf.BoolValue propagate = 2; -} - -message Tracing { - // Target percentage of requests managed by this HTTP connection manager that will be force - // traced if the :ref:`x-client-trace-id ` - // header is set. This field is a direct analog for the runtime variable - // 'tracing.client_sampling' in the :ref:`HTTP Connection Manager - // `. - // Default: 100% - type.FractionalPercent client_sampling = 1; - - // Target percentage of requests managed by this HTTP connection manager that will be randomly - // selected for trace generation, if not requested by the client or not forced. 
This field is - // a direct analog for the runtime variable 'tracing.random_sampling' in the - // :ref:`HTTP Connection Manager `. - // Default: 100% - type.FractionalPercent random_sampling = 2; - - // Target percentage of requests managed by this HTTP connection manager that will be traced - // after all other sampling checks have been applied (client-directed, force tracing, random - // sampling). This field functions as an upper limit on the total configured sampling rate. For - // instance, setting client_sampling to 100% but overall_sampling to 1% will result in only 1% - // of client requests with the appropriate headers to be force traced. This field is a direct - // analog for the runtime variable 'tracing.global_enabled' in the - // :ref:`HTTP Connection Manager `. - // Default: 100% - type.FractionalPercent overall_sampling = 3; - - // A list of custom tags with unique tag name to create tags for the active span. - // It will take effect after merging with the :ref:`corresponding configuration - // ` - // configured in the HTTP connection manager. If two tags with the same name are configured - // each in the HTTP connection manager and the route level, the one configured here takes - // priority. - repeated type.tracing.v2.CustomTag custom_tags = 4; -} - -// A virtual cluster is a way of specifying a regex matching rule against -// certain important endpoints such that statistics are generated explicitly for -// the matched requests. The reason this is useful is that when doing -// prefix/path matching Envoy does not always know what the application -// considers to be an endpoint. Thus, it’s impossible for Envoy to generically -// emit per endpoint statistics. However, often systems have highly critical -// endpoints that they wish to get “perfect” statistics on. Virtual cluster -// statistics are perfect in the sense that they are emitted on the downstream -// side such that they include network level failures. 
-// -// Documentation for :ref:`virtual cluster statistics `. -// -// .. note:: -// -// Virtual clusters are a useful tool, but we do not recommend setting up a virtual cluster for -// every application endpoint. This is both not easily maintainable and as well the matching and -// statistics output are not free. -message VirtualCluster { - // Specifies a regex pattern to use for matching requests. The entire path of the request - // must match the regex. The regex grammar used is defined `here - // `_. - // - // Examples: - // - // * The regex ``/rides/\d+`` matches the path */rides/0* - // * The regex ``/rides/\d+`` matches the path */rides/123* - // * The regex ``/rides/\d+`` does not match the path */rides/123/456* - // - // .. attention:: - // This field has been deprecated in favor of `headers` as it is not safe for use with - // untrusted input in all cases. - string pattern = 1 [ - deprecated = true, - (validate.rules).string = {max_bytes: 1024}, - (envoy.annotations.disallowed_by_default) = true - ]; - - // Specifies a list of header matchers to use for matching requests. Each specified header must - // match. The pseudo-headers `:path` and `:method` can be used to match the request path and - // method, respectively. - repeated HeaderMatcher headers = 4; - - // Specifies the name of the virtual cluster. The virtual cluster name as well - // as the virtual host name are used when emitting statistics. The statistics are emitted by the - // router filter and are documented :ref:`here `. - string name = 2 [(validate.rules).string = {min_bytes: 1}]; - - // Optionally specifies the HTTP method to match on. For example GET, PUT, - // etc. - // - // .. attention:: - // This field has been deprecated in favor of `headers`. - core.RequestMethod method = 3 - [deprecated = true, (envoy.annotations.disallowed_by_default) = true]; -} - -// Global rate limiting :ref:`architecture overview `. 
-message RateLimit { - // [#next-free-field: 7] - message Action { - // The following descriptor entry is appended to the descriptor: - // - // .. code-block:: cpp - // - // ("source_cluster", "") - // - // is derived from the :option:`--service-cluster` option. - message SourceCluster { - } - - // The following descriptor entry is appended to the descriptor: - // - // .. code-block:: cpp - // - // ("destination_cluster", "") - // - // Once a request matches against a route table rule, a routed cluster is determined by one of - // the following :ref:`route table configuration ` - // settings: - // - // * :ref:`cluster ` indicates the upstream cluster - // to route to. - // * :ref:`weighted_clusters ` - // chooses a cluster randomly from a set of clusters with attributed weight. - // * :ref:`cluster_header ` indicates which - // header in the request contains the target cluster. - message DestinationCluster { - } - - // The following descriptor entry is appended when a header contains a key that matches the - // *header_name*: - // - // .. code-block:: cpp - // - // ("", "") - message RequestHeaders { - // The header name to be queried from the request headers. The header’s - // value is used to populate the value of the descriptor entry for the - // descriptor_key. - string header_name = 1 [ - (validate.rules).string = {min_bytes: 1 well_known_regex: HTTP_HEADER_NAME strict: false} - ]; - - // The key to use in the descriptor entry. - string descriptor_key = 2 [(validate.rules).string = {min_bytes: 1}]; - } - - // The following descriptor entry is appended to the descriptor and is populated using the - // trusted address from :ref:`x-forwarded-for `: - // - // .. code-block:: cpp - // - // ("remote_address", "") - message RemoteAddress { - } - - // The following descriptor entry is appended to the descriptor: - // - // .. code-block:: cpp - // - // ("generic_key", "") - message GenericKey { - // The value to use in the descriptor entry. 
- string descriptor_value = 1 [(validate.rules).string = {min_bytes: 1}]; - } - - // The following descriptor entry is appended to the descriptor: - // - // .. code-block:: cpp - // - // ("header_match", "") - message HeaderValueMatch { - // The value to use in the descriptor entry. - string descriptor_value = 1 [(validate.rules).string = {min_bytes: 1}]; - - // If set to true, the action will append a descriptor entry when the - // request matches the headers. If set to false, the action will append a - // descriptor entry when the request does not match the headers. The - // default value is true. - google.protobuf.BoolValue expect_match = 2; - - // Specifies a set of headers that the rate limit action should match - // on. The action will check the request’s headers against all the - // specified headers in the config. A match will happen if all the - // headers in the config are present in the request with the same values - // (or based on presence if the value field is not in the config). - repeated HeaderMatcher headers = 3 [(validate.rules).repeated = {min_items: 1}]; - } - - oneof action_specifier { - option (validate.required) = true; - - // Rate limit on source cluster. - SourceCluster source_cluster = 1; - - // Rate limit on destination cluster. - DestinationCluster destination_cluster = 2; - - // Rate limit on request headers. - RequestHeaders request_headers = 3; - - // Rate limit on remote address. - RemoteAddress remote_address = 4; - - // Rate limit on a generic key. - GenericKey generic_key = 5; - - // Rate limit on the existence of request headers. - HeaderValueMatch header_value_match = 6; - } - } - - // Refers to the stage set in the filter. The rate limit configuration only - // applies to filters with the same stage number. The default stage number is - // 0. - // - // .. note:: - // - // The filter supports a range of 0 - 10 inclusively for stage numbers. 
- google.protobuf.UInt32Value stage = 1 [(validate.rules).uint32 = {lte: 10}]; - - // The key to be set in runtime to disable this rate limit configuration. - string disable_key = 2; - - // A list of actions that are to be applied for this rate limit configuration. - // Order matters as the actions are processed sequentially and the descriptor - // is composed by appending descriptor entries in that sequence. If an action - // cannot append a descriptor entry, no descriptor is generated for the - // configuration. See :ref:`composing actions - // ` for additional documentation. - repeated Action actions = 3 [(validate.rules).repeated = {min_items: 1}]; -} - -// .. attention:: -// -// Internally, Envoy always uses the HTTP/2 *:authority* header to represent the HTTP/1 *Host* -// header. Thus, if attempting to match on *Host*, match on *:authority* instead. -// -// .. attention:: -// -// To route on HTTP method, use the special HTTP/2 *:method* header. This works for both -// HTTP/1 and HTTP/2 as Envoy normalizes headers. E.g., -// -// .. code-block:: json -// -// { -// "name": ":method", -// "exact_match": "POST" -// } -// -// .. attention:: -// In the absence of any header match specifier, match will default to :ref:`present_match -// `. i.e, a request that has the :ref:`name -// ` header will match, regardless of the header's -// value. -// -// [#next-major-version: HeaderMatcher should be refactored to use StringMatcher.] -// [#next-free-field: 12] -message HeaderMatcher { - reserved 2, 3; - - // Specifies the name of the header in the request. - string name = 1 - [(validate.rules).string = {min_bytes: 1 well_known_regex: HTTP_HEADER_NAME strict: false}]; - - // Specifies how the header match will be performed to route the request. - oneof header_match_specifier { - // If specified, header match will be performed based on the value of the header. 
- string exact_match = 4; - - // If specified, this regex string is a regular expression rule which implies the entire request - // header value must match the regex. The rule will not match if only a subsequence of the - // request header value matches the regex. The regex grammar used in the value field is defined - // `here `_. - // - // Examples: - // - // * The regex ``\d{3}`` matches the value *123* - // * The regex ``\d{3}`` does not match the value *1234* - // * The regex ``\d{3}`` does not match the value *123.456* - // - // .. attention:: - // This field has been deprecated in favor of `safe_regex_match` as it is not safe for use - // with untrusted input in all cases. - string regex_match = 5 [ - deprecated = true, - (validate.rules).string = {max_bytes: 1024}, - (envoy.annotations.disallowed_by_default) = true - ]; - - // If specified, this regex string is a regular expression rule which implies the entire request - // header value must match the regex. The rule will not match if only a subsequence of the - // request header value matches the regex. - type.matcher.RegexMatcher safe_regex_match = 11; - - // If specified, header match will be performed based on range. - // The rule will match if the request header value is within this range. - // The entire request header value must represent an integer in base 10 notation: consisting of - // an optional plus or minus sign followed by a sequence of digits. The rule will not match if - // the header value does not represent an integer. Match will fail for empty values, floating - // point numbers or if only a subsequence of the header value is an integer. - // - // Examples: - // - // * For range [-10,0), route will match for header value -1, but not for 0, "somestring", 10.9, - // "-1somestring" - type.Int64Range range_match = 6; - - // If specified, header match will be performed based on whether the header is in the - // request. 
- bool present_match = 7; - - // If specified, header match will be performed based on the prefix of the header value. - // Note: empty prefix is not allowed, please use present_match instead. - // - // Examples: - // - // * The prefix *abcd* matches the value *abcdxyz*, but not for *abcxyz*. - string prefix_match = 9 [(validate.rules).string = {min_bytes: 1}]; - - // If specified, header match will be performed based on the suffix of the header value. - // Note: empty suffix is not allowed, please use present_match instead. - // - // Examples: - // - // * The suffix *abcd* matches the value *xyzabcd*, but not for *xyzbcd*. - string suffix_match = 10 [(validate.rules).string = {min_bytes: 1}]; - } - - // If specified, the match result will be inverted before checking. Defaults to false. - // - // Examples: - // - // * The regex ``\d{3}`` does not match the value *1234*, so it will match when inverted. - // * The range [-10,0) will match the value -1, so it will not match when inverted. - bool invert_match = 8; -} - -// Query parameter matching treats the query string of a request's :path header -// as an ampersand-separated list of keys and/or key=value elements. -// [#next-free-field: 7] -message QueryParameterMatcher { - // Specifies the name of a key that must be present in the requested - // *path*'s query string. - string name = 1 [(validate.rules).string = {min_bytes: 1 max_bytes: 1024}]; - - // Specifies the value of the key. If the value is absent, a request - // that contains the key in its query string will match, whether the - // key appears with a value (e.g., "?debug=true") or not (e.g., "?debug") - // - // ..attention:: - // This field is deprecated. Use an `exact` match inside the `string_match` field. - string value = 3 [deprecated = true, (envoy.annotations.disallowed_by_default) = true]; - - // Specifies whether the query parameter value is a regular expression. - // Defaults to false. 
The entire query parameter value (i.e., the part to - // the right of the equals sign in "key=value") must match the regex. - // E.g., the regex ``\d+$`` will match *123* but not *a123* or *123a*. - // - // ..attention:: - // This field is deprecated. Use a `safe_regex` match inside the `string_match` field. - google.protobuf.BoolValue regex = 4 - [deprecated = true, (envoy.annotations.disallowed_by_default) = true]; - - oneof query_parameter_match_specifier { - // Specifies whether a query parameter value should match against a string. - type.matcher.StringMatcher string_match = 5 [(validate.rules).message = {required: true}]; - - // Specifies whether a query parameter should be present. - bool present_match = 6; - } -} diff --git a/generated_api_shadow/envoy/api/v2/scoped_route.proto b/generated_api_shadow/envoy/api/v2/scoped_route.proto deleted file mode 100644 index 0841bd08723c5..0000000000000 --- a/generated_api_shadow/envoy/api/v2/scoped_route.proto +++ /dev/null @@ -1,109 +0,0 @@ -syntax = "proto3"; - -package envoy.api.v2; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.api.v2"; -option java_outer_classname = "ScopedRouteProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = "envoy.config.route.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: HTTP scoped routing configuration] -// * Routing :ref:`architecture overview ` - -// Specifies a routing scope, which associates a -// :ref:`Key` to a -// :ref:`envoy_api_msg_RouteConfiguration` (identified by its resource name). 
-// -// The HTTP connection manager builds up a table consisting of these Key to -// RouteConfiguration mappings, and looks up the RouteConfiguration to use per -// request according to the algorithm specified in the -// :ref:`scope_key_builder` -// assigned to the HttpConnectionManager. -// -// For example, with the following configurations (in YAML): -// -// HttpConnectionManager config: -// -// .. code:: -// -// ... -// scoped_routes: -// name: foo-scoped-routes -// scope_key_builder: -// fragments: -// - header_value_extractor: -// name: X-Route-Selector -// element_separator: , -// element: -// separator: = -// key: vip -// -// ScopedRouteConfiguration resources (specified statically via -// :ref:`scoped_route_configurations_list` -// or obtained dynamically via SRDS): -// -// .. code:: -// -// (1) -// name: route-scope1 -// route_configuration_name: route-config1 -// key: -// fragments: -// - string_key: 172.10.10.20 -// -// (2) -// name: route-scope2 -// route_configuration_name: route-config2 -// key: -// fragments: -// - string_key: 172.20.20.30 -// -// A request from a client such as: -// -// .. code:: -// -// GET / HTTP/1.1 -// Host: foo.com -// X-Route-Selector: vip=172.10.10.20 -// -// would result in the routing table defined by the `route-config1` -// RouteConfiguration being assigned to the HTTP request/stream. -// -message ScopedRouteConfiguration { - // Specifies a key which is matched against the output of the - // :ref:`scope_key_builder` - // specified in the HttpConnectionManager. The matching is done per HTTP - // request and is dependent on the order of the fragments contained in the - // Key. - message Key { - message Fragment { - oneof type { - option (validate.required) = true; - - // A string to match against. - string string_key = 1; - } - } - - // The ordered set of fragments to match against. The order must match the - // fragments in the corresponding - // :ref:`scope_key_builder`. 
- repeated Fragment fragments = 1 [(validate.rules).repeated = {min_items: 1}]; - } - - // The name assigned to the routing scope. - string name = 1 [(validate.rules).string = {min_bytes: 1}]; - - // The resource name to use for a :ref:`envoy_api_msg_DiscoveryRequest` to an - // RDS server to fetch the :ref:`envoy_api_msg_RouteConfiguration` associated - // with this scope. - string route_configuration_name = 2 [(validate.rules).string = {min_bytes: 1}]; - - // The key to match against. - Key key = 3 [(validate.rules).message = {required: true}]; -} diff --git a/generated_api_shadow/envoy/api/v2/srds.proto b/generated_api_shadow/envoy/api/v2/srds.proto deleted file mode 100644 index 0edb99a1eccbb..0000000000000 --- a/generated_api_shadow/envoy/api/v2/srds.proto +++ /dev/null @@ -1,50 +0,0 @@ -syntax = "proto3"; - -package envoy.api.v2; - -import "envoy/api/v2/discovery.proto"; - -import "google/api/annotations.proto"; - -import "envoy/annotations/resource.proto"; -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; - -import public "envoy/api/v2/scoped_route.proto"; - -option java_package = "io.envoyproxy.envoy.api.v2"; -option java_outer_classname = "SrdsProto"; -option java_multiple_files = true; -option java_generic_services = true; -option (udpa.annotations.file_migrate).move_to_package = "envoy.service.route.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: SRDS] -// * Routing :ref:`architecture overview ` - -// The Scoped Routes Discovery Service (SRDS) API distributes -// :ref:`ScopedRouteConfiguration` -// resources. Each ScopedRouteConfiguration resource represents a "routing -// scope" containing a mapping that allows the HTTP connection manager to -// dynamically assign a routing table (specified via a -// :ref:`RouteConfiguration` message) to each -// HTTP request. 
-service ScopedRoutesDiscoveryService { - option (envoy.annotations.resource).type = "envoy.api.v2.ScopedRouteConfiguration"; - - rpc StreamScopedRoutes(stream DiscoveryRequest) returns (stream DiscoveryResponse) { - } - - rpc DeltaScopedRoutes(stream DeltaDiscoveryRequest) returns (stream DeltaDiscoveryResponse) { - } - - rpc FetchScopedRoutes(DiscoveryRequest) returns (DiscoveryResponse) { - option (google.api.http).post = "/v2/discovery:scoped-routes"; - option (google.api.http).body = "*"; - } -} - -// [#not-implemented-hide:] Not configuration. Workaround c++ protobuf issue with importing -// services: https://github.com/google/protobuf/issues/4221 and protoxform to upgrade the file. -message SrdsDummy { -} diff --git a/generated_api_shadow/envoy/config/README.md b/generated_api_shadow/envoy/config/README.md deleted file mode 100644 index 279bd7c2e8525..0000000000000 --- a/generated_api_shadow/envoy/config/README.md +++ /dev/null @@ -1,3 +0,0 @@ -Protocol buffer definitions for Envoy's bootstrap, filter, and service configuration. - -Visibility should be constrained to none or `//envoy/config/bootstrap/v2` by default. diff --git a/generated_api_shadow/envoy/config/accesslog/v2/BUILD b/generated_api_shadow/envoy/config/accesslog/v2/BUILD deleted file mode 100644 index 83bc0ab960e74..0000000000000 --- a/generated_api_shadow/envoy/config/accesslog/v2/BUILD +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/api/v2/core:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/config/accesslog/v2/als.proto b/generated_api_shadow/envoy/config/accesslog/v2/als.proto deleted file mode 100644 index 5b4106af106ed..0000000000000 --- a/generated_api_shadow/envoy/config/accesslog/v2/als.proto +++ /dev/null @@ -1,75 +0,0 @@ -syntax = "proto3"; - -package envoy.config.accesslog.v2; - -import "envoy/api/v2/core/grpc_service.proto"; - -import "google/protobuf/duration.proto"; -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.accesslog.v2"; -option java_outer_classname = "AlsProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.access_loggers.grpc.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: gRPC Access Log Service (ALS)] - -// Configuration for the built-in *envoy.access_loggers.http_grpc* -// :ref:`AccessLog `. This configuration will -// populate :ref:`StreamAccessLogsMessage.http_logs -// `. -// [#extension: envoy.access_loggers.http_grpc] -message HttpGrpcAccessLogConfig { - CommonGrpcAccessLogConfig common_config = 1 [(validate.rules).message = {required: true}]; - - // Additional request headers to log in :ref:`HTTPRequestProperties.request_headers - // `. - repeated string additional_request_headers_to_log = 2; - - // Additional response headers to log in :ref:`HTTPResponseProperties.response_headers - // `. - repeated string additional_response_headers_to_log = 3; - - // Additional response trailers to log in :ref:`HTTPResponseProperties.response_trailers - // `. 
- repeated string additional_response_trailers_to_log = 4; -} - -// Configuration for the built-in *envoy.access_loggers.tcp_grpc* type. This configuration will -// populate *StreamAccessLogsMessage.tcp_logs*. -// [#extension: envoy.access_loggers.tcp_grpc] -message TcpGrpcAccessLogConfig { - CommonGrpcAccessLogConfig common_config = 1 [(validate.rules).message = {required: true}]; -} - -// Common configuration for gRPC access logs. -// [#next-free-field: 6] -message CommonGrpcAccessLogConfig { - // The friendly name of the access log to be returned in :ref:`StreamAccessLogsMessage.Identifier - // `. This allows the - // access log server to differentiate between different access logs coming from the same Envoy. - string log_name = 1 [(validate.rules).string = {min_bytes: 1}]; - - // The gRPC service for the access log service. - api.v2.core.GrpcService grpc_service = 2 [(validate.rules).message = {required: true}]; - - // Interval for flushing access logs to the gRPC stream. Logger will flush requests every time - // this interval is elapsed, or when batch size limit is hit, whichever comes first. Defaults to - // 1 second. - google.protobuf.Duration buffer_flush_interval = 3 [(validate.rules).duration = {gt {}}]; - - // Soft size limit in bytes for access log entries buffer. Logger will buffer requests until - // this limit it hit, or every time flush interval is elapsed, whichever comes first. Setting it - // to zero effectively disables the batching. Defaults to 16384. - google.protobuf.UInt32Value buffer_size_bytes = 4; - - // Additional filter state objects to log in :ref:`filter_state_objects - // `. - // Logger will call `FilterState::Object::serializeAsProto` to serialize the filter state object. 
- repeated string filter_state_objects_to_log = 5; -} diff --git a/generated_api_shadow/envoy/config/accesslog/v2/file.proto b/generated_api_shadow/envoy/config/accesslog/v2/file.proto deleted file mode 100644 index 9b8671c81358e..0000000000000 --- a/generated_api_shadow/envoy/config/accesslog/v2/file.proto +++ /dev/null @@ -1,43 +0,0 @@ -syntax = "proto3"; - -package envoy.config.accesslog.v2; - -import "google/protobuf/struct.proto"; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.accesslog.v2"; -option java_outer_classname = "FileProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.access_loggers.file.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: File access log] -// [#extension: envoy.access_loggers.file] - -// Custom configuration for an :ref:`AccessLog ` -// that writes log entries directly to a file. Configures the built-in *envoy.access_loggers.file* -// AccessLog. -message FileAccessLog { - // A path to a local file to which to write the access log entries. - string path = 1 [(validate.rules).string = {min_bytes: 1}]; - - oneof access_log_format { - // Access log :ref:`format string`. - // Envoy supports :ref:`custom access log formats ` as well as a - // :ref:`default format `. - string format = 2; - - // Access log :ref:`format dictionary`. All values - // are rendered as strings. - google.protobuf.Struct json_format = 3; - - // Access log :ref:`format dictionary`. Values are - // rendered as strings, numbers, or boolean values as appropriate. Nested JSON objects may - // be produced by some command operators (e.g.FILTER_STATE or DYNAMIC_METADATA). See the - // documentation for a specific command operator for details. 
- google.protobuf.Struct typed_json_format = 4; - } -} diff --git a/generated_api_shadow/envoy/config/accesslog/v3/BUILD b/generated_api_shadow/envoy/config/accesslog/v3/BUILD deleted file mode 100644 index af60e4e1966f5..0000000000000 --- a/generated_api_shadow/envoy/config/accesslog/v3/BUILD +++ /dev/null @@ -1,17 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/annotations:pkg", - "//envoy/config/core/v3:pkg", - "//envoy/config/filter/accesslog/v2:pkg", - "//envoy/config/route/v3:pkg", - "//envoy/type/matcher/v3:pkg", - "//envoy/type/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/config/accesslog/v3/accesslog.proto b/generated_api_shadow/envoy/config/accesslog/v3/accesslog.proto deleted file mode 100644 index 2161f80478c23..0000000000000 --- a/generated_api_shadow/envoy/config/accesslog/v3/accesslog.proto +++ /dev/null @@ -1,327 +0,0 @@ -syntax = "proto3"; - -package envoy.config.accesslog.v3; - -import "envoy/config/core/v3/base.proto"; -import "envoy/config/route/v3/route_components.proto"; -import "envoy/type/matcher/v3/metadata.proto"; -import "envoy/type/v3/percent.proto"; - -import "google/protobuf/any.proto"; -import "google/protobuf/struct.proto"; -import "google/protobuf/wrappers.proto"; - -import "envoy/annotations/deprecation.proto"; -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.accesslog.v3"; -option java_outer_classname = "AccesslogProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Common access log types] - -message AccessLog { - option (udpa.annotations.versioning).previous_message_type = - 
"envoy.config.filter.accesslog.v2.AccessLog"; - - // The name of the access log extension to instantiate. - // The name must match one of the compiled in loggers. - // See the :ref:`extensions listed in typed_config below ` for the default list of available loggers. - string name = 1; - - // Filter which is used to determine if the access log needs to be written. - AccessLogFilter filter = 2; - - // Custom configuration that must be set according to the access logger extension being instantiated. - // [#extension-category: envoy.access_loggers] - oneof config_type { - google.protobuf.Any typed_config = 4; - - google.protobuf.Struct hidden_envoy_deprecated_config = 3 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - } -} - -// [#next-free-field: 13] -message AccessLogFilter { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.accesslog.v2.AccessLogFilter"; - - oneof filter_specifier { - option (validate.required) = true; - - // Status code filter. - StatusCodeFilter status_code_filter = 1; - - // Duration filter. - DurationFilter duration_filter = 2; - - // Not health check filter. - NotHealthCheckFilter not_health_check_filter = 3; - - // Traceable filter. - TraceableFilter traceable_filter = 4; - - // Runtime filter. - RuntimeFilter runtime_filter = 5; - - // And filter. - AndFilter and_filter = 6; - - // Or filter. - OrFilter or_filter = 7; - - // Header filter. - HeaderFilter header_filter = 8; - - // Response flag filter. - ResponseFlagFilter response_flag_filter = 9; - - // gRPC status filter. - GrpcStatusFilter grpc_status_filter = 10; - - // Extension filter. - ExtensionFilter extension_filter = 11; - - // Metadata Filter - MetadataFilter metadata_filter = 12; - } -} - -// Filter on an integer comparison. 
-message ComparisonFilter { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.accesslog.v2.ComparisonFilter"; - - enum Op { - // = - EQ = 0; - - // >= - GE = 1; - - // <= - LE = 2; - } - - // Comparison operator. - Op op = 1 [(validate.rules).enum = {defined_only: true}]; - - // Value to compare against. - core.v3.RuntimeUInt32 value = 2; -} - -// Filters on HTTP response/status code. -message StatusCodeFilter { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.accesslog.v2.StatusCodeFilter"; - - // Comparison. - ComparisonFilter comparison = 1 [(validate.rules).message = {required: true}]; -} - -// Filters on total request duration in milliseconds. -message DurationFilter { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.accesslog.v2.DurationFilter"; - - // Comparison. - ComparisonFilter comparison = 1 [(validate.rules).message = {required: true}]; -} - -// Filters for requests that are not health check requests. A health check -// request is marked by the health check filter. -message NotHealthCheckFilter { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.accesslog.v2.NotHealthCheckFilter"; -} - -// Filters for requests that are traceable. See the tracing overview for more -// information on how a request becomes traceable. -message TraceableFilter { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.accesslog.v2.TraceableFilter"; -} - -// Filters for random sampling of requests. -message RuntimeFilter { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.accesslog.v2.RuntimeFilter"; - - // Runtime key to get an optional overridden numerator for use in the - // *percent_sampled* field. If found in runtime, this value will replace the - // default numerator. 
- string runtime_key = 1 [(validate.rules).string = {min_len: 1}]; - - // The default sampling percentage. If not specified, defaults to 0% with - // denominator of 100. - type.v3.FractionalPercent percent_sampled = 2; - - // By default, sampling pivots on the header - // :ref:`x-request-id` being - // present. If :ref:`x-request-id` - // is present, the filter will consistently sample across multiple hosts based - // on the runtime key value and the value extracted from - // :ref:`x-request-id`. If it is - // missing, or *use_independent_randomness* is set to true, the filter will - // randomly sample based on the runtime key value alone. - // *use_independent_randomness* can be used for logging kill switches within - // complex nested :ref:`AndFilter - // ` and :ref:`OrFilter - // ` blocks that are easier to - // reason about from a probability perspective (i.e., setting to true will - // cause the filter to behave like an independent random variable when - // composed within logical operator filters). - bool use_independent_randomness = 3; -} - -// Performs a logical “and” operation on the result of each filter in filters. -// Filters are evaluated sequentially and if one of them returns false, the -// filter returns false immediately. -message AndFilter { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.accesslog.v2.AndFilter"; - - repeated AccessLogFilter filters = 1 [(validate.rules).repeated = {min_items: 2}]; -} - -// Performs a logical “or” operation on the result of each individual filter. -// Filters are evaluated sequentially and if one of them returns true, the -// filter returns true immediately. -message OrFilter { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.accesslog.v2.OrFilter"; - - repeated AccessLogFilter filters = 2 [(validate.rules).repeated = {min_items: 2}]; -} - -// Filters requests based on the presence or value of a request header. 
-message HeaderFilter { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.accesslog.v2.HeaderFilter"; - - // Only requests with a header which matches the specified HeaderMatcher will - // pass the filter check. - route.v3.HeaderMatcher header = 1 [(validate.rules).message = {required: true}]; -} - -// Filters requests that received responses with an Envoy response flag set. -// A list of the response flags can be found -// in the access log formatter -// :ref:`documentation`. -message ResponseFlagFilter { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.accesslog.v2.ResponseFlagFilter"; - - // Only responses with the any of the flags listed in this field will be - // logged. This field is optional. If it is not specified, then any response - // flag will pass the filter check. - repeated string flags = 1 [(validate.rules).repeated = { - items { - string { - in: "LH" - in: "UH" - in: "UT" - in: "LR" - in: "UR" - in: "UF" - in: "UC" - in: "UO" - in: "NR" - in: "DI" - in: "FI" - in: "RL" - in: "UAEX" - in: "RLSE" - in: "DC" - in: "URX" - in: "SI" - in: "IH" - in: "DPE" - in: "UMSDR" - in: "RFCF" - in: "NFCF" - in: "DT" - in: "UPE" - in: "NC" - in: "OM" - } - } - }]; -} - -// Filters gRPC requests based on their response status. If a gRPC status is not -// provided, the filter will infer the status from the HTTP status code. 
-message GrpcStatusFilter { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.accesslog.v2.GrpcStatusFilter"; - - enum Status { - OK = 0; - CANCELED = 1; - UNKNOWN = 2; - INVALID_ARGUMENT = 3; - DEADLINE_EXCEEDED = 4; - NOT_FOUND = 5; - ALREADY_EXISTS = 6; - PERMISSION_DENIED = 7; - RESOURCE_EXHAUSTED = 8; - FAILED_PRECONDITION = 9; - ABORTED = 10; - OUT_OF_RANGE = 11; - UNIMPLEMENTED = 12; - INTERNAL = 13; - UNAVAILABLE = 14; - DATA_LOSS = 15; - UNAUTHENTICATED = 16; - } - - // Logs only responses that have any one of the gRPC statuses in this field. - repeated Status statuses = 1 [(validate.rules).repeated = {items {enum {defined_only: true}}}]; - - // If included and set to true, the filter will instead block all responses - // with a gRPC status or inferred gRPC status enumerated in statuses, and - // allow all other responses. - bool exclude = 2; -} - -// Filters based on matching dynamic metadata. -// If the matcher path and key correspond to an existing key in dynamic -// metadata, the request is logged only if the matcher value is equal to the -// metadata value. If the matcher path and key *do not* correspond to an -// existing key in dynamic metadata, the request is logged only if -// match_if_key_not_found is "true" or unset. -message MetadataFilter { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.accesslog.v2.MetadataFilter"; - - // Matcher to check metadata for specified value. For example, to match on the - // access_log_hint metadata, set the filter to "envoy.common" and the path to - // "access_log_hint", and the value to "true". - type.matcher.v3.MetadataMatcher matcher = 1; - - // Default result if the key does not exist in dynamic metadata: if unset or - // true, then log; if false, then don't log. - google.protobuf.BoolValue match_if_key_not_found = 2; -} - -// Extension filter is statically registered at runtime. 
-message ExtensionFilter { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.accesslog.v2.ExtensionFilter"; - - // The name of the filter implementation to instantiate. The name must - // match a statically registered filter. - string name = 1; - - // Custom configuration that depends on the filter being instantiated. - oneof config_type { - google.protobuf.Any typed_config = 3; - - google.protobuf.Struct hidden_envoy_deprecated_config = 2 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - } -} diff --git a/generated_api_shadow/envoy/config/bootstrap/v2/BUILD b/generated_api_shadow/envoy/config/bootstrap/v2/BUILD deleted file mode 100644 index 0c656d1a9c5a5..0000000000000 --- a/generated_api_shadow/envoy/config/bootstrap/v2/BUILD +++ /dev/null @@ -1,18 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/annotations:pkg", - "//envoy/api/v2:pkg", - "//envoy/api/v2/auth:pkg", - "//envoy/api/v2/core:pkg", - "//envoy/config/metrics/v2:pkg", - "//envoy/config/overload/v2alpha:pkg", - "//envoy/config/trace/v2:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/config/bootstrap/v2/bootstrap.proto b/generated_api_shadow/envoy/config/bootstrap/v2/bootstrap.proto deleted file mode 100644 index 30c276f24276b..0000000000000 --- a/generated_api_shadow/envoy/config/bootstrap/v2/bootstrap.proto +++ /dev/null @@ -1,352 +0,0 @@ -syntax = "proto3"; - -package envoy.config.bootstrap.v2; - -import "envoy/api/v2/auth/secret.proto"; -import "envoy/api/v2/cluster.proto"; -import "envoy/api/v2/core/address.proto"; -import "envoy/api/v2/core/base.proto"; -import "envoy/api/v2/core/config_source.proto"; -import "envoy/api/v2/core/event_service_config.proto"; -import 
"envoy/api/v2/core/socket_option.proto"; -import "envoy/api/v2/listener.proto"; -import "envoy/config/metrics/v2/stats.proto"; -import "envoy/config/overload/v2alpha/overload.proto"; -import "envoy/config/trace/v2/http_tracer.proto"; - -import "google/protobuf/duration.proto"; -import "google/protobuf/struct.proto"; -import "google/protobuf/wrappers.proto"; - -import "envoy/annotations/deprecation.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.bootstrap.v2"; -option java_outer_classname = "BootstrapProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Bootstrap] -// This proto is supplied via the :option:`-c` CLI flag and acts as the root -// of the Envoy v2 configuration. See the :ref:`v2 configuration overview -// ` for more detail. - -// Bootstrap :ref:`configuration overview `. -// [#next-free-field: 21] -message Bootstrap { - message StaticResources { - // Static :ref:`Listeners `. These listeners are - // available regardless of LDS configuration. - repeated api.v2.Listener listeners = 1; - - // If a network based configuration source is specified for :ref:`cds_config - // `, it's necessary - // to have some initial cluster definitions available to allow Envoy to know - // how to speak to the management server. These cluster definitions may not - // use :ref:`EDS ` (i.e. they should be static - // IP or DNS-based). - repeated api.v2.Cluster clusters = 2; - - // These static secrets can be used by :ref:`SdsSecretConfig - // ` - repeated api.v2.auth.Secret secrets = 3; - } - - message DynamicResources { - reserved 4; - - // All :ref:`Listeners ` are provided by a single - // :ref:`LDS ` configuration source. - api.v2.core.ConfigSource lds_config = 1; - - // All post-bootstrap :ref:`Cluster ` definitions are - // provided by a single :ref:`CDS ` - // configuration source. 
- api.v2.core.ConfigSource cds_config = 2; - - // A single :ref:`ADS ` source may be optionally - // specified. This must have :ref:`api_type - // ` :ref:`GRPC - // `. Only - // :ref:`ConfigSources ` that have - // the :ref:`ads ` field set will be - // streamed on the ADS channel. - api.v2.core.ApiConfigSource ads_config = 3; - } - - reserved 10; - - // Node identity to present to the management server and for instance - // identification purposes (e.g. in generated headers). - api.v2.core.Node node = 1; - - // Statically specified resources. - StaticResources static_resources = 2; - - // xDS configuration sources. - DynamicResources dynamic_resources = 3; - - // Configuration for the cluster manager which owns all upstream clusters - // within the server. - ClusterManager cluster_manager = 4; - - // Health discovery service config option. - // (:ref:`core.ApiConfigSource `) - api.v2.core.ApiConfigSource hds_config = 14; - - // Optional file system path to search for startup flag files. - string flags_path = 5; - - // Optional set of stats sinks. - repeated metrics.v2.StatsSink stats_sinks = 6; - - // Configuration for internal processing of stats. - metrics.v2.StatsConfig stats_config = 13; - - // Optional duration between flushes to configured stats sinks. For - // performance reasons Envoy latches counters and only flushes counters and - // gauges at a periodic interval. If not specified the default is 5000ms (5 - // seconds). - // Duration must be at least 1ms and at most 5 min. - google.protobuf.Duration stats_flush_interval = 7 [(validate.rules).duration = { - lt {seconds: 300} - gte {nanos: 1000000} - }]; - - // Optional watchdog configuration. - Watchdog watchdog = 8; - - // Configuration for an external tracing provider. - // - // .. attention:: - // This field has been deprecated in favor of :ref:`HttpConnectionManager.Tracing.provider - // `. - trace.v2.Tracing tracing = 9; - - // Configuration for the runtime configuration provider (deprecated). 
If not - // specified, a “null” provider will be used which will result in all defaults - // being used. - Runtime runtime = 11 [deprecated = true, (envoy.annotations.disallowed_by_default) = true]; - - // Configuration for the runtime configuration provider. If not - // specified, a “null” provider will be used which will result in all defaults - // being used. - LayeredRuntime layered_runtime = 17; - - // Configuration for the local administration HTTP server. - Admin admin = 12; - - // Optional overload manager configuration. - overload.v2alpha.OverloadManager overload_manager = 15; - - // Enable :ref:`stats for event dispatcher `, defaults to false. - // Note that this records a value for each iteration of the event loop on every thread. This - // should normally be minimal overhead, but when using - // :ref:`statsd `, it will send each observed value - // over the wire individually because the statsd protocol doesn't have any way to represent a - // histogram summary. Be aware that this can be a very large volume of data. - bool enable_dispatcher_stats = 16; - - // Optional string which will be used in lieu of x-envoy in prefixing headers. - // - // For example, if this string is present and set to X-Foo, then x-envoy-retry-on will be - // transformed into x-foo-retry-on etc. - // - // Note this applies to the headers Envoy will generate, the headers Envoy will sanitize, and the - // headers Envoy will trust for core code and core extensions only. Be VERY careful making - // changes to this string, especially in multi-layer Envoy deployments or deployments using - // extensions which are not upstream. - string header_prefix = 18; - - // Optional proxy version which will be used to set the value of :ref:`server.version statistic - // ` if specified. Envoy will not process this value, it will be sent as is to - // :ref:`stats sinks `. 
- google.protobuf.UInt64Value stats_server_version_override = 19; - - // Always use TCP queries instead of UDP queries for DNS lookups. - // This may be overridden on a per-cluster basis in cds_config, - // when :ref:`dns_resolvers ` and - // :ref:`use_tcp_for_dns_lookups ` are - // specified. - // Setting this value causes failure if the - // ``envoy.restart_features.use_apple_api_for_dns_lookups`` runtime value is true during - // server startup. Apple' API only uses UDP for DNS resolution. - bool use_tcp_for_dns_lookups = 20; -} - -// Administration interface :ref:`operations documentation -// `. -message Admin { - // The path to write the access log for the administration server. If no - // access log is desired specify ‘/dev/null’. This is only required if - // :ref:`address ` is set. - string access_log_path = 1; - - // The cpu profiler output path for the administration server. If no profile - // path is specified, the default is ‘/var/log/envoy/envoy.prof’. - string profile_path = 2; - - // The TCP address that the administration server will listen on. - // If not specified, Envoy will not start an administration server. - api.v2.core.Address address = 3; - - // Additional socket options that may not be present in Envoy source code or - // precompiled binaries. - repeated api.v2.core.SocketOption socket_options = 4; -} - -// Cluster manager :ref:`architecture overview `. -message ClusterManager { - message OutlierDetection { - // Specifies the path to the outlier event log. - string event_log_path = 1; - - // [#not-implemented-hide:] - // The gRPC service for the outlier detection event service. - // If empty, outlier detection events won't be sent to a remote endpoint. - api.v2.core.EventServiceConfig event_service = 2; - } - - // Name of the local cluster (i.e., the cluster that owns the Envoy running - // this configuration). In order to enable :ref:`zone aware routing - // ` this option must be set. 
- // If *local_cluster_name* is defined then :ref:`clusters - // ` must be defined in the :ref:`Bootstrap - // static cluster resources - // `. This is unrelated to - // the :option:`--service-cluster` option which does not `affect zone aware - // routing `_. - string local_cluster_name = 1; - - // Optional global configuration for outlier detection. - OutlierDetection outlier_detection = 2; - - // Optional configuration used to bind newly established upstream connections. - // This may be overridden on a per-cluster basis by upstream_bind_config in the cds_config. - api.v2.core.BindConfig upstream_bind_config = 3; - - // A management server endpoint to stream load stats to via - // *StreamLoadStats*. This must have :ref:`api_type - // ` :ref:`GRPC - // `. - api.v2.core.ApiConfigSource load_stats_config = 4; -} - -// Envoy process watchdog configuration. When configured, this monitors for -// nonresponsive threads and kills the process after the configured thresholds. -// See the :ref:`watchdog documentation ` for more information. -message Watchdog { - // The duration after which Envoy counts a nonresponsive thread in the - // *watchdog_miss* statistic. If not specified the default is 200ms. - google.protobuf.Duration miss_timeout = 1; - - // The duration after which Envoy counts a nonresponsive thread in the - // *watchdog_mega_miss* statistic. If not specified the default is - // 1000ms. - google.protobuf.Duration megamiss_timeout = 2; - - // If a watched thread has been nonresponsive for this duration, assume a - // programming error and kill the entire Envoy process. Set to 0 to disable - // kill behavior. If not specified the default is 0 (disabled). - google.protobuf.Duration kill_timeout = 3; - - // If at least two watched threads have been nonresponsive for at least this - // duration assume a true deadlock and kill the entire Envoy process. Set to 0 - // to disable this behavior. If not specified the default is 0 (disabled). 
- google.protobuf.Duration multikill_timeout = 4; -} - -// Runtime :ref:`configuration overview ` (deprecated). -message Runtime { - // The implementation assumes that the file system tree is accessed via a - // symbolic link. An atomic link swap is used when a new tree should be - // switched to. This parameter specifies the path to the symbolic link. Envoy - // will watch the location for changes and reload the file system tree when - // they happen. If this parameter is not set, there will be no disk based - // runtime. - string symlink_root = 1; - - // Specifies the subdirectory to load within the root directory. This is - // useful if multiple systems share the same delivery mechanism. Envoy - // configuration elements can be contained in a dedicated subdirectory. - string subdirectory = 2; - - // Specifies an optional subdirectory to load within the root directory. If - // specified and the directory exists, configuration values within this - // directory will override those found in the primary subdirectory. This is - // useful when Envoy is deployed across many different types of servers. - // Sometimes it is useful to have a per service cluster directory for runtime - // configuration. See below for exactly how the override directory is used. - string override_subdirectory = 3; - - // Static base runtime. This will be :ref:`overridden - // ` by other runtime layers, e.g. - // disk or admin. This follows the :ref:`runtime protobuf JSON representation - // encoding `. - google.protobuf.Struct base = 4; -} - -// [#next-free-field: 6] -message RuntimeLayer { - // :ref:`Disk runtime ` layer. - message DiskLayer { - // The implementation assumes that the file system tree is accessed via a - // symbolic link. An atomic link swap is used when a new tree should be - // switched to. This parameter specifies the path to the symbolic link. - // Envoy will watch the location for changes and reload the file system tree - // when they happen. 
See documentation on runtime :ref:`atomicity - // ` for further details on how reloads are - // treated. - string symlink_root = 1; - - // Specifies the subdirectory to load within the root directory. This is - // useful if multiple systems share the same delivery mechanism. Envoy - // configuration elements can be contained in a dedicated subdirectory. - string subdirectory = 3; - - // :ref:`Append ` the - // service cluster to the path under symlink root. - bool append_service_cluster = 2; - } - - // :ref:`Admin console runtime ` layer. - message AdminLayer { - } - - // :ref:`Runtime Discovery Service (RTDS) ` layer. - message RtdsLayer { - // Resource to subscribe to at *rtds_config* for the RTDS layer. - string name = 1; - - // RTDS configuration source. - api.v2.core.ConfigSource rtds_config = 2; - } - - // Descriptive name for the runtime layer. This is only used for the runtime - // :http:get:`/runtime` output. - string name = 1 [(validate.rules).string = {min_bytes: 1}]; - - oneof layer_specifier { - option (validate.required) = true; - - // :ref:`Static runtime ` layer. - // This follows the :ref:`runtime protobuf JSON representation encoding - // `. Unlike static xDS resources, this static - // layer is overridable by later layers in the runtime virtual filesystem. - google.protobuf.Struct static_layer = 2; - - DiskLayer disk_layer = 3; - - AdminLayer admin_layer = 4; - - RtdsLayer rtds_layer = 5; - } -} - -// Runtime :ref:`configuration overview `. -message LayeredRuntime { - // The :ref:`layers ` of the runtime. This is ordered - // such that later layers in the list overlay earlier entries. - repeated RuntimeLayer layers = 1; -} diff --git a/generated_api_shadow/envoy/config/bootstrap/v3/BUILD b/generated_api_shadow/envoy/config/bootstrap/v3/BUILD deleted file mode 100644 index 48e8fb522c993..0000000000000 --- a/generated_api_shadow/envoy/config/bootstrap/v3/BUILD +++ /dev/null @@ -1,22 +0,0 @@ -# DO NOT EDIT. 
This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/annotations:pkg", - "//envoy/config/accesslog/v3:pkg", - "//envoy/config/bootstrap/v2:pkg", - "//envoy/config/cluster/v3:pkg", - "//envoy/config/core/v3:pkg", - "//envoy/config/listener/v3:pkg", - "//envoy/config/metrics/v3:pkg", - "//envoy/config/overload/v3:pkg", - "//envoy/config/trace/v3:pkg", - "//envoy/extensions/transport_sockets/tls/v3:pkg", - "//envoy/type/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/config/bootstrap/v3/bootstrap.proto b/generated_api_shadow/envoy/config/bootstrap/v3/bootstrap.proto deleted file mode 100644 index 9171d066a4302..0000000000000 --- a/generated_api_shadow/envoy/config/bootstrap/v3/bootstrap.proto +++ /dev/null @@ -1,648 +0,0 @@ -syntax = "proto3"; - -package envoy.config.bootstrap.v3; - -import "envoy/config/accesslog/v3/accesslog.proto"; -import "envoy/config/cluster/v3/cluster.proto"; -import "envoy/config/core/v3/address.proto"; -import "envoy/config/core/v3/base.proto"; -import "envoy/config/core/v3/config_source.proto"; -import "envoy/config/core/v3/event_service_config.proto"; -import "envoy/config/core/v3/extension.proto"; -import "envoy/config/core/v3/resolver.proto"; -import "envoy/config/core/v3/socket_option.proto"; -import "envoy/config/listener/v3/listener.proto"; -import "envoy/config/metrics/v3/stats.proto"; -import "envoy/config/overload/v3/overload.proto"; -import "envoy/config/trace/v3/http_tracer.proto"; -import "envoy/extensions/transport_sockets/tls/v3/secret.proto"; -import "envoy/type/v3/percent.proto"; - -import "google/protobuf/duration.proto"; -import "google/protobuf/struct.proto"; -import "google/protobuf/wrappers.proto"; - -import "envoy/annotations/deprecation.proto"; -import "udpa/annotations/migrate.proto"; -import 
"udpa/annotations/security.proto"; -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.bootstrap.v3"; -option java_outer_classname = "BootstrapProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Bootstrap] -// This proto is supplied via the :option:`-c` CLI flag and acts as the root -// of the Envoy v3 configuration. See the :ref:`v3 configuration overview -// ` for more detail. - -// Bootstrap :ref:`configuration overview `. -// [#next-free-field: 33] -message Bootstrap { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.bootstrap.v2.Bootstrap"; - - message StaticResources { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.bootstrap.v2.Bootstrap.StaticResources"; - - // Static :ref:`Listeners `. These listeners are - // available regardless of LDS configuration. - repeated listener.v3.Listener listeners = 1; - - // If a network based configuration source is specified for :ref:`cds_config - // `, it's necessary - // to have some initial cluster definitions available to allow Envoy to know - // how to speak to the management server. These cluster definitions may not - // use :ref:`EDS ` (i.e. they should be static - // IP or DNS-based). - repeated cluster.v3.Cluster clusters = 2; - - // These static secrets can be used by :ref:`SdsSecretConfig - // ` - repeated envoy.extensions.transport_sockets.tls.v3.Secret secrets = 3; - } - - // [#next-free-field: 7] - message DynamicResources { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.bootstrap.v2.Bootstrap.DynamicResources"; - - reserved 4; - - // All :ref:`Listeners ` are provided by a single - // :ref:`LDS ` configuration source. 
- core.v3.ConfigSource lds_config = 1; - - // xdstp:// resource locator for listener collection. - // [#not-implemented-hide:] - string lds_resources_locator = 5; - - // All post-bootstrap :ref:`Cluster ` definitions are - // provided by a single :ref:`CDS ` - // configuration source. - core.v3.ConfigSource cds_config = 2; - - // xdstp:// resource locator for cluster collection. - // [#not-implemented-hide:] - string cds_resources_locator = 6; - - // A single :ref:`ADS ` source may be optionally - // specified. This must have :ref:`api_type - // ` :ref:`GRPC - // `. Only - // :ref:`ConfigSources ` that have - // the :ref:`ads ` field set will be - // streamed on the ADS channel. - core.v3.ApiConfigSource ads_config = 3; - } - - reserved 10; - - // Node identity to present to the management server and for instance - // identification purposes (e.g. in generated headers). - core.v3.Node node = 1; - - // A list of :ref:`Node ` field names - // that will be included in the context parameters of the effective - // xdstp:// URL that is sent in a discovery request when resource - // locators are used for LDS/CDS. Any non-string field will have its JSON - // encoding set as the context parameter value, with the exception of - // metadata, which will be flattened (see example below). The supported field - // names are: - // - "cluster" - // - "id" - // - "locality.region" - // - "locality.sub_zone" - // - "locality.zone" - // - "metadata" - // - "user_agent_build_version.metadata" - // - "user_agent_build_version.version" - // - "user_agent_name" - // - "user_agent_version" - // - // The node context parameters act as a base layer dictionary for the context - // parameters (i.e. more specific resource specific context parameters will - // override). Field names will be prefixed with “udpa.node.” when included in - // context parameters. 
- // - // For example, if node_context_params is ``["user_agent_name", "metadata"]``, - // the implied context parameters might be:: - // - // node.user_agent_name: "envoy" - // node.metadata.foo: "{\"bar\": \"baz\"}" - // node.metadata.some: "42" - // node.metadata.thing: "\"thing\"" - // - // [#not-implemented-hide:] - repeated string node_context_params = 26; - - // Statically specified resources. - StaticResources static_resources = 2; - - // xDS configuration sources. - DynamicResources dynamic_resources = 3; - - // Configuration for the cluster manager which owns all upstream clusters - // within the server. - ClusterManager cluster_manager = 4; - - // Health discovery service config option. - // (:ref:`core.ApiConfigSource `) - core.v3.ApiConfigSource hds_config = 14; - - // Optional file system path to search for startup flag files. - string flags_path = 5; - - // Optional set of stats sinks. - repeated metrics.v3.StatsSink stats_sinks = 6; - - // Configuration for internal processing of stats. - metrics.v3.StatsConfig stats_config = 13; - - // Optional duration between flushes to configured stats sinks. For - // performance reasons Envoy latches counters and only flushes counters and - // gauges at a periodic interval. If not specified the default is 5000ms (5 - // seconds). Only one of `stats_flush_interval` or `stats_flush_on_admin` - // can be set. - // Duration must be at least 1ms and at most 5 min. - google.protobuf.Duration stats_flush_interval = 7 [ - (validate.rules).duration = { - lt {seconds: 300} - gte {nanos: 1000000} - }, - (udpa.annotations.field_migrate).oneof_promotion = "stats_flush" - ]; - - oneof stats_flush { - // Flush stats to sinks only when queried for on the admin interface. If set, - // a flush timer is not created. Only one of `stats_flush_on_admin` or - // `stats_flush_interval` can be set. - bool stats_flush_on_admin = 29 [(validate.rules).bool = {const: true}]; - } - - // Optional watchdog configuration. 
- // This is for a single watchdog configuration for the entire system. - // Deprecated in favor of *watchdogs* which has finer granularity. - Watchdog watchdog = 8 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - - // Optional watchdogs configuration. - // This is used for specifying different watchdogs for the different subsystems. - // [#extension-category: envoy.guarddog_actions] - Watchdogs watchdogs = 27; - - // Configuration for an external tracing provider. - // - // .. attention:: - // This field has been deprecated in favor of :ref:`HttpConnectionManager.Tracing.provider - // `. - trace.v3.Tracing tracing = 9 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - - // Configuration for the runtime configuration provider. If not - // specified, a “null” provider will be used which will result in all defaults - // being used. - LayeredRuntime layered_runtime = 17; - - // Configuration for the local administration HTTP server. - Admin admin = 12; - - // Optional overload manager configuration. - overload.v3.OverloadManager overload_manager = 15 [ - (udpa.annotations.security).configure_for_untrusted_downstream = true, - (udpa.annotations.security).configure_for_untrusted_upstream = true - ]; - - // Enable :ref:`stats for event dispatcher `, defaults to false. - // Note that this records a value for each iteration of the event loop on every thread. This - // should normally be minimal overhead, but when using - // :ref:`statsd `, it will send each observed value - // over the wire individually because the statsd protocol doesn't have any way to represent a - // histogram summary. Be aware that this can be a very large volume of data. - bool enable_dispatcher_stats = 16; - - // Optional string which will be used in lieu of x-envoy in prefixing headers. - // - // For example, if this string is present and set to X-Foo, then x-envoy-retry-on will be - // transformed into x-foo-retry-on etc. 
- // - // Note this applies to the headers Envoy will generate, the headers Envoy will sanitize, and the - // headers Envoy will trust for core code and core extensions only. Be VERY careful making - // changes to this string, especially in multi-layer Envoy deployments or deployments using - // extensions which are not upstream. - string header_prefix = 18; - - // Optional proxy version which will be used to set the value of :ref:`server.version statistic - // ` if specified. Envoy will not process this value, it will be sent as is to - // :ref:`stats sinks `. - google.protobuf.UInt64Value stats_server_version_override = 19; - - // Always use TCP queries instead of UDP queries for DNS lookups. - // This may be overridden on a per-cluster basis in cds_config, - // when :ref:`dns_resolvers ` and - // :ref:`use_tcp_for_dns_lookups ` are - // specified. - // Setting this value causes failure if the - // ``envoy.restart_features.use_apple_api_for_dns_lookups`` runtime value is true during - // server startup. Apple' API only uses UDP for DNS resolution. - // This field is deprecated in favor of *dns_resolution_config* - // which aggregates all of the DNS resolver configuration in a single message. - bool use_tcp_for_dns_lookups = 20 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - - // DNS resolution configuration which includes the underlying dns resolver addresses and options. - // This may be overridden on a per-cluster basis in cds_config, when - // :ref:`dns_resolution_config ` - // is specified. - // *dns_resolution_config* will be deprecated once - // :ref:'typed_dns_resolver_config ' - // is fully supported. - core.v3.DnsResolutionConfig dns_resolution_config = 30; - - // DNS resolver type configuration extension. This extension can be used to configure c-ares, apple, - // or any other DNS resolver types and the related parameters. 
- // For example, an object of :ref:`DnsResolutionConfig ` - // can be packed into this *typed_dns_resolver_config*. This configuration will replace the - // :ref:'dns_resolution_config ' - // configuration eventually. - // TODO(yanjunxiang): Investigate the deprecation plan for *dns_resolution_config*. - // During the transition period when both *dns_resolution_config* and *typed_dns_resolver_config* exists, - // this configuration is optional. - // When *typed_dns_resolver_config* is in place, Envoy will use it and ignore *dns_resolution_config*. - // When *typed_dns_resolver_config* is missing, the default behavior is in place. - // [#not-implemented-hide:] - core.v3.TypedExtensionConfig typed_dns_resolver_config = 31; - - // Specifies optional bootstrap extensions to be instantiated at startup time. - // Each item contains extension specific configuration. - // [#extension-category: envoy.bootstrap] - repeated core.v3.TypedExtensionConfig bootstrap_extensions = 21; - - // Specifies optional extensions instantiated at startup time and - // invoked during crash time on the request that caused the crash. - repeated FatalAction fatal_actions = 28; - - // Configuration sources that will participate in - // xdstp:// URL authority resolution. The algorithm is as - // follows: - // 1. The authority field is taken from the xdstp:// URL, call - // this *resource_authority*. - // 2. *resource_authority* is compared against the authorities in any peer - // *ConfigSource*. The peer *ConfigSource* is the configuration source - // message which would have been used unconditionally for resolution - // with opaque resource names. If there is a match with an authority, the - // peer *ConfigSource* message is used. - // 3. *resource_authority* is compared sequentially with the authorities in - // each configuration source in *config_sources*. The first *ConfigSource* - // to match wins. - // 4. 
As a fallback, if no configuration source matches, then - // *default_config_source* is used. - // 5. If *default_config_source* is not specified, resolution fails. - // [#not-implemented-hide:] - repeated core.v3.ConfigSource config_sources = 22; - - // Default configuration source for xdstp:// URLs if all - // other resolution fails. - // [#not-implemented-hide:] - core.v3.ConfigSource default_config_source = 23; - - // Optional overriding of default socket interface. The value must be the name of one of the - // socket interface factories initialized through a bootstrap extension - string default_socket_interface = 24; - - // Global map of CertificateProvider instances. These instances are referred to by name in the - // :ref:`CommonTlsContext.CertificateProviderInstance.instance_name - // ` - // field. - // [#not-implemented-hide:] - map certificate_provider_instances = 25; - - // Specifies a set of headers that need to be registered as inline header. This configuration - // allows users to customize the inline headers on-demand at Envoy startup without modifying - // Envoy's source code. - // - // Note that the 'set-cookie' header cannot be registered as inline header. - repeated CustomInlineHeader inline_headers = 32; - - Runtime hidden_envoy_deprecated_runtime = 11 [ - deprecated = true, - (envoy.annotations.deprecated_at_minor_version) = "3.0", - (envoy.annotations.disallowed_by_default) = true - ]; -} - -// Administration interface :ref:`operations documentation -// `. -// [#next-free-field: 6] -message Admin { - option (udpa.annotations.versioning).previous_message_type = "envoy.config.bootstrap.v2.Admin"; - - // Configuration for :ref:`access logs ` - // emitted by the administration server. - repeated accesslog.v3.AccessLog access_log = 5; - - // The path to write the access log for the administration server. If no - // access log is desired specify ‘/dev/null’. This is only required if - // :ref:`address ` is set. 
- // Deprecated in favor of *access_log* which offers more options. - string access_log_path = 1 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - - // The cpu profiler output path for the administration server. If no profile - // path is specified, the default is ‘/var/log/envoy/envoy.prof’. - string profile_path = 2; - - // The TCP address that the administration server will listen on. - // If not specified, Envoy will not start an administration server. - core.v3.Address address = 3; - - // Additional socket options that may not be present in Envoy source code or - // precompiled binaries. - repeated core.v3.SocketOption socket_options = 4; -} - -// Cluster manager :ref:`architecture overview `. -message ClusterManager { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.bootstrap.v2.ClusterManager"; - - message OutlierDetection { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.bootstrap.v2.ClusterManager.OutlierDetection"; - - // Specifies the path to the outlier event log. - string event_log_path = 1; - - // [#not-implemented-hide:] - // The gRPC service for the outlier detection event service. - // If empty, outlier detection events won't be sent to a remote endpoint. - core.v3.EventServiceConfig event_service = 2; - } - - // Name of the local cluster (i.e., the cluster that owns the Envoy running - // this configuration). In order to enable :ref:`zone aware routing - // ` this option must be set. - // If *local_cluster_name* is defined then :ref:`clusters - // ` must be defined in the :ref:`Bootstrap - // static cluster resources - // `. This is unrelated to - // the :option:`--service-cluster` option which does not `affect zone aware - // routing `_. - string local_cluster_name = 1; - - // Optional global configuration for outlier detection. - OutlierDetection outlier_detection = 2; - - // Optional configuration used to bind newly established upstream connections. 
- // This may be overridden on a per-cluster basis by upstream_bind_config in the cds_config. - core.v3.BindConfig upstream_bind_config = 3; - - // A management server endpoint to stream load stats to via - // *StreamLoadStats*. This must have :ref:`api_type - // ` :ref:`GRPC - // `. - core.v3.ApiConfigSource load_stats_config = 4; -} - -// Allows you to specify different watchdog configs for different subsystems. -// This allows finer tuned policies for the watchdog. If a subsystem is omitted -// the default values for that system will be used. -message Watchdogs { - // Watchdog for the main thread. - Watchdog main_thread_watchdog = 1; - - // Watchdog for the worker threads. - Watchdog worker_watchdog = 2; -} - -// Envoy process watchdog configuration. When configured, this monitors for -// nonresponsive threads and kills the process after the configured thresholds. -// See the :ref:`watchdog documentation ` for more information. -// [#next-free-field: 8] -message Watchdog { - option (udpa.annotations.versioning).previous_message_type = "envoy.config.bootstrap.v2.Watchdog"; - - message WatchdogAction { - // The events are fired in this order: KILL, MULTIKILL, MEGAMISS, MISS. - // Within an event type, actions execute in the order they are configured. - // For KILL/MULTIKILL there is a default PANIC that will run after the - // registered actions and kills the process if it wasn't already killed. - // It might be useful to specify several debug actions, and possibly an - // alternate FATAL action. - enum WatchdogEvent { - UNKNOWN = 0; - KILL = 1; - MULTIKILL = 2; - MEGAMISS = 3; - MISS = 4; - } - - // Extension specific configuration for the action. - core.v3.TypedExtensionConfig config = 1; - - WatchdogEvent event = 2 [(validate.rules).enum = {defined_only: true}]; - } - - // Register actions that will fire on given WatchDog events. - // See *WatchDogAction* for priority of events. 
- repeated WatchdogAction actions = 7; - - // The duration after which Envoy counts a nonresponsive thread in the - // *watchdog_miss* statistic. If not specified the default is 200ms. - google.protobuf.Duration miss_timeout = 1; - - // The duration after which Envoy counts a nonresponsive thread in the - // *watchdog_mega_miss* statistic. If not specified the default is - // 1000ms. - google.protobuf.Duration megamiss_timeout = 2; - - // If a watched thread has been nonresponsive for this duration, assume a - // programming error and kill the entire Envoy process. Set to 0 to disable - // kill behavior. If not specified the default is 0 (disabled). - google.protobuf.Duration kill_timeout = 3; - - // Defines the maximum jitter used to adjust the *kill_timeout* if *kill_timeout* is - // enabled. Enabling this feature would help to reduce risk of synchronized - // watchdog kill events across proxies due to external triggers. Set to 0 to - // disable. If not specified the default is 0 (disabled). - google.protobuf.Duration max_kill_timeout_jitter = 6 [(validate.rules).duration = {gte {}}]; - - // If max(2, ceil(registered_threads * Fraction(*multikill_threshold*))) - // threads have been nonresponsive for at least this duration kill the entire - // Envoy process. Set to 0 to disable this behavior. If not specified the - // default is 0 (disabled). - google.protobuf.Duration multikill_timeout = 4; - - // Sets the threshold for *multikill_timeout* in terms of the percentage of - // nonresponsive threads required for the *multikill_timeout*. - // If not specified the default is 0. - type.v3.Percent multikill_threshold = 5; -} - -// Fatal actions to run while crashing. Actions can be safe (meaning they are -// async-signal safe) or unsafe. We run all safe actions before we run unsafe actions. -// If using an unsafe action that could get stuck or deadlock, it important to -// have an out of band system to terminate the process. 
-// -// The interface for the extension is ``Envoy::Server::Configuration::FatalAction``. -// *FatalAction* extensions live in the ``envoy.extensions.fatal_actions`` API -// namespace. -message FatalAction { - // Extension specific configuration for the action. It's expected to conform - // to the ``Envoy::Server::Configuration::FatalAction`` interface. - core.v3.TypedExtensionConfig config = 1; -} - -// Runtime :ref:`configuration overview ` (deprecated). -message Runtime { - option (udpa.annotations.versioning).previous_message_type = "envoy.config.bootstrap.v2.Runtime"; - - // The implementation assumes that the file system tree is accessed via a - // symbolic link. An atomic link swap is used when a new tree should be - // switched to. This parameter specifies the path to the symbolic link. Envoy - // will watch the location for changes and reload the file system tree when - // they happen. If this parameter is not set, there will be no disk based - // runtime. - string symlink_root = 1; - - // Specifies the subdirectory to load within the root directory. This is - // useful if multiple systems share the same delivery mechanism. Envoy - // configuration elements can be contained in a dedicated subdirectory. - string subdirectory = 2; - - // Specifies an optional subdirectory to load within the root directory. If - // specified and the directory exists, configuration values within this - // directory will override those found in the primary subdirectory. This is - // useful when Envoy is deployed across many different types of servers. - // Sometimes it is useful to have a per service cluster directory for runtime - // configuration. See below for exactly how the override directory is used. - string override_subdirectory = 3; - - // Static base runtime. This will be :ref:`overridden - // ` by other runtime layers, e.g. - // disk or admin. This follows the :ref:`runtime protobuf JSON representation - // encoding `. 
- google.protobuf.Struct base = 4; -} - -// [#next-free-field: 6] -message RuntimeLayer { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.bootstrap.v2.RuntimeLayer"; - - // :ref:`Disk runtime ` layer. - message DiskLayer { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.bootstrap.v2.RuntimeLayer.DiskLayer"; - - // The implementation assumes that the file system tree is accessed via a - // symbolic link. An atomic link swap is used when a new tree should be - // switched to. This parameter specifies the path to the symbolic link. - // Envoy will watch the location for changes and reload the file system tree - // when they happen. See documentation on runtime :ref:`atomicity - // ` for further details on how reloads are - // treated. - string symlink_root = 1; - - // Specifies the subdirectory to load within the root directory. This is - // useful if multiple systems share the same delivery mechanism. Envoy - // configuration elements can be contained in a dedicated subdirectory. - string subdirectory = 3; - - // :ref:`Append ` the - // service cluster to the path under symlink root. - bool append_service_cluster = 2; - } - - // :ref:`Admin console runtime ` layer. - message AdminLayer { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.bootstrap.v2.RuntimeLayer.AdminLayer"; - } - - // :ref:`Runtime Discovery Service (RTDS) ` layer. - message RtdsLayer { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.bootstrap.v2.RuntimeLayer.RtdsLayer"; - - // Resource to subscribe to at *rtds_config* for the RTDS layer. - string name = 1; - - // RTDS configuration source. - core.v3.ConfigSource rtds_config = 2; - } - - // Descriptive name for the runtime layer. This is only used for the runtime - // :http:get:`/runtime` output. 
- string name = 1 [(validate.rules).string = {min_len: 1}]; - - oneof layer_specifier { - option (validate.required) = true; - - // :ref:`Static runtime ` layer. - // This follows the :ref:`runtime protobuf JSON representation encoding - // `. Unlike static xDS resources, this static - // layer is overridable by later layers in the runtime virtual filesystem. - google.protobuf.Struct static_layer = 2; - - DiskLayer disk_layer = 3; - - AdminLayer admin_layer = 4; - - RtdsLayer rtds_layer = 5; - } -} - -// Runtime :ref:`configuration overview `. -message LayeredRuntime { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.bootstrap.v2.LayeredRuntime"; - - // The :ref:`layers ` of the runtime. This is ordered - // such that later layers in the list overlay earlier entries. - repeated RuntimeLayer layers = 1; -} - -// Used to specify the header that needs to be registered as an inline header. -// -// If request or response contain multiple headers with the same name and the header -// name is registered as an inline header. Then multiple headers will be folded -// into one, and multiple header values will be concatenated by a suitable delimiter. -// The delimiter is generally a comma. -// -// For example, if 'foo' is registered as an inline header, and the headers contains -// the following two headers: -// -// .. code-block:: text -// -// foo: bar -// foo: eep -// -// Then they will eventually be folded into: -// -// .. code-block:: text -// -// foo: bar, eep -// -// Inline headers provide O(1) search performance, but each inline header imposes -// an additional memory overhead on all instances of the corresponding type of -// HeaderMap or TrailerMap. -message CustomInlineHeader { - enum InlineHeaderType { - REQUEST_HEADER = 0; - REQUEST_TRAILER = 1; - RESPONSE_HEADER = 2; - RESPONSE_TRAILER = 3; - } - - // The name of the header that is expected to be set as the inline header. 
- string inline_header_name = 1 - [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}]; - - // The type of the header that is expected to be set as the inline header. - InlineHeaderType inline_header_type = 2 [(validate.rules).enum = {defined_only: true}]; -} diff --git a/generated_api_shadow/envoy/config/cluster/aggregate/v2alpha/BUILD b/generated_api_shadow/envoy/config/cluster/aggregate/v2alpha/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/config/cluster/aggregate/v2alpha/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/config/cluster/aggregate/v2alpha/cluster.proto b/generated_api_shadow/envoy/config/cluster/aggregate/v2alpha/cluster.proto deleted file mode 100644 index a0fdadd75724c..0000000000000 --- a/generated_api_shadow/envoy/config/cluster/aggregate/v2alpha/cluster.proto +++ /dev/null @@ -1,24 +0,0 @@ -syntax = "proto3"; - -package envoy.config.cluster.aggregate.v2alpha; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.cluster.aggregate.v2alpha"; -option java_outer_classname = "ClusterProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.clusters.aggregate.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Aggregate cluster configuration] - -// Configuration for the aggregate cluster. See the :ref:`architecture overview -// ` for more information. 
-// [#extension: envoy.clusters.aggregate] -message ClusterConfig { - // Load balancing clusters in aggregate cluster. Clusters are prioritized based on the order they - // appear in this list. - repeated string clusters = 1 [(validate.rules).repeated = {min_items: 1}]; -} diff --git a/generated_api_shadow/envoy/config/cluster/dynamic_forward_proxy/v2alpha/BUILD b/generated_api_shadow/envoy/config/cluster/dynamic_forward_proxy/v2alpha/BUILD deleted file mode 100644 index 25c228fd56093..0000000000000 --- a/generated_api_shadow/envoy/config/cluster/dynamic_forward_proxy/v2alpha/BUILD +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/common/dynamic_forward_proxy/v2alpha:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/config/cluster/dynamic_forward_proxy/v2alpha/cluster.proto b/generated_api_shadow/envoy/config/cluster/dynamic_forward_proxy/v2alpha/cluster.proto deleted file mode 100644 index 33f5ffe057e3a..0000000000000 --- a/generated_api_shadow/envoy/config/cluster/dynamic_forward_proxy/v2alpha/cluster.proto +++ /dev/null @@ -1,29 +0,0 @@ -syntax = "proto3"; - -package envoy.config.cluster.dynamic_forward_proxy.v2alpha; - -import "envoy/config/common/dynamic_forward_proxy/v2alpha/dns_cache.proto"; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.cluster.dynamic_forward_proxy.v2alpha"; -option java_outer_classname = "ClusterProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = - "envoy.extensions.clusters.dynamic_forward_proxy.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Dynamic 
forward proxy cluster configuration] - -// Configuration for the dynamic forward proxy cluster. See the :ref:`architecture overview -// ` for more information. -// [#extension: envoy.clusters.dynamic_forward_proxy] -message ClusterConfig { - // The DNS cache configuration that the cluster will attach to. Note this configuration must - // match that of associated :ref:`dynamic forward proxy HTTP filter configuration - // `. - common.dynamic_forward_proxy.v2alpha.DnsCacheConfig dns_cache_config = 1 - [(validate.rules).message = {required: true}]; -} diff --git a/generated_api_shadow/envoy/config/cluster/redis/BUILD b/generated_api_shadow/envoy/config/cluster/redis/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/config/cluster/redis/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/config/cluster/redis/redis_cluster.proto b/generated_api_shadow/envoy/config/cluster/redis/redis_cluster.proto deleted file mode 100644 index abe88f76a6ff8..0000000000000 --- a/generated_api_shadow/envoy/config/cluster/redis/redis_cluster.proto +++ /dev/null @@ -1,81 +0,0 @@ -syntax = "proto3"; - -package envoy.config.cluster.redis; - -import "google/protobuf/duration.proto"; -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.cluster.redis"; -option java_outer_classname = "RedisClusterProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Redis Cluster Configuration] -// This cluster adds support for `Redis Cluster `_, as part -// of :ref:`Envoy's 
support for Redis Cluster `. -// -// Redis Cluster is an extension of Redis which supports sharding and high availability (where a -// shard that loses its primary fails over to a replica, and designates it as the new primary). -// However, as there is no unified frontend or proxy service in front of Redis Cluster, the client -// (in this case Envoy) must locally maintain the state of the Redis Cluster, specifically the -// topology. A random node in the cluster is queried for the topology using the `CLUSTER SLOTS -// command `_. This result is then stored locally, and -// updated at user-configured intervals. -// -// Additionally, if -// :ref:`enable_redirection` -// is true, then moved and ask redirection errors from upstream servers will trigger a topology -// refresh when they exceed a user-configured error threshold. -// -// Example: -// -// .. code-block:: yaml -// -// name: name -// connect_timeout: 0.25s -// dns_lookup_family: V4_ONLY -// hosts: -// - socket_address: -// address: foo.bar.com -// port_value: 22120 -// cluster_type: -// name: envoy.clusters.redis -// typed_config: -// "@type": type.googleapis.com/google.protobuf.Struct -// value: -// cluster_refresh_rate: 30s -// cluster_refresh_timeout: 0.5s -// redirect_refresh_interval: 10s -// redirect_refresh_threshold: 10 -// [#extension: envoy.clusters.redis] - -// [#next-free-field: 7] -message RedisClusterConfig { - // Interval between successive topology refresh requests. If not set, this defaults to 5s. - google.protobuf.Duration cluster_refresh_rate = 1 [(validate.rules).duration = {gt {}}]; - - // Timeout for topology refresh request. If not set, this defaults to 3s. - google.protobuf.Duration cluster_refresh_timeout = 2 [(validate.rules).duration = {gt {}}]; - - // The minimum interval that must pass after triggering a topology refresh request before a new - // request can possibly be triggered again. Any errors received during one of these - // time intervals are ignored. 
If not set, this defaults to 5s. - google.protobuf.Duration redirect_refresh_interval = 3; - - // The number of redirection errors that must be received before - // triggering a topology refresh request. If not set, this defaults to 5. - // If this is set to 0, topology refresh after redirect is disabled. - google.protobuf.UInt32Value redirect_refresh_threshold = 4; - - // The number of failures that must be received before triggering a topology refresh request. - // If not set, this defaults to 0, which disables the topology refresh due to failure. - uint32 failure_refresh_threshold = 5; - - // The number of hosts became degraded or unhealthy before triggering a topology refresh request. - // If not set, this defaults to 0, which disables the topology refresh due to degraded or - // unhealthy host. - uint32 host_degraded_refresh_threshold = 6; -} diff --git a/generated_api_shadow/envoy/config/cluster/v3/BUILD b/generated_api_shadow/envoy/config/cluster/v3/BUILD deleted file mode 100644 index 53f05bbbd9eba..0000000000000 --- a/generated_api_shadow/envoy/config/cluster/v3/BUILD +++ /dev/null @@ -1,19 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/annotations:pkg", - "//envoy/api/v2:pkg", - "//envoy/api/v2/cluster:pkg", - "//envoy/config/core/v3:pkg", - "//envoy/config/endpoint/v3:pkg", - "//envoy/extensions/transport_sockets/tls/v3:pkg", - "//envoy/type/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - "@com_github_cncf_udpa//xds/core/v3:pkg", - ], -) diff --git a/generated_api_shadow/envoy/config/cluster/v3/circuit_breaker.proto b/generated_api_shadow/envoy/config/cluster/v3/circuit_breaker.proto deleted file mode 100644 index 82cd329b91a72..0000000000000 --- a/generated_api_shadow/envoy/config/cluster/v3/circuit_breaker.proto +++ /dev/null @@ -1,105 +0,0 @@ -syntax = "proto3"; - -package envoy.config.cluster.v3; - -import "envoy/config/core/v3/base.proto"; -import "envoy/type/v3/percent.proto"; - -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.cluster.v3"; -option java_outer_classname = "CircuitBreakerProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Circuit breakers] - -// :ref:`Circuit breaking` settings can be -// specified individually for each defined priority. -message CircuitBreakers { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.cluster.CircuitBreakers"; - - // A Thresholds defines CircuitBreaker settings for a - // :ref:`RoutingPriority`. 
- // [#next-free-field: 9] - message Thresholds { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.cluster.CircuitBreakers.Thresholds"; - - message RetryBudget { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.cluster.CircuitBreakers.Thresholds.RetryBudget"; - - // Specifies the limit on concurrent retries as a percentage of the sum of active requests and - // active pending requests. For example, if there are 100 active requests and the - // budget_percent is set to 25, there may be 25 active retries. - // - // This parameter is optional. Defaults to 20%. - type.v3.Percent budget_percent = 1; - - // Specifies the minimum retry concurrency allowed for the retry budget. The limit on the - // number of active retries may never go below this number. - // - // This parameter is optional. Defaults to 3. - google.protobuf.UInt32Value min_retry_concurrency = 2; - } - - // The :ref:`RoutingPriority` - // the specified CircuitBreaker settings apply to. - core.v3.RoutingPriority priority = 1 [(validate.rules).enum = {defined_only: true}]; - - // The maximum number of connections that Envoy will make to the upstream - // cluster. If not specified, the default is 1024. - google.protobuf.UInt32Value max_connections = 2; - - // The maximum number of pending requests that Envoy will allow to the - // upstream cluster. If not specified, the default is 1024. - google.protobuf.UInt32Value max_pending_requests = 3; - - // The maximum number of parallel requests that Envoy will make to the - // upstream cluster. If not specified, the default is 1024. - google.protobuf.UInt32Value max_requests = 4; - - // The maximum number of parallel retries that Envoy will allow to the - // upstream cluster. If not specified, the default is 3. - google.protobuf.UInt32Value max_retries = 5; - - // Specifies a limit on concurrent retries in relation to the number of active requests. This - // parameter is optional. - // - // .. 
note:: - // - // If this field is set, the retry budget will override any configured retry circuit - // breaker. - RetryBudget retry_budget = 8; - - // If track_remaining is true, then stats will be published that expose - // the number of resources remaining until the circuit breakers open. If - // not specified, the default is false. - // - // .. note:: - // - // If a retry budget is used in lieu of the max_retries circuit breaker, - // the remaining retry resources remaining will not be tracked. - bool track_remaining = 6; - - // The maximum number of connection pools per cluster that Envoy will concurrently support at - // once. If not specified, the default is unlimited. Set this for clusters which create a - // large number of connection pools. See - // :ref:`Circuit Breaking ` for - // more details. - google.protobuf.UInt32Value max_connection_pools = 7; - } - - // If multiple :ref:`Thresholds` - // are defined with the same :ref:`RoutingPriority`, - // the first one in the list is used. If no Thresholds is defined for a given - // :ref:`RoutingPriority`, the default values - // are used. 
- repeated Thresholds thresholds = 1; -} diff --git a/generated_api_shadow/envoy/config/cluster/v3/cluster.proto b/generated_api_shadow/envoy/config/cluster/v3/cluster.proto deleted file mode 100644 index 2e40700c3ace7..0000000000000 --- a/generated_api_shadow/envoy/config/cluster/v3/cluster.proto +++ /dev/null @@ -1,1163 +0,0 @@ -syntax = "proto3"; - -package envoy.config.cluster.v3; - -import "envoy/config/cluster/v3/circuit_breaker.proto"; -import "envoy/config/cluster/v3/filter.proto"; -import "envoy/config/cluster/v3/outlier_detection.proto"; -import "envoy/config/core/v3/address.proto"; -import "envoy/config/core/v3/base.proto"; -import "envoy/config/core/v3/config_source.proto"; -import "envoy/config/core/v3/extension.proto"; -import "envoy/config/core/v3/health_check.proto"; -import "envoy/config/core/v3/protocol.proto"; -import "envoy/config/core/v3/resolver.proto"; -import "envoy/config/endpoint/v3/endpoint.proto"; -import "envoy/extensions/transport_sockets/tls/v3/tls.proto"; -import "envoy/type/v3/percent.proto"; - -import "google/protobuf/any.proto"; -import "google/protobuf/duration.proto"; -import "google/protobuf/struct.proto"; -import "google/protobuf/wrappers.proto"; - -import "xds/core/v3/collection_entry.proto"; - -import "envoy/annotations/deprecation.proto"; -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/security.proto"; -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.cluster.v3"; -option java_outer_classname = "ClusterProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Cluster configuration] - -// Cluster list collections. Entries are *Cluster* resources or references. 
-// [#not-implemented-hide:] -message ClusterCollection { - xds.core.v3.CollectionEntry entries = 1; -} - -// Configuration for a single upstream cluster. -// [#next-free-field: 56] -message Cluster { - option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.Cluster"; - - // Refer to :ref:`service discovery type ` - // for an explanation on each type. - enum DiscoveryType { - // Refer to the :ref:`static discovery type` - // for an explanation. - STATIC = 0; - - // Refer to the :ref:`strict DNS discovery - // type` - // for an explanation. - STRICT_DNS = 1; - - // Refer to the :ref:`logical DNS discovery - // type` - // for an explanation. - LOGICAL_DNS = 2; - - // Refer to the :ref:`service discovery type` - // for an explanation. - EDS = 3; - - // Refer to the :ref:`original destination discovery - // type` - // for an explanation. - ORIGINAL_DST = 4; - } - - // Refer to :ref:`load balancer type ` architecture - // overview section for information on each type. - enum LbPolicy { - // Refer to the :ref:`round robin load balancing - // policy` - // for an explanation. - ROUND_ROBIN = 0; - - // Refer to the :ref:`least request load balancing - // policy` - // for an explanation. - LEAST_REQUEST = 1; - - // Refer to the :ref:`ring hash load balancing - // policy` - // for an explanation. - RING_HASH = 2; - - // Refer to the :ref:`random load balancing - // policy` - // for an explanation. - RANDOM = 3; - - // Refer to the :ref:`Maglev load balancing policy` - // for an explanation. - MAGLEV = 5; - - // This load balancer type must be specified if the configured cluster provides a cluster - // specific load balancer. Consult the configured cluster's documentation for whether to set - // this option or not. - CLUSTER_PROVIDED = 6; - - // Use the new :ref:`load_balancing_policy - // ` field to determine the LB policy. 
- // [#next-major-version: In the v3 API, we should consider deprecating the lb_policy field - // and instead using the new load_balancing_policy field as the one and only mechanism for - // configuring this.] - LOAD_BALANCING_POLICY_CONFIG = 7; - - hidden_envoy_deprecated_ORIGINAL_DST_LB = 4 [ - deprecated = true, - (envoy.annotations.disallowed_by_default_enum) = true, - (envoy.annotations.deprecated_at_minor_version_enum) = "3.0" - ]; - } - - // When V4_ONLY is selected, the DNS resolver will only perform a lookup for - // addresses in the IPv4 family. If V6_ONLY is selected, the DNS resolver will - // only perform a lookup for addresses in the IPv6 family. If AUTO is - // specified, the DNS resolver will first perform a lookup for addresses in - // the IPv6 family and fallback to a lookup for addresses in the IPv4 family. - // For cluster types other than - // :ref:`STRICT_DNS` and - // :ref:`LOGICAL_DNS`, - // this setting is - // ignored. - enum DnsLookupFamily { - AUTO = 0; - V4_ONLY = 1; - V6_ONLY = 2; - } - - enum ClusterProtocolSelection { - // Cluster can only operate on one of the possible upstream protocols (HTTP1.1, HTTP2). - // If :ref:`http2_protocol_options ` are - // present, HTTP2 will be used, otherwise HTTP1.1 will be used. - USE_CONFIGURED_PROTOCOL = 0; - - // Use HTTP1.1 or HTTP2, depending on which one is used on the downstream connection. - USE_DOWNSTREAM_PROTOCOL = 1; - } - - // TransportSocketMatch specifies what transport socket config will be used - // when the match conditions are satisfied. - message TransportSocketMatch { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.Cluster.TransportSocketMatch"; - - // The name of the match, used in stats generation. - string name = 1 [(validate.rules).string = {min_len: 1}]; - - // Optional endpoint metadata match criteria. 
- // The connection to the endpoint with metadata matching what is set in this field - // will use the transport socket configuration specified here. - // The endpoint's metadata entry in *envoy.transport_socket_match* is used to match - // against the values specified in this field. - google.protobuf.Struct match = 2; - - // The configuration of the transport socket. - // [#extension-category: envoy.transport_sockets.upstream] - core.v3.TransportSocket transport_socket = 3; - } - - // Extended cluster type. - message CustomClusterType { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.Cluster.CustomClusterType"; - - // The type of the cluster to instantiate. The name must match a supported cluster type. - string name = 1 [(validate.rules).string = {min_len: 1}]; - - // Cluster specific configuration which depends on the cluster being instantiated. - // See the supported cluster for further documentation. - // [#extension-category: envoy.clusters] - google.protobuf.Any typed_config = 2; - } - - // Only valid when discovery type is EDS. - message EdsClusterConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.Cluster.EdsClusterConfig"; - - // Configuration for the source of EDS updates for this Cluster. - core.v3.ConfigSource eds_config = 1; - - // Optional alternative to cluster name to present to EDS. This does not - // have the same restrictions as cluster name, i.e. it may be arbitrary - // length. This may be a xdstp:// URL. - string service_name = 2; - } - - // Optionally divide the endpoints in this cluster into subsets defined by - // endpoint metadata and selected by route and weighted cluster metadata. - // [#next-free-field: 8] - message LbSubsetConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.Cluster.LbSubsetConfig"; - - // If NO_FALLBACK is selected, a result - // equivalent to no healthy hosts is reported. 
If ANY_ENDPOINT is selected, - // any cluster endpoint may be returned (subject to policy, health checks, - // etc). If DEFAULT_SUBSET is selected, load balancing is performed over the - // endpoints matching the values from the default_subset field. - enum LbSubsetFallbackPolicy { - NO_FALLBACK = 0; - ANY_ENDPOINT = 1; - DEFAULT_SUBSET = 2; - } - - // Specifications for subsets. - message LbSubsetSelector { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.Cluster.LbSubsetConfig.LbSubsetSelector"; - - // Allows to override top level fallback policy per selector. - enum LbSubsetSelectorFallbackPolicy { - // If NOT_DEFINED top level config fallback policy is used instead. - NOT_DEFINED = 0; - - // If NO_FALLBACK is selected, a result equivalent to no healthy hosts is reported. - NO_FALLBACK = 1; - - // If ANY_ENDPOINT is selected, any cluster endpoint may be returned - // (subject to policy, health checks, etc). - ANY_ENDPOINT = 2; - - // If DEFAULT_SUBSET is selected, load balancing is performed over the - // endpoints matching the values from the default_subset field. - DEFAULT_SUBSET = 3; - - // If KEYS_SUBSET is selected, subset selector matching is performed again with metadata - // keys reduced to - // :ref:`fallback_keys_subset`. - // It allows for a fallback to a different, less specific selector if some of the keys of - // the selector are considered optional. - KEYS_SUBSET = 4; - } - - // List of keys to match with the weighted cluster metadata. - repeated string keys = 1; - - // Selects a mode of operation in which each subset has only one host. This mode uses the same rules for - // choosing a host, but updating hosts is faster, especially for large numbers of hosts. - // - // If a match is found to a host, that host will be used regardless of priority levels, unless the host is unhealthy. - // - // Currently, this mode is only supported if `subset_selectors` has only one entry, and `keys` contains - // only one entry. 
- // - // When this mode is enabled, configurations that contain more than one host with the same metadata value for the single key in `keys` - // will use only one of the hosts with the given key; no requests will be routed to the others. The cluster gauge - // :ref:`lb_subsets_single_host_per_subset_duplicate` indicates how many duplicates are - // present in the current configuration. - bool single_host_per_subset = 4; - - // The behavior used when no endpoint subset matches the selected route's - // metadata. - LbSubsetSelectorFallbackPolicy fallback_policy = 2 - [(validate.rules).enum = {defined_only: true}]; - - // Subset of - // :ref:`keys` used by - // :ref:`KEYS_SUBSET` - // fallback policy. - // It has to be a non empty list if KEYS_SUBSET fallback policy is selected. - // For any other fallback policy the parameter is not used and should not be set. - // Only values also present in - // :ref:`keys` are allowed, but - // `fallback_keys_subset` cannot be equal to `keys`. - repeated string fallback_keys_subset = 3; - } - - // The behavior used when no endpoint subset matches the selected route's - // metadata. The value defaults to - // :ref:`NO_FALLBACK`. - LbSubsetFallbackPolicy fallback_policy = 1 [(validate.rules).enum = {defined_only: true}]; - - // Specifies the default subset of endpoints used during fallback if - // fallback_policy is - // :ref:`DEFAULT_SUBSET`. - // Each field in default_subset is - // compared to the matching LbEndpoint.Metadata under the *envoy.lb* - // namespace. It is valid for no hosts to match, in which case the behavior - // is the same as a fallback_policy of - // :ref:`NO_FALLBACK`. - google.protobuf.Struct default_subset = 2; - - // For each entry, LbEndpoint.Metadata's - // *envoy.lb* namespace is traversed and a subset is created for each unique - // combination of key and value. For example: - // - // .. 
code-block:: json - // - // { "subset_selectors": [ - // { "keys": [ "version" ] }, - // { "keys": [ "stage", "hardware_type" ] } - // ]} - // - // A subset is matched when the metadata from the selected route and - // weighted cluster contains the same keys and values as the subset's - // metadata. The same host may appear in multiple subsets. - repeated LbSubsetSelector subset_selectors = 3; - - // If true, routing to subsets will take into account the localities and locality weights of the - // endpoints when making the routing decision. - // - // There are some potential pitfalls associated with enabling this feature, as the resulting - // traffic split after applying both a subset match and locality weights might be undesirable. - // - // Consider for example a situation in which you have 50/50 split across two localities X/Y - // which have 100 hosts each without subsetting. If the subset LB results in X having only 1 - // host selected but Y having 100, then a lot more load is being dumped on the single host in X - // than originally anticipated in the load balancing assignment delivered via EDS. - bool locality_weight_aware = 4; - - // When used with locality_weight_aware, scales the weight of each locality by the ratio - // of hosts in the subset vs hosts in the original subset. This aims to even out the load - // going to an individual locality if said locality is disproportionately affected by the - // subset predicate. - bool scale_locality_weight = 5; - - // If true, when a fallback policy is configured and its corresponding subset fails to find - // a host this will cause any host to be selected instead. - // - // This is useful when using the default subset as the fallback policy, given the default - // subset might become empty. With this option enabled, if that happens the LB will attempt - // to select a host from the entire cluster. 
- bool panic_mode_any = 6; - - // If true, metadata specified for a metadata key will be matched against the corresponding - // endpoint metadata if the endpoint metadata matches the value exactly OR it is a list value - // and any of the elements in the list matches the criteria. - bool list_as_any = 7; - } - - // Specific configuration for the LeastRequest load balancing policy. - message LeastRequestLbConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.Cluster.LeastRequestLbConfig"; - - // The number of random healthy hosts from which the host with the fewest active requests will - // be chosen. Defaults to 2 so that we perform two-choice selection if the field is not set. - google.protobuf.UInt32Value choice_count = 1 [(validate.rules).uint32 = {gte: 2}]; - - // The following formula is used to calculate the dynamic weights when hosts have different load - // balancing weights: - // - // `weight = load_balancing_weight / (active_requests + 1)^active_request_bias` - // - // The larger the active request bias is, the more aggressively active requests will lower the - // effective weight when all host weights are not equal. - // - // `active_request_bias` must be greater than or equal to 0.0. - // - // When `active_request_bias == 0.0` the Least Request Load Balancer doesn't consider the number - // of active requests at the time it picks a host and behaves like the Round Robin Load - // Balancer. - // - // When `active_request_bias > 0.0` the Least Request Load Balancer scales the load balancing - // weight by the number of active requests at the time it does a pick. - // - // The value is cached for performance reasons and refreshed whenever one of the Load Balancer's - // host sets changes, e.g., whenever there is a host membership update or a host load balancing - // weight change. - // - // .. note:: - // This setting only takes effect if all host weights are not equal. 
- core.v3.RuntimeDouble active_request_bias = 2; - } - - // Specific configuration for the :ref:`RingHash` - // load balancing policy. - message RingHashLbConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.Cluster.RingHashLbConfig"; - - // The hash function used to hash hosts onto the ketama ring. - enum HashFunction { - // Use `xxHash `_, this is the default hash function. - XX_HASH = 0; - - // Use `MurmurHash2 `_, this is compatible with - // std:hash in GNU libstdc++ 3.4.20 or above. This is typically the case when compiled - // on Linux and not macOS. - MURMUR_HASH_2 = 1; - } - - reserved 2; - - // Minimum hash ring size. The larger the ring is (that is, the more hashes there are for each - // provided host) the better the request distribution will reflect the desired weights. Defaults - // to 1024 entries, and limited to 8M entries. See also - // :ref:`maximum_ring_size`. - google.protobuf.UInt64Value minimum_ring_size = 1 [(validate.rules).uint64 = {lte: 8388608}]; - - // The hash function used to hash hosts onto the ketama ring. The value defaults to - // :ref:`XX_HASH`. - HashFunction hash_function = 3 [(validate.rules).enum = {defined_only: true}]; - - // Maximum hash ring size. Defaults to 8M entries, and limited to 8M entries, but can be lowered - // to further constrain resource use. See also - // :ref:`minimum_ring_size`. - google.protobuf.UInt64Value maximum_ring_size = 4 [(validate.rules).uint64 = {lte: 8388608}]; - } - - // Specific configuration for the :ref:`Maglev` - // load balancing policy. - message MaglevLbConfig { - // The table size for Maglev hashing. The Maglev aims for ‘minimal disruption’ rather than an absolute guarantee. - // Minimal disruption means that when the set of upstreams changes, a connection will likely be sent to the same - // upstream as it was before. Increasing the table size reduces the amount of disruption. - // The table size must be prime number limited to 5000011. 
If it is not specified, the default is 65537. - google.protobuf.UInt64Value table_size = 1 [(validate.rules).uint64 = {lte: 5000011}]; - } - - // Specific configuration for the - // :ref:`Original Destination ` - // load balancing policy. - message OriginalDstLbConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.Cluster.OriginalDstLbConfig"; - - // When true, :ref:`x-envoy-original-dst-host - // ` can be used to override destination - // address. - // - // .. attention:: - // - // This header isn't sanitized by default, so enabling this feature allows HTTP clients to - // route traffic to arbitrary hosts and/or ports, which may have serious security - // consequences. - // - // .. note:: - // - // If the header appears multiple times only the first value is used. - bool use_http_header = 1; - } - - // Common configuration for all load balancer implementations. - // [#next-free-field: 8] - message CommonLbConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.Cluster.CommonLbConfig"; - - // Configuration for :ref:`zone aware routing - // `. - message ZoneAwareLbConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.Cluster.CommonLbConfig.ZoneAwareLbConfig"; - - // Configures percentage of requests that will be considered for zone aware routing - // if zone aware routing is configured. If not specified, the default is 100%. - // * :ref:`runtime values `. - // * :ref:`Zone aware routing support `. - type.v3.Percent routing_enabled = 1; - - // Configures minimum upstream cluster size required for zone aware routing - // If upstream cluster size is less than specified, zone aware routing is not performed - // even if zone aware routing is configured. If not specified, the default is 6. - // * :ref:`runtime values `. - // * :ref:`Zone aware routing support `. 
- google.protobuf.UInt64Value min_cluster_size = 2; - - // If set to true, Envoy will not consider any hosts when the cluster is in :ref:`panic - // mode`. Instead, the cluster will fail all - // requests as if all hosts are unhealthy. This can help avoid potentially overwhelming a - // failing service. - bool fail_traffic_on_panic = 3; - } - - // Configuration for :ref:`locality weighted load balancing - // ` - message LocalityWeightedLbConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.Cluster.CommonLbConfig.LocalityWeightedLbConfig"; - } - - // Common Configuration for all consistent hashing load balancers (MaglevLb, RingHashLb, etc.) - message ConsistentHashingLbConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.Cluster.CommonLbConfig.ConsistentHashingLbConfig"; - - // If set to `true`, the cluster will use hostname instead of the resolved - // address as the key to consistently hash to an upstream host. Only valid for StrictDNS clusters with hostnames which resolve to a single IP address. - bool use_hostname_for_hashing = 1; - - // Configures percentage of average cluster load to bound per upstream host. For example, with a value of 150 - // no upstream host will get a load more than 1.5 times the average load of all the hosts in the cluster. - // If not specified, the load is not bounded for any upstream host. Typical value for this parameter is between 120 and 200. - // Minimum is 100. - // - // Applies to both Ring Hash and Maglev load balancers. - // - // This is implemented based on the method described in the paper https://arxiv.org/abs/1608.01350. For the specified - // `hash_balance_factor`, requests to any upstream host are capped at `hash_balance_factor/100` times the average number of requests - // across the cluster. When a request arrives for an upstream host that is currently serving at its max capacity, linear probing - // is used to identify an eligible host. 
Further, the linear probe is implemented using a random jump in hosts ring/table to identify - // the eligible host (this technique is as described in the paper https://arxiv.org/abs/1908.08762 - the random jump avoids the - // cascading overflow effect when choosing the next host in the ring/table). - // - // If weights are specified on the hosts, they are respected. - // - // This is an O(N) algorithm, unlike other load balancers. Using a lower `hash_balance_factor` results in more hosts - // being probed, so use a higher value if you require better performance. - google.protobuf.UInt32Value hash_balance_factor = 2 [(validate.rules).uint32 = {gte: 100}]; - } - - // Configures the :ref:`healthy panic threshold `. - // If not specified, the default is 50%. - // To disable panic mode, set to 0%. - // - // .. note:: - // The specified percent will be truncated to the nearest 1%. - type.v3.Percent healthy_panic_threshold = 1; - - oneof locality_config_specifier { - ZoneAwareLbConfig zone_aware_lb_config = 2; - - LocalityWeightedLbConfig locality_weighted_lb_config = 3; - } - - // If set, all health check/weight/metadata updates that happen within this duration will be - // merged and delivered in one shot when the duration expires. The start of the duration is when - // the first update happens. This is useful for big clusters, with potentially noisy deploys - // that might trigger excessive CPU usage due to a constant stream of healthcheck state changes - // or metadata updates. The first set of updates to be seen apply immediately (e.g.: a new - // cluster). Please always keep in mind that the use of sandbox technologies may change this - // behavior. - // - // If this is not set, we default to a merge window of 1000ms. To disable it, set the merge - // window to 0. - // - // Note: merging does not apply to cluster membership changes (e.g.: adds/removes); this is - // because merging those updates isn't currently safe. 
See - // https://github.com/envoyproxy/envoy/pull/3941. - google.protobuf.Duration update_merge_window = 4; - - // If set to true, Envoy will :ref:`exclude ` new hosts - // when computing load balancing weights until they have been health checked for the first time. - // This will have no effect unless active health checking is also configured. - bool ignore_new_hosts_until_first_hc = 5; - - // If set to `true`, the cluster manager will drain all existing - // connections to upstream hosts whenever hosts are added or removed from the cluster. - bool close_connections_on_host_set_change = 6; - - // Common Configuration for all consistent hashing load balancers (MaglevLb, RingHashLb, etc.) - ConsistentHashingLbConfig consistent_hashing_lb_config = 7; - } - - message RefreshRate { - option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.Cluster.RefreshRate"; - - // Specifies the base interval between refreshes. This parameter is required and must be greater - // than zero and less than - // :ref:`max_interval `. - google.protobuf.Duration base_interval = 1 [(validate.rules).duration = { - required: true - gt {nanos: 1000000} - }]; - - // Specifies the maximum interval between refreshes. This parameter is optional, but must be - // greater than or equal to the - // :ref:`base_interval ` if set. The default - // is 10 times the :ref:`base_interval `. - google.protobuf.Duration max_interval = 2 [(validate.rules).duration = {gt {nanos: 1000000}}]; - } - - message PreconnectPolicy { - // Indicates how many streams (rounded up) can be anticipated per-upstream for each - // incoming stream. This is useful for high-QPS or latency-sensitive services. Preconnecting - // will only be done if the upstream is healthy and the cluster has traffic. - // - // For example if this is 2, for an incoming HTTP/1.1 stream, 2 connections will be - // established, one for the new incoming stream, and one for a presumed follow-up stream. 
For - // HTTP/2, only one connection would be established by default as one connection can - // serve both the original and presumed follow-up stream. - // - // In steady state for non-multiplexed connections a value of 1.5 would mean if there were 100 - // active streams, there would be 100 connections in use, and 50 connections preconnected. - // This might be a useful value for something like short lived single-use connections, - // for example proxying HTTP/1.1 if keep-alive were false and each stream resulted in connection - // termination. It would likely be overkill for long lived connections, such as TCP proxying SMTP - // or regular HTTP/1.1 with keep-alive. For long lived traffic, a value of 1.05 would be more - // reasonable, where for every 100 connections, 5 preconnected connections would be in the queue - // in case of unexpected disconnects where the connection could not be reused. - // - // If this value is not set, or set explicitly to one, Envoy will fetch as many connections - // as needed to serve streams in flight. This means in steady state if a connection is torn down, - // a subsequent streams will pay an upstream-rtt latency penalty waiting for a new connection. - // - // This is limited somewhat arbitrarily to 3 because preconnecting too aggressively can - // harm latency more than the preconnecting helps. - google.protobuf.DoubleValue per_upstream_preconnect_ratio = 1 - [(validate.rules).double = {lte: 3.0 gte: 1.0}]; - - // Indicates how many many streams (rounded up) can be anticipated across a cluster for each - // stream, useful for low QPS services. This is currently supported for a subset of - // deterministic non-hash-based load-balancing algorithms (weighted round robin, random). - // Unlike *per_upstream_preconnect_ratio* this preconnects across the upstream instances in a - // cluster, doing best effort predictions of what upstream would be picked next and - // pre-establishing a connection. 
- // - // Preconnecting will be limited to one preconnect per configured upstream in the cluster and will - // only be done if there are healthy upstreams and the cluster has traffic. - // - // For example if preconnecting is set to 2 for a round robin HTTP/2 cluster, on the first - // incoming stream, 2 connections will be preconnected - one to the first upstream for this - // cluster, one to the second on the assumption there will be a follow-up stream. - // - // If this value is not set, or set explicitly to one, Envoy will fetch as many connections - // as needed to serve streams in flight, so during warm up and in steady state if a connection - // is closed (and per_upstream_preconnect_ratio is not set), there will be a latency hit for - // connection establishment. - // - // If both this and preconnect_ratio are set, Envoy will make sure both predicted needs are met, - // basically preconnecting max(predictive-preconnect, per-upstream-preconnect), for each - // upstream. - google.protobuf.DoubleValue predictive_preconnect_ratio = 2 - [(validate.rules).double = {lte: 3.0 gte: 1.0}]; - } - - reserved 12, 15; - - // Configuration to use different transport sockets for different endpoints. - // The entry of *envoy.transport_socket_match* in the - // :ref:`LbEndpoint.Metadata ` - // is used to match against the transport sockets as they appear in the list. The first - // :ref:`match ` is used. - // For example, with the following match - // - // .. code-block:: yaml - // - // transport_socket_matches: - // - name: "enableMTLS" - // match: - // acceptMTLS: true - // transport_socket: - // name: envoy.transport_sockets.tls - // config: { ... 
} # tls socket configuration - // - name: "defaultToPlaintext" - // match: {} - // transport_socket: - // name: envoy.transport_sockets.raw_buffer - // - // Connections to the endpoints whose metadata value under *envoy.transport_socket_match* - // having "acceptMTLS"/"true" key/value pair use the "enableMTLS" socket configuration. - // - // If a :ref:`socket match ` with empty match - // criteria is provided, that always match any endpoint. For example, the "defaultToPlaintext" - // socket match in case above. - // - // If an endpoint metadata's value under *envoy.transport_socket_match* does not match any - // *TransportSocketMatch*, socket configuration fallbacks to use the *tls_context* or - // *transport_socket* specified in this cluster. - // - // This field allows gradual and flexible transport socket configuration changes. - // - // The metadata of endpoints in EDS can indicate transport socket capabilities. For example, - // an endpoint's metadata can have two key value pairs as "acceptMTLS": "true", - // "acceptPlaintext": "true". While some other endpoints, only accepting plaintext traffic - // has "acceptPlaintext": "true" metadata information. - // - // Then the xDS server can configure the CDS to a client, Envoy A, to send mutual TLS - // traffic for endpoints with "acceptMTLS": "true", by adding a corresponding - // *TransportSocketMatch* in this field. Other client Envoys receive CDS without - // *transport_socket_match* set, and still send plain text traffic to the same cluster. - // - // This field can be used to specify custom transport socket configurations for health - // checks by adding matching key/value pairs in a health check's - // :ref:`transport socket match criteria ` field. - // - // [#comment:TODO(incfly): add a detailed architecture doc on intended usage.] - repeated TransportSocketMatch transport_socket_matches = 43; - - // Supplies the name of the cluster which must be unique across all clusters. 
- // The cluster name is used when emitting - // :ref:`statistics ` if :ref:`alt_stat_name - // ` is not provided. - // Any ``:`` in the cluster name will be converted to ``_`` when emitting statistics. - string name = 1 [(validate.rules).string = {min_len: 1}]; - - // An optional alternative to the cluster name to be used for observability. This name is used - // emitting stats for the cluster and access logging the cluster name. This will appear as - // additional information in configuration dumps of a cluster's current status as - // :ref:`observability_name ` - // and as an additional tag "upstream_cluster.name" while tracing. Note: access logging using - // this field is presently enabled with runtime feature - // `envoy.reloadable_features.use_observable_cluster_name`. Any ``:`` in the name will be - // converted to ``_`` when emitting statistics. This should not be confused with :ref:`Router - // Filter Header `. - string alt_stat_name = 28 [(udpa.annotations.field_migrate).rename = "observability_name"]; - - oneof cluster_discovery_type { - // The :ref:`service discovery type ` - // to use for resolving the cluster. - DiscoveryType type = 2 [(validate.rules).enum = {defined_only: true}]; - - // The custom cluster type. - CustomClusterType cluster_type = 38; - } - - // Configuration to use for EDS updates for the Cluster. - EdsClusterConfig eds_cluster_config = 3; - - // The timeout for new network connections to hosts in the cluster. - // If not set, a default value of 5s will be used. - google.protobuf.Duration connect_timeout = 4 [(validate.rules).duration = {gt {}}]; - - // Soft limit on size of the cluster’s connections read and write buffers. If - // unspecified, an implementation defined default is applied (1MiB). - google.protobuf.UInt32Value per_connection_buffer_limit_bytes = 5 - [(udpa.annotations.security).configure_for_untrusted_upstream = true]; - - // The :ref:`load balancer type ` to use - // when picking a host in the cluster. 
- LbPolicy lb_policy = 6 [(validate.rules).enum = {defined_only: true}]; - - // Setting this is required for specifying members of - // :ref:`STATIC`, - // :ref:`STRICT_DNS` - // or :ref:`LOGICAL_DNS` clusters. - // This field supersedes the *hosts* field in the v2 API. - // - // .. attention:: - // - // Setting this allows non-EDS cluster types to contain embedded EDS equivalent - // :ref:`endpoint assignments`. - // - endpoint.v3.ClusterLoadAssignment load_assignment = 33; - - // Optional :ref:`active health checking ` - // configuration for the cluster. If no - // configuration is specified no health checking will be done and all cluster - // members will be considered healthy at all times. - repeated core.v3.HealthCheck health_checks = 8; - - // Optional maximum requests for a single upstream connection. This parameter - // is respected by both the HTTP/1.1 and HTTP/2 connection pool - // implementations. If not specified, there is no limit. Setting this - // parameter to 1 will effectively disable keep alive. - // - // .. attention:: - // This field has been deprecated in favor of the :ref:`max_requests_per_connection ` field. - google.protobuf.UInt32Value max_requests_per_connection = 9 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - - // Optional :ref:`circuit breaking ` for the cluster. - CircuitBreakers circuit_breakers = 10; - - // HTTP protocol options that are applied only to upstream HTTP connections. - // These options apply to all HTTP versions. - // This has been deprecated in favor of - // :ref:`upstream_http_protocol_options ` - // in the :ref:`http_protocol_options ` message. - // upstream_http_protocol_options can be set via the cluster's - // :ref:`extension_protocol_options`. - // See :ref:`upstream_http_protocol_options - // ` - // for example usage. 
- core.v3.UpstreamHttpProtocolOptions upstream_http_protocol_options = 46 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - - // Additional options when handling HTTP requests upstream. These options will be applicable to - // both HTTP1 and HTTP2 requests. - // This has been deprecated in favor of - // :ref:`common_http_protocol_options ` - // in the :ref:`http_protocol_options ` message. - // common_http_protocol_options can be set via the cluster's - // :ref:`extension_protocol_options`. - // See :ref:`upstream_http_protocol_options - // ` - // for example usage. - core.v3.HttpProtocolOptions common_http_protocol_options = 29 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - - // Additional options when handling HTTP1 requests. - // This has been deprecated in favor of http_protocol_options fields in the - // :ref:`http_protocol_options ` message. - // http_protocol_options can be set via the cluster's - // :ref:`extension_protocol_options`. - // See :ref:`upstream_http_protocol_options - // ` - // for example usage. - core.v3.Http1ProtocolOptions http_protocol_options = 13 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - - // Even if default HTTP2 protocol options are desired, this field must be - // set so that Envoy will assume that the upstream supports HTTP/2 when - // making new HTTP connection pool connections. Currently, Envoy only - // supports prior knowledge for upstream connections. Even if TLS is used - // with ALPN, `http2_protocol_options` must be specified. As an aside this allows HTTP/2 - // connections to happen over plain text. - // This has been deprecated in favor of http2_protocol_options fields in the - // :ref:`http_protocol_options ` - // message. http2_protocol_options can be set via the cluster's - // :ref:`extension_protocol_options`. - // See :ref:`upstream_http_protocol_options - // ` - // for example usage. 
- core.v3.Http2ProtocolOptions http2_protocol_options = 14 [ - deprecated = true, - (udpa.annotations.security).configure_for_untrusted_upstream = true, - (envoy.annotations.deprecated_at_minor_version) = "3.0" - ]; - - // The extension_protocol_options field is used to provide extension-specific protocol options - // for upstream connections. The key should match the extension filter name, such as - // "envoy.filters.network.thrift_proxy". See the extension's documentation for details on - // specific options. - // [#next-major-version: make this a list of typed extensions.] - map typed_extension_protocol_options = 36; - - // If the DNS refresh rate is specified and the cluster type is either - // :ref:`STRICT_DNS`, - // or :ref:`LOGICAL_DNS`, - // this value is used as the cluster’s DNS refresh - // rate. The value configured must be at least 1ms. If this setting is not specified, the - // value defaults to 5000ms. For cluster types other than - // :ref:`STRICT_DNS` - // and :ref:`LOGICAL_DNS` - // this setting is ignored. - google.protobuf.Duration dns_refresh_rate = 16 - [(validate.rules).duration = {gt {nanos: 1000000}}]; - - // If the DNS failure refresh rate is specified and the cluster type is either - // :ref:`STRICT_DNS`, - // or :ref:`LOGICAL_DNS`, - // this is used as the cluster’s DNS refresh rate when requests are failing. If this setting is - // not specified, the failure refresh rate defaults to the DNS refresh rate. For cluster types - // other than :ref:`STRICT_DNS` and - // :ref:`LOGICAL_DNS` this setting is - // ignored. - RefreshRate dns_failure_refresh_rate = 44; - - // Optional configuration for setting cluster's DNS refresh rate. If the value is set to true, - // cluster's DNS refresh rate will be set to resource record's TTL which comes from DNS - // resolution. - bool respect_dns_ttl = 39; - - // The DNS IP address resolution policy. If this setting is not specified, the - // value defaults to - // :ref:`AUTO`. 
- DnsLookupFamily dns_lookup_family = 17 [(validate.rules).enum = {defined_only: true}]; - - // If DNS resolvers are specified and the cluster type is either - // :ref:`STRICT_DNS`, - // or :ref:`LOGICAL_DNS`, - // this value is used to specify the cluster’s dns resolvers. - // If this setting is not specified, the value defaults to the default - // resolver, which uses /etc/resolv.conf for configuration. For cluster types - // other than - // :ref:`STRICT_DNS` - // and :ref:`LOGICAL_DNS` - // this setting is ignored. - // Setting this value causes failure if the - // ``envoy.restart_features.use_apple_api_for_dns_lookups`` runtime value is true during - // server startup. Apple's API only allows overriding DNS resolvers via system settings. - // This field is deprecated in favor of *dns_resolution_config* - // which aggregates all of the DNS resolver configuration in a single message. - repeated core.v3.Address dns_resolvers = 18 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - - // Always use TCP queries instead of UDP queries for DNS lookups. - // Setting this value causes failure if the - // ``envoy.restart_features.use_apple_api_for_dns_lookups`` runtime value is true during - // server startup. Apple' API only uses UDP for DNS resolution. - // This field is deprecated in favor of *dns_resolution_config* - // which aggregates all of the DNS resolver configuration in a single message. - bool use_tcp_for_dns_lookups = 45 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - - // DNS resolution configuration which includes the underlying dns resolver addresses and options. - // *dns_resolution_config* will be deprecated once - // :ref:'typed_dns_resolver_config ' - // is fully supported. - core.v3.DnsResolutionConfig dns_resolution_config = 53; - - // DNS resolver type configuration extension. 
This extension can be used to configure c-ares, apple, - // or any other DNS resolver types and the related parameters. - // For example, an object of :ref:`DnsResolutionConfig ` - // can be packed into this *typed_dns_resolver_config*. This configuration will replace the - // :ref:'dns_resolution_config ' - // configuration eventually. - // TODO(yanjunxiang): Investigate the deprecation plan for *dns_resolution_config*. - // During the transition period when both *dns_resolution_config* and *typed_dns_resolver_config* exists, - // this configuration is optional. - // When *typed_dns_resolver_config* is in place, Envoy will use it and ignore *dns_resolution_config*. - // When *typed_dns_resolver_config* is missing, the default behavior is in place. - // [#not-implemented-hide:] - core.v3.TypedExtensionConfig typed_dns_resolver_config = 55; - - // Optional configuration for having cluster readiness block on warm-up. Currently, only applicable for - // :ref:`STRICT_DNS`, - // or :ref:`LOGICAL_DNS`. - // If true, cluster readiness blocks on warm-up. If false, the cluster will complete - // initialization whether or not warm-up has completed. Defaults to true. - google.protobuf.BoolValue wait_for_warm_on_init = 54; - - // If specified, outlier detection will be enabled for this upstream cluster. - // Each of the configuration values can be overridden via - // :ref:`runtime values `. - OutlierDetection outlier_detection = 19; - - // The interval for removing stale hosts from a cluster type - // :ref:`ORIGINAL_DST`. - // Hosts are considered stale if they have not been used - // as upstream destinations during this interval. New hosts are added - // to original destination clusters on demand as new connections are - // redirected to Envoy, causing the number of hosts in the cluster to - // grow over time. 
Hosts that are not stale (they are actively used as - // destinations) are kept in the cluster, which allows connections to - // them remain open, saving the latency that would otherwise be spent - // on opening new connections. If this setting is not specified, the - // value defaults to 5000ms. For cluster types other than - // :ref:`ORIGINAL_DST` - // this setting is ignored. - google.protobuf.Duration cleanup_interval = 20 [(validate.rules).duration = {gt {}}]; - - // Optional configuration used to bind newly established upstream connections. - // This overrides any bind_config specified in the bootstrap proto. - // If the address and port are empty, no bind will be performed. - core.v3.BindConfig upstream_bind_config = 21; - - // Configuration for load balancing subsetting. - LbSubsetConfig lb_subset_config = 22; - - // Optional configuration for the load balancing algorithm selected by - // LbPolicy. Currently only - // :ref:`RING_HASH`, - // :ref:`MAGLEV` and - // :ref:`LEAST_REQUEST` - // has additional configuration options. - // Specifying ring_hash_lb_config or maglev_lb_config or least_request_lb_config without setting the corresponding - // LbPolicy will generate an error at runtime. - oneof lb_config { - // Optional configuration for the Ring Hash load balancing policy. - RingHashLbConfig ring_hash_lb_config = 23; - - // Optional configuration for the Maglev load balancing policy. - MaglevLbConfig maglev_lb_config = 52; - - // Optional configuration for the Original Destination load balancing policy. - OriginalDstLbConfig original_dst_lb_config = 34; - - // Optional configuration for the LeastRequest load balancing policy. - LeastRequestLbConfig least_request_lb_config = 37; - } - - // Common configuration for all load balancer implementations. - CommonLbConfig common_lb_config = 27; - - // Optional custom transport socket implementation to use for upstream connections. 
- // To setup TLS, set a transport socket with name `envoy.transport_sockets.tls` and - // :ref:`UpstreamTlsContexts ` in the `typed_config`. - // If no transport socket configuration is specified, new connections - // will be set up with plaintext. - core.v3.TransportSocket transport_socket = 24; - - // The Metadata field can be used to provide additional information about the - // cluster. It can be used for stats, logging, and varying filter behavior. - // Fields should use reverse DNS notation to denote which entity within Envoy - // will need the information. For instance, if the metadata is intended for - // the Router filter, the filter name should be specified as *envoy.filters.http.router*. - core.v3.Metadata metadata = 25; - - // Determines how Envoy selects the protocol used to speak to upstream hosts. - // This has been deprecated in favor of setting explicit protocol selection - // in the :ref:`http_protocol_options - // ` message. - // http_protocol_options can be set via the cluster's - // :ref:`extension_protocol_options`. - ClusterProtocolSelection protocol_selection = 26 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - - // Optional options for upstream connections. - UpstreamConnectionOptions upstream_connection_options = 30; - - // If an upstream host becomes unhealthy (as determined by the configured health checks - // or outlier detection), immediately close all connections to the failed host. - // - // .. note:: - // - // This is currently only supported for connections created by tcp_proxy. - // - // .. note:: - // - // The current implementation of this feature closes all connections immediately when - // the unhealthy status is detected. If there are a large number of connections open - // to an upstream host that becomes unhealthy, Envoy may spend a substantial amount of - // time exclusively closing these connections, and not processing any other traffic. 
- bool close_connections_on_host_health_failure = 31; - - // If set to true, Envoy will ignore the health value of a host when processing its removal - // from service discovery. This means that if active health checking is used, Envoy will *not* - // wait for the endpoint to go unhealthy before removing it. - bool ignore_health_on_host_removal = 32; - - // An (optional) network filter chain, listed in the order the filters should be applied. - // The chain will be applied to all outgoing connections that Envoy makes to the upstream - // servers of this cluster. - repeated Filter filters = 40; - - // New mechanism for LB policy configuration. Used only if the - // :ref:`lb_policy` field has the value - // :ref:`LOAD_BALANCING_POLICY_CONFIG`. - LoadBalancingPolicy load_balancing_policy = 41; - - // [#not-implemented-hide:] - // If present, tells the client where to send load reports via LRS. If not present, the - // client will fall back to a client-side default, which may be either (a) don't send any - // load reports or (b) send load reports for all clusters to a single default server - // (which may be configured in the bootstrap file). - // - // Note that if multiple clusters point to the same LRS server, the client may choose to - // create a separate stream for each cluster or it may choose to coalesce the data for - // multiple clusters onto a single stream. Either way, the client must make sure to send - // the data for any given cluster on no more than one stream. - // - // [#next-major-version: In the v3 API, we should consider restructuring this somehow, - // maybe by allowing LRS to go on the ADS stream, or maybe by moving some of the negotiation - // from the LRS stream here.] - core.v3.ConfigSource lrs_server = 42; - - // If track_timeout_budgets is true, the :ref:`timeout budget histograms - // ` will be published for each - // request. These show what percentage of a request's per try and global timeout was used. 
A value - // of 0 would indicate that none of the timeout was used or that the timeout was infinite. A value - // of 100 would indicate that the request took the entirety of the timeout given to it. - // - // .. attention:: - // - // This field has been deprecated in favor of `timeout_budgets`, part of - // :ref:`track_cluster_stats `. - bool track_timeout_budgets = 47 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - - // Optional customization and configuration of upstream connection pool, and upstream type. - // - // Currently this field only applies for HTTP traffic but is designed for eventual use for custom - // TCP upstreams. - // - // For HTTP traffic, Envoy will generally take downstream HTTP and send it upstream as upstream - // HTTP, using the http connection pool and the codec from `http2_protocol_options` - // - // For routes where CONNECT termination is configured, Envoy will take downstream CONNECT - // requests and forward the CONNECT payload upstream over raw TCP using the tcp connection pool. - // - // The default pool used is the generic connection pool which creates the HTTP upstream for most - // HTTP requests, and the TCP upstream if CONNECT termination is configured. - // - // If users desire custom connection pool or upstream behavior, for example terminating - // CONNECT only if a custom filter indicates it is appropriate, the custom factories - // can be registered and configured here. - // [#extension-category: envoy.upstreams] - core.v3.TypedExtensionConfig upstream_config = 48; - - // Configuration to track optional cluster stats. - TrackClusterStats track_cluster_stats = 49; - - // Preconnect configuration for this cluster. 
- PreconnectPolicy preconnect_policy = 50; - - // If `connection_pool_per_downstream_connection` is true, the cluster will use a separate - // connection pool for every downstream connection - bool connection_pool_per_downstream_connection = 51; - - repeated core.v3.Address hidden_envoy_deprecated_hosts = 7 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - - envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext hidden_envoy_deprecated_tls_context = - 11 [ - deprecated = true, - (envoy.annotations.deprecated_at_minor_version) = "3.0", - (envoy.annotations.disallowed_by_default) = true - ]; - - map hidden_envoy_deprecated_extension_protocol_options = 35 [ - deprecated = true, - (envoy.annotations.deprecated_at_minor_version) = "3.0", - (envoy.annotations.disallowed_by_default) = true - ]; -} - -// Extensible load balancing policy configuration. -// -// Every LB policy defined via this mechanism will be identified via a unique name using reverse -// DNS notation. If the policy needs configuration parameters, it must define a message for its -// own configuration, which will be stored in the config field. The name of the policy will tell -// clients which type of message they should expect to see in the config field. -// -// Note that there are cases where it is useful to be able to independently select LB policies -// for choosing a locality and for choosing an endpoint within that locality. For example, a -// given deployment may always use the same policy to choose the locality, but for choosing the -// endpoint within the locality, some clusters may use weighted-round-robin, while others may -// use some sort of session-based balancing. -// -// This can be accomplished via hierarchical LB policies, where the parent LB policy creates a -// child LB policy for each locality. For each request, the parent chooses the locality and then -// delegates to the child policy for that locality to choose the endpoint within the locality. 
-// -// To facilitate this, the config message for the top-level LB policy may include a field of -// type LoadBalancingPolicy that specifies the child policy. -message LoadBalancingPolicy { - option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.LoadBalancingPolicy"; - - message Policy { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.LoadBalancingPolicy.Policy"; - - reserved 1, 3; - - reserved "name", "typed_config"; - - core.v3.TypedExtensionConfig typed_extension_config = 4; - - google.protobuf.Struct hidden_envoy_deprecated_config = 2 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - } - - // Each client will iterate over the list in order and stop at the first policy that it - // supports. This provides a mechanism for starting to use new LB policies that are not yet - // supported by all clients. - repeated Policy policies = 1; -} - -// An extensible structure containing the address Envoy should bind to when -// establishing upstream connections. -message UpstreamBindConfig { - option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.UpstreamBindConfig"; - - // The address Envoy should bind to when establishing upstream connections. - core.v3.Address source_address = 1; -} - -message UpstreamConnectionOptions { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.UpstreamConnectionOptions"; - - // If set then set SO_KEEPALIVE on the socket to enable TCP Keepalives. - core.v3.TcpKeepalive tcp_keepalive = 1; -} - -message TrackClusterStats { - // If timeout_budgets is true, the :ref:`timeout budget histograms - // ` will be published for each - // request. These show what percentage of a request's per try and global timeout was used. A value - // of 0 would indicate that none of the timeout was used or that the timeout was infinite. A value - // of 100 would indicate that the request took the entirety of the timeout given to it. 
- bool timeout_budgets = 1; - - // If request_response_sizes is true, then the :ref:`histograms - // ` tracking header and body sizes - // of requests and responses will be published. - bool request_response_sizes = 2; -} diff --git a/generated_api_shadow/envoy/config/cluster/v3/filter.proto b/generated_api_shadow/envoy/config/cluster/v3/filter.proto deleted file mode 100644 index 7d11b87bcd5d5..0000000000000 --- a/generated_api_shadow/envoy/config/cluster/v3/filter.proto +++ /dev/null @@ -1,30 +0,0 @@ -syntax = "proto3"; - -package envoy.config.cluster.v3; - -import "google/protobuf/any.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.cluster.v3"; -option java_outer_classname = "FilterProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Upstream filters] -// Upstream filters apply to the connections to the upstream cluster hosts. - -message Filter { - option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.cluster.Filter"; - - // The name of the filter to instantiate. The name must match a - // supported upstream filter. Note that Envoy's :ref:`downstream network - // filters ` are not valid upstream filters. - string name = 1 [(validate.rules).string = {min_len: 1}]; - - // Filter specific configuration which depends on the filter being - // instantiated. See the supported filters for further documentation. 
- google.protobuf.Any typed_config = 2; -} diff --git a/generated_api_shadow/envoy/config/cluster/v3/outlier_detection.proto b/generated_api_shadow/envoy/config/cluster/v3/outlier_detection.proto deleted file mode 100644 index b19e95db99b74..0000000000000 --- a/generated_api_shadow/envoy/config/cluster/v3/outlier_detection.proto +++ /dev/null @@ -1,157 +0,0 @@ -syntax = "proto3"; - -package envoy.config.cluster.v3; - -import "google/protobuf/duration.proto"; -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.cluster.v3"; -option java_outer_classname = "OutlierDetectionProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Outlier detection] - -// See the :ref:`architecture overview ` for -// more information on outlier detection. -// [#next-free-field: 22] -message OutlierDetection { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.cluster.OutlierDetection"; - - // The number of consecutive 5xx responses or local origin errors that are mapped - // to 5xx error codes before a consecutive 5xx ejection - // occurs. Defaults to 5. - google.protobuf.UInt32Value consecutive_5xx = 1; - - // The time interval between ejection analysis sweeps. This can result in - // both new ejections as well as hosts being returned to service. Defaults - // to 10000ms or 10s. - google.protobuf.Duration interval = 2 [(validate.rules).duration = {gt {}}]; - - // The base time that a host is ejected for. The real time is equal to the - // base time multiplied by the number of times the host has been ejected and is - // capped by :ref:`max_ejection_time`. - // Defaults to 30000ms or 30s. 
- google.protobuf.Duration base_ejection_time = 3 [(validate.rules).duration = {gt {}}]; - - // The maximum % of an upstream cluster that can be ejected due to outlier - // detection. Defaults to 10% but will eject at least one host regardless of the value. - google.protobuf.UInt32Value max_ejection_percent = 4 [(validate.rules).uint32 = {lte: 100}]; - - // The % chance that a host will be actually ejected when an outlier status - // is detected through consecutive 5xx. This setting can be used to disable - // ejection or to ramp it up slowly. Defaults to 100. - google.protobuf.UInt32Value enforcing_consecutive_5xx = 5 [(validate.rules).uint32 = {lte: 100}]; - - // The % chance that a host will be actually ejected when an outlier status - // is detected through success rate statistics. This setting can be used to - // disable ejection or to ramp it up slowly. Defaults to 100. - google.protobuf.UInt32Value enforcing_success_rate = 6 [(validate.rules).uint32 = {lte: 100}]; - - // The number of hosts in a cluster that must have enough request volume to - // detect success rate outliers. If the number of hosts is less than this - // setting, outlier detection via success rate statistics is not performed - // for any host in the cluster. Defaults to 5. - google.protobuf.UInt32Value success_rate_minimum_hosts = 7; - - // The minimum number of total requests that must be collected in one - // interval (as defined by the interval duration above) to include this host - // in success rate based outlier detection. If the volume is lower than this - // setting, outlier detection via success rate statistics is not performed - // for that host. Defaults to 100. - google.protobuf.UInt32Value success_rate_request_volume = 8; - - // This factor is used to determine the ejection threshold for success rate - // outlier ejection. 
The ejection threshold is the difference between the - // mean success rate, and the product of this factor and the standard - // deviation of the mean success rate: mean - (stdev * - // success_rate_stdev_factor). This factor is divided by a thousand to get a - // double. That is, if the desired factor is 1.9, the runtime value should - // be 1900. Defaults to 1900. - google.protobuf.UInt32Value success_rate_stdev_factor = 9; - - // The number of consecutive gateway failures (502, 503, 504 status codes) - // before a consecutive gateway failure ejection occurs. Defaults to 5. - google.protobuf.UInt32Value consecutive_gateway_failure = 10; - - // The % chance that a host will be actually ejected when an outlier status - // is detected through consecutive gateway failures. This setting can be - // used to disable ejection or to ramp it up slowly. Defaults to 0. - google.protobuf.UInt32Value enforcing_consecutive_gateway_failure = 11 - [(validate.rules).uint32 = {lte: 100}]; - - // Determines whether to distinguish local origin failures from external errors. If set to true - // the following configuration parameters are taken into account: - // :ref:`consecutive_local_origin_failure`, - // :ref:`enforcing_consecutive_local_origin_failure` - // and - // :ref:`enforcing_local_origin_success_rate`. - // Defaults to false. - bool split_external_local_origin_errors = 12; - - // The number of consecutive locally originated failures before ejection - // occurs. Defaults to 5. Parameter takes effect only when - // :ref:`split_external_local_origin_errors` - // is set to true. - google.protobuf.UInt32Value consecutive_local_origin_failure = 13; - - // The % chance that a host will be actually ejected when an outlier status - // is detected through consecutive locally originated failures. This setting can be - // used to disable ejection or to ramp it up slowly. Defaults to 100. 
- // Parameter takes effect only when - // :ref:`split_external_local_origin_errors` - // is set to true. - google.protobuf.UInt32Value enforcing_consecutive_local_origin_failure = 14 - [(validate.rules).uint32 = {lte: 100}]; - - // The % chance that a host will be actually ejected when an outlier status - // is detected through success rate statistics for locally originated errors. - // This setting can be used to disable ejection or to ramp it up slowly. Defaults to 100. - // Parameter takes effect only when - // :ref:`split_external_local_origin_errors` - // is set to true. - google.protobuf.UInt32Value enforcing_local_origin_success_rate = 15 - [(validate.rules).uint32 = {lte: 100}]; - - // The failure percentage to use when determining failure percentage-based outlier detection. If - // the failure percentage of a given host is greater than or equal to this value, it will be - // ejected. Defaults to 85. - google.protobuf.UInt32Value failure_percentage_threshold = 16 - [(validate.rules).uint32 = {lte: 100}]; - - // The % chance that a host will be actually ejected when an outlier status is detected through - // failure percentage statistics. This setting can be used to disable ejection or to ramp it up - // slowly. Defaults to 0. - // - // [#next-major-version: setting this without setting failure_percentage_threshold should be - // invalid in v4.] - google.protobuf.UInt32Value enforcing_failure_percentage = 17 - [(validate.rules).uint32 = {lte: 100}]; - - // The % chance that a host will be actually ejected when an outlier status is detected through - // local-origin failure percentage statistics. This setting can be used to disable ejection or to - // ramp it up slowly. Defaults to 0. - google.protobuf.UInt32Value enforcing_failure_percentage_local_origin = 18 - [(validate.rules).uint32 = {lte: 100}]; - - // The minimum number of hosts in a cluster in order to perform failure percentage-based ejection. 
- // If the total number of hosts in the cluster is less than this value, failure percentage-based - // ejection will not be performed. Defaults to 5. - google.protobuf.UInt32Value failure_percentage_minimum_hosts = 19; - - // The minimum number of total requests that must be collected in one interval (as defined by the - // interval duration above) to perform failure percentage-based ejection for this host. If the - // volume is lower than this setting, failure percentage-based ejection will not be performed for - // this host. Defaults to 50. - google.protobuf.UInt32Value failure_percentage_request_volume = 20; - - // The maximum time that a host is ejected for. See :ref:`base_ejection_time` - // for more information. If not specified, the default value (300000ms or 300s) or - // :ref:`base_ejection_time` value is applied, whatever is larger. - google.protobuf.Duration max_ejection_time = 21 [(validate.rules).duration = {gt {}}]; -} diff --git a/generated_api_shadow/envoy/config/common/dynamic_forward_proxy/v2alpha/BUILD b/generated_api_shadow/envoy/config/common/dynamic_forward_proxy/v2alpha/BUILD deleted file mode 100644 index 631cd93a3964e..0000000000000 --- a/generated_api_shadow/envoy/config/common/dynamic_forward_proxy/v2alpha/BUILD +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/api/v2:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/config/common/dynamic_forward_proxy/v2alpha/dns_cache.proto b/generated_api_shadow/envoy/config/common/dynamic_forward_proxy/v2alpha/dns_cache.proto deleted file mode 100644 index 3941c20aeb805..0000000000000 --- a/generated_api_shadow/envoy/config/common/dynamic_forward_proxy/v2alpha/dns_cache.proto +++ /dev/null @@ -1,85 +0,0 @@ -syntax = "proto3"; - -package envoy.config.common.dynamic_forward_proxy.v2alpha; - -import "envoy/api/v2/cluster.proto"; - -import "google/protobuf/duration.proto"; -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.common.dynamic_forward_proxy.v2alpha"; -option java_outer_classname = "DnsCacheProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = - "envoy.extensions.common.dynamic_forward_proxy.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Dynamic forward proxy common configuration] - -// Configuration for the dynamic forward proxy DNS cache. See the :ref:`architecture overview -// ` for more information. -// [#next-free-field: 7] -message DnsCacheConfig { - // The name of the cache. Multiple named caches allow independent dynamic forward proxy - // configurations to operate within a single Envoy process using different configurations. All - // configurations with the same name *must* otherwise have the same settings when referenced - // from different configuration components. Configuration will fail to load if this is not - // the case. 
- string name = 1 [(validate.rules).string = {min_bytes: 1}]; - - // The DNS lookup family to use during resolution. - // - // [#comment:TODO(mattklein123): Figure out how to support IPv4/IPv6 "happy eyeballs" mode. The - // way this might work is a new lookup family which returns both IPv4 and IPv6 addresses, and - // then configures a host to have a primary and fall back address. With this, we could very - // likely build a "happy eyeballs" connection pool which would race the primary / fall back - // address and return the one that wins. This same method could potentially also be used for - // QUIC to TCP fall back.] - api.v2.Cluster.DnsLookupFamily dns_lookup_family = 2 - [(validate.rules).enum = {defined_only: true}]; - - // The DNS refresh rate for currently cached DNS hosts. If not specified defaults to 60s. - // - // .. note: - // - // The returned DNS TTL is not currently used to alter the refresh rate. This feature will be - // added in a future change. - // - // .. note: - // - // The refresh rate is rounded to the closest millisecond, and must be at least 1ms. - google.protobuf.Duration dns_refresh_rate = 3 - [(validate.rules).duration = {gte {nanos: 1000000}}]; - - // The TTL for hosts that are unused. Hosts that have not been used in the configured time - // interval will be purged. If not specified defaults to 5m. - // - // .. note: - // - // The TTL is only checked at the time of DNS refresh, as specified by *dns_refresh_rate*. This - // means that if the configured TTL is shorter than the refresh rate the host may not be removed - // immediately. - // - // .. note: - // - // The TTL has no relation to DNS TTL and is only used to control Envoy's resource usage. - google.protobuf.Duration host_ttl = 4 [(validate.rules).duration = {gt {}}]; - - // The maximum number of hosts that the cache will hold. If not specified defaults to 1024. - // - // .. 
note: - // - // The implementation is approximate and enforced independently on each worker thread, thus - // it is possible for the maximum hosts in the cache to go slightly above the configured - // value depending on timing. This is similar to how other circuit breakers work. - google.protobuf.UInt32Value max_hosts = 5 [(validate.rules).uint32 = {gt: 0}]; - - // If the DNS failure refresh rate is specified, - // this is used as the cache's DNS refresh rate when DNS requests are failing. If this setting is - // not specified, the failure refresh rate defaults to the dns_refresh_rate. - api.v2.Cluster.RefreshRate dns_failure_refresh_rate = 6; -} diff --git a/generated_api_shadow/envoy/config/common/matcher/v3/BUILD b/generated_api_shadow/envoy/config/common/matcher/v3/BUILD deleted file mode 100644 index 2f90ace882d93..0000000000000 --- a/generated_api_shadow/envoy/config/common/matcher/v3/BUILD +++ /dev/null @@ -1,14 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/core/v3:pkg", - "//envoy/config/route/v3:pkg", - "//envoy/type/matcher/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/config/common/matcher/v3/matcher.proto b/generated_api_shadow/envoy/config/common/matcher/v3/matcher.proto deleted file mode 100644 index d7deb71d0b469..0000000000000 --- a/generated_api_shadow/envoy/config/common/matcher/v3/matcher.proto +++ /dev/null @@ -1,226 +0,0 @@ -syntax = "proto3"; - -package envoy.config.common.matcher.v3; - -import "envoy/config/core/v3/extension.proto"; -import "envoy/config/route/v3/route_components.proto"; -import "envoy/type/matcher/v3/string.proto"; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.common.matcher.v3"; -option java_outer_classname = "MatcherProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Unified Matcher API] - -// A matcher, which may traverse a matching tree in order to result in a match action. -// During matching, the tree will be traversed until a match is found, or if no match -// is found the action specified by the most specific on_no_match will be evaluated. -// As an on_no_match might result in another matching tree being evaluated, this process -// might repeat several times until the final OnMatch (or no match) is decided. -// -// [#alpha:] -message Matcher { - // What to do if a match is successful. - message OnMatch { - oneof on_match { - option (validate.required) = true; - - // Nested matcher to evaluate. - // If the nested matcher does not match and does not specify - // on_no_match, then this matcher is considered not to have - // matched, even if a predicate at this level or above returned - // true. 
- Matcher matcher = 1; - - // Protocol-specific action to take. - core.v3.TypedExtensionConfig action = 2; - } - } - - // A linear list of field matchers. - // The field matchers are evaluated in order, and the first match - // wins. - message MatcherList { - // Predicate to determine if a match is successful. - message Predicate { - // Predicate for a single input field. - message SinglePredicate { - // Protocol-specific specification of input field to match on. - // [#extension-category: envoy.matching.common_inputs] - core.v3.TypedExtensionConfig input = 1 [(validate.rules).message = {required: true}]; - - oneof matcher { - option (validate.required) = true; - - // Built-in string matcher. - type.matcher.v3.StringMatcher value_match = 2; - - // Extension for custom matching logic. - // [#extension-category: envoy.matching.input_matchers] - core.v3.TypedExtensionConfig custom_match = 3; - } - } - - // A list of two or more matchers. Used to allow using a list within a oneof. - message PredicateList { - repeated Predicate predicate = 1 [(validate.rules).repeated = {min_items: 2}]; - } - - oneof match_type { - option (validate.required) = true; - - // A single predicate to evaluate. - SinglePredicate single_predicate = 1; - - // A list of predicates to be OR-ed together. - PredicateList or_matcher = 2; - - // A list of predicates to be AND-ed together. - PredicateList and_matcher = 3; - - // The invert of a predicate - Predicate not_matcher = 4; - } - } - - // An individual matcher. - message FieldMatcher { - // Determines if the match succeeds. - Predicate predicate = 1 [(validate.rules).message = {required: true}]; - - // What to do if the match succeeds. - OnMatch on_match = 2 [(validate.rules).message = {required: true}]; - } - - // A list of matchers. First match wins. - repeated FieldMatcher matchers = 1 [(validate.rules).repeated = {min_items: 1}]; - } - - message MatcherTree { - // A map of configured matchers. Used to allow using a map within a oneof. 
- message MatchMap { - map map = 1 [(validate.rules).map = {min_pairs: 1}]; - } - - // Protocol-specific specification of input field to match on. - core.v3.TypedExtensionConfig input = 1 [(validate.rules).message = {required: true}]; - - // Exact or prefix match maps in which to look up the input value. - // If the lookup succeeds, the match is considered successful, and - // the corresponding OnMatch is used. - oneof tree_type { - option (validate.required) = true; - - MatchMap exact_match_map = 2; - - // Longest matching prefix wins. - MatchMap prefix_match_map = 3; - - // Extension for custom matching logic. - core.v3.TypedExtensionConfig custom_match = 4; - } - } - - oneof matcher_type { - option (validate.required) = true; - - // A linear list of matchers to evaluate. - MatcherList matcher_list = 1; - - // A match tree to evaluate. - MatcherTree matcher_tree = 2; - } - - // Optional OnMatch to use if the matcher failed. - // If specified, the OnMatch is used, and the matcher is considered - // to have matched. - // If not specified, the matcher is considered not to have matched. - OnMatch on_no_match = 3; -} - -// Match configuration. This is a recursive structure which allows complex nested match -// configurations to be built using various logical operators. -// [#next-free-field: 11] -message MatchPredicate { - // A set of match configurations used for logical operations. - message MatchSet { - // The list of rules that make up the set. - repeated MatchPredicate rules = 1 [(validate.rules).repeated = {min_items: 2}]; - } - - oneof rule { - option (validate.required) = true; - - // A set that describes a logical OR. If any member of the set matches, the match configuration - // matches. - MatchSet or_match = 1; - - // A set that describes a logical AND. If all members of the set match, the match configuration - // matches. - MatchSet and_match = 2; - - // A negation match. The match configuration will match if the negated match condition matches. 
- MatchPredicate not_match = 3; - - // The match configuration will always match. - bool any_match = 4 [(validate.rules).bool = {const: true}]; - - // HTTP request headers match configuration. - HttpHeadersMatch http_request_headers_match = 5; - - // HTTP request trailers match configuration. - HttpHeadersMatch http_request_trailers_match = 6; - - // HTTP response headers match configuration. - HttpHeadersMatch http_response_headers_match = 7; - - // HTTP response trailers match configuration. - HttpHeadersMatch http_response_trailers_match = 8; - - // HTTP request generic body match configuration. - HttpGenericBodyMatch http_request_generic_body_match = 9; - - // HTTP response generic body match configuration. - HttpGenericBodyMatch http_response_generic_body_match = 10; - } -} - -// HTTP headers match configuration. -message HttpHeadersMatch { - // HTTP headers to match. - repeated route.v3.HeaderMatcher headers = 1; -} - -// HTTP generic body match configuration. -// List of text strings and hex strings to be located in HTTP body. -// All specified strings must be found in the HTTP body for positive match. -// The search may be limited to specified number of bytes from the body start. -// -// .. attention:: -// -// Searching for patterns in HTTP body is potentially cpu intensive. For each specified pattern, http body is scanned byte by byte to find a match. -// If multiple patterns are specified, the process is repeated for each pattern. If location of a pattern is known, ``bytes_limit`` should be specified -// to scan only part of the http body. -message HttpGenericBodyMatch { - message GenericTextMatch { - oneof rule { - option (validate.required) = true; - - // Text string to be located in HTTP body. - string string_match = 1 [(validate.rules).string = {min_len: 1}]; - - // Sequence of bytes to be located in HTTP body. 
- bytes binary_match = 2 [(validate.rules).bytes = {min_len: 1}]; - } - } - - // Limits search to specified number of bytes - default zero (no limit - match entire captured buffer). - uint32 bytes_limit = 1; - - // List of patterns to match. - repeated GenericTextMatch patterns = 2 [(validate.rules).repeated = {min_items: 1}]; -} diff --git a/generated_api_shadow/envoy/config/common/tap/v2alpha/BUILD b/generated_api_shadow/envoy/config/common/tap/v2alpha/BUILD deleted file mode 100644 index 3aed5a34a4002..0000000000000 --- a/generated_api_shadow/envoy/config/common/tap/v2alpha/BUILD +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/service/tap/v2alpha:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/config/common/tap/v2alpha/common.proto b/generated_api_shadow/envoy/config/common/tap/v2alpha/common.proto deleted file mode 100644 index 6db1ecceddc4f..0000000000000 --- a/generated_api_shadow/envoy/config/common/tap/v2alpha/common.proto +++ /dev/null @@ -1,39 +0,0 @@ -syntax = "proto3"; - -package envoy.config.common.tap.v2alpha; - -import "envoy/service/tap/v2alpha/common.proto"; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.common.tap.v2alpha"; -option java_outer_classname = "CommonProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.common.tap.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Common tap extension configuration] - -// Common configuration for all tap extensions. 
-message CommonExtensionConfig { - oneof config_type { - option (validate.required) = true; - - // If specified, the tap filter will be configured via an admin handler. - AdminConfig admin_config = 1; - - // If specified, the tap filter will be configured via a static configuration that cannot be - // changed. - service.tap.v2alpha.TapConfig static_config = 2; - } -} - -// Configuration for the admin handler. See :ref:`here ` for -// more information. -message AdminConfig { - // Opaque configuration ID. When requests are made to the admin handler, the passed opaque ID is - // matched to the configured filter opaque ID to determine which filter to configure. - string config_id = 1 [(validate.rules).string = {min_bytes: 1}]; -} diff --git a/generated_api_shadow/envoy/config/core/v3/BUILD b/generated_api_shadow/envoy/config/core/v3/BUILD deleted file mode 100644 index 72e10b6df8440..0000000000000 --- a/generated_api_shadow/envoy/config/core/v3/BUILD +++ /dev/null @@ -1,16 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/annotations:pkg", - "//envoy/api/v2/core:pkg", - "//envoy/type/matcher/v3:pkg", - "//envoy/type/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - "@com_github_cncf_udpa//xds/core/v3:pkg", - ], -) diff --git a/generated_api_shadow/envoy/config/core/v3/address.proto b/generated_api_shadow/envoy/config/core/v3/address.proto deleted file mode 100644 index 06876d5f8e41e..0000000000000 --- a/generated_api_shadow/envoy/config/core/v3/address.proto +++ /dev/null @@ -1,160 +0,0 @@ -syntax = "proto3"; - -package envoy.config.core.v3; - -import "envoy/config/core/v3/socket_option.proto"; - -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.core.v3"; -option java_outer_classname = "AddressProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Network addresses] - -message Pipe { - option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.Pipe"; - - // Unix Domain Socket path. On Linux, paths starting with '@' will use the - // abstract namespace. The starting '@' is replaced by a null byte by Envoy. - // Paths starting with '@' will result in an error in environments other than - // Linux. - string path = 1 [(validate.rules).string = {min_len: 1}]; - - // The mode for the Pipe. Not applicable for abstract sockets. - uint32 mode = 2 [(validate.rules).uint32 = {lte: 511}]; -} - -// [#not-implemented-hide:] The address represents an envoy internal listener. -// TODO(lambdai): Make this address available for listener and endpoint. -// TODO(asraa): When address available, remove workaround from test/server/server_fuzz_test.cc:30. 
-message EnvoyInternalAddress { - oneof address_name_specifier { - option (validate.required) = true; - - // [#not-implemented-hide:] The :ref:`listener name ` of the destination internal listener. - string server_listener_name = 1; - } -} - -// [#next-free-field: 7] -message SocketAddress { - option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.SocketAddress"; - - enum Protocol { - TCP = 0; - UDP = 1; - } - - Protocol protocol = 1 [(validate.rules).enum = {defined_only: true}]; - - // The address for this socket. :ref:`Listeners ` will bind - // to the address. An empty address is not allowed. Specify ``0.0.0.0`` or ``::`` - // to bind to any address. [#comment:TODO(zuercher) reinstate when implemented: - // It is possible to distinguish a Listener address via the prefix/suffix matching - // in :ref:`FilterChainMatch `.] When used - // within an upstream :ref:`BindConfig `, the address - // controls the source address of outbound connections. For :ref:`clusters - // `, the cluster type determines whether the - // address must be an IP (*STATIC* or *EDS* clusters) or a hostname resolved by DNS - // (*STRICT_DNS* or *LOGICAL_DNS* clusters). Address resolution can be customized - // via :ref:`resolver_name `. - string address = 2 [(validate.rules).string = {min_len: 1}]; - - oneof port_specifier { - option (validate.required) = true; - - uint32 port_value = 3 [(validate.rules).uint32 = {lte: 65535}]; - - // This is only valid if :ref:`resolver_name - // ` is specified below and the - // named resolver is capable of named port resolution. - string named_port = 4; - } - - // The name of the custom resolver. This must have been registered with Envoy. If - // this is empty, a context dependent default applies. If the address is a concrete - // IP address, no resolution will occur. If address is a hostname this - // should be set for resolution other than DNS. 
Specifying a custom resolver with - // *STRICT_DNS* or *LOGICAL_DNS* will generate an error at runtime. - string resolver_name = 5; - - // When binding to an IPv6 address above, this enables `IPv4 compatibility - // `_. Binding to ``::`` will - // allow both IPv4 and IPv6 connections, with peer IPv4 addresses mapped into - // IPv6 space as ``::FFFF:``. - bool ipv4_compat = 6; -} - -message TcpKeepalive { - option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.TcpKeepalive"; - - // Maximum number of keepalive probes to send without response before deciding - // the connection is dead. Default is to use the OS level configuration (unless - // overridden, Linux defaults to 9.) - google.protobuf.UInt32Value keepalive_probes = 1; - - // The number of seconds a connection needs to be idle before keep-alive probes - // start being sent. Default is to use the OS level configuration (unless - // overridden, Linux defaults to 7200s (i.e., 2 hours.) - google.protobuf.UInt32Value keepalive_time = 2; - - // The number of seconds between keep-alive probes. Default is to use the OS - // level configuration (unless overridden, Linux defaults to 75s.) - google.protobuf.UInt32Value keepalive_interval = 3; -} - -message BindConfig { - option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.BindConfig"; - - // The address to bind to when creating a socket. - SocketAddress source_address = 1 [(validate.rules).message = {required: true}]; - - // Whether to set the *IP_FREEBIND* option when creating the socket. When this - // flag is set to true, allows the :ref:`source_address - // ` to be an IP address - // that is not configured on the system running Envoy. When this flag is set - // to false, the option *IP_FREEBIND* is disabled on the socket. When this - // flag is not set (default), the socket is not modified, i.e. the option is - // neither enabled nor disabled. 
- google.protobuf.BoolValue freebind = 2; - - // Additional socket options that may not be present in Envoy source code or - // precompiled binaries. - repeated SocketOption socket_options = 3; -} - -// Addresses specify either a logical or physical address and port, which are -// used to tell Envoy where to bind/listen, connect to upstream and find -// management servers. -message Address { - option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.Address"; - - oneof address { - option (validate.required) = true; - - SocketAddress socket_address = 1; - - Pipe pipe = 2; - - // [#not-implemented-hide:] - EnvoyInternalAddress envoy_internal_address = 3; - } -} - -// CidrRange specifies an IP Address and a prefix length to construct -// the subnet mask for a `CIDR `_ range. -message CidrRange { - option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.CidrRange"; - - // IPv4 or IPv6 address, e.g. ``192.0.0.0`` or ``2001:db8::``. - string address_prefix = 1 [(validate.rules).string = {min_len: 1}]; - - // Length of prefix, e.g. 0, 32. Defaults to 0 when unset. 
- google.protobuf.UInt32Value prefix_len = 2 [(validate.rules).uint32 = {lte: 128}]; -} diff --git a/generated_api_shadow/envoy/config/core/v3/backoff.proto b/generated_api_shadow/envoy/config/core/v3/backoff.proto deleted file mode 100644 index 3ffa97bb0299c..0000000000000 --- a/generated_api_shadow/envoy/config/core/v3/backoff.proto +++ /dev/null @@ -1,36 +0,0 @@ -syntax = "proto3"; - -package envoy.config.core.v3; - -import "google/protobuf/duration.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.core.v3"; -option java_outer_classname = "BackoffProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Backoff Strategy] - -// Configuration defining a jittered exponential back off strategy. -message BackoffStrategy { - option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.BackoffStrategy"; - - // The base interval to be used for the next back off computation. It should - // be greater than zero and less than or equal to :ref:`max_interval - // `. - google.protobuf.Duration base_interval = 1 [(validate.rules).duration = { - required: true - gte {nanos: 1000000} - }]; - - // Specifies the maximum interval between retries. This parameter is optional, - // but must be greater than or equal to the :ref:`base_interval - // ` if set. The default - // is 10 times the :ref:`base_interval - // `. 
- google.protobuf.Duration max_interval = 2 [(validate.rules).duration = {gt {}}]; -} diff --git a/generated_api_shadow/envoy/config/core/v3/base.proto b/generated_api_shadow/envoy/config/core/v3/base.proto deleted file mode 100644 index 9b1ca815723b2..0000000000000 --- a/generated_api_shadow/envoy/config/core/v3/base.proto +++ /dev/null @@ -1,456 +0,0 @@ -syntax = "proto3"; - -package envoy.config.core.v3; - -import "envoy/config/core/v3/address.proto"; -import "envoy/config/core/v3/backoff.proto"; -import "envoy/config/core/v3/http_uri.proto"; -import "envoy/type/v3/percent.proto"; -import "envoy/type/v3/semantic_version.proto"; - -import "google/protobuf/any.proto"; -import "google/protobuf/struct.proto"; -import "google/protobuf/wrappers.proto"; - -import "xds/core/v3/context_params.proto"; - -import "envoy/annotations/deprecation.proto"; -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.core.v3"; -option java_outer_classname = "BaseProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Common types] - -// Envoy supports :ref:`upstream priority routing -// ` both at the route and the virtual -// cluster level. The current priority implementation uses different connection -// pool and circuit breaking settings for each priority level. This means that -// even for HTTP/2 requests, two physical connections will be used to an -// upstream host. In the future Envoy will likely support true HTTP/2 priority -// over a single upstream connection. -enum RoutingPriority { - DEFAULT = 0; - HIGH = 1; -} - -// HTTP request method. 
-enum RequestMethod { - METHOD_UNSPECIFIED = 0; - GET = 1; - HEAD = 2; - POST = 3; - PUT = 4; - DELETE = 5; - CONNECT = 6; - OPTIONS = 7; - TRACE = 8; - PATCH = 9; -} - -// Identifies the direction of the traffic relative to the local Envoy. -enum TrafficDirection { - // Default option is unspecified. - UNSPECIFIED = 0; - - // The transport is used for incoming traffic. - INBOUND = 1; - - // The transport is used for outgoing traffic. - OUTBOUND = 2; -} - -// Identifies location of where either Envoy runs or where upstream hosts run. -message Locality { - option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.Locality"; - - // Region this :ref:`zone ` belongs to. - string region = 1; - - // Defines the local service zone where Envoy is running. Though optional, it - // should be set if discovery service routing is used and the discovery - // service exposes :ref:`zone data `, - // either in this message or via :option:`--service-zone`. The meaning of zone - // is context dependent, e.g. `Availability Zone (AZ) - // `_ - // on AWS, `Zone `_ on - // GCP, etc. - string zone = 2; - - // When used for locality of upstream hosts, this field further splits zone - // into smaller chunks of sub-zones so they can be load balanced - // independently. - string sub_zone = 3; -} - -// BuildVersion combines SemVer version of extension with free-form build information -// (i.e. 'alpha', 'private-build') as a set of strings. -message BuildVersion { - option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.BuildVersion"; - - // SemVer version of extension. - type.v3.SemanticVersion version = 1; - - // Free-form build information. - // Envoy defines several well known keys in the source/common/version/version.h file - google.protobuf.Struct metadata = 2; -} - -// Version and identification for an Envoy extension. 
-// [#next-free-field: 6] -message Extension { - option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.Extension"; - - // This is the name of the Envoy filter as specified in the Envoy - // configuration, e.g. envoy.filters.http.router, com.acme.widget. - string name = 1; - - // Category of the extension. - // Extension category names use reverse DNS notation. For instance "envoy.filters.listener" - // for Envoy's built-in listener filters or "com.acme.filters.http" for HTTP filters from - // acme.com vendor. - // [#comment:TODO(yanavlasov): Link to the doc with existing envoy category names.] - string category = 2; - - // [#not-implemented-hide:] Type descriptor of extension configuration proto. - // [#comment:TODO(yanavlasov): Link to the doc with existing configuration protos.] - // [#comment:TODO(yanavlasov): Add tests when PR #9391 lands.] - string type_descriptor = 3; - - // The version is a property of the extension and maintained independently - // of other extensions and the Envoy API. - // This field is not set when extension did not provide version information. - BuildVersion version = 4; - - // Indicates that the extension is present but was disabled via dynamic configuration. - bool disabled = 5; -} - -// Identifies a specific Envoy instance. The node identifier is presented to the -// management server, which may use this identifier to distinguish per Envoy -// configuration for serving. -// [#next-free-field: 13] -message Node { - option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.Node"; - - // An opaque node identifier for the Envoy node. This also provides the local - // service node name. It should be set if any of the following features are - // used: :ref:`statsd `, :ref:`CDS - // `, and :ref:`HTTP tracing - // `, either in this message or via - // :option:`--service-node`. - string id = 1; - - // Defines the local service cluster name where Envoy is running. 
Though - // optional, it should be set if any of the following features are used: - // :ref:`statsd `, :ref:`health check cluster - // verification - // `, - // :ref:`runtime override directory `, - // :ref:`user agent addition - // `, - // :ref:`HTTP global rate limiting `, - // :ref:`CDS `, and :ref:`HTTP tracing - // `, either in this message or via - // :option:`--service-cluster`. - string cluster = 2; - - // Opaque metadata extending the node identifier. Envoy will pass this - // directly to the management server. - google.protobuf.Struct metadata = 3; - - // Map from xDS resource type URL to dynamic context parameters. These may vary at runtime (unlike - // other fields in this message). For example, the xDS client may have a shard identifier that - // changes during the lifetime of the xDS client. In Envoy, this would be achieved by updating the - // dynamic context on the Server::Instance's LocalInfo context provider. The shard ID dynamic - // parameter then appears in this field during future discovery requests. - map dynamic_parameters = 12; - - // Locality specifying where the Envoy instance is running. - Locality locality = 4; - - // Free-form string that identifies the entity requesting config. - // E.g. "envoy" or "grpc" - string user_agent_name = 6; - - oneof user_agent_version_type { - // Free-form string that identifies the version of the entity requesting config. - // E.g. "1.12.2" or "abcd1234", or "SpecialEnvoyBuild" - string user_agent_version = 7; - - // Structured version of the entity requesting config. - BuildVersion user_agent_build_version = 8; - } - - // List of extensions and their versions supported by the node. - repeated Extension extensions = 9; - - // Client feature support list. These are well known features described - // in the Envoy API repository for a given major version of an API. Client features - // use reverse DNS naming scheme, for example `com.acme.feature`. 
- // See :ref:`the list of features ` that xDS client may - // support. - repeated string client_features = 10; - - // Known listening ports on the node as a generic hint to the management server - // for filtering :ref:`listeners ` to be returned. For example, - // if there is a listener bound to port 80, the list can optionally contain the - // SocketAddress `(0.0.0.0,80)`. The field is optional and just a hint. - repeated Address listening_addresses = 11 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - - string hidden_envoy_deprecated_build_version = 5 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; -} - -// Metadata provides additional inputs to filters based on matched listeners, -// filter chains, routes and endpoints. It is structured as a map, usually from -// filter name (in reverse DNS format) to metadata specific to the filter. Metadata -// key-values for a filter are merged as connection and request handling occurs, -// with later values for the same key overriding earlier values. -// -// An example use of metadata is providing additional values to -// http_connection_manager in the envoy.http_connection_manager.access_log -// namespace. -// -// Another example use of metadata is to per service config info in cluster metadata, which may get -// consumed by multiple filters. -// -// For load balancing, Metadata provides a means to subset cluster endpoints. -// Endpoints have a Metadata object associated and routes contain a Metadata -// object to match against. There are some well defined metadata used today for -// this purpose: -// -// * ``{"envoy.lb": {"canary": }}`` This indicates the canary status of an -// endpoint and is also used during header processing -// (x-envoy-upstream-canary) and for stats purposes. 
-// [#next-major-version: move to type/metadata/v2] -message Metadata { - option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.Metadata"; - - // Key is the reverse DNS filter name, e.g. com.acme.widget. The envoy.* - // namespace is reserved for Envoy's built-in filters. - // If both *filter_metadata* and - // :ref:`typed_filter_metadata ` - // fields are present in the metadata with same keys, - // only *typed_filter_metadata* field will be parsed. - map filter_metadata = 1; - - // Key is the reverse DNS filter name, e.g. com.acme.widget. The envoy.* - // namespace is reserved for Envoy's built-in filters. - // The value is encoded as google.protobuf.Any. - // If both :ref:`filter_metadata ` - // and *typed_filter_metadata* fields are present in the metadata with same keys, - // only *typed_filter_metadata* field will be parsed. - map typed_filter_metadata = 2; -} - -// Runtime derived uint32 with a default when not specified. -message RuntimeUInt32 { - option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.RuntimeUInt32"; - - // Default value if runtime value is not available. - uint32 default_value = 2; - - // Runtime key to get value for comparison. This value is used if defined. - string runtime_key = 3 [(validate.rules).string = {min_len: 1}]; -} - -// Runtime derived percentage with a default when not specified. -message RuntimePercent { - // Default value if runtime value is not available. - type.v3.Percent default_value = 1; - - // Runtime key to get value for comparison. This value is used if defined. - string runtime_key = 2 [(validate.rules).string = {min_len: 1}]; -} - -// Runtime derived double with a default when not specified. -message RuntimeDouble { - option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.RuntimeDouble"; - - // Default value if runtime value is not available. - double default_value = 1; - - // Runtime key to get value for comparison. 
This value is used if defined. - string runtime_key = 2 [(validate.rules).string = {min_len: 1}]; -} - -// Runtime derived bool with a default when not specified. -message RuntimeFeatureFlag { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.core.RuntimeFeatureFlag"; - - // Default value if runtime value is not available. - google.protobuf.BoolValue default_value = 1 [(validate.rules).message = {required: true}]; - - // Runtime key to get value for comparison. This value is used if defined. The boolean value must - // be represented via its - // `canonical JSON encoding `_. - string runtime_key = 2 [(validate.rules).string = {min_len: 1}]; -} - -// Header name/value pair. -message HeaderValue { - option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.HeaderValue"; - - // Header name. - string key = 1 - [(validate.rules).string = - {min_len: 1 max_bytes: 16384 well_known_regex: HTTP_HEADER_NAME strict: false}]; - - // Header value. - // - // The same :ref:`format specifier ` as used for - // :ref:`HTTP access logging ` applies here, however - // unknown header values are replaced with the empty string instead of `-`. - string value = 2 [ - (validate.rules).string = {max_bytes: 16384 well_known_regex: HTTP_HEADER_VALUE strict: false} - ]; -} - -// Header name/value pair plus option to control append behavior. -message HeaderValueOption { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.core.HeaderValueOption"; - - // Header name/value pair that this option applies to. - HeaderValue header = 1 [(validate.rules).message = {required: true}]; - - // Should the value be appended? If true (default), the value is appended to - // existing values. Otherwise it replaces any existing values. - google.protobuf.BoolValue append = 2; -} - -// Wrapper for a set of headers. 
-message HeaderMap { - option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.HeaderMap"; - - repeated HeaderValue headers = 1; -} - -// A directory that is watched for changes, e.g. by inotify on Linux. Move/rename -// events inside this directory trigger the watch. -message WatchedDirectory { - // Directory path to watch. - string path = 1 [(validate.rules).string = {min_len: 1}]; -} - -// Data source consisting of either a file or an inline value. -message DataSource { - option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.DataSource"; - - oneof specifier { - option (validate.required) = true; - - // Local filesystem data source. - string filename = 1 [(validate.rules).string = {min_len: 1}]; - - // Bytes inlined in the configuration. - bytes inline_bytes = 2; - - // String inlined in the configuration. - string inline_string = 3; - } -} - -// The message specifies the retry policy of remote data source when fetching fails. -message RetryPolicy { - option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.RetryPolicy"; - - // Specifies parameters that control :ref:`retry backoff strategy `. - // This parameter is optional, in which case the default base interval is 1000 milliseconds. The - // default maximum interval is 10 times the base interval. - BackoffStrategy retry_back_off = 1; - - // Specifies the allowed number of retries. This parameter is optional and - // defaults to 1. - google.protobuf.UInt32Value num_retries = 2 - [(udpa.annotations.field_migrate).rename = "max_retries"]; -} - -// The message specifies how to fetch data from remote and how to verify it. -message RemoteDataSource { - option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.RemoteDataSource"; - - // The HTTP URI to fetch the remote data. - HttpUri http_uri = 1 [(validate.rules).message = {required: true}]; - - // SHA256 string for verifying data. 
- string sha256 = 2 [(validate.rules).string = {min_len: 1}]; - - // Retry policy for fetching remote data. - RetryPolicy retry_policy = 3; -} - -// Async data source which support async data fetch. -message AsyncDataSource { - option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.AsyncDataSource"; - - oneof specifier { - option (validate.required) = true; - - // Local async data source. - DataSource local = 1; - - // Remote async data source. - RemoteDataSource remote = 2; - } -} - -// Configuration for transport socket in :ref:`listeners ` and -// :ref:`clusters `. If the configuration is -// empty, a default transport socket implementation and configuration will be -// chosen based on the platform and existence of tls_context. -message TransportSocket { - option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.TransportSocket"; - - // The name of the transport socket to instantiate. The name must match a supported transport - // socket implementation. - string name = 1 [(validate.rules).string = {min_len: 1}]; - - // Implementation specific configuration which depends on the implementation being instantiated. - // See the supported transport socket implementations for further documentation. - oneof config_type { - google.protobuf.Any typed_config = 3; - - google.protobuf.Struct hidden_envoy_deprecated_config = 2 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - } -} - -// Runtime derived FractionalPercent with defaults for when the numerator or denominator is not -// specified via a runtime key. -// -// .. note:: -// -// Parsing of the runtime key's data is implemented such that it may be represented as a -// :ref:`FractionalPercent ` proto represented as JSON/YAML -// and may also be represented as an integer with the assumption that the value is an integral -// percentage out of 100. 
For instance, a runtime key lookup returning the value "42" would parse -// as a `FractionalPercent` whose numerator is 42 and denominator is HUNDRED. -message RuntimeFractionalPercent { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.core.RuntimeFractionalPercent"; - - // Default value if the runtime value's for the numerator/denominator keys are not available. - type.v3.FractionalPercent default_value = 1 [(validate.rules).message = {required: true}]; - - // Runtime key for a YAML representation of a FractionalPercent. - string runtime_key = 2; -} - -// Identifies a specific ControlPlane instance that Envoy is connected to. -message ControlPlane { - option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.ControlPlane"; - - // An opaque control plane identifier that uniquely identifies an instance - // of control plane. This can be used to identify which control plane instance, - // the Envoy is connected to. - string identifier = 1; -} diff --git a/generated_api_shadow/envoy/config/core/v3/config_source.proto b/generated_api_shadow/envoy/config/core/v3/config_source.proto deleted file mode 100644 index c24a0a6537d85..0000000000000 --- a/generated_api_shadow/envoy/config/core/v3/config_source.proto +++ /dev/null @@ -1,216 +0,0 @@ -syntax = "proto3"; - -package envoy.config.core.v3; - -import "envoy/config/core/v3/grpc_service.proto"; - -import "google/protobuf/duration.proto"; -import "google/protobuf/wrappers.proto"; - -import "xds/core/v3/authority.proto"; - -import "envoy/annotations/deprecation.proto"; -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.core.v3"; -option java_outer_classname = "ConfigSourceProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Configuration sources] - -// xDS API and 
non-xDS services version. This is used to describe both resource and transport -// protocol versions (in distinct configuration fields). -enum ApiVersion { - // When not specified, we assume v2, to ease migration to Envoy's stable API - // versioning. If a client does not support v2 (e.g. due to deprecation), this - // is an invalid value. - AUTO = 0 [deprecated = true, (envoy.annotations.deprecated_at_minor_version_enum) = "3.0"]; - - // Use xDS v2 API. - V2 = 1 [deprecated = true, (envoy.annotations.deprecated_at_minor_version_enum) = "3.0"]; - - // Use xDS v3 API. - V3 = 2; -} - -// API configuration source. This identifies the API type and cluster that Envoy -// will use to fetch an xDS API. -// [#next-free-field: 9] -message ApiConfigSource { - option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.ApiConfigSource"; - - // APIs may be fetched via either REST or gRPC. - enum ApiType { - // Ideally this would be 'reserved 0' but one can't reserve the default - // value. Instead we throw an exception if this is ever used. - hidden_envoy_deprecated_UNSUPPORTED_REST_LEGACY = 0 [ - deprecated = true, - (envoy.annotations.disallowed_by_default_enum) = true, - (envoy.annotations.deprecated_at_minor_version_enum) = "3.0" - ]; - - // REST-JSON v2 API. The `canonical JSON encoding - // `_ for - // the v2 protos is used. - REST = 1; - - // SotW gRPC service. - GRPC = 2; - - // Using the delta xDS gRPC service, i.e. DeltaDiscovery{Request,Response} - // rather than Discovery{Request,Response}. Rather than sending Envoy the entire state - // with every update, the xDS server only sends what has changed since the last update. - DELTA_GRPC = 3; - - // SotW xDS gRPC with ADS. All resources which resolve to this configuration source will be - // multiplexed on a single connection to an ADS endpoint. - // [#not-implemented-hide:] - AGGREGATED_GRPC = 5; - - // Delta xDS gRPC with ADS. 
All resources which resolve to this configuration source will be - // multiplexed on a single connection to an ADS endpoint. - // [#not-implemented-hide:] - AGGREGATED_DELTA_GRPC = 6; - } - - // API type (gRPC, REST, delta gRPC) - ApiType api_type = 1 [(validate.rules).enum = {defined_only: true}]; - - // API version for xDS transport protocol. This describes the xDS gRPC/REST - // endpoint and version of [Delta]DiscoveryRequest/Response used on the wire. - ApiVersion transport_api_version = 8 [(validate.rules).enum = {defined_only: true}]; - - // Cluster names should be used only with REST. If > 1 - // cluster is defined, clusters will be cycled through if any kind of failure - // occurs. - // - // .. note:: - // - // The cluster with name ``cluster_name`` must be statically defined and its - // type must not be ``EDS``. - repeated string cluster_names = 2; - - // Multiple gRPC services be provided for GRPC. If > 1 cluster is defined, - // services will be cycled through if any kind of failure occurs. - repeated GrpcService grpc_services = 4; - - // For REST APIs, the delay between successive polls. - google.protobuf.Duration refresh_delay = 3; - - // For REST APIs, the request timeout. If not set, a default value of 1s will be used. - google.protobuf.Duration request_timeout = 5 [(validate.rules).duration = {gt {}}]; - - // For GRPC APIs, the rate limit settings. If present, discovery requests made by Envoy will be - // rate limited. - RateLimitSettings rate_limit_settings = 6; - - // Skip the node identifier in subsequent discovery requests for streaming gRPC config types. - bool set_node_on_first_message_only = 7; -} - -// Aggregated Discovery Service (ADS) options. This is currently empty, but when -// set in :ref:`ConfigSource ` can be used to -// specify that ADS is to be used. 
-message AggregatedConfigSource { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.core.AggregatedConfigSource"; -} - -// [#not-implemented-hide:] -// Self-referencing config source options. This is currently empty, but when -// set in :ref:`ConfigSource ` can be used to -// specify that other data can be obtained from the same server. -message SelfConfigSource { - option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.SelfConfigSource"; - - // API version for xDS transport protocol. This describes the xDS gRPC/REST - // endpoint and version of [Delta]DiscoveryRequest/Response used on the wire. - ApiVersion transport_api_version = 1 [(validate.rules).enum = {defined_only: true}]; -} - -// Rate Limit settings to be applied for discovery requests made by Envoy. -message RateLimitSettings { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.core.RateLimitSettings"; - - // Maximum number of tokens to be used for rate limiting discovery request calls. If not set, a - // default value of 100 will be used. - google.protobuf.UInt32Value max_tokens = 1; - - // Rate at which tokens will be filled per second. If not set, a default fill rate of 10 tokens - // per second will be used. - google.protobuf.DoubleValue fill_rate = 2 [(validate.rules).double = {gt: 0.0}]; -} - -// Configuration for :ref:`listeners `, :ref:`clusters -// `, :ref:`routes -// `, :ref:`endpoints -// ` etc. may either be sourced from the -// filesystem or from an xDS API source. Filesystem configs are watched with -// inotify for updates. -// [#next-free-field: 8] -message ConfigSource { - option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.ConfigSource"; - - // Authorities that this config source may be used for. An authority specified in a xdstp:// URL - // is resolved to a *ConfigSource* prior to configuration fetch. 
This field provides the - // association between authority name and configuration source. - // [#not-implemented-hide:] - repeated xds.core.v3.Authority authorities = 7; - - oneof config_source_specifier { - option (validate.required) = true; - - // Path on the filesystem to source and watch for configuration updates. - // When sourcing configuration for :ref:`secret `, - // the certificate and key files are also watched for updates. - // - // .. note:: - // - // The path to the source must exist at config load time. - // - // .. note:: - // - // Envoy will only watch the file path for *moves.* This is because in general only moves - // are atomic. The same method of swapping files as is demonstrated in the - // :ref:`runtime documentation ` can be used here also. - string path = 1; - - // API configuration source. - ApiConfigSource api_config_source = 2; - - // When set, ADS will be used to fetch resources. The ADS API configuration - // source in the bootstrap configuration is used. - AggregatedConfigSource ads = 3; - - // [#not-implemented-hide:] - // When set, the client will access the resources from the same server it got the - // ConfigSource from, although not necessarily from the same stream. This is similar to the - // :ref:`ads` field, except that the client may use a - // different stream to the same server. As a result, this field can be used for things - // like LRS that cannot be sent on an ADS stream. It can also be used to link from (e.g.) - // LDS to RDS on the same server without requiring the management server to know its name - // or required credentials. - // [#next-major-version: In xDS v3, consider replacing the ads field with this one, since - // this field can implicitly mean to use the same stream in the case where the ConfigSource - // is provided via ADS and the specified data can also be obtained via ADS.] 
- SelfConfigSource self = 5; - } - - // When this timeout is specified, Envoy will wait no longer than the specified time for first - // config response on this xDS subscription during the :ref:`initialization process - // `. After reaching the timeout, Envoy will move to the next - // initialization phase, even if the first config is not delivered yet. The timer is activated - // when the xDS API subscription starts, and is disarmed on first config update or on error. 0 - // means no timeout - Envoy will wait indefinitely for the first xDS config (unless another - // timeout applies). The default is 15s. - google.protobuf.Duration initial_fetch_timeout = 4; - - // API version for xDS resources. This implies the type URLs that the client - // will request for resources and the resource type that the client will in - // turn expect to be delivered. - ApiVersion resource_api_version = 6 [(validate.rules).enum = {defined_only: true}]; -} diff --git a/generated_api_shadow/envoy/config/core/v3/event_service_config.proto b/generated_api_shadow/envoy/config/core/v3/event_service_config.proto deleted file mode 100644 index b3552e3975a36..0000000000000 --- a/generated_api_shadow/envoy/config/core/v3/event_service_config.proto +++ /dev/null @@ -1,28 +0,0 @@ -syntax = "proto3"; - -package envoy.config.core.v3; - -import "envoy/config/core/v3/grpc_service.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.core.v3"; -option java_outer_classname = "EventServiceConfigProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#not-implemented-hide:] -// Configuration of the event reporting service endpoint. 
-message EventServiceConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.core.EventServiceConfig"; - - oneof config_source_specifier { - option (validate.required) = true; - - // Specifies the gRPC service that hosts the event reporting service. - GrpcService grpc_service = 1; - } -} diff --git a/generated_api_shadow/envoy/config/core/v3/extension.proto b/generated_api_shadow/envoy/config/core/v3/extension.proto deleted file mode 100644 index ba66da6a8e363..0000000000000 --- a/generated_api_shadow/envoy/config/core/v3/extension.proto +++ /dev/null @@ -1,61 +0,0 @@ -syntax = "proto3"; - -package envoy.config.core.v3; - -import "envoy/config/core/v3/config_source.proto"; - -import "google/protobuf/any.proto"; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.core.v3"; -option java_outer_classname = "ExtensionProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Extension configuration] - -// Message type for extension configuration. -// [#next-major-version: revisit all existing typed_config that doesn't use this wrapper.]. -message TypedExtensionConfig { - // The name of an extension. This is not used to select the extension, instead - // it serves the role of an opaque identifier. - string name = 1 [(validate.rules).string = {min_len: 1}]; - - // The typed config for the extension. The type URL will be used to identify - // the extension. In the case that the type URL is *udpa.type.v1.TypedStruct*, - // the inner type URL of *TypedStruct* will be utilized. See the - // :ref:`extension configuration overview - // ` for further details. - google.protobuf.Any typed_config = 2 [(validate.rules).any = {required: true}]; -} - -// Configuration source specifier for a late-bound extension configuration. 
The -// parent resource is warmed until all the initial extension configurations are -// received, unless the flag to apply the default configuration is set. -// Subsequent extension updates are atomic on a per-worker basis. Once an -// extension configuration is applied to a request or a connection, it remains -// constant for the duration of processing. If the initial delivery of the -// extension configuration fails, due to a timeout for example, the optional -// default configuration is applied. Without a default configuration, the -// extension is disabled, until an extension configuration is received. The -// behavior of a disabled extension depends on the context. For example, a -// filter chain with a disabled extension filter rejects all incoming streams. -message ExtensionConfigSource { - ConfigSource config_source = 1 [(validate.rules).any = {required: true}]; - - // Optional default configuration to use as the initial configuration if - // there is a failure to receive the initial extension configuration or if - // `apply_default_config_without_warming` flag is set. - google.protobuf.Any default_config = 2; - - // Use the default config as the initial configuration without warming and - // waiting for the first discovery response. Requires the default configuration - // to be supplied. - bool apply_default_config_without_warming = 3; - - // A set of permitted extension type URLs. Extension configuration updates are rejected - // if they do not match any type URL in the set. 
- repeated string type_urls = 4 [(validate.rules).repeated = {min_items: 1}]; -} diff --git a/generated_api_shadow/envoy/config/core/v3/grpc_method_list.proto b/generated_api_shadow/envoy/config/core/v3/grpc_method_list.proto deleted file mode 100644 index e79ec24e0201f..0000000000000 --- a/generated_api_shadow/envoy/config/core/v3/grpc_method_list.proto +++ /dev/null @@ -1,32 +0,0 @@ -syntax = "proto3"; - -package envoy.config.core.v3; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.core.v3"; -option java_outer_classname = "GrpcMethodListProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: gRPC method list] - -// A list of gRPC methods which can be used as an allowlist, for example. -message GrpcMethodList { - option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.GrpcMethodList"; - - message Service { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.core.GrpcMethodList.Service"; - - // The name of the gRPC service. - string name = 1 [(validate.rules).string = {min_len: 1}]; - - // The names of the gRPC methods in this service. 
- repeated string method_names = 2 [(validate.rules).repeated = {min_items: 1}]; - } - - repeated Service services = 1; -} diff --git a/generated_api_shadow/envoy/config/core/v3/grpc_service.proto b/generated_api_shadow/envoy/config/core/v3/grpc_service.proto deleted file mode 100644 index b8e033da93830..0000000000000 --- a/generated_api_shadow/envoy/config/core/v3/grpc_service.proto +++ /dev/null @@ -1,296 +0,0 @@ -syntax = "proto3"; - -package envoy.config.core.v3; - -import "envoy/config/core/v3/base.proto"; - -import "google/protobuf/any.proto"; -import "google/protobuf/duration.proto"; -import "google/protobuf/empty.proto"; -import "google/protobuf/struct.proto"; -import "google/protobuf/wrappers.proto"; - -import "envoy/annotations/deprecation.proto"; -import "udpa/annotations/sensitive.proto"; -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.core.v3"; -option java_outer_classname = "GrpcServiceProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: gRPC services] - -// gRPC service configuration. This is used by :ref:`ApiConfigSource -// ` and filter configurations. -// [#next-free-field: 6] -message GrpcService { - option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.GrpcService"; - - message EnvoyGrpc { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.core.GrpcService.EnvoyGrpc"; - - // The name of the upstream gRPC cluster. SSL credentials will be supplied - // in the :ref:`Cluster ` :ref:`transport_socket - // `. - string cluster_name = 1 [(validate.rules).string = {min_len: 1}]; - - // The `:authority` header in the grpc request. If this field is not set, the authority header value will be `cluster_name`. - // Note that this authority does not override the SNI. 
The SNI is provided by the transport socket of the cluster. - string authority = 2 - [(validate.rules).string = - {min_len: 0 max_bytes: 16384 well_known_regex: HTTP_HEADER_VALUE strict: false}]; - } - - // [#next-free-field: 9] - message GoogleGrpc { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.core.GrpcService.GoogleGrpc"; - - // See https://grpc.io/grpc/cpp/structgrpc_1_1_ssl_credentials_options.html. - message SslCredentials { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.core.GrpcService.GoogleGrpc.SslCredentials"; - - // PEM encoded server root certificates. - DataSource root_certs = 1; - - // PEM encoded client private key. - DataSource private_key = 2 [(udpa.annotations.sensitive) = true]; - - // PEM encoded client certificate chain. - DataSource cert_chain = 3; - } - - // Local channel credentials. Only UDS is supported for now. - // See https://github.com/grpc/grpc/pull/15909. - message GoogleLocalCredentials { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.core.GrpcService.GoogleGrpc.GoogleLocalCredentials"; - } - - // See https://grpc.io/docs/guides/auth.html#credential-types to understand Channel and Call - // credential types. 
- message ChannelCredentials { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.core.GrpcService.GoogleGrpc.ChannelCredentials"; - - oneof credential_specifier { - option (validate.required) = true; - - SslCredentials ssl_credentials = 1; - - // https://grpc.io/grpc/cpp/namespacegrpc.html#a6beb3ac70ff94bd2ebbd89b8f21d1f61 - google.protobuf.Empty google_default = 2; - - GoogleLocalCredentials local_credentials = 3; - } - } - - // [#next-free-field: 8] - message CallCredentials { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.core.GrpcService.GoogleGrpc.CallCredentials"; - - message ServiceAccountJWTAccessCredentials { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.core.GrpcService.GoogleGrpc.CallCredentials." - "ServiceAccountJWTAccessCredentials"; - - string json_key = 1; - - uint64 token_lifetime_seconds = 2; - } - - message GoogleIAMCredentials { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.core.GrpcService.GoogleGrpc.CallCredentials.GoogleIAMCredentials"; - - string authorization_token = 1; - - string authority_selector = 2; - } - - message MetadataCredentialsFromPlugin { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.core.GrpcService.GoogleGrpc.CallCredentials." - "MetadataCredentialsFromPlugin"; - - string name = 1; - - // [#extension-category: envoy.grpc_credentials] - oneof config_type { - google.protobuf.Any typed_config = 3; - - google.protobuf.Struct hidden_envoy_deprecated_config = 2 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - } - } - - // Security token service configuration that allows Google gRPC to - // fetch security token from an OAuth 2.0 authorization server. - // See https://tools.ietf.org/html/draft-ietf-oauth-token-exchange-16 and - // https://github.com/grpc/grpc/pull/19587. 
- // [#next-free-field: 10] - message StsService { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.core.GrpcService.GoogleGrpc.CallCredentials.StsService"; - - // URI of the token exchange service that handles token exchange requests. - // [#comment:TODO(asraa): Add URI validation when implemented. Tracked by - // https://github.com/envoyproxy/protoc-gen-validate/issues/303] - string token_exchange_service_uri = 1; - - // Location of the target service or resource where the client - // intends to use the requested security token. - string resource = 2; - - // Logical name of the target service where the client intends to - // use the requested security token. - string audience = 3; - - // The desired scope of the requested security token in the - // context of the service or resource where the token will be used. - string scope = 4; - - // Type of the requested security token. - string requested_token_type = 5; - - // The path of subject token, a security token that represents the - // identity of the party on behalf of whom the request is being made. - string subject_token_path = 6 [(validate.rules).string = {min_len: 1}]; - - // Type of the subject token. - string subject_token_type = 7 [(validate.rules).string = {min_len: 1}]; - - // The path of actor token, a security token that represents the identity - // of the acting party. The acting party is authorized to use the - // requested security token and act on behalf of the subject. - string actor_token_path = 8; - - // Type of the actor token. - string actor_token_type = 9; - } - - oneof credential_specifier { - option (validate.required) = true; - - // Access token credentials. - // https://grpc.io/grpc/cpp/namespacegrpc.html#ad3a80da696ffdaea943f0f858d7a360d. - string access_token = 1; - - // Google Compute Engine credentials. 
- // https://grpc.io/grpc/cpp/namespacegrpc.html#a6beb3ac70ff94bd2ebbd89b8f21d1f61 - google.protobuf.Empty google_compute_engine = 2; - - // Google refresh token credentials. - // https://grpc.io/grpc/cpp/namespacegrpc.html#a96901c997b91bc6513b08491e0dca37c. - string google_refresh_token = 3; - - // Service Account JWT Access credentials. - // https://grpc.io/grpc/cpp/namespacegrpc.html#a92a9f959d6102461f66ee973d8e9d3aa. - ServiceAccountJWTAccessCredentials service_account_jwt_access = 4; - - // Google IAM credentials. - // https://grpc.io/grpc/cpp/namespacegrpc.html#a9fc1fc101b41e680d47028166e76f9d0. - GoogleIAMCredentials google_iam = 5; - - // Custom authenticator credentials. - // https://grpc.io/grpc/cpp/namespacegrpc.html#a823c6a4b19ffc71fb33e90154ee2ad07. - // https://grpc.io/docs/guides/auth.html#extending-grpc-to-support-other-authentication-mechanisms. - MetadataCredentialsFromPlugin from_plugin = 6; - - // Custom security token service which implements OAuth 2.0 token exchange. - // https://tools.ietf.org/html/draft-ietf-oauth-token-exchange-16 - // See https://github.com/grpc/grpc/pull/19587. - StsService sts_service = 7; - } - } - - // Channel arguments. - message ChannelArgs { - message Value { - // Pointer values are not supported, since they don't make any sense when - // delivered via the API. - oneof value_specifier { - option (validate.required) = true; - - string string_value = 1; - - int64 int_value = 2; - } - } - - // See grpc_types.h GRPC_ARG #defines for keys that work here. - map args = 1; - } - - // The target URI when using the `Google C++ gRPC client - // `_. SSL credentials will be supplied in - // :ref:`channel_credentials `. - string target_uri = 1 [(validate.rules).string = {min_len: 1}]; - - ChannelCredentials channel_credentials = 2; - - // A set of call credentials that can be composed with `channel credentials - // `_. 
- repeated CallCredentials call_credentials = 3; - - // The human readable prefix to use when emitting statistics for the gRPC - // service. - // - // .. csv-table:: - // :header: Name, Type, Description - // :widths: 1, 1, 2 - // - // streams_total, Counter, Total number of streams opened - // streams_closed_, Counter, Total streams closed with - string stat_prefix = 4 [(validate.rules).string = {min_len: 1}]; - - // The name of the Google gRPC credentials factory to use. This must have been registered with - // Envoy. If this is empty, a default credentials factory will be used that sets up channel - // credentials based on other configuration parameters. - string credentials_factory_name = 5; - - // Additional configuration for site-specific customizations of the Google - // gRPC library. - google.protobuf.Struct config = 6; - - // How many bytes each stream can buffer internally. - // If not set an implementation defined default is applied (1MiB). - google.protobuf.UInt32Value per_stream_buffer_limit_bytes = 7; - - // Custom channels args. - ChannelArgs channel_args = 8; - } - - reserved 4; - - oneof target_specifier { - option (validate.required) = true; - - // Envoy's in-built gRPC client. - // See the :ref:`gRPC services overview ` - // documentation for discussion on gRPC client selection. - EnvoyGrpc envoy_grpc = 1; - - // `Google C++ gRPC client `_ - // See the :ref:`gRPC services overview ` - // documentation for discussion on gRPC client selection. - GoogleGrpc google_grpc = 2; - } - - // The timeout for the gRPC request. This is the timeout for a specific - // request. - google.protobuf.Duration timeout = 3; - - // Additional metadata to include in streams initiated to the GrpcService. This can be used for - // scenarios in which additional ad hoc authorization headers (e.g. ``x-foo-bar: baz-key``) are to - // be injected. For more information, including details on header value syntax, see the - // documentation on :ref:`custom request headers - // `. 
- repeated HeaderValue initial_metadata = 5; -} diff --git a/generated_api_shadow/envoy/config/core/v3/health_check.proto b/generated_api_shadow/envoy/config/core/v3/health_check.proto deleted file mode 100644 index dc7adc97a3257..0000000000000 --- a/generated_api_shadow/envoy/config/core/v3/health_check.proto +++ /dev/null @@ -1,377 +0,0 @@ -syntax = "proto3"; - -package envoy.config.core.v3; - -import "envoy/config/core/v3/base.proto"; -import "envoy/config/core/v3/event_service_config.proto"; -import "envoy/type/matcher/v3/string.proto"; -import "envoy/type/v3/http.proto"; -import "envoy/type/v3/range.proto"; - -import "google/protobuf/any.proto"; -import "google/protobuf/duration.proto"; -import "google/protobuf/struct.proto"; -import "google/protobuf/wrappers.proto"; - -import "envoy/annotations/deprecation.proto"; -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.core.v3"; -option java_outer_classname = "HealthCheckProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Health check] -// * Health checking :ref:`architecture overview `. -// * If health checking is configured for a cluster, additional statistics are emitted. They are -// documented :ref:`here `. - -// Endpoint health status. -enum HealthStatus { - // The health status is not known. This is interpreted by Envoy as *HEALTHY*. - UNKNOWN = 0; - - // Healthy. - HEALTHY = 1; - - // Unhealthy. - UNHEALTHY = 2; - - // Connection draining in progress. E.g., - // ``_ - // or - // ``_. - // This is interpreted by Envoy as *UNHEALTHY*. - DRAINING = 3; - - // Health check timed out. This is part of HDS and is interpreted by Envoy as - // *UNHEALTHY*. - TIMEOUT = 4; - - // Degraded. 
- DEGRADED = 5; -} - -// [#next-free-field: 25] -message HealthCheck { - option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.HealthCheck"; - - // Describes the encoding of the payload bytes in the payload. - message Payload { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.core.HealthCheck.Payload"; - - oneof payload { - option (validate.required) = true; - - // Hex encoded payload. E.g., "000000FF". - string text = 1 [(validate.rules).string = {min_len: 1}]; - - // [#not-implemented-hide:] Binary payload. - bytes binary = 2; - } - } - - // [#next-free-field: 12] - message HttpHealthCheck { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.core.HealthCheck.HttpHealthCheck"; - - // The value of the host header in the HTTP health check request. If - // left empty (default value), the name of the cluster this health check is associated - // with will be used. The host header can be customized for a specific endpoint by setting the - // :ref:`hostname ` field. - string host = 1 [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}]; - - // Specifies the HTTP path that will be requested during health checking. For example - // */healthcheck*. - string path = 2 - [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_VALUE strict: false}]; - - // [#not-implemented-hide:] HTTP specific payload. - Payload send = 3; - - // [#not-implemented-hide:] HTTP specific response. - Payload receive = 4; - - // Specifies a list of HTTP headers that should be added to each request that is sent to the - // health checked cluster. For more information, including details on header value syntax, see - // the documentation on :ref:`custom request headers - // `. 
- repeated HeaderValueOption request_headers_to_add = 6 - [(validate.rules).repeated = {max_items: 1000}]; - - // Specifies a list of HTTP headers that should be removed from each request that is sent to the - // health checked cluster. - repeated string request_headers_to_remove = 8 [(validate.rules).repeated = { - items {string {well_known_regex: HTTP_HEADER_NAME strict: false}} - }]; - - // Specifies a list of HTTP response statuses considered healthy. If provided, replaces default - // 200-only policy - 200 must be included explicitly as needed. Ranges follow half-open - // semantics of :ref:`Int64Range `. The start and end of each - // range are required. Only statuses in the range [100, 600) are allowed. - repeated type.v3.Int64Range expected_statuses = 9; - - // Use specified application protocol for health checks. - type.v3.CodecClientType codec_client_type = 10 [(validate.rules).enum = {defined_only: true}]; - - // An optional service name parameter which is used to validate the identity of - // the health checked cluster using a :ref:`StringMatcher - // `. See the :ref:`architecture overview - // ` for more information. - type.matcher.v3.StringMatcher service_name_matcher = 11; - - string hidden_envoy_deprecated_service_name = 5 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - - bool hidden_envoy_deprecated_use_http2 = 7 [ - deprecated = true, - (envoy.annotations.deprecated_at_minor_version) = "3.0", - (envoy.annotations.disallowed_by_default) = true - ]; - } - - message TcpHealthCheck { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.core.HealthCheck.TcpHealthCheck"; - - // Empty payloads imply a connect-only health check. - Payload send = 1; - - // When checking the response, “fuzzy” matching is performed such that each - // binary block must be found, and in the order specified, but not - // necessarily contiguous. 
- repeated Payload receive = 2; - } - - message RedisHealthCheck { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.core.HealthCheck.RedisHealthCheck"; - - // If set, optionally perform ``EXISTS `` instead of ``PING``. A return value - // from Redis of 0 (does not exist) is considered a passing healthcheck. A return value other - // than 0 is considered a failure. This allows the user to mark a Redis instance for maintenance - // by setting the specified key to any value and waiting for traffic to drain. - string key = 1; - } - - // `grpc.health.v1.Health - // `_-based - // healthcheck. See `gRPC doc `_ - // for details. - message GrpcHealthCheck { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.core.HealthCheck.GrpcHealthCheck"; - - // An optional service name parameter which will be sent to gRPC service in - // `grpc.health.v1.HealthCheckRequest - // `_. - // message. See `gRPC health-checking overview - // `_ for more information. - string service_name = 1; - - // The value of the :authority header in the gRPC health check request. If - // left empty (default value), the name of the cluster this health check is associated - // with will be used. The authority header can be customized for a specific endpoint by setting - // the :ref:`hostname ` field. - string authority = 2 - [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}]; - } - - // Custom health check. - message CustomHealthCheck { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.core.HealthCheck.CustomHealthCheck"; - - // The registered name of the custom health checker. - string name = 1 [(validate.rules).string = {min_len: 1}]; - - // A custom health checker specific configuration which depends on the custom health checker - // being instantiated. See :api:`envoy/config/health_checker` for reference. 
- // [#extension-category: envoy.health_checkers] - oneof config_type { - google.protobuf.Any typed_config = 3; - - google.protobuf.Struct hidden_envoy_deprecated_config = 2 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - } - } - - // Health checks occur over the transport socket specified for the cluster. This implies that if a - // cluster is using a TLS-enabled transport socket, the health check will also occur over TLS. - // - // This allows overriding the cluster TLS settings, just for health check connections. - message TlsOptions { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.core.HealthCheck.TlsOptions"; - - // Specifies the ALPN protocols for health check connections. This is useful if the - // corresponding upstream is using ALPN-based :ref:`FilterChainMatch - // ` along with different protocols for health checks - // versus data connections. If empty, no ALPN protocols will be set on health check connections. - repeated string alpn_protocols = 1; - } - - reserved 10; - - // The time to wait for a health check response. If the timeout is reached the - // health check attempt will be considered a failure. - google.protobuf.Duration timeout = 1 [(validate.rules).duration = { - required: true - gt {} - }]; - - // The interval between health checks. - google.protobuf.Duration interval = 2 [(validate.rules).duration = { - required: true - gt {} - }]; - - // An optional jitter amount in milliseconds. If specified, Envoy will start health - // checking after for a random time in ms between 0 and initial_jitter. This only - // applies to the first health check. - google.protobuf.Duration initial_jitter = 20; - - // An optional jitter amount in milliseconds. If specified, during every - // interval Envoy will add interval_jitter to the wait time. - google.protobuf.Duration interval_jitter = 3; - - // An optional jitter amount as a percentage of interval_ms. 
If specified, - // during every interval Envoy will add interval_ms * - // interval_jitter_percent / 100 to the wait time. - // - // If interval_jitter_ms and interval_jitter_percent are both set, both of - // them will be used to increase the wait time. - uint32 interval_jitter_percent = 18; - - // The number of unhealthy health checks required before a host is marked - // unhealthy. Note that for *http* health checking if a host responds with 503 - // this threshold is ignored and the host is considered unhealthy immediately. - google.protobuf.UInt32Value unhealthy_threshold = 4 [(validate.rules).message = {required: true}]; - - // The number of healthy health checks required before a host is marked - // healthy. Note that during startup, only a single successful health check is - // required to mark a host healthy. - google.protobuf.UInt32Value healthy_threshold = 5 [(validate.rules).message = {required: true}]; - - // [#not-implemented-hide:] Non-serving port for health checking. - google.protobuf.UInt32Value alt_port = 6; - - // Reuse health check connection between health checks. Default is true. - google.protobuf.BoolValue reuse_connection = 7; - - oneof health_checker { - option (validate.required) = true; - - // HTTP health check. - HttpHealthCheck http_health_check = 8; - - // TCP health check. - TcpHealthCheck tcp_health_check = 9; - - // gRPC health check. - GrpcHealthCheck grpc_health_check = 11; - - // Custom health check. - CustomHealthCheck custom_health_check = 13; - } - - // The "no traffic interval" is a special health check interval that is used when a cluster has - // never had traffic routed to it. This lower interval allows cluster information to be kept up to - // date, without sending a potentially large amount of active health checking traffic for no - // reason. Once a cluster has been used for traffic routing, Envoy will shift back to using the - // standard health check interval that is defined. 
Note that this interval takes precedence over - // any other. - // - // The default value for "no traffic interval" is 60 seconds. - google.protobuf.Duration no_traffic_interval = 12 [(validate.rules).duration = {gt {}}]; - - // The "no traffic healthy interval" is a special health check interval that - // is used for hosts that are currently passing active health checking - // (including new hosts) when the cluster has received no traffic. - // - // This is useful for when we want to send frequent health checks with - // `no_traffic_interval` but then revert to lower frequency `no_traffic_healthy_interval` once - // a host in the cluster is marked as healthy. - // - // Once a cluster has been used for traffic routing, Envoy will shift back to using the - // standard health check interval that is defined. - // - // If no_traffic_healthy_interval is not set, it will default to the - // no traffic interval and send that interval regardless of health state. - google.protobuf.Duration no_traffic_healthy_interval = 24 [(validate.rules).duration = {gt {}}]; - - // The "unhealthy interval" is a health check interval that is used for hosts that are marked as - // unhealthy. As soon as the host is marked as healthy, Envoy will shift back to using the - // standard health check interval that is defined. - // - // The default value for "unhealthy interval" is the same as "interval". - google.protobuf.Duration unhealthy_interval = 14 [(validate.rules).duration = {gt {}}]; - - // The "unhealthy edge interval" is a special health check interval that is used for the first - // health check right after a host is marked as unhealthy. For subsequent health checks - // Envoy will shift back to using either "unhealthy interval" if present or the standard health - // check interval that is defined. - // - // The default value for "unhealthy edge interval" is the same as "unhealthy interval". 
- google.protobuf.Duration unhealthy_edge_interval = 15 [(validate.rules).duration = {gt {}}]; - - // The "healthy edge interval" is a special health check interval that is used for the first - // health check right after a host is marked as healthy. For subsequent health checks - // Envoy will shift back to using the standard health check interval that is defined. - // - // The default value for "healthy edge interval" is the same as the default interval. - google.protobuf.Duration healthy_edge_interval = 16 [(validate.rules).duration = {gt {}}]; - - // Specifies the path to the :ref:`health check event log `. - // If empty, no event log will be written. - string event_log_path = 17; - - // [#not-implemented-hide:] - // The gRPC service for the health check event service. - // If empty, health check events won't be sent to a remote endpoint. - EventServiceConfig event_service = 22; - - // If set to true, health check failure events will always be logged. If set to false, only the - // initial health check failure event will be logged. - // The default value is false. - bool always_log_health_check_failures = 19; - - // This allows overriding the cluster TLS settings, just for health check connections. - TlsOptions tls_options = 21; - - // Optional key/value pairs that will be used to match a transport socket from those specified in the cluster's - // :ref:`tranport socket matches `. - // For example, the following match criteria - // - // .. code-block:: yaml - // - // transport_socket_match_criteria: - // useMTLS: true - // - // Will match the following :ref:`cluster socket match ` - // - // .. code-block:: yaml - // - // transport_socket_matches: - // - name: "useMTLS" - // match: - // useMTLS: true - // transport_socket: - // name: envoy.transport_sockets.tls - // config: { ... } # tls socket configuration - // - // If this field is set, then for health checks it will supersede an entry of *envoy.transport_socket* in the - // :ref:`LbEndpoint.Metadata `. 
- // This allows using different transport socket capabilities for health checking versus proxying to the - // endpoint. - // - // If the key/values pairs specified do not match any - // :ref:`transport socket matches `, - // the cluster's :ref:`transport socket ` - // will be used for health check socket configuration. - google.protobuf.Struct transport_socket_match_criteria = 23; -} diff --git a/generated_api_shadow/envoy/config/core/v3/http_uri.proto b/generated_api_shadow/envoy/config/core/v3/http_uri.proto deleted file mode 100644 index 5d1fc239e07ed..0000000000000 --- a/generated_api_shadow/envoy/config/core/v3/http_uri.proto +++ /dev/null @@ -1,56 +0,0 @@ -syntax = "proto3"; - -package envoy.config.core.v3; - -import "google/protobuf/duration.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.core.v3"; -option java_outer_classname = "HttpUriProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: HTTP Service URI ] - -// Envoy external URI descriptor -message HttpUri { - option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.HttpUri"; - - // The HTTP server URI. It should be a full FQDN with protocol, host and path. - // - // Example: - // - // .. code-block:: yaml - // - // uri: https://www.googleapis.com/oauth2/v1/certs - // - string uri = 1 [(validate.rules).string = {min_len: 1}]; - - // Specify how `uri` is to be fetched. Today, this requires an explicit - // cluster, but in the future we may support dynamic cluster creation or - // inline DNS resolution. See `issue - // `_. - oneof http_upstream_type { - option (validate.required) = true; - - // A cluster is created in the Envoy "cluster_manager" config - // section. This field specifies the cluster name. - // - // Example: - // - // .. 
code-block:: yaml - // - // cluster: jwks_cluster - // - string cluster = 2 [(validate.rules).string = {min_len: 1}]; - } - - // Sets the maximum duration in milliseconds that a response can take to arrive upon request. - google.protobuf.Duration timeout = 3 [(validate.rules).duration = { - required: true - gte {} - }]; -} diff --git a/generated_api_shadow/envoy/config/core/v3/protocol.proto b/generated_api_shadow/envoy/config/core/v3/protocol.proto deleted file mode 100644 index 8f2347eb55179..0000000000000 --- a/generated_api_shadow/envoy/config/core/v3/protocol.proto +++ /dev/null @@ -1,494 +0,0 @@ -syntax = "proto3"; - -package envoy.config.core.v3; - -import "envoy/config/core/v3/extension.proto"; -import "envoy/type/v3/percent.proto"; - -import "google/protobuf/duration.proto"; -import "google/protobuf/wrappers.proto"; - -import "envoy/annotations/deprecation.proto"; -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.core.v3"; -option java_outer_classname = "ProtocolProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Protocol options] - -// [#not-implemented-hide:] -message TcpProtocolOptions { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.core.TcpProtocolOptions"; -} - -// QUIC protocol options which apply to both downstream and upstream connections. -message QuicProtocolOptions { - // Maximum number of streams that the client can negotiate per connection. 100 - // if not specified. - google.protobuf.UInt32Value max_concurrent_streams = 1; - - // `Initial stream-level flow-control receive window - // `_ size. Valid values range from - // 1 to 16777216 (2^24, maximum supported by QUICHE) and defaults to 65536 (2^16). - // - // NOTE: 16384 (2^14) is the minimum window size supported in Google QUIC. 
If configured smaller than it, we will use 16384 instead. - // QUICHE IETF Quic implementation supports 1 bytes window. We only support increasing the default window size now, so it's also the minimum. - // - // This field also acts as a soft limit on the number of bytes Envoy will buffer per-stream in the - // QUIC stream send and receive buffers. Once the buffer reaches this pointer, watermark callbacks will fire to - // stop the flow of data to the stream buffers. - google.protobuf.UInt32Value initial_stream_window_size = 2 - [(validate.rules).uint32 = {lte: 16777216 gte: 1}]; - - // Similar to *initial_stream_window_size*, but for connection-level - // flow-control. Valid values rage from 1 to 25165824 (24MB, maximum supported by QUICHE) and defaults to 65536 (2^16). - // window. Currently, this has the same minimum/default as *initial_stream_window_size*. - // - // NOTE: 16384 (2^14) is the minimum window size supported in Google QUIC. We only support increasing the default - // window size now, so it's also the minimum. - google.protobuf.UInt32Value initial_connection_window_size = 3 - [(validate.rules).uint32 = {lte: 25165824 gte: 1}]; -} - -message UpstreamHttpProtocolOptions { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.core.UpstreamHttpProtocolOptions"; - - // Set transport socket `SNI `_ for new - // upstream connections based on the downstream HTTP host/authority header, as seen by the - // :ref:`router filter `. - bool auto_sni = 1; - - // Automatic validate upstream presented certificate for new upstream connections based on the - // downstream HTTP host/authority header, as seen by the - // :ref:`router filter `. - // This field is intended to set with `auto_sni` field. - bool auto_san_validation = 2; -} - -// Configures the alternate protocols cache which tracks alternate protocols that can be used to -// make an HTTP connection to an origin server. 
See https://tools.ietf.org/html/rfc7838 for -// HTTP Alternative Services and https://datatracker.ietf.org/doc/html/draft-ietf-dnsop-svcb-https-04 -// for the "HTTPS" DNS resource record. -message AlternateProtocolsCacheOptions { - // The name of the cache. Multiple named caches allow independent alternate protocols cache - // configurations to operate within a single Envoy process using different configurations. All - // alternate protocols cache options with the same name *must* be equal in all fields when - // referenced from different configuration components. Configuration will fail to load if this is - // not the case. - string name = 1 [(validate.rules).string = {min_len: 1}]; - - // The maximum number of entries that the cache will hold. If not specified defaults to 1024. - // - // .. note: - // - // The implementation is approximate and enforced independently on each worker thread, thus - // it is possible for the maximum entries in the cache to go slightly above the configured - // value depending on timing. This is similar to how other circuit breakers work. - google.protobuf.UInt32Value max_entries = 2 [(validate.rules).uint32 = {gt: 0}]; -} - -// [#next-free-field: 7] -message HttpProtocolOptions { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.core.HttpProtocolOptions"; - - // Action to take when Envoy receives client request with header names containing underscore - // characters. - // Underscore character is allowed in header names by the RFC-7230 and this behavior is implemented - // as a security measure due to systems that treat '_' and '-' as interchangeable. Envoy by default allows client request headers with underscore - // characters. - enum HeadersWithUnderscoresAction { - // Allow headers with underscores. This is the default behavior. - ALLOW = 0; - - // Reject client request. HTTP/1 requests are rejected with the 400 status. HTTP/2 requests - // end with the stream reset. 
The "httpN.requests_rejected_with_underscores_in_headers" counter - // is incremented for each rejected request. - REJECT_REQUEST = 1; - - // Drop the header with name containing underscores. The header is dropped before the filter chain is - // invoked and as such filters will not see dropped headers. The - // "httpN.dropped_headers_with_underscores" is incremented for each dropped header. - DROP_HEADER = 2; - } - - // The idle timeout for connections. The idle timeout is defined as the - // period in which there are no active requests. When the - // idle timeout is reached the connection will be closed. If the connection is an HTTP/2 - // downstream connection a drain sequence will occur prior to closing the connection, see - // :ref:`drain_timeout - // `. - // Note that request based timeouts mean that HTTP/2 PINGs will not keep the connection alive. - // If not specified, this defaults to 1 hour. To disable idle timeouts explicitly set this to 0. - // - // .. warning:: - // Disabling this timeout has a highly likelihood of yielding connection leaks due to lost TCP - // FIN packets, etc. - // - // If the :ref:`overload action ` "envoy.overload_actions.reduce_timeouts" - // is configured, this timeout is scaled for downstream connections according to the value for - // :ref:`HTTP_DOWNSTREAM_CONNECTION_IDLE `. - google.protobuf.Duration idle_timeout = 1; - - // The maximum duration of a connection. The duration is defined as a period since a connection - // was established. If not set, there is no max duration. When max_connection_duration is reached - // the connection will be closed. Drain sequence will occur prior to closing the connection if - // if's applicable. See :ref:`drain_timeout - // `. - // Note: not implemented for upstream connections. - google.protobuf.Duration max_connection_duration = 3; - - // The maximum number of headers. If unconfigured, the default - // maximum number of request headers allowed is 100. 
Requests that exceed this limit will receive - // a 431 response for HTTP/1.x and cause a stream reset for HTTP/2. - google.protobuf.UInt32Value max_headers_count = 2 [(validate.rules).uint32 = {gte: 1}]; - - // Total duration to keep alive an HTTP request/response stream. If the time limit is reached the stream will be - // reset independent of any other timeouts. If not specified, this value is not set. - google.protobuf.Duration max_stream_duration = 4; - - // Action to take when a client request with a header name containing underscore characters is received. - // If this setting is not specified, the value defaults to ALLOW. - // Note: upstream responses are not affected by this setting. - HeadersWithUnderscoresAction headers_with_underscores_action = 5; - - // Optional maximum requests for both upstream and downstream connections. - // If not specified, there is no limit. - // Setting this parameter to 1 will effectively disable keep alive. - // For HTTP/2 and HTTP/3, due to concurrent stream processing, the limit is approximate. - google.protobuf.UInt32Value max_requests_per_connection = 6; -} - -// [#next-free-field: 8] -message Http1ProtocolOptions { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.core.Http1ProtocolOptions"; - - // [#next-free-field: 9] - message HeaderKeyFormat { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.core.Http1ProtocolOptions.HeaderKeyFormat"; - - message ProperCaseWords { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.core.Http1ProtocolOptions.HeaderKeyFormat.ProperCaseWords"; - } - - oneof header_format { - option (validate.required) = true; - - // Formats the header by proper casing words: the first character and any character following - // a special character will be capitalized if it's an alpha character. For example, - // "content-type" becomes "Content-Type", and "foo$b#$are" becomes "Foo$B#$Are". 
- // Note that while this results in most headers following conventional casing, certain headers - // are not covered. For example, the "TE" header will be formatted as "Te". - ProperCaseWords proper_case_words = 1; - - // Configuration for stateful formatter extensions that allow using received headers to - // affect the output of encoding headers. E.g., preserving case during proxying. - // [#extension-category: envoy.http.stateful_header_formatters] - TypedExtensionConfig stateful_formatter = 8; - } - } - - // Handle HTTP requests with absolute URLs in the requests. These requests - // are generally sent by clients to forward/explicit proxies. This allows clients to configure - // envoy as their HTTP proxy. In Unix, for example, this is typically done by setting the - // *http_proxy* environment variable. - google.protobuf.BoolValue allow_absolute_url = 1; - - // Handle incoming HTTP/1.0 and HTTP 0.9 requests. - // This is off by default, and not fully standards compliant. There is support for pre-HTTP/1.1 - // style connect logic, dechunking, and handling lack of client host iff - // *default_host_for_http_10* is configured. - bool accept_http_10 = 2; - - // A default host for HTTP/1.0 requests. This is highly suggested if *accept_http_10* is true as - // Envoy does not otherwise support HTTP/1.0 without a Host header. - // This is a no-op if *accept_http_10* is not true. - string default_host_for_http_10 = 3; - - // Describes how the keys for response headers should be formatted. By default, all header keys - // are lower cased. - HeaderKeyFormat header_key_format = 4; - - // Enables trailers for HTTP/1. By default the HTTP/1 codec drops proxied trailers. - // - // .. attention:: - // - // Note that this only happens when Envoy is chunk encoding which occurs when: - // - The request is HTTP/1.1. - // - Is neither a HEAD only request nor a HTTP Upgrade. - // - Not a response to a HEAD request. - // - The content length header is not present. 
- bool enable_trailers = 5; - - // Allows Envoy to process requests/responses with both `Content-Length` and `Transfer-Encoding` - // headers set. By default such messages are rejected, but if option is enabled - Envoy will - // remove Content-Length header and process message. - // See `RFC7230, sec. 3.3.3 ` for details. - // - // .. attention:: - // Enabling this option might lead to request smuggling vulnerability, especially if traffic - // is proxied via multiple layers of proxies. - bool allow_chunked_length = 6; - - // Allows invalid HTTP messaging. When this option is false, then Envoy will terminate - // HTTP/1.1 connections upon receiving an invalid HTTP message. However, - // when this option is true, then Envoy will leave the HTTP/1.1 connection - // open where possible. - // If set, this overrides any HCM :ref:`stream_error_on_invalid_http_messaging - // `. - google.protobuf.BoolValue override_stream_error_on_invalid_http_message = 7; -} - -message KeepaliveSettings { - // Send HTTP/2 PING frames at this period, in order to test that the connection is still alive. - // If this is zero, interval PINGs will not be sent. - google.protobuf.Duration interval = 1 [(validate.rules).duration = {gte {nanos: 1000000}}]; - - // How long to wait for a response to a keepalive PING. If a response is not received within this - // time period, the connection will be aborted. - google.protobuf.Duration timeout = 2 [(validate.rules).duration = { - required: true - gte {nanos: 1000000} - }]; - - // A random jitter amount as a percentage of interval that will be added to each interval. - // A value of zero means there will be no jitter. - // The default value is 15%. - type.v3.Percent interval_jitter = 3; - - // If the connection has been idle for this duration, send a HTTP/2 ping ahead - // of new stream creation, to quickly detect dead connections. - // If this is zero, this type of PING will not be sent. 
- // If an interval ping is outstanding, a second ping will not be sent as the - // interval ping will determine if the connection is dead. - google.protobuf.Duration connection_idle_interval = 4 - [(validate.rules).duration = {gte {nanos: 1000000}}]; -} - -// [#next-free-field: 16] -message Http2ProtocolOptions { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.core.Http2ProtocolOptions"; - - // Defines a parameter to be sent in the SETTINGS frame. - // See `RFC7540, sec. 6.5.1 `_ for details. - message SettingsParameter { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.core.Http2ProtocolOptions.SettingsParameter"; - - // The 16 bit parameter identifier. - google.protobuf.UInt32Value identifier = 1 [ - (validate.rules).uint32 = {lte: 65535 gte: 0}, - (validate.rules).message = {required: true} - ]; - - // The 32 bit parameter value. - google.protobuf.UInt32Value value = 2 [(validate.rules).message = {required: true}]; - } - - // `Maximum table size `_ - // (in octets) that the encoder is permitted to use for the dynamic HPACK table. Valid values - // range from 0 to 4294967295 (2^32 - 1) and defaults to 4096. 0 effectively disables header - // compression. - google.protobuf.UInt32Value hpack_table_size = 1; - - // `Maximum concurrent streams `_ - // allowed for peer on one HTTP/2 connection. Valid values range from 1 to 2147483647 (2^31 - 1) - // and defaults to 2147483647. - // - // For upstream connections, this also limits how many streams Envoy will initiate concurrently - // on a single connection. If the limit is reached, Envoy may queue requests or establish - // additional connections (as allowed per circuit breaker limits). - // - // This acts as an upper bound: Envoy will lower the max concurrent streams allowed on a given - // connection based on upstream settings. Config dumps will reflect the configured upper bound, - // not the per-connection negotiated limits. 
- google.protobuf.UInt32Value max_concurrent_streams = 2 - [(validate.rules).uint32 = {lte: 2147483647 gte: 1}]; - - // `Initial stream-level flow-control window - // `_ size. Valid values range from 65535 - // (2^16 - 1, HTTP/2 default) to 2147483647 (2^31 - 1, HTTP/2 maximum) and defaults to 268435456 - // (256 * 1024 * 1024). - // - // NOTE: 65535 is the initial window size from HTTP/2 spec. We only support increasing the default - // window size now, so it's also the minimum. - // - // This field also acts as a soft limit on the number of bytes Envoy will buffer per-stream in the - // HTTP/2 codec buffers. Once the buffer reaches this pointer, watermark callbacks will fire to - // stop the flow of data to the codec buffers. - google.protobuf.UInt32Value initial_stream_window_size = 3 - [(validate.rules).uint32 = {lte: 2147483647 gte: 65535}]; - - // Similar to *initial_stream_window_size*, but for connection-level flow-control - // window. Currently, this has the same minimum/maximum/default as *initial_stream_window_size*. - google.protobuf.UInt32Value initial_connection_window_size = 4 - [(validate.rules).uint32 = {lte: 2147483647 gte: 65535}]; - - // Allows proxying Websocket and other upgrades over H2 connect. - bool allow_connect = 5; - - // [#not-implemented-hide:] Hiding until envoy has full metadata support. - // Still under implementation. DO NOT USE. - // - // Allows metadata. See [metadata - // docs](https://github.com/envoyproxy/envoy/blob/main/source/docs/h2_metadata.md) for more - // information. - bool allow_metadata = 6; - - // Limit the number of pending outbound downstream frames of all types (frames that are waiting to - // be written into the socket). Exceeding this limit triggers flood mitigation and connection is - // terminated. The ``http2.outbound_flood`` stat tracks the number of terminated connections due - // to flood mitigation. The default limit is 10000. 
- // NOTE: flood and abuse mitigation for upstream connections is presently enabled by the - // `envoy.reloadable_features.upstream_http2_flood_checks` flag. - google.protobuf.UInt32Value max_outbound_frames = 7 [(validate.rules).uint32 = {gte: 1}]; - - // Limit the number of pending outbound downstream frames of types PING, SETTINGS and RST_STREAM, - // preventing high memory utilization when receiving continuous stream of these frames. Exceeding - // this limit triggers flood mitigation and connection is terminated. The - // ``http2.outbound_control_flood`` stat tracks the number of terminated connections due to flood - // mitigation. The default limit is 1000. - // NOTE: flood and abuse mitigation for upstream connections is presently enabled by the - // `envoy.reloadable_features.upstream_http2_flood_checks` flag. - google.protobuf.UInt32Value max_outbound_control_frames = 8 [(validate.rules).uint32 = {gte: 1}]; - - // Limit the number of consecutive inbound frames of types HEADERS, CONTINUATION and DATA with an - // empty payload and no end stream flag. Those frames have no legitimate use and are abusive, but - // might be a result of a broken HTTP/2 implementation. The `http2.inbound_empty_frames_flood`` - // stat tracks the number of connections terminated due to flood mitigation. - // Setting this to 0 will terminate connection upon receiving first frame with an empty payload - // and no end stream flag. The default limit is 1. - // NOTE: flood and abuse mitigation for upstream connections is presently enabled by the - // `envoy.reloadable_features.upstream_http2_flood_checks` flag. - google.protobuf.UInt32Value max_consecutive_inbound_frames_with_empty_payload = 9; - - // Limit the number of inbound PRIORITY frames allowed per each opened stream. 
If the number - // of PRIORITY frames received over the lifetime of connection exceeds the value calculated - // using this formula:: - // - // max_inbound_priority_frames_per_stream * (1 + opened_streams) - // - // the connection is terminated. For downstream connections the `opened_streams` is incremented when - // Envoy receives complete response headers from the upstream server. For upstream connection the - // `opened_streams` is incremented when Envoy send the HEADERS frame for a new stream. The - // ``http2.inbound_priority_frames_flood`` stat tracks - // the number of connections terminated due to flood mitigation. The default limit is 100. - // NOTE: flood and abuse mitigation for upstream connections is presently enabled by the - // `envoy.reloadable_features.upstream_http2_flood_checks` flag. - google.protobuf.UInt32Value max_inbound_priority_frames_per_stream = 10; - - // Limit the number of inbound WINDOW_UPDATE frames allowed per DATA frame sent. If the number - // of WINDOW_UPDATE frames received over the lifetime of connection exceeds the value calculated - // using this formula:: - // - // 5 + 2 * (opened_streams + - // max_inbound_window_update_frames_per_data_frame_sent * outbound_data_frames) - // - // the connection is terminated. For downstream connections the `opened_streams` is incremented when - // Envoy receives complete response headers from the upstream server. For upstream connections the - // `opened_streams` is incremented when Envoy sends the HEADERS frame for a new stream. The - // ``http2.inbound_priority_frames_flood`` stat tracks the number of connections terminated due to - // flood mitigation. The default max_inbound_window_update_frames_per_data_frame_sent value is 10. - // Setting this to 1 should be enough to support HTTP/2 implementations with basic flow control, - // but more complex implementations that try to estimate available bandwidth require at least 2. 
- // NOTE: flood and abuse mitigation for upstream connections is presently enabled by the - // `envoy.reloadable_features.upstream_http2_flood_checks` flag. - google.protobuf.UInt32Value max_inbound_window_update_frames_per_data_frame_sent = 11 - [(validate.rules).uint32 = {gte: 1}]; - - // Allows invalid HTTP messaging and headers. When this option is disabled (default), then - // the whole HTTP/2 connection is terminated upon receiving invalid HEADERS frame. However, - // when this option is enabled, only the offending stream is terminated. - // - // This is overridden by HCM :ref:`stream_error_on_invalid_http_messaging - // ` - // iff present. - // - // This is deprecated in favor of :ref:`override_stream_error_on_invalid_http_message - // ` - // - // See `RFC7540, sec. 8.1 `_ for details. - bool stream_error_on_invalid_http_messaging = 12 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - - // Allows invalid HTTP messaging and headers. When this option is disabled (default), then - // the whole HTTP/2 connection is terminated upon receiving invalid HEADERS frame. However, - // when this option is enabled, only the offending stream is terminated. - // - // This overrides any HCM :ref:`stream_error_on_invalid_http_messaging - // ` - // - // See `RFC7540, sec. 8.1 `_ for details. - google.protobuf.BoolValue override_stream_error_on_invalid_http_message = 14; - - // [#not-implemented-hide:] - // Specifies SETTINGS frame parameters to be sent to the peer, with two exceptions: - // - // 1. SETTINGS_ENABLE_PUSH (0x2) is not configurable as HTTP/2 server push is not supported by - // Envoy. - // - // 2. SETTINGS_ENABLE_CONNECT_PROTOCOL (0x8) is only configurable through the named field - // 'allow_connect'. - // - // Note that custom parameters specified through this field can not also be set in the - // corresponding named parameters: - // - // .. 
code-block:: text - // - // ID Field Name - // ---------------- - // 0x1 hpack_table_size - // 0x3 max_concurrent_streams - // 0x4 initial_stream_window_size - // - // Collisions will trigger config validation failure on load/update. Likewise, inconsistencies - // between custom parameters with the same identifier will trigger a failure. - // - // See `IANA HTTP/2 Settings - // `_ for - // standardized identifiers. - repeated SettingsParameter custom_settings_parameters = 13; - - // Send HTTP/2 PING frames to verify that the connection is still healthy. If the remote peer - // does not respond within the configured timeout, the connection will be aborted. - KeepaliveSettings connection_keepalive = 15; -} - -// [#not-implemented-hide:] -message GrpcProtocolOptions { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.core.GrpcProtocolOptions"; - - Http2ProtocolOptions http2_protocol_options = 1; -} - -// A message which allows using HTTP/3. -message Http3ProtocolOptions { - QuicProtocolOptions quic_protocol_options = 1; - - // Allows invalid HTTP messaging and headers. When this option is disabled (default), then - // the whole HTTP/3 connection is terminated upon receiving invalid HEADERS frame. However, - // when this option is enabled, only the offending stream is terminated. - // - // If set, this overrides any HCM :ref:`stream_error_on_invalid_http_messaging - // `. - google.protobuf.BoolValue override_stream_error_on_invalid_http_message = 2; -} - -// A message to control transformations to the :scheme header -message SchemeHeaderTransformation { - oneof transformation { - // Overwrite any Scheme header with the contents of this string. 
- string scheme_to_overwrite = 1 [(validate.rules).string = {in: "http" in: "https"}]; - } -} diff --git a/generated_api_shadow/envoy/config/core/v3/proxy_protocol.proto b/generated_api_shadow/envoy/config/core/v3/proxy_protocol.proto deleted file mode 100644 index 40b33f33ff5b3..0000000000000 --- a/generated_api_shadow/envoy/config/core/v3/proxy_protocol.proto +++ /dev/null @@ -1,25 +0,0 @@ -syntax = "proto3"; - -package envoy.config.core.v3; - -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.config.core.v3"; -option java_outer_classname = "ProxyProtocolProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Proxy Protocol] - -message ProxyProtocolConfig { - enum Version { - // PROXY protocol version 1. Human readable format. - V1 = 0; - - // PROXY protocol version 2. Binary format. - V2 = 1; - } - - // The PROXY protocol version to use. See https://www.haproxy.org/download/2.1/doc/proxy-protocol.txt for details - Version version = 1; -} diff --git a/generated_api_shadow/envoy/config/core/v3/resolver.proto b/generated_api_shadow/envoy/config/core/v3/resolver.proto deleted file mode 100644 index 21d40425f7a6b..0000000000000 --- a/generated_api_shadow/envoy/config/core/v3/resolver.proto +++ /dev/null @@ -1,41 +0,0 @@ -syntax = "proto3"; - -package envoy.config.core.v3; - -import "envoy/config/core/v3/address.proto"; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.core.v3"; -option java_outer_classname = "ResolverProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Resolver] - -// Configuration of DNS resolver option flags which control the behavior of the DNS resolver. -message DnsResolverOptions { - // Use TCP for all DNS queries instead of the default protocol UDP. 
- // Setting this value causes failure if the - // ``envoy.restart_features.use_apple_api_for_dns_lookups`` runtime value is true during - // server startup. Apple's API only uses UDP for DNS resolution. - bool use_tcp_for_dns_lookups = 1; - - // Do not use the default search domains; only query hostnames as-is or as aliases. - bool no_default_search_domain = 2; -} - -// DNS resolution configuration which includes the underlying dns resolver addresses and options. -message DnsResolutionConfig { - // A list of dns resolver addresses. If specified, the DNS client library will perform resolution - // via the underlying DNS resolvers. Otherwise, the default system resolvers - // (e.g., /etc/resolv.conf) will be used. - // Setting this value causes failure if the - // ``envoy.restart_features.use_apple_api_for_dns_lookups`` runtime value is true during - // server startup. Apple's API only allows overriding DNS resolvers via system settings. - repeated Address resolvers = 1 [(validate.rules).repeated = {min_items: 1}]; - - // Configuration of DNS resolver option flags which control the behavior of the DNS resolver. - DnsResolverOptions dns_resolver_options = 2; -} diff --git a/generated_api_shadow/envoy/config/core/v3/socket_option.proto b/generated_api_shadow/envoy/config/core/v3/socket_option.proto deleted file mode 100644 index b22169b86aeb8..0000000000000 --- a/generated_api_shadow/envoy/config/core/v3/socket_option.proto +++ /dev/null @@ -1,56 +0,0 @@ -syntax = "proto3"; - -package envoy.config.core.v3; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.core.v3"; -option java_outer_classname = "SocketOptionProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Socket Option ] - -// Generic socket option message. 
This would be used to set socket options that -// might not exist in upstream kernels or precompiled Envoy binaries. -// [#next-free-field: 7] -message SocketOption { - option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.SocketOption"; - - enum SocketState { - // Socket options are applied after socket creation but before binding the socket to a port - STATE_PREBIND = 0; - - // Socket options are applied after binding the socket to a port but before calling listen() - STATE_BOUND = 1; - - // Socket options are applied after calling listen() - STATE_LISTENING = 2; - } - - // An optional name to give this socket option for debugging, etc. - // Uniqueness is not required and no special meaning is assumed. - string description = 1; - - // Corresponding to the level value passed to setsockopt, such as IPPROTO_TCP - int64 level = 2; - - // The numeric name as passed to setsockopt - int64 name = 3; - - oneof value { - option (validate.required) = true; - - // Because many sockopts take an int value. - int64 int_value = 4; - - // Otherwise it's a byte buffer. - bytes buf_value = 5; - } - - // The state in which the option will be applied. When used in BindConfig - // STATE_PREBIND is currently the only valid value. 
- SocketState state = 6 [(validate.rules).enum = {defined_only: true}]; -} diff --git a/generated_api_shadow/envoy/config/core/v3/substitution_format_string.proto b/generated_api_shadow/envoy/config/core/v3/substitution_format_string.proto deleted file mode 100644 index b2a1c5e13ee43..0000000000000 --- a/generated_api_shadow/envoy/config/core/v3/substitution_format_string.proto +++ /dev/null @@ -1,114 +0,0 @@ -syntax = "proto3"; - -package envoy.config.core.v3; - -import "envoy/config/core/v3/base.proto"; -import "envoy/config/core/v3/extension.proto"; - -import "google/protobuf/struct.proto"; - -import "envoy/annotations/deprecation.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.core.v3"; -option java_outer_classname = "SubstitutionFormatStringProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Substitution format string] - -// Configuration to use multiple :ref:`command operators ` -// to generate a new string in either plain text or JSON format. -// [#next-free-field: 7] -message SubstitutionFormatString { - oneof format { - option (validate.required) = true; - - // Specify a format with command operators to form a text string. - // Its details is described in :ref:`format string`. - // - // For example, setting ``text_format`` like below, - // - // .. validated-code-block:: yaml - // :type-name: envoy.config.core.v3.SubstitutionFormatString - // - // text_format: "%LOCAL_REPLY_BODY%:%RESPONSE_CODE%:path=%REQ(:path)%\n" - // - // generates plain text similar to: - // - // .. code-block:: text - // - // upstream connect error:503:path=/foo - // - // Deprecated in favor of :ref:`text_format_source `. To migrate text format strings, use the :ref:`inline_string ` field. 
- string text_format = 1 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - - // Specify a format with command operators to form a JSON string. - // Its details is described in :ref:`format dictionary`. - // Values are rendered as strings, numbers, or boolean values as appropriate. - // Nested JSON objects may be produced by some command operators (e.g. FILTER_STATE or DYNAMIC_METADATA). - // See the documentation for a specific command operator for details. - // - // .. validated-code-block:: yaml - // :type-name: envoy.config.core.v3.SubstitutionFormatString - // - // json_format: - // status: "%RESPONSE_CODE%" - // message: "%LOCAL_REPLY_BODY%" - // - // The following JSON object would be created: - // - // .. code-block:: json - // - // { - // "status": 500, - // "message": "My error message" - // } - // - google.protobuf.Struct json_format = 2 [(validate.rules).message = {required: true}]; - - // Specify a format with command operators to form a text string. - // Its details is described in :ref:`format string`. - // - // For example, setting ``text_format`` like below, - // - // .. validated-code-block:: yaml - // :type-name: envoy.config.core.v3.SubstitutionFormatString - // - // text_format_source: - // inline_string: "%LOCAL_REPLY_BODY%:%RESPONSE_CODE%:path=%REQ(:path)%\n" - // - // generates plain text similar to: - // - // .. code-block:: text - // - // upstream connect error:503:path=/foo - // - DataSource text_format_source = 5; - } - - // If set to true, when command operators are evaluated to null, - // - // * for ``text_format``, the output of the empty operator is changed from ``-`` to an - // empty string, so that empty values are omitted entirely. - // * for ``json_format`` the keys with null values are omitted in the output structure. - bool omit_empty_values = 3; - - // Specify a *content_type* field. 
- // If this field is not set then ``text/plain`` is used for *text_format* and - // ``application/json`` is used for *json_format*. - // - // .. validated-code-block:: yaml - // :type-name: envoy.config.core.v3.SubstitutionFormatString - // - // content_type: "text/html; charset=UTF-8" - // - string content_type = 4; - - // Specifies a collection of Formatter plugins that can be called from the access log configuration. - // See the formatters extensions documentation for details. - // [#extension-category: envoy.formatter] - repeated TypedExtensionConfig formatters = 6; -} diff --git a/generated_api_shadow/envoy/config/core/v3/udp_socket_config.proto b/generated_api_shadow/envoy/config/core/v3/udp_socket_config.proto deleted file mode 100644 index 00033eabdb8af..0000000000000 --- a/generated_api_shadow/envoy/config/core/v3/udp_socket_config.proto +++ /dev/null @@ -1,31 +0,0 @@ -syntax = "proto3"; - -package envoy.config.core.v3; - -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.core.v3"; -option java_outer_classname = "UdpSocketConfigProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: UDP socket config] - -// Generic UDP socket configuration. -message UdpSocketConfig { - // The maximum size of received UDP datagrams. Using a larger size will cause Envoy to allocate - // more memory per socket. Received datagrams above this size will be dropped. If not set - // defaults to 1500 bytes. - google.protobuf.UInt64Value max_rx_datagram_size = 1 - [(validate.rules).uint64 = {lt: 65536 gt: 0}]; - - // Configures whether Generic Receive Offload (GRO) - // _ is preferred when reading from the - // UDP socket. The default is context dependent and is documented where UdpSocketConfig is used. - // This option affects performance but not functionality. 
If GRO is not supported by the operating - // system, non-GRO receive will be used. - google.protobuf.BoolValue prefer_gro = 2; -} diff --git a/generated_api_shadow/envoy/config/endpoint/v3/BUILD b/generated_api_shadow/envoy/config/endpoint/v3/BUILD deleted file mode 100644 index 7cde9465f0911..0000000000000 --- a/generated_api_shadow/envoy/config/endpoint/v3/BUILD +++ /dev/null @@ -1,16 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/annotations:pkg", - "//envoy/api/v2:pkg", - "//envoy/api/v2/endpoint:pkg", - "//envoy/config/core/v3:pkg", - "//envoy/type/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/config/endpoint/v3/endpoint.proto b/generated_api_shadow/envoy/config/endpoint/v3/endpoint.proto deleted file mode 100644 index afcaa41134c41..0000000000000 --- a/generated_api_shadow/envoy/config/endpoint/v3/endpoint.proto +++ /dev/null @@ -1,120 +0,0 @@ -syntax = "proto3"; - -package envoy.config.endpoint.v3; - -import "envoy/config/endpoint/v3/endpoint_components.proto"; -import "envoy/type/v3/percent.proto"; - -import "google/protobuf/duration.proto"; -import "google/protobuf/wrappers.proto"; - -import "envoy/annotations/deprecation.proto"; -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.endpoint.v3"; -option java_outer_classname = "EndpointProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Endpoint configuration] -// Endpoint discovery :ref:`architecture overview ` - -// Each route from RDS will map to a single cluster or traffic split across -// clusters using weights expressed in the RDS WeightedCluster. 
-// -// With EDS, each cluster is treated independently from a LB perspective, with -// LB taking place between the Localities within a cluster and at a finer -// granularity between the hosts within a locality. The percentage of traffic -// for each endpoint is determined by both its load_balancing_weight, and the -// load_balancing_weight of its locality. First, a locality will be selected, -// then an endpoint within that locality will be chose based on its weight. -// [#next-free-field: 6] -message ClusterLoadAssignment { - option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.ClusterLoadAssignment"; - - // Load balancing policy settings. - // [#next-free-field: 6] - message Policy { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.ClusterLoadAssignment.Policy"; - - // [#not-implemented-hide:] - message DropOverload { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.ClusterLoadAssignment.Policy.DropOverload"; - - // Identifier for the policy specifying the drop. - string category = 1 [(validate.rules).string = {min_len: 1}]; - - // Percentage of traffic that should be dropped for the category. - type.v3.FractionalPercent drop_percentage = 2; - } - - reserved 1; - - // Action to trim the overall incoming traffic to protect the upstream - // hosts. This action allows protection in case the hosts are unable to - // recover from an outage, or unable to autoscale or unable to handle - // incoming traffic volume for any reason. - // - // At the client each category is applied one after the other to generate - // the 'actual' drop percentage on all outgoing traffic. For example: - // - // .. 
code-block:: json - // - // { "drop_overloads": [ - // { "category": "throttle", "drop_percentage": 60 } - // { "category": "lb", "drop_percentage": 50 } - // ]} - // - // The actual drop percentages applied to the traffic at the clients will be - // "throttle"_drop = 60% - // "lb"_drop = 20% // 50% of the remaining 'actual' load, which is 40%. - // actual_outgoing_load = 20% // remaining after applying all categories. - // [#not-implemented-hide:] - repeated DropOverload drop_overloads = 2; - - // Priority levels and localities are considered overprovisioned with this - // factor (in percentage). This means that we don't consider a priority - // level or locality unhealthy until the fraction of healthy hosts - // multiplied by the overprovisioning factor drops below 100. - // With the default value 140(1.4), Envoy doesn't consider a priority level - // or a locality unhealthy until their percentage of healthy hosts drops - // below 72%. For example: - // - // .. code-block:: json - // - // { "overprovisioning_factor": 100 } - // - // Read more at :ref:`priority levels ` and - // :ref:`localities `. - google.protobuf.UInt32Value overprovisioning_factor = 3 [(validate.rules).uint32 = {gt: 0}]; - - // The max time until which the endpoints from this assignment can be used. - // If no new assignments are received before this time expires the endpoints - // are considered stale and should be marked unhealthy. - // Defaults to 0 which means endpoints never go stale. - google.protobuf.Duration endpoint_stale_after = 4 [(validate.rules).duration = {gt {}}]; - - bool hidden_envoy_deprecated_disable_overprovisioning = 5 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - } - - // Name of the cluster. This will be the :ref:`service_name - // ` value if specified - // in the cluster :ref:`EdsClusterConfig - // `. - string cluster_name = 1 [(validate.rules).string = {min_len: 1}]; - - // List of endpoints to load balance to. 
- repeated LocalityLbEndpoints endpoints = 2; - - // Map of named endpoints that can be referenced in LocalityLbEndpoints. - // [#not-implemented-hide:] - map named_endpoints = 5; - - // Load balancing policy settings. - Policy policy = 4; -} diff --git a/generated_api_shadow/envoy/config/endpoint/v3/endpoint_components.proto b/generated_api_shadow/envoy/config/endpoint/v3/endpoint_components.proto deleted file mode 100644 index 1faf64e20c2c6..0000000000000 --- a/generated_api_shadow/envoy/config/endpoint/v3/endpoint_components.proto +++ /dev/null @@ -1,188 +0,0 @@ -syntax = "proto3"; - -package envoy.config.endpoint.v3; - -import "envoy/config/core/v3/address.proto"; -import "envoy/config/core/v3/base.proto"; -import "envoy/config/core/v3/config_source.proto"; -import "envoy/config/core/v3/health_check.proto"; - -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.endpoint.v3"; -option java_outer_classname = "EndpointComponentsProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Endpoints] - -// Upstream host identifier. -message Endpoint { - option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.endpoint.Endpoint"; - - // The optional health check configuration. - message HealthCheckConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.endpoint.Endpoint.HealthCheckConfig"; - - // Optional alternative health check port value. - // - // By default the health check address port of an upstream host is the same - // as the host's serving address port. This provides an alternative health - // check port. Setting this with a non-zero value allows an upstream host - // to have different health check address port. 
- uint32 port_value = 1 [(validate.rules).uint32 = {lte: 65535}]; - - // By default, the host header for L7 health checks is controlled by cluster level configuration - // (see: :ref:`host ` and - // :ref:`authority `). Setting this - // to a non-empty value allows overriding the cluster level configuration for a specific - // endpoint. - string hostname = 2; - } - - // The upstream host address. - // - // .. attention:: - // - // The form of host address depends on the given cluster type. For STATIC or EDS, - // it is expected to be a direct IP address (or something resolvable by the - // specified :ref:`resolver ` - // in the Address). For LOGICAL or STRICT DNS, it is expected to be hostname, - // and will be resolved via DNS. - core.v3.Address address = 1; - - // The optional health check configuration is used as configuration for the - // health checker to contact the health checked host. - // - // .. attention:: - // - // This takes into effect only for upstream clusters with - // :ref:`active health checking ` enabled. - HealthCheckConfig health_check_config = 2; - - // The hostname associated with this endpoint. This hostname is not used for routing or address - // resolution. If provided, it will be associated with the endpoint, and can be used for features - // that require a hostname, like - // :ref:`auto_host_rewrite `. - string hostname = 3; -} - -// An Endpoint that Envoy can route traffic to. -// [#next-free-field: 6] -message LbEndpoint { - option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.endpoint.LbEndpoint"; - - // Upstream host identifier or a named reference. - oneof host_identifier { - Endpoint endpoint = 1; - - // [#not-implemented-hide:] - string endpoint_name = 5; - } - - // Optional health status when known and supplied by EDS server. - core.v3.HealthStatus health_status = 2; - - // The endpoint metadata specifies values that may be used by the load - // balancer to select endpoints in a cluster for a given request. 
The filter - // name should be specified as *envoy.lb*. An example boolean key-value pair - // is *canary*, providing the optional canary status of the upstream host. - // This may be matched against in a route's - // :ref:`RouteAction ` metadata_match field - // to subset the endpoints considered in cluster load balancing. - core.v3.Metadata metadata = 3; - - // The optional load balancing weight of the upstream host; at least 1. - // Envoy uses the load balancing weight in some of the built in load - // balancers. The load balancing weight for an endpoint is divided by the sum - // of the weights of all endpoints in the endpoint's locality to produce a - // percentage of traffic for the endpoint. This percentage is then further - // weighted by the endpoint's locality's load balancing weight from - // LocalityLbEndpoints. If unspecified, each host is presumed to have equal - // weight in a locality. The sum of the weights of all endpoints in the - // endpoint's locality must not exceed uint32_t maximal value (4294967295). - google.protobuf.UInt32Value load_balancing_weight = 4 [(validate.rules).uint32 = {gte: 1}]; -} - -// [#not-implemented-hide:] -// A configuration for a LEDS collection. -message LedsClusterLocalityConfig { - // Configuration for the source of LEDS updates for a Locality. - core.v3.ConfigSource leds_config = 1; - - // The xDS transport protocol glob collection resource name. - // The service is only supported in delta xDS (incremental) mode. - string leds_collection_name = 2; -} - -// A group of endpoints belonging to a Locality. -// One can have multiple LocalityLbEndpoints for a locality, but this is -// generally only done if the different groups need to have different load -// balancing weights or different priorities. 
-// [#next-free-field: 9] -message LocalityLbEndpoints { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.endpoint.LocalityLbEndpoints"; - - // [#not-implemented-hide:] - // A list of endpoints of a specific locality. - message LbEndpointList { - repeated LbEndpoint lb_endpoints = 1; - } - - // Identifies location of where the upstream hosts run. - core.v3.Locality locality = 1; - - // The group of endpoints belonging to the locality specified. - // [#comment:TODO(adisuissa): Once LEDS is implemented this field needs to be - // deprecated and replaced by *load_balancer_endpoints*.] - repeated LbEndpoint lb_endpoints = 2; - - // [#not-implemented-hide:] - oneof lb_config { - // The group of endpoints belonging to the locality. - // [#comment:TODO(adisuissa): Once LEDS is implemented the *lb_endpoints* field - // needs to be deprecated.] - LbEndpointList load_balancer_endpoints = 7; - - // LEDS Configuration for the current locality. - LedsClusterLocalityConfig leds_cluster_locality_config = 8; - } - - // Optional: Per priority/region/zone/sub_zone weight; at least 1. The load - // balancing weight for a locality is divided by the sum of the weights of all - // localities at the same priority level to produce the effective percentage - // of traffic for the locality. The sum of the weights of all localities at - // the same priority level must not exceed uint32_t maximal value (4294967295). - // - // Locality weights are only considered when :ref:`locality weighted load - // balancing ` is - // configured. These weights are ignored otherwise. If no weights are - // specified when locality weighted load balancing is enabled, the locality is - // assigned no load. - google.protobuf.UInt32Value load_balancing_weight = 3 [(validate.rules).uint32 = {gte: 1}]; - - // Optional: the priority for this LocalityLbEndpoints. If unspecified this will - // default to the highest priority (0). 
- // - // Under usual circumstances, Envoy will only select endpoints for the highest - // priority (0). In the event all endpoints for a particular priority are - // unavailable/unhealthy, Envoy will fail over to selecting endpoints for the - // next highest priority group. - // - // Priorities should range from 0 (highest) to N (lowest) without skipping. - uint32 priority = 5 [(validate.rules).uint32 = {lte: 128}]; - - // Optional: Per locality proximity value which indicates how close this - // locality is from the source locality. This value only provides ordering - // information (lower the value, closer it is to the source locality). - // This will be consumed by load balancing schemes that need proximity order - // to determine where to route the requests. - // [#not-implemented-hide:] - google.protobuf.UInt32Value proximity = 6; -} diff --git a/generated_api_shadow/envoy/config/endpoint/v3/load_report.proto b/generated_api_shadow/envoy/config/endpoint/v3/load_report.proto deleted file mode 100644 index c114fa726622d..0000000000000 --- a/generated_api_shadow/envoy/config/endpoint/v3/load_report.proto +++ /dev/null @@ -1,167 +0,0 @@ -syntax = "proto3"; - -package envoy.config.endpoint.v3; - -import "envoy/config/core/v3/address.proto"; -import "envoy/config/core/v3/base.proto"; - -import "google/protobuf/duration.proto"; -import "google/protobuf/struct.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.endpoint.v3"; -option java_outer_classname = "LoadReportProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Load Report] - -// These are stats Envoy reports to the management server at a frequency defined by -// :ref:`LoadStatsResponse.load_reporting_interval`. -// Stats per upstream region/zone and optionally per subzone. 
-// [#next-free-field: 9] -message UpstreamLocalityStats { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.endpoint.UpstreamLocalityStats"; - - // Name of zone, region and optionally endpoint group these metrics were - // collected from. Zone and region names could be empty if unknown. - core.v3.Locality locality = 1; - - // The total number of requests successfully completed by the endpoints in the - // locality. - uint64 total_successful_requests = 2; - - // The total number of unfinished requests - uint64 total_requests_in_progress = 3; - - // The total number of requests that failed due to errors at the endpoint, - // aggregated over all endpoints in the locality. - uint64 total_error_requests = 4; - - // The total number of requests that were issued by this Envoy since - // the last report. This information is aggregated over all the - // upstream endpoints in the locality. - uint64 total_issued_requests = 8; - - // Stats for multi-dimensional load balancing. - repeated EndpointLoadMetricStats load_metric_stats = 5; - - // Endpoint granularity stats information for this locality. This information - // is populated if the Server requests it by setting - // :ref:`LoadStatsResponse.report_endpoint_granularity`. - repeated UpstreamEndpointStats upstream_endpoint_stats = 7; - - // [#not-implemented-hide:] The priority of the endpoint group these metrics - // were collected from. - uint32 priority = 6; -} - -// [#next-free-field: 8] -message UpstreamEndpointStats { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.endpoint.UpstreamEndpointStats"; - - // Upstream host address. - core.v3.Address address = 1; - - // Opaque and implementation dependent metadata of the - // endpoint. Envoy will pass this directly to the management server. - google.protobuf.Struct metadata = 6; - - // The total number of requests successfully completed by the endpoints in the - // locality. 
These include non-5xx responses for HTTP, where errors - // originate at the client and the endpoint responded successfully. For gRPC, - // the grpc-status values are those not covered by total_error_requests below. - uint64 total_successful_requests = 2; - - // The total number of unfinished requests for this endpoint. - uint64 total_requests_in_progress = 3; - - // The total number of requests that failed due to errors at the endpoint. - // For HTTP these are responses with 5xx status codes and for gRPC the - // grpc-status values: - // - // - DeadlineExceeded - // - Unimplemented - // - Internal - // - Unavailable - // - Unknown - // - DataLoss - uint64 total_error_requests = 4; - - // The total number of requests that were issued to this endpoint - // since the last report. A single TCP connection, HTTP or gRPC - // request or stream is counted as one request. - uint64 total_issued_requests = 7; - - // Stats for multi-dimensional load balancing. - repeated EndpointLoadMetricStats load_metric_stats = 5; -} - -message EndpointLoadMetricStats { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.endpoint.EndpointLoadMetricStats"; - - // Name of the metric; may be empty. - string metric_name = 1; - - // Number of calls that finished and included this metric. - uint64 num_requests_finished_with_metric = 2; - - // Sum of metric values across all calls that finished with this metric for - // load_reporting_interval. - double total_metric_value = 3; -} - -// Per cluster load stats. Envoy reports these stats a management server in a -// :ref:`LoadStatsRequest` -// Next ID: 7 -// [#next-free-field: 7] -message ClusterStats { - option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.endpoint.ClusterStats"; - - message DroppedRequests { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.endpoint.ClusterStats.DroppedRequests"; - - // Identifier for the policy specifying the drop. 
- string category = 1 [(validate.rules).string = {min_len: 1}]; - - // Total number of deliberately dropped requests for the category. - uint64 dropped_count = 2; - } - - // The name of the cluster. - string cluster_name = 1 [(validate.rules).string = {min_len: 1}]; - - // The eds_cluster_config service_name of the cluster. - // It's possible that two clusters send the same service_name to EDS, - // in that case, the management server is supposed to do aggregation on the load reports. - string cluster_service_name = 6; - - // Need at least one. - repeated UpstreamLocalityStats upstream_locality_stats = 2 - [(validate.rules).repeated = {min_items: 1}]; - - // Cluster-level stats such as total_successful_requests may be computed by - // summing upstream_locality_stats. In addition, below there are additional - // cluster-wide stats. - // - // The total number of dropped requests. This covers requests - // deliberately dropped by the drop_overload policy and circuit breaking. - uint64 total_dropped_requests = 3; - - // Information about deliberately dropped requests for each category specified - // in the DropOverload policy. - repeated DroppedRequests dropped_requests = 5; - - // Period over which the actual load report occurred. This will be guaranteed to include every - // request reported. Due to system load and delays between the *LoadStatsRequest* sent from Envoy - // and the *LoadStatsResponse* message sent from the management server, this may be longer than - // the requested load reporting interval in the *LoadStatsResponse*. - google.protobuf.Duration load_report_interval = 4; -} diff --git a/generated_api_shadow/envoy/config/filter/README.md b/generated_api_shadow/envoy/config/filter/README.md deleted file mode 100644 index 6ec297d6bc77b..0000000000000 --- a/generated_api_shadow/envoy/config/filter/README.md +++ /dev/null @@ -1,4 +0,0 @@ -Protocol buffer definitions for filters. 
- -Visibility of the definitions should be constrained to none except for -shared definitions between explicitly enumerated filters (e.g. accesslog and fault definitions). diff --git a/generated_api_shadow/envoy/config/filter/accesslog/v2/BUILD b/generated_api_shadow/envoy/config/filter/accesslog/v2/BUILD deleted file mode 100644 index f7c626ac0e5a7..0000000000000 --- a/generated_api_shadow/envoy/config/filter/accesslog/v2/BUILD +++ /dev/null @@ -1,14 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/api/v2/core:pkg", - "//envoy/api/v2/route:pkg", - "//envoy/type:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/config/filter/accesslog/v2/accesslog.proto b/generated_api_shadow/envoy/config/filter/accesslog/v2/accesslog.proto deleted file mode 100644 index 25d27bfbd1064..0000000000000 --- a/generated_api_shadow/envoy/config/filter/accesslog/v2/accesslog.proto +++ /dev/null @@ -1,256 +0,0 @@ -syntax = "proto3"; - -package envoy.config.filter.accesslog.v2; - -import "envoy/api/v2/core/base.proto"; -import "envoy/api/v2/route/route_components.proto"; -import "envoy/type/percent.proto"; - -import "google/protobuf/any.proto"; -import "google/protobuf/struct.proto"; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.filter.accesslog.v2"; -option java_outer_classname = "AccesslogProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = "envoy.config.accesslog.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Common access log types] - -message AccessLog { - // The name of the access log implementation to instantiate. 
The name must - // match a statically registered access log. Current built-in loggers include: - // - // #. "envoy.access_loggers.file" - // #. "envoy.access_loggers.http_grpc" - // #. "envoy.access_loggers.tcp_grpc" - string name = 1; - - // Filter which is used to determine if the access log needs to be written. - AccessLogFilter filter = 2; - - // Custom configuration that depends on the access log being instantiated. Built-in - // configurations include: - // - // #. "envoy.access_loggers.file": :ref:`FileAccessLog - // ` - // #. "envoy.access_loggers.http_grpc": :ref:`HttpGrpcAccessLogConfig - // ` - // #. "envoy.access_loggers.tcp_grpc": :ref:`TcpGrpcAccessLogConfig - // ` - oneof config_type { - google.protobuf.Struct config = 3 [deprecated = true]; - - google.protobuf.Any typed_config = 4; - } -} - -// [#next-free-field: 12] -message AccessLogFilter { - oneof filter_specifier { - option (validate.required) = true; - - // Status code filter. - StatusCodeFilter status_code_filter = 1; - - // Duration filter. - DurationFilter duration_filter = 2; - - // Not health check filter. - NotHealthCheckFilter not_health_check_filter = 3; - - // Traceable filter. - TraceableFilter traceable_filter = 4; - - // Runtime filter. - RuntimeFilter runtime_filter = 5; - - // And filter. - AndFilter and_filter = 6; - - // Or filter. - OrFilter or_filter = 7; - - // Header filter. - HeaderFilter header_filter = 8; - - // Response flag filter. - ResponseFlagFilter response_flag_filter = 9; - - // gRPC status filter. - GrpcStatusFilter grpc_status_filter = 10; - - // Extension filter. - ExtensionFilter extension_filter = 11; - } -} - -// Filter on an integer comparison. -message ComparisonFilter { - enum Op { - // = - EQ = 0; - - // >= - GE = 1; - - // <= - LE = 2; - } - - // Comparison operator. - Op op = 1 [(validate.rules).enum = {defined_only: true}]; - - // Value to compare against. - api.v2.core.RuntimeUInt32 value = 2; -} - -// Filters on HTTP response/status code. 
-message StatusCodeFilter { - // Comparison. - ComparisonFilter comparison = 1 [(validate.rules).message = {required: true}]; -} - -// Filters on total request duration in milliseconds. -message DurationFilter { - // Comparison. - ComparisonFilter comparison = 1 [(validate.rules).message = {required: true}]; -} - -// Filters for requests that are not health check requests. A health check -// request is marked by the health check filter. -message NotHealthCheckFilter { -} - -// Filters for requests that are traceable. See the tracing overview for more -// information on how a request becomes traceable. -message TraceableFilter { -} - -// Filters for random sampling of requests. -message RuntimeFilter { - // Runtime key to get an optional overridden numerator for use in the *percent_sampled* field. - // If found in runtime, this value will replace the default numerator. - string runtime_key = 1 [(validate.rules).string = {min_bytes: 1}]; - - // The default sampling percentage. If not specified, defaults to 0% with denominator of 100. - type.FractionalPercent percent_sampled = 2; - - // By default, sampling pivots on the header - // :ref:`x-request-id` being present. If - // :ref:`x-request-id` is present, the filter will - // consistently sample across multiple hosts based on the runtime key value and the value - // extracted from :ref:`x-request-id`. If it is - // missing, or *use_independent_randomness* is set to true, the filter will randomly sample based - // on the runtime key value alone. *use_independent_randomness* can be used for logging kill - // switches within complex nested :ref:`AndFilter - // ` and :ref:`OrFilter - // ` blocks that are easier to reason about - // from a probability perspective (i.e., setting to true will cause the filter to behave like - // an independent random variable when composed within logical operator filters). 
- bool use_independent_randomness = 3; -} - -// Performs a logical “and” operation on the result of each filter in filters. -// Filters are evaluated sequentially and if one of them returns false, the -// filter returns false immediately. -message AndFilter { - repeated AccessLogFilter filters = 1 [(validate.rules).repeated = {min_items: 2}]; -} - -// Performs a logical “or” operation on the result of each individual filter. -// Filters are evaluated sequentially and if one of them returns true, the -// filter returns true immediately. -message OrFilter { - repeated AccessLogFilter filters = 2 [(validate.rules).repeated = {min_items: 2}]; -} - -// Filters requests based on the presence or value of a request header. -message HeaderFilter { - // Only requests with a header which matches the specified HeaderMatcher will pass the filter - // check. - api.v2.route.HeaderMatcher header = 1 [(validate.rules).message = {required: true}]; -} - -// Filters requests that received responses with an Envoy response flag set. -// A list of the response flags can be found -// in the access log formatter :ref:`documentation`. -message ResponseFlagFilter { - // Only responses with the any of the flags listed in this field will be logged. - // This field is optional. If it is not specified, then any response flag will pass - // the filter check. - repeated string flags = 1 [(validate.rules).repeated = { - items { - string { - in: "LH" - in: "UH" - in: "UT" - in: "LR" - in: "UR" - in: "UF" - in: "UC" - in: "UO" - in: "NR" - in: "DI" - in: "FI" - in: "RL" - in: "UAEX" - in: "RLSE" - in: "DC" - in: "URX" - in: "SI" - in: "IH" - in: "DPE" - } - } - }]; -} - -// Filters gRPC requests based on their response status. If a gRPC status is not provided, the -// filter will infer the status from the HTTP status code. 
-message GrpcStatusFilter { - enum Status { - OK = 0; - CANCELED = 1; - UNKNOWN = 2; - INVALID_ARGUMENT = 3; - DEADLINE_EXCEEDED = 4; - NOT_FOUND = 5; - ALREADY_EXISTS = 6; - PERMISSION_DENIED = 7; - RESOURCE_EXHAUSTED = 8; - FAILED_PRECONDITION = 9; - ABORTED = 10; - OUT_OF_RANGE = 11; - UNIMPLEMENTED = 12; - INTERNAL = 13; - UNAVAILABLE = 14; - DATA_LOSS = 15; - UNAUTHENTICATED = 16; - } - - // Logs only responses that have any one of the gRPC statuses in this field. - repeated Status statuses = 1 [(validate.rules).repeated = {items {enum {defined_only: true}}}]; - - // If included and set to true, the filter will instead block all responses with a gRPC status or - // inferred gRPC status enumerated in statuses, and allow all other responses. - bool exclude = 2; -} - -// Extension filter is statically registered at runtime. -message ExtensionFilter { - // The name of the filter implementation to instantiate. The name must - // match a statically registered filter. - string name = 1; - - // Custom configuration that depends on the filter being instantiated. - oneof config_type { - google.protobuf.Struct config = 2 [deprecated = true]; - - google.protobuf.Any typed_config = 3; - } -} diff --git a/generated_api_shadow/envoy/config/filter/dubbo/router/v2alpha1/BUILD b/generated_api_shadow/envoy/config/filter/dubbo/router/v2alpha1/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/config/filter/dubbo/router/v2alpha1/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/config/filter/dubbo/router/v2alpha1/router.proto b/generated_api_shadow/envoy/config/filter/dubbo/router/v2alpha1/router.proto deleted file mode 100644 index 2e35bb7f7c5b3..0000000000000 --- a/generated_api_shadow/envoy/config/filter/dubbo/router/v2alpha1/router.proto +++ /dev/null @@ -1,19 +0,0 @@ -syntax = "proto3"; - -package envoy.config.filter.dubbo.router.v2alpha1; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.config.filter.dubbo.router.v2alpha1"; -option java_outer_classname = "RouterProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = - "envoy.extensions.filters.network.dubbo_proxy.router.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Router] -// Dubbo router :ref:`configuration overview `. - -message Router { -} diff --git a/generated_api_shadow/envoy/config/filter/fault/v2/BUILD b/generated_api_shadow/envoy/config/filter/fault/v2/BUILD deleted file mode 100644 index 29613b4c3487b..0000000000000 --- a/generated_api_shadow/envoy/config/filter/fault/v2/BUILD +++ /dev/null @@ -1,13 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/annotations:pkg", - "//envoy/type:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/config/filter/fault/v2/fault.proto b/generated_api_shadow/envoy/config/filter/fault/v2/fault.proto deleted file mode 100644 index 016140d10f84a..0000000000000 --- a/generated_api_shadow/envoy/config/filter/fault/v2/fault.proto +++ /dev/null @@ -1,87 +0,0 @@ -syntax = "proto3"; - -package envoy.config.filter.fault.v2; - -import "envoy/type/percent.proto"; - -import "google/protobuf/duration.proto"; - -import "envoy/annotations/deprecation.proto"; -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.filter.fault.v2"; -option java_outer_classname = "FaultProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.common.fault.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Common fault injection types] - -// Delay specification is used to inject latency into the -// HTTP/gRPC/Mongo/Redis operation or delay proxying of TCP connections. -// [#next-free-field: 6] -message FaultDelay { - enum FaultDelayType { - // Unused and deprecated. - FIXED = 0; - } - - // Fault delays are controlled via an HTTP header (if applicable). See the - // :ref:`HTTP fault filter ` documentation for - // more information. - message HeaderDelay { - } - - reserved 2; - - // Unused and deprecated. Will be removed in the next release. - FaultDelayType type = 1 [deprecated = true, (envoy.annotations.disallowed_by_default) = true]; - - oneof fault_delay_secifier { - option (validate.required) = true; - - // Add a fixed delay before forwarding the operation upstream. 
See - // https://developers.google.com/protocol-buffers/docs/proto3#json for - // the JSON/YAML Duration mapping. For HTTP/Mongo/Redis, the specified - // delay will be injected before a new request/operation. For TCP - // connections, the proxying of the connection upstream will be delayed - // for the specified period. This is required if type is FIXED. - google.protobuf.Duration fixed_delay = 3 [(validate.rules).duration = {gt {}}]; - - // Fault delays are controlled via an HTTP header (if applicable). - HeaderDelay header_delay = 5; - } - - // The percentage of operations/connections/requests on which the delay will be injected. - type.FractionalPercent percentage = 4; -} - -// Describes a rate limit to be applied. -message FaultRateLimit { - // Describes a fixed/constant rate limit. - message FixedLimit { - // The limit supplied in KiB/s. - uint64 limit_kbps = 1 [(validate.rules).uint64 = {gte: 1}]; - } - - // Rate limits are controlled via an HTTP header (if applicable). See the - // :ref:`HTTP fault filter ` documentation for - // more information. - message HeaderLimit { - } - - oneof limit_type { - option (validate.required) = true; - - // A fixed rate limit. - FixedLimit fixed_limit = 1; - - // Rate limits are controlled via an HTTP header (if applicable). - HeaderLimit header_limit = 3; - } - - // The percentage of operations/connections/requests on which the rate limit will be injected. - type.FractionalPercent percentage = 2; -} diff --git a/generated_api_shadow/envoy/config/filter/http/adaptive_concurrency/v2alpha/BUILD b/generated_api_shadow/envoy/config/filter/http/adaptive_concurrency/v2alpha/BUILD deleted file mode 100644 index 2ffbc958786b3..0000000000000 --- a/generated_api_shadow/envoy/config/filter/http/adaptive_concurrency/v2alpha/BUILD +++ /dev/null @@ -1,13 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/api/v2/core:pkg", - "//envoy/type:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/config/filter/http/adaptive_concurrency/v2alpha/adaptive_concurrency.proto b/generated_api_shadow/envoy/config/filter/http/adaptive_concurrency/v2alpha/adaptive_concurrency.proto deleted file mode 100644 index 6860b6d6ef2b4..0000000000000 --- a/generated_api_shadow/envoy/config/filter/http/adaptive_concurrency/v2alpha/adaptive_concurrency.proto +++ /dev/null @@ -1,94 +0,0 @@ -syntax = "proto3"; - -package envoy.config.filter.http.adaptive_concurrency.v2alpha; - -import "envoy/api/v2/core/base.proto"; -import "envoy/type/percent.proto"; - -import "google/protobuf/duration.proto"; -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.filter.http.adaptive_concurrency.v2alpha"; -option java_outer_classname = "AdaptiveConcurrencyProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = - "envoy.extensions.filters.http.adaptive_concurrency.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Adaptive Concurrency] -// Adaptive Concurrency Control :ref:`configuration overview -// `. -// [#extension: envoy.filters.http.adaptive_concurrency] - -// Configuration parameters for the gradient controller. -message GradientControllerConfig { - // Parameters controlling the periodic recalculation of the concurrency limit from sampled request - // latencies. - message ConcurrencyLimitCalculationParams { - // The allowed upper-bound on the calculated concurrency limit. Defaults to 1000. 
- google.protobuf.UInt32Value max_concurrency_limit = 2 [(validate.rules).uint32 = {gt: 0}]; - - // The period of time samples are taken to recalculate the concurrency limit. - google.protobuf.Duration concurrency_update_interval = 3 [(validate.rules).duration = { - required: true - gt {} - }]; - } - - // Parameters controlling the periodic minRTT recalculation. - // [#next-free-field: 6] - message MinimumRTTCalculationParams { - // The time interval between recalculating the minimum request round-trip time. - google.protobuf.Duration interval = 1 [(validate.rules).duration = { - required: true - gt {} - }]; - - // The number of requests to aggregate/sample during the minRTT recalculation window before - // updating. Defaults to 50. - google.protobuf.UInt32Value request_count = 2 [(validate.rules).uint32 = {gt: 0}]; - - // Randomized time delta that will be introduced to the start of the minRTT calculation window. - // This is represented as a percentage of the interval duration. Defaults to 15%. - // - // Example: If the interval is 10s and the jitter is 15%, the next window will begin - // somewhere in the range (10s - 11.5s). - type.Percent jitter = 3; - - // The concurrency limit set while measuring the minRTT. Defaults to 3. - google.protobuf.UInt32Value min_concurrency = 4 [(validate.rules).uint32 = {gt: 0}]; - - // Amount added to the measured minRTT to add stability to the concurrency limit during natural - // variability in latency. This is expressed as a percentage of the measured value and can be - // adjusted to allow more or less tolerance to the sampled latency values. - // - // Defaults to 25%. - type.Percent buffer = 5; - } - - // The percentile to use when summarizing aggregated samples. Defaults to p50. 
- type.Percent sample_aggregate_percentile = 1; - - ConcurrencyLimitCalculationParams concurrency_limit_params = 2 - [(validate.rules).message = {required: true}]; - - MinimumRTTCalculationParams min_rtt_calc_params = 3 [(validate.rules).message = {required: true}]; -} - -message AdaptiveConcurrency { - oneof concurrency_controller_config { - option (validate.required) = true; - - // Gradient concurrency control will be used. - GradientControllerConfig gradient_controller_config = 1 - [(validate.rules).message = {required: true}]; - } - - // If set to false, the adaptive concurrency filter will operate as a pass-through filter. If the - // message is unspecified, the filter will be enabled. - api.v2.core.RuntimeFeatureFlag enabled = 2; -} diff --git a/generated_api_shadow/envoy/config/filter/http/aws_lambda/v2alpha/BUILD b/generated_api_shadow/envoy/config/filter/http/aws_lambda/v2alpha/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/config/filter/http/aws_lambda/v2alpha/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/config/filter/http/aws_lambda/v2alpha/aws_lambda.proto b/generated_api_shadow/envoy/config/filter/http/aws_lambda/v2alpha/aws_lambda.proto deleted file mode 100644 index 43823286286a3..0000000000000 --- a/generated_api_shadow/envoy/config/filter/http/aws_lambda/v2alpha/aws_lambda.proto +++ /dev/null @@ -1,50 +0,0 @@ -syntax = "proto3"; - -package envoy.config.filter.http.aws_lambda.v2alpha; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.filter.http.aws_lambda.v2alpha"; -option java_outer_classname = "AwsLambdaProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = - "envoy.extensions.filters.http.aws_lambda.v3"; -option (udpa.annotations.file_status).work_in_progress = true; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: AWS Lambda] -// AWS Lambda :ref:`configuration overview `. -// [#extension: envoy.filters.http.aws_lambda] - -// AWS Lambda filter config -message Config { - enum InvocationMode { - // This is the more common mode of invocation, in which Lambda responds after it has completed the function. In - // this mode the output of the Lambda function becomes the response of the HTTP request. - SYNCHRONOUS = 0; - - // In this mode Lambda responds immediately but continues to process the function asynchronously. This mode can be - // used to signal events for example. In this mode, Lambda responds with an acknowledgment that it received the - // call which is translated to an HTTP 200 OK by the filter. 
- ASYNCHRONOUS = 1; - } - - // The ARN of the AWS Lambda to invoke when the filter is engaged - // Must be in the following format: - // arn::lambda:::function: - string arn = 1 [(validate.rules).string = {min_len: 1}]; - - // Whether to transform the request (headers and body) to a JSON payload or pass it as is. - bool payload_passthrough = 2; - - // Determines the way to invoke the Lambda function. - InvocationMode invocation_mode = 3 [(validate.rules).enum = {defined_only: true}]; -} - -// Per-route configuration for AWS Lambda. This can be useful when invoking a different Lambda function or a different -// version of the same Lambda depending on the route. -message PerRouteConfig { - Config invoke_config = 1; -} diff --git a/generated_api_shadow/envoy/config/filter/http/aws_request_signing/v2alpha/BUILD b/generated_api_shadow/envoy/config/filter/http/aws_request_signing/v2alpha/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/config/filter/http/aws_request_signing/v2alpha/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/config/filter/http/aws_request_signing/v2alpha/aws_request_signing.proto b/generated_api_shadow/envoy/config/filter/http/aws_request_signing/v2alpha/aws_request_signing.proto deleted file mode 100644 index 5ebb92c01dfa8..0000000000000 --- a/generated_api_shadow/envoy/config/filter/http/aws_request_signing/v2alpha/aws_request_signing.proto +++ /dev/null @@ -1,45 +0,0 @@ -syntax = "proto3"; - -package envoy.config.filter.http.aws_request_signing.v2alpha; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.filter.http.aws_request_signing.v2alpha"; -option java_outer_classname = "AwsRequestSigningProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = - "envoy.extensions.filters.http.aws_request_signing.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: AwsRequestSigning] -// AwsRequestSigning :ref:`configuration overview `. -// [#extension: envoy.filters.http.aws_request_signing] - -// Top level configuration for the AWS request signing filter. -message AwsRequestSigning { - // The `service namespace - // `_ - // of the HTTP endpoint. - // - // Example: s3 - string service_name = 1 [(validate.rules).string = {min_bytes: 1}]; - - // The `region `_ hosting the HTTP - // endpoint. - // - // Example: us-west-2 - string region = 2 [(validate.rules).string = {min_bytes: 1}]; - - // Indicates that before signing headers, the host header will be swapped with - // this value. If not set or empty, the original host header value - // will be used and no rewrite will happen. 
- // - // Note: this rewrite affects both signing and host header forwarding. However, this - // option shouldn't be used with - // :ref:`HCM host rewrite ` given that the - // value set here would be used for signing whereas the value set in the HCM would be used - // for host header forwarding which is not the desired outcome. - string host_rewrite = 3; -} diff --git a/generated_api_shadow/envoy/config/filter/http/buffer/v2/BUILD b/generated_api_shadow/envoy/config/filter/http/buffer/v2/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/config/filter/http/buffer/v2/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/config/filter/http/buffer/v2/buffer.proto b/generated_api_shadow/envoy/config/filter/http/buffer/v2/buffer.proto deleted file mode 100644 index 56961d22fe092..0000000000000 --- a/generated_api_shadow/envoy/config/filter/http/buffer/v2/buffer.proto +++ /dev/null @@ -1,40 +0,0 @@ -syntax = "proto3"; - -package envoy.config.filter.http.buffer.v2; - -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.filter.http.buffer.v2"; -option java_outer_classname = "BufferProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.http.buffer.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Buffer] -// Buffer :ref:`configuration overview `. 
-// [#extension: envoy.filters.http.buffer] - -message Buffer { - reserved 2; - - // The maximum request size that the filter will buffer before the connection - // manager will stop buffering and return a 413 response. - google.protobuf.UInt32Value max_request_bytes = 1 - [(validate.rules).uint32 = {gt: 0}, (validate.rules).message = {required: true}]; -} - -message BufferPerRoute { - oneof override { - option (validate.required) = true; - - // Disable the buffer filter for this particular vhost or route. - bool disabled = 1 [(validate.rules).bool = {const: true}]; - - // Override the global configuration of the filter with this new config. - Buffer buffer = 2 [(validate.rules).message = {required: true}]; - } -} diff --git a/generated_api_shadow/envoy/config/filter/http/cache/v2alpha/BUILD b/generated_api_shadow/envoy/config/filter/http/cache/v2alpha/BUILD deleted file mode 100644 index 5cbf4e821fc81..0000000000000 --- a/generated_api_shadow/envoy/config/filter/http/cache/v2alpha/BUILD +++ /dev/null @@ -1,13 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/api/v2/route:pkg", - "//envoy/type/matcher:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/config/filter/http/cache/v2alpha/cache.proto b/generated_api_shadow/envoy/config/filter/http/cache/v2alpha/cache.proto deleted file mode 100644 index 98035c05d45a6..0000000000000 --- a/generated_api_shadow/envoy/config/filter/http/cache/v2alpha/cache.proto +++ /dev/null @@ -1,77 +0,0 @@ -syntax = "proto3"; - -package envoy.config.filter.http.cache.v2alpha; - -import "envoy/api/v2/route/route_components.proto"; -import "envoy/type/matcher/string.proto"; - -import "google/protobuf/any.proto"; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.filter.http.cache.v2alpha"; -option java_outer_classname = "CacheProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = - "envoy.extensions.filters.http.cache.v3alpha"; -option (udpa.annotations.file_status).work_in_progress = true; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: HTTP Cache Filter] -// [#extension: envoy.filters.http.cache] - -message CacheConfig { - // [#not-implemented-hide:] - // Modifies cache key creation by restricting which parts of the URL are included. - message KeyCreatorParams { - // If true, exclude the URL scheme from the cache key. Set to true if your origins always - // produce the same response for http and https requests. - bool exclude_scheme = 1; - - // If true, exclude the host from the cache key. Set to true if your origins' responses don't - // ever depend on host. 
- bool exclude_host = 2; - - // If *query_parameters_included* is nonempty, only query parameters matched - // by one or more of its matchers are included in the cache key. Any other - // query params will not affect cache lookup. - repeated api.v2.route.QueryParameterMatcher query_parameters_included = 3; - - // If *query_parameters_excluded* is nonempty, query parameters matched by one - // or more of its matchers are excluded from the cache key (even if also - // matched by *query_parameters_included*), and will not affect cache lookup. - repeated api.v2.route.QueryParameterMatcher query_parameters_excluded = 4; - } - - // Config specific to the cache storage implementation. - google.protobuf.Any typed_config = 1 [(validate.rules).any = {required: true}]; - - // List of matching rules that defines allowed *Vary* headers. - // - // The *vary* response header holds a list of header names that affect the - // contents of a response, as described by - // https://httpwg.org/specs/rfc7234.html#caching.negotiated.responses. - // - // During insertion, *allowed_vary_headers* acts as a allowlist: if a - // response's *vary* header mentions any header names that aren't matched by any rules in - // *allowed_vary_headers*, that response will not be cached. - // - // During lookup, *allowed_vary_headers* controls what request headers will be - // sent to the cache storage implementation. - repeated type.matcher.StringMatcher allowed_vary_headers = 2; - - // [#not-implemented-hide:] - // - // - // Modifies cache key creation by restricting which parts of the URL are included. - KeyCreatorParams key_creator_params = 3; - - // [#not-implemented-hide:] - // - // - // Max body size the cache filter will insert into a cache. 0 means unlimited (though the cache - // storage implementation may have its own limit beyond which it will reject insertions). 
- uint32 max_body_bytes = 4; -} diff --git a/generated_api_shadow/envoy/config/filter/http/compressor/v2/BUILD b/generated_api_shadow/envoy/config/filter/http/compressor/v2/BUILD deleted file mode 100644 index 83bc0ab960e74..0000000000000 --- a/generated_api_shadow/envoy/config/filter/http/compressor/v2/BUILD +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/api/v2/core:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/config/filter/http/compressor/v2/compressor.proto b/generated_api_shadow/envoy/config/filter/http/compressor/v2/compressor.proto deleted file mode 100644 index d62d0d7a42fab..0000000000000 --- a/generated_api_shadow/envoy/config/filter/http/compressor/v2/compressor.proto +++ /dev/null @@ -1,48 +0,0 @@ -syntax = "proto3"; - -package envoy.config.filter.http.compressor.v2; - -import "envoy/api/v2/core/base.proto"; - -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.config.filter.http.compressor.v2"; -option java_outer_classname = "CompressorProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = - "envoy.extensions.filters.http.compressor.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Compressor] - -// [#next-free-field: 6] -message Compressor { - // Minimum response length, in bytes, which will trigger compression. The default value is 30. - google.protobuf.UInt32Value content_length = 1; - - // Set of strings that allows specifying which mime-types yield compression; e.g., - // application/json, text/html, etc. 
When this field is not defined, compression will be applied - // to the following mime-types: "application/javascript", "application/json", - // "application/xhtml+xml", "image/svg+xml", "text/css", "text/html", "text/plain", "text/xml" - // and their synonyms. - repeated string content_type = 2; - - // If true, disables compression when the response contains an etag header. When it is false, the - // filter will preserve weak etags and remove the ones that require strong validation. - bool disable_on_etag_header = 3; - - // If true, removes accept-encoding from the request headers before dispatching it to the upstream - // so that responses do not get compressed before reaching the filter. - // .. attention: - // - // To avoid interfering with other compression filters in the same chain use this option in - // the filter closest to the upstream. - bool remove_accept_encoding_header = 4; - - // Runtime flag that controls whether the filter is enabled or not. If set to false, the - // filter will operate as a pass-through filter. If not specified, defaults to enabled. - api.v2.core.RuntimeFeatureFlag runtime_enabled = 5; -} diff --git a/generated_api_shadow/envoy/config/filter/http/cors/v2/BUILD b/generated_api_shadow/envoy/config/filter/http/cors/v2/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/config/filter/http/cors/v2/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/config/filter/http/cors/v2/cors.proto b/generated_api_shadow/envoy/config/filter/http/cors/v2/cors.proto deleted file mode 100644 index 9060a9c38fda1..0000000000000 --- a/generated_api_shadow/envoy/config/filter/http/cors/v2/cors.proto +++ /dev/null @@ -1,20 +0,0 @@ -syntax = "proto3"; - -package envoy.config.filter.http.cors.v2; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.config.filter.http.cors.v2"; -option java_outer_classname = "CorsProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.http.cors.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Cors] -// CORS Filter :ref:`configuration overview `. -// [#extension: envoy.filters.http.cors] - -// Cors filter config. -message Cors { -} diff --git a/generated_api_shadow/envoy/config/filter/http/csrf/v2/BUILD b/generated_api_shadow/envoy/config/filter/http/csrf/v2/BUILD deleted file mode 100644 index aaab1df155473..0000000000000 --- a/generated_api_shadow/envoy/config/filter/http/csrf/v2/BUILD +++ /dev/null @@ -1,13 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/api/v2/core:pkg", - "//envoy/type/matcher:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/config/filter/http/csrf/v2/csrf.proto b/generated_api_shadow/envoy/config/filter/http/csrf/v2/csrf.proto deleted file mode 100644 index 3c2c9110e9fe0..0000000000000 --- a/generated_api_shadow/envoy/config/filter/http/csrf/v2/csrf.proto +++ /dev/null @@ -1,52 +0,0 @@ -syntax = "proto3"; - -package envoy.config.filter.http.csrf.v2; - -import "envoy/api/v2/core/base.proto"; -import "envoy/type/matcher/string.proto"; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.filter.http.csrf.v2"; -option java_outer_classname = "CsrfProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.http.csrf.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: CSRF] -// Cross-Site Request Forgery :ref:`configuration overview `. -// [#extension: envoy.filters.http.csrf] - -// CSRF filter config. -message CsrfPolicy { - // Specifies the % of requests for which the CSRF filter is enabled. - // - // If :ref:`runtime_key ` is specified, - // Envoy will lookup the runtime key to get the percentage of requests to filter. - // - // .. note:: - // - // This field defaults to 100/:ref:`HUNDRED - // `. - api.v2.core.RuntimeFractionalPercent filter_enabled = 1 - [(validate.rules).message = {required: true}]; - - // Specifies that CSRF policies will be evaluated and tracked, but not enforced. - // - // This is intended to be used when ``filter_enabled`` is off and will be ignored otherwise. 
- // - // If :ref:`runtime_key ` is specified, - // Envoy will lookup the runtime key to get the percentage of requests for which it will evaluate - // and track the request's *Origin* and *Destination* to determine if it's valid, but will not - // enforce any policies. - api.v2.core.RuntimeFractionalPercent shadow_enabled = 2; - - // Specifies additional source origins that will be allowed in addition to - // the destination origin. - // - // More information on how this can be configured via runtime can be found - // :ref:`here `. - repeated type.matcher.StringMatcher additional_origins = 3; -} diff --git a/generated_api_shadow/envoy/config/filter/http/dynamic_forward_proxy/v2alpha/BUILD b/generated_api_shadow/envoy/config/filter/http/dynamic_forward_proxy/v2alpha/BUILD deleted file mode 100644 index 25c228fd56093..0000000000000 --- a/generated_api_shadow/envoy/config/filter/http/dynamic_forward_proxy/v2alpha/BUILD +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/common/dynamic_forward_proxy/v2alpha:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/config/filter/http/dynamic_forward_proxy/v2alpha/dynamic_forward_proxy.proto b/generated_api_shadow/envoy/config/filter/http/dynamic_forward_proxy/v2alpha/dynamic_forward_proxy.proto deleted file mode 100644 index 436bb6bf46160..0000000000000 --- a/generated_api_shadow/envoy/config/filter/http/dynamic_forward_proxy/v2alpha/dynamic_forward_proxy.proto +++ /dev/null @@ -1,61 +0,0 @@ -syntax = "proto3"; - -package envoy.config.filter.http.dynamic_forward_proxy.v2alpha; - -import "envoy/config/common/dynamic_forward_proxy/v2alpha/dns_cache.proto"; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.filter.http.dynamic_forward_proxy.v2alpha"; -option java_outer_classname = "DynamicForwardProxyProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = - "envoy.extensions.filters.http.dynamic_forward_proxy.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Dynamic forward proxy] - -// Configuration for the dynamic forward proxy HTTP filter. See the :ref:`architecture overview -// ` for more information. -// [#extension: envoy.filters.http.dynamic_forward_proxy] -message FilterConfig { - // The DNS cache configuration that the filter will attach to. Note this configuration must - // match that of associated :ref:`dynamic forward proxy cluster configuration - // `. - common.dynamic_forward_proxy.v2alpha.DnsCacheConfig dns_cache_config = 1 - [(validate.rules).message = {required: true}]; -} - -// Per route Configuration for the dynamic forward proxy HTTP filter. 
-message PerRouteConfig { - oneof host_rewrite_specifier { - // Indicates that before DNS lookup, the host header will be swapped with - // this value. If not set or empty, the original host header value - // will be used and no rewrite will happen. - // - // Note: this rewrite affects both DNS lookup and host header forwarding. However, this - // option shouldn't be used with - // :ref:`HCM host rewrite ` given that the - // value set here would be used for DNS lookups whereas the value set in the HCM would be used - // for host header forwarding which is not the desired outcome. - string host_rewrite = 1 [(udpa.annotations.field_migrate).rename = "host_rewrite_literal"]; - - // Indicates that before DNS lookup, the host header will be swapped with - // the value of this header. If not set or empty, the original host header - // value will be used and no rewrite will happen. - // - // Note: this rewrite affects both DNS lookup and host header forwarding. However, this - // option shouldn't be used with - // :ref:`HCM host rewrite header ` - // given that the value set here would be used for DNS lookups whereas the value set in the HCM - // would be used for host header forwarding which is not the desired outcome. - // - // .. note:: - // - // If the header appears multiple times only the first value is used. - string auto_host_rewrite_header = 2 - [(udpa.annotations.field_migrate).rename = "host_rewrite_header"]; - } -} diff --git a/generated_api_shadow/envoy/config/filter/http/dynamo/v2/BUILD b/generated_api_shadow/envoy/config/filter/http/dynamo/v2/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/config/filter/http/dynamo/v2/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/config/filter/http/dynamo/v2/dynamo.proto b/generated_api_shadow/envoy/config/filter/http/dynamo/v2/dynamo.proto deleted file mode 100644 index 011d22f768c8c..0000000000000 --- a/generated_api_shadow/envoy/config/filter/http/dynamo/v2/dynamo.proto +++ /dev/null @@ -1,20 +0,0 @@ -syntax = "proto3"; - -package envoy.config.filter.http.dynamo.v2; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.config.filter.http.dynamo.v2"; -option java_outer_classname = "DynamoProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.http.dynamo.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Dynamo] -// Dynamo :ref:`configuration overview `. -// [#extension: envoy.filters.http.dynamo] - -// Dynamo filter config. -message Dynamo { -} diff --git a/generated_api_shadow/envoy/config/filter/http/ext_authz/v2/BUILD b/generated_api_shadow/envoy/config/filter/http/ext_authz/v2/BUILD deleted file mode 100644 index 74e703c963cb6..0000000000000 --- a/generated_api_shadow/envoy/config/filter/http/ext_authz/v2/BUILD +++ /dev/null @@ -1,15 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/annotations:pkg", - "//envoy/api/v2/core:pkg", - "//envoy/type:pkg", - "//envoy/type/matcher:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/config/filter/http/ext_authz/v2/ext_authz.proto b/generated_api_shadow/envoy/config/filter/http/ext_authz/v2/ext_authz.proto deleted file mode 100644 index b9a807d82edb2..0000000000000 --- a/generated_api_shadow/envoy/config/filter/http/ext_authz/v2/ext_authz.proto +++ /dev/null @@ -1,234 +0,0 @@ -syntax = "proto3"; - -package envoy.config.filter.http.ext_authz.v2; - -import "envoy/api/v2/core/base.proto"; -import "envoy/api/v2/core/grpc_service.proto"; -import "envoy/api/v2/core/http_uri.proto"; -import "envoy/type/http_status.proto"; -import "envoy/type/matcher/string.proto"; - -import "envoy/annotations/deprecation.proto"; -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.filter.http.ext_authz.v2"; -option java_outer_classname = "ExtAuthzProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = - "envoy.extensions.filters.http.ext_authz.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: External Authorization] -// External Authorization :ref:`configuration overview `. -// [#extension: envoy.filters.http.ext_authz] - -// [#next-free-field: 12] -message ExtAuthz { - // External authorization service configuration. - oneof services { - // gRPC service configuration (default timeout: 200ms). - api.v2.core.GrpcService grpc_service = 1; - - // HTTP service configuration (default timeout: 200ms). - HttpService http_service = 3; - } - - // Changes filter's behaviour on errors: - // - // 1. 
When set to true, the filter will *accept* client request even if the communication with - // the authorization service has failed, or if the authorization service has returned a HTTP 5xx - // error. - // - // 2. When set to false, ext-authz will *reject* client requests and return a *Forbidden* - // response if the communication with the authorization service has failed, or if the - // authorization service has returned a HTTP 5xx error. - // - // Note that errors can be *always* tracked in the :ref:`stats - // `. - bool failure_mode_allow = 2; - - // [#not-implemented-hide: Support for this field has been removed.] - bool use_alpha = 4 [deprecated = true, (envoy.annotations.disallowed_by_default) = true]; - - // Enables filter to buffer the client request body and send it within the authorization request. - // A ``x-envoy-auth-partial-body: false|true`` metadata header will be added to the authorization - // request message indicating if the body data is partial. - BufferSettings with_request_body = 5; - - // Clears route cache in order to allow the external authorization service to correctly affect - // routing decisions. Filter clears all cached routes when: - // - // 1. The field is set to *true*. - // - // 2. The status returned from the authorization service is a HTTP 200 or gRPC 0. - // - // 3. At least one *authorization response header* is added to the client request, or is used for - // altering another client request header. - // - bool clear_route_cache = 6; - - // Sets the HTTP status that is returned to the client when there is a network error between the - // filter and the authorization server. The default status is HTTP 403 Forbidden. - type.HttpStatus status_on_error = 7; - - // Specifies a list of metadata namespaces whose values, if present, will be passed to the - // ext_authz service as an opaque *protobuf::Struct*. 
- // - // For example, if the *jwt_authn* filter is used and :ref:`payload_in_metadata - // ` is set, - // then the following will pass the jwt payload to the authorization server. - // - // .. code-block:: yaml - // - // metadata_context_namespaces: - // - envoy.filters.http.jwt_authn - // - repeated string metadata_context_namespaces = 8; - - // Specifies if the filter is enabled. - // - // If :ref:`runtime_key ` is specified, - // Envoy will lookup the runtime key to get the percentage of requests to filter. - // - // If this field is not specified, the filter will be enabled for all requests. - api.v2.core.RuntimeFractionalPercent filter_enabled = 9; - - // Specifies whether to deny the requests, when the filter is disabled. - // If :ref:`runtime_key ` is specified, - // Envoy will lookup the runtime key to determine whether to deny request for - // filter protected path at filter disabling. If filter is disabled in - // typed_per_filter_config for the path, requests will not be denied. - // - // If this field is not specified, all requests will be allowed when disabled. - api.v2.core.RuntimeFeatureFlag deny_at_disable = 11; - - // Specifies if the peer certificate is sent to the external service. - // - // When this field is true, Envoy will include the peer X.509 certificate, if available, in the - // :ref:`certificate`. - bool include_peer_certificate = 10; -} - -// Configuration for buffering the request data. -message BufferSettings { - // Sets the maximum size of a message body that the filter will hold in memory. Envoy will return - // *HTTP 413* and will *not* initiate the authorization process when buffer reaches the number - // set in this field. Note that this setting will have precedence over :ref:`failure_mode_allow - // `. - uint32 max_request_bytes = 1 [(validate.rules).uint32 = {gt: 0}]; - - // When this field is true, Envoy will buffer the message until *max_request_bytes* is reached. 
- // The authorization request will be dispatched and no 413 HTTP error will be returned by the - // filter. - bool allow_partial_message = 2; -} - -// HttpService is used for raw HTTP communication between the filter and the authorization service. -// When configured, the filter will parse the client request and use these attributes to call the -// authorization server. Depending on the response, the filter may reject or accept the client -// request. Note that in any of these events, metadata can be added, removed or overridden by the -// filter: -// -// *On authorization request*, a list of allowed request headers may be supplied. See -// :ref:`allowed_headers -// ` -// for details. Additional headers metadata may be added to the authorization request. See -// :ref:`headers_to_add -// ` for -// details. -// -// On authorization response status HTTP 200 OK, the filter will allow traffic to the upstream and -// additional headers metadata may be added to the original client request. See -// :ref:`allowed_upstream_headers -// ` -// for details. -// -// On other authorization response statuses, the filter will not allow traffic. Additional headers -// metadata as well as body may be added to the client's response. See :ref:`allowed_client_headers -// ` -// for details. -// [#next-free-field: 9] -message HttpService { - reserved 3, 4, 5, 6; - - // Sets the HTTP server URI which the authorization requests must be sent to. - api.v2.core.HttpUri server_uri = 1; - - // Sets a prefix to the value of authorization request header *Path*. - string path_prefix = 2; - - // Settings used for controlling authorization request metadata. - AuthorizationRequest authorization_request = 7; - - // Settings used for controlling authorization response metadata. - AuthorizationResponse authorization_response = 8; -} - -message AuthorizationRequest { - // Authorization request will include the client request headers that have a correspondent match - // in the :ref:`list `. 
Note that in addition to the - // user's supplied matchers: - // - // 1. *Host*, *Method*, *Path* and *Content-Length* are automatically included to the list. - // - // 2. *Content-Length* will be set to 0 and the request to the authorization service will not have - // a message body. However, the authorization request can include the buffered client request body - // (controlled by :ref:`with_request_body - // ` setting), - // consequently the value of *Content-Length* of the authorization request reflects the size of - // its payload size. - // - type.matcher.ListStringMatcher allowed_headers = 1; - - // Sets a list of headers that will be included to the request to authorization service. Note that - // client request of the same key will be overridden. - repeated api.v2.core.HeaderValue headers_to_add = 2; -} - -message AuthorizationResponse { - // When this :ref:`list ` is set, authorization - // response headers that have a correspondent match will be added to the original client request. - // Note that coexistent headers will be overridden. - type.matcher.ListStringMatcher allowed_upstream_headers = 1; - - // When this :ref:`list `. is set, authorization - // response headers that have a correspondent match will be added to the client's response. Note - // that when this list is *not* set, all the authorization response headers, except *Authority - // (Host)* will be in the response to the client. When a header is included in this list, *Path*, - // *Status*, *Content-Length*, *WWWAuthenticate* and *Location* are automatically added. - type.matcher.ListStringMatcher allowed_client_headers = 2; -} - -// Extra settings on a per virtualhost/route/weighted-cluster level. -message ExtAuthzPerRoute { - oneof override { - option (validate.required) = true; - - // Disable the ext auth filter for this particular vhost or route. - // If disabled is specified in multiple per-filter-configs, the most specific one will be used. 
- bool disabled = 1 [(validate.rules).bool = {const: true}]; - - // Check request settings for this route. - CheckSettings check_settings = 2 [(validate.rules).message = {required: true}]; - } -} - -// Extra settings for the check request. You can use this to provide extra context for the -// external authorization server on specific virtual hosts \ routes. For example, adding a context -// extension on the virtual host level can give the ext-authz server information on what virtual -// host is used without needing to parse the host header. If CheckSettings is specified in multiple -// per-filter-configs, they will be merged in order, and the result will be used. -message CheckSettings { - // Context extensions to set on the CheckRequest's - // :ref:`AttributeContext.context_extensions` - // - // Merge semantics for this field are such that keys from more specific configs override. - // - // .. note:: - // - // These settings are only applied to a filter configured with a - // :ref:`grpc_service`. - map context_extensions = 1; -} diff --git a/generated_api_shadow/envoy/config/filter/http/fault/v2/BUILD b/generated_api_shadow/envoy/config/filter/http/fault/v2/BUILD deleted file mode 100644 index df4feab714ff4..0000000000000 --- a/generated_api_shadow/envoy/config/filter/http/fault/v2/BUILD +++ /dev/null @@ -1,14 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/api/v2/route:pkg", - "//envoy/config/filter/fault/v2:pkg", - "//envoy/type:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/config/filter/http/fault/v2/fault.proto b/generated_api_shadow/envoy/config/filter/http/fault/v2/fault.proto deleted file mode 100644 index cb99b0d71bbdc..0000000000000 --- a/generated_api_shadow/envoy/config/filter/http/fault/v2/fault.proto +++ /dev/null @@ -1,129 +0,0 @@ -syntax = "proto3"; - -package envoy.config.filter.http.fault.v2; - -import "envoy/api/v2/route/route_components.proto"; -import "envoy/config/filter/fault/v2/fault.proto"; -import "envoy/type/percent.proto"; - -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.filter.http.fault.v2"; -option java_outer_classname = "FaultProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.http.fault.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Fault Injection] -// Fault Injection :ref:`configuration overview `. -// [#extension: envoy.filters.http.fault] - -message FaultAbort { - // Fault aborts are controlled via an HTTP header (if applicable). See the - // :ref:`HTTP fault filter ` documentation for - // more information. - message HeaderAbort { - } - - reserved 1; - - oneof error_type { - option (validate.required) = true; - - // HTTP status code to use to abort the HTTP request. - uint32 http_status = 2 [(validate.rules).uint32 = {lt: 600 gte: 200}]; - - // Fault aborts are controlled via an HTTP header (if applicable). 
- HeaderAbort header_abort = 4; - } - - // The percentage of requests/operations/connections that will be aborted with the error code - // provided. - type.FractionalPercent percentage = 3; -} - -// [#next-free-field: 14] -message HTTPFault { - // If specified, the filter will inject delays based on the values in the - // object. - filter.fault.v2.FaultDelay delay = 1; - - // If specified, the filter will abort requests based on the values in - // the object. At least *abort* or *delay* must be specified. - FaultAbort abort = 2; - - // Specifies the name of the (destination) upstream cluster that the - // filter should match on. Fault injection will be restricted to requests - // bound to the specific upstream cluster. - string upstream_cluster = 3; - - // Specifies a set of headers that the filter should match on. The fault - // injection filter can be applied selectively to requests that match a set of - // headers specified in the fault filter config. The chances of actual fault - // injection further depend on the value of the :ref:`percentage - // ` field. - // The filter will check the request's headers against all the specified - // headers in the filter config. A match will happen if all the headers in the - // config are present in the request with the same values (or based on - // presence if the *value* field is not in the config). - repeated api.v2.route.HeaderMatcher headers = 4; - - // Faults are injected for the specified list of downstream hosts. If this - // setting is not set, faults are injected for all downstream nodes. - // Downstream node name is taken from :ref:`the HTTP - // x-envoy-downstream-service-node - // ` header and compared - // against downstream_nodes list. - repeated string downstream_nodes = 5; - - // The maximum number of faults that can be active at a single time via the configured fault - // filter. 
Note that because this setting can be overridden at the route level, it's possible - // for the number of active faults to be greater than this value (if injected via a different - // route). If not specified, defaults to unlimited. This setting can be overridden via - // `runtime ` and any faults that are not injected - // due to overflow will be indicated via the `faults_overflow - // ` stat. - // - // .. attention:: - // Like other :ref:`circuit breakers ` in Envoy, this is a fuzzy - // limit. It's possible for the number of active faults to rise slightly above the configured - // amount due to the implementation details. - google.protobuf.UInt32Value max_active_faults = 6; - - // The response rate limit to be applied to the response body of the stream. When configured, - // the percentage can be overridden by the :ref:`fault.http.rate_limit.response_percent - // ` runtime key. - // - // .. attention:: - // This is a per-stream limit versus a connection level limit. This means that concurrent streams - // will each get an independent limit. - filter.fault.v2.FaultRateLimit response_rate_limit = 7; - - // The runtime key to override the :ref:`default ` - // runtime. The default is: fault.http.delay.fixed_delay_percent - string delay_percent_runtime = 8; - - // The runtime key to override the :ref:`default ` - // runtime. The default is: fault.http.abort.abort_percent - string abort_percent_runtime = 9; - - // The runtime key to override the :ref:`default ` - // runtime. The default is: fault.http.delay.fixed_duration_ms - string delay_duration_runtime = 10; - - // The runtime key to override the :ref:`default ` - // runtime. The default is: fault.http.abort.http_status - string abort_http_status_runtime = 11; - - // The runtime key to override the :ref:`default ` - // runtime. The default is: fault.http.max_active_faults - string max_active_faults_runtime = 12; - - // The runtime key to override the :ref:`default ` - // runtime. 
The default is: fault.http.rate_limit.response_percent - string response_rate_limit_percent_runtime = 13; -} diff --git a/generated_api_shadow/envoy/config/filter/http/grpc_http1_bridge/v2/BUILD b/generated_api_shadow/envoy/config/filter/http/grpc_http1_bridge/v2/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/config/filter/http/grpc_http1_bridge/v2/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/config/filter/http/grpc_http1_bridge/v2/config.proto b/generated_api_shadow/envoy/config/filter/http/grpc_http1_bridge/v2/config.proto deleted file mode 100644 index b4331dad5031c..0000000000000 --- a/generated_api_shadow/envoy/config/filter/http/grpc_http1_bridge/v2/config.proto +++ /dev/null @@ -1,21 +0,0 @@ -syntax = "proto3"; - -package envoy.config.filter.http.grpc_http1_bridge.v2; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.config.filter.http.grpc_http1_bridge.v2"; -option java_outer_classname = "ConfigProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = - "envoy.extensions.filters.http.grpc_http1_bridge.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: gRPC HTTP/1.1 Bridge] -// gRPC HTTP/1.1 Bridge Filter :ref:`configuration overview `. -// [#extension: envoy.filters.http.grpc_http1_bridge] - -// gRPC HTTP/1.1 Bridge filter config. 
-message Config { -} diff --git a/generated_api_shadow/envoy/config/filter/http/grpc_http1_reverse_bridge/v2alpha1/BUILD b/generated_api_shadow/envoy/config/filter/http/grpc_http1_reverse_bridge/v2alpha1/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/config/filter/http/grpc_http1_reverse_bridge/v2alpha1/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/config/filter/http/grpc_http1_reverse_bridge/v2alpha1/config.proto b/generated_api_shadow/envoy/config/filter/http/grpc_http1_reverse_bridge/v2alpha1/config.proto deleted file mode 100644 index 8b916d327e194..0000000000000 --- a/generated_api_shadow/envoy/config/filter/http/grpc_http1_reverse_bridge/v2alpha1/config.proto +++ /dev/null @@ -1,39 +0,0 @@ -syntax = "proto3"; - -package envoy.config.filter.http.grpc_http1_reverse_bridge.v2alpha1; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.filter.http.grpc_http1_reverse_bridge.v2alpha1"; -option java_outer_classname = "ConfigProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = - "envoy.extensions.filters.http.grpc_http1_reverse_bridge.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: gRPC HTTP/1.1 Reverse Bridge] -// gRPC HTTP/1.1 Reverse Bridge :ref:`configuration overview -// `. -// [#extension: envoy.filters.http.grpc_http1_reverse_bridge] - -// gRPC reverse bridge filter configuration -message FilterConfig { - // The content-type to pass to the upstream when the gRPC bridge filter is applied. 
- // The filter will also validate that the upstream responds with the same content type. - string content_type = 1 [(validate.rules).string = {min_bytes: 1}]; - - // If true, Envoy will assume that the upstream doesn't understand gRPC frames and - // strip the gRPC frame from the request, and add it back in to the response. This will - // hide the gRPC semantics from the upstream, allowing it to receive and respond with a - // simple binary encoded protobuf. - bool withhold_grpc_frames = 2; -} - -// gRPC reverse bridge filter configuration per virtualhost/route/weighted-cluster level. -message FilterConfigPerRoute { - // If true, disables gRPC reverse bridge filter for this particular vhost or route. - // If disabled is specified in multiple per-filter-configs, the most specific one will be used. - bool disabled = 1; -} diff --git a/generated_api_shadow/envoy/config/filter/http/grpc_stats/v2alpha/BUILD b/generated_api_shadow/envoy/config/filter/http/grpc_stats/v2alpha/BUILD deleted file mode 100644 index 83bc0ab960e74..0000000000000 --- a/generated_api_shadow/envoy/config/filter/http/grpc_stats/v2alpha/BUILD +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/api/v2/core:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/config/filter/http/grpc_stats/v2alpha/config.proto b/generated_api_shadow/envoy/config/filter/http/grpc_stats/v2alpha/config.proto deleted file mode 100644 index fea48e6bb64f9..0000000000000 --- a/generated_api_shadow/envoy/config/filter/http/grpc_stats/v2alpha/config.proto +++ /dev/null @@ -1,62 +0,0 @@ -syntax = "proto3"; - -package envoy.config.filter.http.grpc_stats.v2alpha; - -import "envoy/api/v2/core/grpc_method_list.proto"; - -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.config.filter.http.grpc_stats.v2alpha"; -option java_outer_classname = "ConfigProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = - "envoy.extensions.filters.http.grpc_stats.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: gRPC statistics] gRPC statistics filter -// :ref:`configuration overview `. -// [#extension: envoy.filters.http.grpc_stats] - -// gRPC statistics filter configuration -message FilterConfig { - // If true, the filter maintains a filter state object with the request and response message - // counts. - bool emit_filter_state = 1; - - oneof per_method_stat_specifier { - // If set, specifies an allowlist of service/methods that will have individual stats - // emitted for them. Any call that does not match the allowlist will be counted - // in a stat with no method specifier: `cluster..grpc.*`. - api.v2.core.GrpcMethodList individual_method_stats_allowlist = 2; - - // If set to true, emit stats for all service/method names. 
- // - // If set to false, emit stats for all service/message types to the same stats without including - // the service/method in the name, with prefix `cluster..grpc`. This can be useful if - // service/method granularity is not needed, or if each cluster only receives a single method. - // - // .. attention:: - // This option is only safe if all clients are trusted. If this option is enabled - // with untrusted clients, the clients could cause unbounded growth in the number of stats in - // Envoy, using unbounded memory and potentially slowing down stats pipelines. - // - // .. attention:: - // If neither `individual_method_stats_allowlist` nor `stats_for_all_methods` is set, the - // behavior will default to `stats_for_all_methods=false`. This default value is changed due - // to the previous value being deprecated. This behavior can be changed with runtime override - // `envoy.deprecated_features.grpc_stats_filter_enable_stats_for_all_methods_by_default`. - google.protobuf.BoolValue stats_for_all_methods = 3; - } -} - -// gRPC statistics filter state object in protobuf form. -message FilterObject { - // Count of request messages in the request stream. - uint64 request_message_count = 1; - - // Count of response messages in the response stream. - uint64 response_message_count = 2; -} diff --git a/generated_api_shadow/envoy/config/filter/http/grpc_web/v2/BUILD b/generated_api_shadow/envoy/config/filter/http/grpc_web/v2/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/config/filter/http/grpc_web/v2/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/config/filter/http/grpc_web/v2/grpc_web.proto b/generated_api_shadow/envoy/config/filter/http/grpc_web/v2/grpc_web.proto deleted file mode 100644 index be23b4d87b585..0000000000000 --- a/generated_api_shadow/envoy/config/filter/http/grpc_web/v2/grpc_web.proto +++ /dev/null @@ -1,21 +0,0 @@ -syntax = "proto3"; - -package envoy.config.filter.http.grpc_web.v2; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.config.filter.http.grpc_web.v2"; -option java_outer_classname = "GrpcWebProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = - "envoy.extensions.filters.http.grpc_web.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: gRPC Web] -// gRPC Web :ref:`configuration overview `. -// [#extension: envoy.filters.http.grpc_web] - -// gRPC Web filter config. -message GrpcWeb { -} diff --git a/generated_api_shadow/envoy/config/filter/http/gzip/v2/BUILD b/generated_api_shadow/envoy/config/filter/http/gzip/v2/BUILD deleted file mode 100644 index 9cb0d12934218..0000000000000 --- a/generated_api_shadow/envoy/config/filter/http/gzip/v2/BUILD +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/filter/http/compressor/v2:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/config/filter/http/gzip/v2/gzip.proto b/generated_api_shadow/envoy/config/filter/http/gzip/v2/gzip.proto deleted file mode 100644 index 0c134c6208b15..0000000000000 --- a/generated_api_shadow/envoy/config/filter/http/gzip/v2/gzip.proto +++ /dev/null @@ -1,96 +0,0 @@ -syntax = "proto3"; - -package envoy.config.filter.http.gzip.v2; - -import "envoy/config/filter/http/compressor/v2/compressor.proto"; - -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.filter.http.gzip.v2"; -option java_outer_classname = "GzipProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.http.gzip.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Gzip] - -// [#next-free-field: 11] -message Gzip { - enum CompressionStrategy { - DEFAULT = 0; - FILTERED = 1; - HUFFMAN = 2; - RLE = 3; - } - - message CompressionLevel { - enum Enum { - DEFAULT = 0; - BEST = 1; - SPEED = 2; - } - } - - // Value from 1 to 9 that controls the amount of internal memory used by zlib. Higher values - // use more memory, but are faster and produce better compression results. The default value is 5. - google.protobuf.UInt32Value memory_level = 1 [(validate.rules).uint32 = {lte: 9 gte: 1}]; - - // Minimum response length, in bytes, which will trigger compression. The default value is 30. - // .. attention: - // - // **This field is deprecated**. Set the `compressor` field instead. 
- google.protobuf.UInt32Value content_length = 2 [deprecated = true]; - - // A value used for selecting the zlib compression level. This setting will affect speed and - // amount of compression applied to the content. "BEST" provides higher compression at the cost of - // higher latency, "SPEED" provides lower compression with minimum impact on response time. - // "DEFAULT" provides an optimal result between speed and compression. This field will be set to - // "DEFAULT" if not specified. - CompressionLevel.Enum compression_level = 3 [(validate.rules).enum = {defined_only: true}]; - - // A value used for selecting the zlib compression strategy which is directly related to the - // characteristics of the content. Most of the time "DEFAULT" will be the best choice, though - // there are situations which changing this parameter might produce better results. For example, - // run-length encoding (RLE) is typically used when the content is known for having sequences - // which same data occurs many consecutive times. For more information about each strategy, please - // refer to zlib manual. - CompressionStrategy compression_strategy = 4 [(validate.rules).enum = {defined_only: true}]; - - // Set of strings that allows specifying which mime-types yield compression; e.g., - // application/json, text/html, etc. When this field is not defined, compression will be applied - // to the following mime-types: "application/javascript", "application/json", - // "application/xhtml+xml", "image/svg+xml", "text/css", "text/html", "text/plain", "text/xml". - // .. attention: - // - // **This field is deprecated**. Set the `compressor` field instead. - repeated string content_type = 6 [deprecated = true]; - - // If true, disables compression when the response contains an etag header. When it is false, the - // filter will preserve weak etags and remove the ones that require strong validation. - // .. attention: - // - // **This field is deprecated**. Set the `compressor` field instead. 
- bool disable_on_etag_header = 7 [deprecated = true]; - - // If true, removes accept-encoding from the request headers before dispatching it to the upstream - // so that responses do not get compressed before reaching the filter. - // .. attention: - // - // **This field is deprecated**. Set the `compressor` field instead. - bool remove_accept_encoding_header = 8 [deprecated = true]; - - // Value from 9 to 15 that represents the base two logarithmic of the compressor's window size. - // Larger window results in better compression at the expense of memory usage. The default is 12 - // which will produce a 4096 bytes window. For more details about this parameter, please refer to - // zlib manual > deflateInit2. - google.protobuf.UInt32Value window_bits = 9 [(validate.rules).uint32 = {lte: 15 gte: 9}]; - - // Set of configuration parameters common for all compression filters. If this field is set then - // the fields `content_length`, `content_type`, `disable_on_etag_header` and - // `remove_accept_encoding_header` are ignored. - compressor.v2.Compressor compressor = 10; -} diff --git a/generated_api_shadow/envoy/config/filter/http/header_to_metadata/v2/BUILD b/generated_api_shadow/envoy/config/filter/http/header_to_metadata/v2/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/config/filter/http/header_to_metadata/v2/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/config/filter/http/header_to_metadata/v2/header_to_metadata.proto b/generated_api_shadow/envoy/config/filter/http/header_to_metadata/v2/header_to_metadata.proto deleted file mode 100644 index 30de69d98b1c3..0000000000000 --- a/generated_api_shadow/envoy/config/filter/http/header_to_metadata/v2/header_to_metadata.proto +++ /dev/null @@ -1,100 +0,0 @@ -syntax = "proto3"; - -package envoy.config.filter.http.header_to_metadata.v2; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.filter.http.header_to_metadata.v2"; -option java_outer_classname = "HeaderToMetadataProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = - "envoy.extensions.filters.http.header_to_metadata.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Header-To-Metadata Filter] -// -// The configuration for transforming headers into metadata. This is useful -// for matching load balancer subsets, logging, etc. -// -// Header to Metadata :ref:`configuration overview `. -// [#extension: envoy.filters.http.header_to_metadata] - -message Config { - enum ValueType { - STRING = 0; - - NUMBER = 1; - - // The value is a serialized `protobuf.Value - // `_. - PROTOBUF_VALUE = 2; - } - - // ValueEncode defines the encoding algorithm. - enum ValueEncode { - // The value is not encoded. - NONE = 0; - - // The value is encoded in `Base64 `_. - // Note: this is mostly used for STRING and PROTOBUF_VALUE to escape the - // non-ASCII characters in the header. 
- BASE64 = 1; - } - - // [#next-free-field: 6] - message KeyValuePair { - // The namespace — if this is empty, the filter's namespace will be used. - string metadata_namespace = 1; - - // The key to use within the namespace. - string key = 2 [(validate.rules).string = {min_bytes: 1}]; - - // The value to pair with the given key. - // - // When used for a `on_header_present` case, if value is non-empty it'll be used - // instead of the header value. If both are empty, no metadata is added. - // - // When used for a `on_header_missing` case, a non-empty value must be provided - // otherwise no metadata is added. - string value = 3; - - // The value's type — defaults to string. - ValueType type = 4; - - // How is the value encoded, default is NONE (not encoded). - // The value will be decoded accordingly before storing to metadata. - ValueEncode encode = 5; - } - - // A Rule defines what metadata to apply when a header is present or missing. - message Rule { - // The header that triggers this rule — required. - string header = 1 - [(validate.rules).string = {min_bytes: 1 well_known_regex: HTTP_HEADER_NAME strict: false}]; - - // If the header is present, apply this metadata KeyValuePair. - // - // If the value in the KeyValuePair is non-empty, it'll be used instead - // of the header value. - KeyValuePair on_header_present = 2; - - // If the header is not present, apply this metadata KeyValuePair. - // - // The value in the KeyValuePair must be set, since it'll be used in lieu - // of the missing header value. - KeyValuePair on_header_missing = 3; - - // Whether or not to remove the header after a rule is applied. - // - // This prevents headers from leaking. - bool remove = 4; - } - - // The list of rules to apply to requests. - repeated Rule request_rules = 1; - - // The list of rules to apply to responses. 
- repeated Rule response_rules = 2; -} diff --git a/generated_api_shadow/envoy/config/filter/http/health_check/v2/BUILD b/generated_api_shadow/envoy/config/filter/http/health_check/v2/BUILD deleted file mode 100644 index 22fc8fd458e61..0000000000000 --- a/generated_api_shadow/envoy/config/filter/http/health_check/v2/BUILD +++ /dev/null @@ -1,13 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/api/v2/route:pkg", - "//envoy/type:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/config/filter/http/health_check/v2/health_check.proto b/generated_api_shadow/envoy/config/filter/http/health_check/v2/health_check.proto deleted file mode 100644 index 7f2a486b26188..0000000000000 --- a/generated_api_shadow/envoy/config/filter/http/health_check/v2/health_check.proto +++ /dev/null @@ -1,51 +0,0 @@ -syntax = "proto3"; - -package envoy.config.filter.http.health_check.v2; - -import "envoy/api/v2/route/route_components.proto"; -import "envoy/type/percent.proto"; - -import "google/protobuf/duration.proto"; -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.filter.http.health_check.v2"; -option java_outer_classname = "HealthCheckProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = - "envoy.extensions.filters.http.health_check.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Health check] -// Health check :ref:`configuration overview `. 
-// [#extension: envoy.filters.http.health_check] - -// [#next-free-field: 6] -message HealthCheck { - reserved 2; - - // Specifies whether the filter operates in pass through mode or not. - google.protobuf.BoolValue pass_through_mode = 1 [(validate.rules).message = {required: true}]; - - // If operating in pass through mode, the amount of time in milliseconds - // that the filter should cache the upstream response. - google.protobuf.Duration cache_time = 3; - - // If operating in non-pass-through mode, specifies a set of upstream cluster - // names and the minimum percentage of servers in each of those clusters that - // must be healthy or degraded in order for the filter to return a 200. - // - // .. note:: - // - // This value is interpreted as an integer by truncating, so 12.50% will be calculated - // as if it were 12%. - map cluster_min_healthy_percentages = 4; - - // Specifies a set of health check request headers to match on. The health check filter will - // check a request’s headers against all the specified headers. To specify the health check - // endpoint, set the ``:path`` header to match on. - repeated api.v2.route.HeaderMatcher headers = 5; -} diff --git a/generated_api_shadow/envoy/config/filter/http/ip_tagging/v2/BUILD b/generated_api_shadow/envoy/config/filter/http/ip_tagging/v2/BUILD deleted file mode 100644 index 83bc0ab960e74..0000000000000 --- a/generated_api_shadow/envoy/config/filter/http/ip_tagging/v2/BUILD +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/api/v2/core:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/config/filter/http/ip_tagging/v2/ip_tagging.proto b/generated_api_shadow/envoy/config/filter/http/ip_tagging/v2/ip_tagging.proto deleted file mode 100644 index f99b18a12c716..0000000000000 --- a/generated_api_shadow/envoy/config/filter/http/ip_tagging/v2/ip_tagging.proto +++ /dev/null @@ -1,57 +0,0 @@ -syntax = "proto3"; - -package envoy.config.filter.http.ip_tagging.v2; - -import "envoy/api/v2/core/address.proto"; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.filter.http.ip_tagging.v2"; -option java_outer_classname = "IpTaggingProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = - "envoy.extensions.filters.http.ip_tagging.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: IP tagging] -// IP tagging :ref:`configuration overview `. -// [#extension: envoy.filters.http.ip_tagging] - -message IPTagging { - // The type of requests the filter should apply to. The supported types - // are internal, external or both. The - // :ref:`x-forwarded-for` header is - // used to determine if a request is internal and will result in - // :ref:`x-envoy-internal` - // being set. The filter defaults to both, and it will apply to all request types. - enum RequestType { - // Both external and internal requests will be tagged. This is the default value. - BOTH = 0; - - // Only internal requests will be tagged. - INTERNAL = 1; - - // Only external requests will be tagged. - EXTERNAL = 2; - } - - // Supplies the IP tag name and the IP address subnets. 
- message IPTag { - // Specifies the IP tag name to apply. - string ip_tag_name = 1; - - // A list of IP address subnets that will be tagged with - // ip_tag_name. Both IPv4 and IPv6 are supported. - repeated api.v2.core.CidrRange ip_list = 2; - } - - // The type of request the filter should apply to. - RequestType request_type = 1 [(validate.rules).enum = {defined_only: true}]; - - // [#comment:TODO(ccaraman): Extend functionality to load IP tags from file system. - // Tracked by issue https://github.com/envoyproxy/envoy/issues/2695] - // The set of IP tags for the filter. - repeated IPTag ip_tags = 4 [(validate.rules).repeated = {min_items: 1}]; -} diff --git a/generated_api_shadow/envoy/config/filter/http/jwt_authn/v2alpha/BUILD b/generated_api_shadow/envoy/config/filter/http/jwt_authn/v2alpha/BUILD deleted file mode 100644 index 1e485f4e158ab..0000000000000 --- a/generated_api_shadow/envoy/config/filter/http/jwt_authn/v2alpha/BUILD +++ /dev/null @@ -1,13 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/api/v2/core:pkg", - "//envoy/api/v2/route:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/config/filter/http/jwt_authn/v2alpha/README.md b/generated_api_shadow/envoy/config/filter/http/jwt_authn/v2alpha/README.md deleted file mode 100644 index d253c3f234a88..0000000000000 --- a/generated_api_shadow/envoy/config/filter/http/jwt_authn/v2alpha/README.md +++ /dev/null @@ -1,66 +0,0 @@ -# JWT Authentication HTTP filter config - -## Overview - -1. The proto file in this folder defines an HTTP filter config for "jwt_authn" filter. - -2. This filter will verify the JWT in the HTTP request as: - - The signature should be valid - - JWT should not be expired - - Issuer and audiences are valid and specified in the filter config. - -3. 
[JWK](https://tools.ietf.org/html/rfc7517#appendix-A) is needed to verify JWT signature. It can be fetched from a remote server or read from a local file. If the JWKS is fetched remotely, it will be cached by the filter. - -3. If a JWT is valid, the user is authenticated and the request will be forwarded to the backend server. If a JWT is not valid, the request will be rejected with an error message. - -## The locations to extract JWT - -JWT will be extracted from the HTTP headers or query parameters. The default location is the HTTP header: -``` -Authorization: Bearer -``` -The next default location is in the query parameter as: -``` -?access_token= -``` - -If a custom location is desired, `from_headers` or `from_params` can be used to specify custom locations to extract JWT. - -## HTTP header to pass successfully verified JWT - -If a JWT is valid, its payload will be passed to the backend in a new HTTP header specified in `forward_payload_header` field. Its value is base64url-encoded JWT payload in JSON. - - -## Further header options - -In addition to the `name` field, which specifies the HTTP header name, -the `from_headers` section can specify an optional `value_prefix` value, as in: - -```yaml - from_headers: - - name: bespoke - value_prefix: jwt_value -``` - -The above will cause the jwt_authn filter to look for the JWT in the `bespoke` header, following the tag `jwt_value`. - -Any non-JWT characters (i.e., anything _other than_ alphanumerics, `_`, `-`, and `.`) will be skipped, -and all following, contiguous, JWT-legal chars will be taken as the JWT. - -This means all of the following will return a JWT of `eyJFbnZveSI6ICJyb2NrcyJ9.e30.c2lnbmVk`: - -```text -bespoke: jwt_value=eyJFbnZveSI6ICJyb2NrcyJ9.e30.c2lnbmVk - -bespoke: {"jwt_value": "eyJFbnZveSI6ICJyb2NrcyJ9.e30.c2lnbmVk"} - -bespoke: beta:true,jwt_value:"eyJFbnZveSI6ICJyb2NrcyJ9.e30.c2lnbmVk",trace=1234 -``` - -The header `name` may be `Authorization`. 
- -The `value_prefix` must match exactly, i.e., case-sensitively. -If the `value_prefix` is not found, the header is skipped: not considered as a source for a JWT token. - -If there are no JWT-legal characters after the `value_prefix`, the entire string after it -is taken to be the JWT token. This is unlikely to succeed; the error will reported by the JWT parser. diff --git a/generated_api_shadow/envoy/config/filter/http/jwt_authn/v2alpha/config.proto b/generated_api_shadow/envoy/config/filter/http/jwt_authn/v2alpha/config.proto deleted file mode 100644 index 07044f92201e9..0000000000000 --- a/generated_api_shadow/envoy/config/filter/http/jwt_authn/v2alpha/config.proto +++ /dev/null @@ -1,500 +0,0 @@ -syntax = "proto3"; - -package envoy.config.filter.http.jwt_authn.v2alpha; - -import "envoy/api/v2/core/base.proto"; -import "envoy/api/v2/core/http_uri.proto"; -import "envoy/api/v2/route/route_components.proto"; - -import "google/protobuf/duration.proto"; -import "google/protobuf/empty.proto"; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.filter.http.jwt_authn.v2alpha"; -option java_outer_classname = "ConfigProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = - "envoy.extensions.filters.http.jwt_authn.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: JWT Authentication] -// JWT Authentication :ref:`configuration overview `. -// [#extension: envoy.filters.http.jwt_authn] - -// Please see following for JWT authentication flow: -// -// * `JSON Web Token (JWT) `_ -// * `The OAuth 2.0 Authorization Framework `_ -// * `OpenID Connect `_ -// -// A JwtProvider message specifies how a JSON Web Token (JWT) can be verified. It specifies: -// -// * issuer: the principal that issues the JWT. It has to match the one from the token. 
-// * allowed audiences: the ones in the token have to be listed here. -// * how to fetch public key JWKS to verify the token signature. -// * how to extract JWT token in the request. -// * how to pass successfully verified token payload. -// -// Example: -// -// .. code-block:: yaml -// -// issuer: https://example.com -// audiences: -// - bookstore_android.apps.googleusercontent.com -// - bookstore_web.apps.googleusercontent.com -// remote_jwks: -// http_uri: -// uri: https://example.com/.well-known/jwks.json -// cluster: example_jwks_cluster -// cache_duration: -// seconds: 300 -// -// [#next-free-field: 10] -message JwtProvider { - // Specify the `principal `_ that issued - // the JWT, usually a URL or an email address. - // - // Example: https://securetoken.google.com - // Example: 1234567-compute@developer.gserviceaccount.com - // - string issuer = 1 [(validate.rules).string = {min_bytes: 1}]; - - // The list of JWT `audiences `_ are - // allowed to access. A JWT containing any of these audiences will be accepted. If not specified, - // will not check audiences in the token. - // - // Example: - // - // .. code-block:: yaml - // - // audiences: - // - bookstore_android.apps.googleusercontent.com - // - bookstore_web.apps.googleusercontent.com - // - repeated string audiences = 2; - - // `JSON Web Key Set (JWKS) `_ is needed to - // validate signature of a JWT. This field specifies where to fetch JWKS. - oneof jwks_source_specifier { - option (validate.required) = true; - - // JWKS can be fetched from remote server via HTTP/HTTPS. This field specifies the remote HTTP - // URI and how the fetched JWKS should be cached. - // - // Example: - // - // .. code-block:: yaml - // - // remote_jwks: - // http_uri: - // uri: https://www.googleapis.com/oauth2/v1/certs - // cluster: jwt.www.googleapis.com|443 - // cache_duration: - // seconds: 300 - // - RemoteJwks remote_jwks = 3; - - // JWKS is in local data source. 
It could be either in a local file or embedded in the - // inline_string. - // - // Example: local file - // - // .. code-block:: yaml - // - // local_jwks: - // filename: /etc/envoy/jwks/jwks1.txt - // - // Example: inline_string - // - // .. code-block:: yaml - // - // local_jwks: - // inline_string: ACADADADADA - // - api.v2.core.DataSource local_jwks = 4; - } - - // If false, the JWT is removed in the request after a success verification. If true, the JWT is - // not removed in the request. Default value is false. - bool forward = 5; - - // Two fields below define where to extract the JWT from an HTTP request. - // - // If no explicit location is specified, the following default locations are tried in order: - // - // 1. The Authorization header using the `Bearer schema - // `_. Example:: - // - // Authorization: Bearer . - // - // 2. `access_token `_ query parameter. - // - // Multiple JWTs can be verified for a request. Each JWT has to be extracted from the locations - // its provider specified or from the default locations. - // - // Specify the HTTP headers to extract JWT token. For examples, following config: - // - // .. code-block:: yaml - // - // from_headers: - // - name: x-goog-iap-jwt-assertion - // - // can be used to extract token from header:: - // - // ``x-goog-iap-jwt-assertion: ``. - // - repeated JwtHeader from_headers = 6; - - // JWT is sent in a query parameter. `jwt_params` represents the query parameter names. - // - // For example, if config is: - // - // .. code-block:: yaml - // - // from_params: - // - jwt_token - // - // The JWT format in query parameter is:: - // - // /path?jwt_token= - // - repeated string from_params = 7; - - // This field specifies the header name to forward a successfully verified JWT payload to the - // backend. The forwarded data is:: - // - // base64url_encoded(jwt_payload_in_JSON) - // - // If it is not specified, the payload will not be forwarded. 
- string forward_payload_header = 8; - - // If non empty, successfully verified JWT payloads will be written to StreamInfo DynamicMetadata - // in the format as: *namespace* is the jwt_authn filter name as **envoy.filters.http.jwt_authn** - // The value is the *protobuf::Struct*. The value of this field will be the key for its *fields* - // and the value is the *protobuf::Struct* converted from JWT JSON payload. - // - // For example, if payload_in_metadata is *my_payload*: - // - // .. code-block:: yaml - // - // envoy.filters.http.jwt_authn: - // my_payload: - // iss: https://example.com - // sub: test@example.com - // aud: https://example.com - // exp: 1501281058 - // - string payload_in_metadata = 9; -} - -// This message specifies how to fetch JWKS from remote and how to cache it. -message RemoteJwks { - // The HTTP URI to fetch the JWKS. For example: - // - // .. code-block:: yaml - // - // http_uri: - // uri: https://www.googleapis.com/oauth2/v1/certs - // cluster: jwt.www.googleapis.com|443 - // - api.v2.core.HttpUri http_uri = 1; - - // Duration after which the cached JWKS should be expired. If not specified, default cache - // duration is 5 minutes. - google.protobuf.Duration cache_duration = 2; -} - -// This message specifies a header location to extract JWT token. -message JwtHeader { - // The HTTP header name. - string name = 1 [(validate.rules).string = {min_bytes: 1}]; - - // The value prefix. The value format is "value_prefix" - // For example, for "Authorization: Bearer ", value_prefix="Bearer " with a space at the - // end. - string value_prefix = 2; -} - -// Specify a required provider with audiences. -message ProviderWithAudiences { - // Specify a required provider name. - string provider_name = 1; - - // This field overrides the one specified in the JwtProvider. - repeated string audiences = 2; -} - -// This message specifies a Jwt requirement. An empty message means JWT verification is not -// required. 
Here are some config examples: -// -// .. code-block:: yaml -// -// # Example 1: not required with an empty message -// -// # Example 2: require A -// provider_name: provider-A -// -// # Example 3: require A or B -// requires_any: -// requirements: -// - provider_name: provider-A -// - provider_name: provider-B -// -// # Example 4: require A and B -// requires_all: -// requirements: -// - provider_name: provider-A -// - provider_name: provider-B -// -// # Example 5: require A and (B or C) -// requires_all: -// requirements: -// - provider_name: provider-A -// - requires_any: -// requirements: -// - provider_name: provider-B -// - provider_name: provider-C -// -// # Example 6: require A or (B and C) -// requires_any: -// requirements: -// - provider_name: provider-A -// - requires_all: -// requirements: -// - provider_name: provider-B -// - provider_name: provider-C -// -// # Example 7: A is optional (if token from A is provided, it must be valid, but also allows -// missing token.) -// requires_any: -// requirements: -// - provider_name: provider-A -// - allow_missing: {} -// -// # Example 8: A is optional and B is required. -// requires_all: -// requirements: -// - requires_any: -// requirements: -// - provider_name: provider-A -// - allow_missing: {} -// - provider_name: provider-B -// -// [#next-free-field: 7] -message JwtRequirement { - oneof requires_type { - // Specify a required provider name. - string provider_name = 1; - - // Specify a required provider with audiences. - ProviderWithAudiences provider_and_audiences = 2; - - // Specify list of JwtRequirement. Their results are OR-ed. - // If any one of them passes, the result is passed. - JwtRequirementOrList requires_any = 3; - - // Specify list of JwtRequirement. Their results are AND-ed. - // All of them must pass, if one of them fails or missing, it fails. - JwtRequirementAndList requires_all = 4; - - // The requirement is always satisfied even if JWT is missing or the JWT - // verification fails. 
A typical usage is: this filter is used to only verify - // JWTs and pass the verified JWT payloads to another filter, the other filter - // will make decision. In this mode, all JWT tokens will be verified. - google.protobuf.Empty allow_missing_or_failed = 5; - - // The requirement is satisfied if JWT is missing, but failed if JWT is - // presented but invalid. Similar to allow_missing_or_failed, this is used - // to only verify JWTs and pass the verified payload to another filter. The - // different is this mode will reject requests with invalid tokens. - google.protobuf.Empty allow_missing = 6; - } -} - -// This message specifies a list of RequiredProvider. -// Their results are OR-ed; if any one of them passes, the result is passed -message JwtRequirementOrList { - // Specify a list of JwtRequirement. - repeated JwtRequirement requirements = 1 [(validate.rules).repeated = {min_items: 2}]; -} - -// This message specifies a list of RequiredProvider. -// Their results are AND-ed; all of them must pass, if one of them fails or missing, it fails. -message JwtRequirementAndList { - // Specify a list of JwtRequirement. - repeated JwtRequirement requirements = 1 [(validate.rules).repeated = {min_items: 2}]; -} - -// This message specifies a Jwt requirement for a specific Route condition. -// Example 1: -// -// .. code-block:: yaml -// -// - match: -// prefix: /healthz -// -// In above example, "requires" field is empty for /healthz prefix match, -// it means that requests matching the path prefix don't require JWT authentication. -// -// Example 2: -// -// .. code-block:: yaml -// -// - match: -// prefix: / -// requires: { provider_name: provider-A } -// -// In above example, all requests matched the path prefix require jwt authentication -// from "provider-A". -message RequirementRule { - // The route matching parameter. Only when the match is satisfied, the "requires" field will - // apply. - // - // For example: following match will match all requests. - // - // .. 
code-block:: yaml - // - // match: - // prefix: / - // - api.v2.route.RouteMatch match = 1 [(validate.rules).message = {required: true}]; - - // Specify a Jwt Requirement. Please detail comment in message JwtRequirement. - JwtRequirement requires = 2; -} - -// This message specifies Jwt requirements based on stream_info.filterState. -// This FilterState should use `Router::StringAccessor` object to set a string value. -// Other HTTP filters can use it to specify Jwt requirements dynamically. -// -// Example: -// -// .. code-block:: yaml -// -// name: jwt_selector -// requires: -// issuer_1: -// provider_name: issuer1 -// issuer_2: -// provider_name: issuer2 -// -// If a filter set "jwt_selector" with "issuer_1" to FilterState for a request, -// jwt_authn filter will use JwtRequirement{"provider_name": "issuer1"} to verify. -message FilterStateRule { - // The filter state name to retrieve the `Router::StringAccessor` object. - string name = 1 [(validate.rules).string = {min_bytes: 1}]; - - // A map of string keys to requirements. The string key is the string value - // in the FilterState with the name specified in the *name* field above. - map requires = 3; -} - -// This is the Envoy HTTP filter config for JWT authentication. -// -// For example: -// -// .. code-block:: yaml -// -// providers: -// provider1: -// issuer: issuer1 -// audiences: -// - audience1 -// - audience2 -// remote_jwks: -// http_uri: -// uri: https://example.com/.well-known/jwks.json -// cluster: example_jwks_cluster -// provider2: -// issuer: issuer2 -// local_jwks: -// inline_string: jwks_string -// -// rules: -// # Not jwt verification is required for /health path -// - match: -// prefix: /health -// -// # Jwt verification for provider1 is required for path prefixed with "prefix" -// - match: -// prefix: /prefix -// requires: -// provider_name: provider1 -// -// # Jwt verification for either provider1 or provider2 is required for all other requests. 
-// - match: -// prefix: / -// requires: -// requires_any: -// requirements: -// - provider_name: provider1 -// - provider_name: provider2 -// -message JwtAuthentication { - // Map of provider names to JwtProviders. - // - // .. code-block:: yaml - // - // providers: - // provider1: - // issuer: issuer1 - // audiences: - // - audience1 - // - audience2 - // remote_jwks: - // http_uri: - // uri: https://example.com/.well-known/jwks.json - // cluster: example_jwks_cluster - // provider2: - // issuer: provider2 - // local_jwks: - // inline_string: jwks_string - // - map providers = 1; - - // Specifies requirements based on the route matches. The first matched requirement will be - // applied. If there are overlapped match conditions, please put the most specific match first. - // - // Examples - // - // .. code-block:: yaml - // - // rules: - // - match: - // prefix: /healthz - // - match: - // prefix: /baz - // requires: - // provider_name: provider1 - // - match: - // prefix: /foo - // requires: - // requires_any: - // requirements: - // - provider_name: provider1 - // - provider_name: provider2 - // - match: - // prefix: /bar - // requires: - // requires_all: - // requirements: - // - provider_name: provider1 - // - provider_name: provider2 - // - repeated RequirementRule rules = 2; - - // This message specifies Jwt requirements based on stream_info.filterState. - // Other HTTP filters can use it to specify Jwt requirements dynamically. - // The *rules* field above is checked first, if it could not find any matches, - // check this one. - FilterStateRule filter_state_rules = 3; - - // When set to true, bypass the `CORS preflight request - // `_ regardless of JWT - // requirements specified in the rules. 
- bool bypass_cors_preflight = 4; -} diff --git a/generated_api_shadow/envoy/config/filter/http/lua/v2/BUILD b/generated_api_shadow/envoy/config/filter/http/lua/v2/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/config/filter/http/lua/v2/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/config/filter/http/lua/v2/lua.proto b/generated_api_shadow/envoy/config/filter/http/lua/v2/lua.proto deleted file mode 100644 index 068b5e255df5d..0000000000000 --- a/generated_api_shadow/envoy/config/filter/http/lua/v2/lua.proto +++ /dev/null @@ -1,25 +0,0 @@ -syntax = "proto3"; - -package envoy.config.filter.http.lua.v2; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.filter.http.lua.v2"; -option java_outer_classname = "LuaProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.http.lua.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Lua] -// Lua :ref:`configuration overview `. -// [#extension: envoy.filters.http.lua] - -message Lua { - // The Lua code that Envoy will execute. This can be a very small script that - // further loads code from disk if desired. Note that if JSON configuration is used, the code must - // be properly escaped. YAML configuration may be easier to read since YAML supports multi-line - // strings so complex scripts can be easily expressed inline in the configuration. 
- string inline_code = 1 [(validate.rules).string = {min_bytes: 1}]; -} diff --git a/generated_api_shadow/envoy/config/filter/http/on_demand/v2/BUILD b/generated_api_shadow/envoy/config/filter/http/on_demand/v2/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/config/filter/http/on_demand/v2/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/config/filter/http/on_demand/v2/on_demand.proto b/generated_api_shadow/envoy/config/filter/http/on_demand/v2/on_demand.proto deleted file mode 100644 index 4c5aadf442cf9..0000000000000 --- a/generated_api_shadow/envoy/config/filter/http/on_demand/v2/on_demand.proto +++ /dev/null @@ -1,20 +0,0 @@ -syntax = "proto3"; - -package envoy.config.filter.http.on_demand.v2; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.config.filter.http.on_demand.v2"; -option java_outer_classname = "OnDemandProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = - "envoy.extensions.filters.http.on_demand.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: OnDemand] -// IP tagging :ref:`configuration overview `. -// [#extension: envoy.filters.http.on_demand] - -message OnDemand { -} diff --git a/generated_api_shadow/envoy/config/filter/http/original_src/v2alpha1/BUILD b/generated_api_shadow/envoy/config/filter/http/original_src/v2alpha1/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/config/filter/http/original_src/v2alpha1/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. 
This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/config/filter/http/original_src/v2alpha1/original_src.proto b/generated_api_shadow/envoy/config/filter/http/original_src/v2alpha1/original_src.proto deleted file mode 100644 index 8dfb4354d238f..0000000000000 --- a/generated_api_shadow/envoy/config/filter/http/original_src/v2alpha1/original_src.proto +++ /dev/null @@ -1,27 +0,0 @@ -syntax = "proto3"; - -package envoy.config.filter.http.original_src.v2alpha1; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.config.filter.http.original_src.v2alpha1"; -option java_outer_classname = "OriginalSrcProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = - "envoy.extensions.filters.http.original_src.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Original Src Filter] -// Use the Original source address on upstream connections. - -// The Original Src filter binds upstream connections to the original source address determined -// for the request. This address could come from something like the Proxy Protocol filter, or it -// could come from trusted http headers. -// [#extension: envoy.filters.http.original_src] -message OriginalSrc { - // Sets the SO_MARK option on the upstream connection's socket to the provided value. Used to - // ensure that non-local addresses may be routed back through envoy when binding to the original - // source address. The option will not be applied if the mark is 0. 
- uint32 mark = 1; -} diff --git a/generated_api_shadow/envoy/config/filter/http/rate_limit/v2/BUILD b/generated_api_shadow/envoy/config/filter/http/rate_limit/v2/BUILD deleted file mode 100644 index 5b66057a82cd7..0000000000000 --- a/generated_api_shadow/envoy/config/filter/http/rate_limit/v2/BUILD +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/ratelimit/v2:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/config/filter/http/rate_limit/v2/rate_limit.proto b/generated_api_shadow/envoy/config/filter/http/rate_limit/v2/rate_limit.proto deleted file mode 100644 index b9361476bcfde..0000000000000 --- a/generated_api_shadow/envoy/config/filter/http/rate_limit/v2/rate_limit.proto +++ /dev/null @@ -1,66 +0,0 @@ -syntax = "proto3"; - -package envoy.config.filter.http.rate_limit.v2; - -import "envoy/config/ratelimit/v2/rls.proto"; - -import "google/protobuf/duration.proto"; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.filter.http.rate_limit.v2"; -option java_outer_classname = "RateLimitProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = - "envoy.extensions.filters.http.ratelimit.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Rate limit] -// Rate limit :ref:`configuration overview `. -// [#extension: envoy.filters.http.ratelimit] - -// [#next-free-field: 8] -message RateLimit { - // The rate limit domain to use when calling the rate limit service. 
- string domain = 1 [(validate.rules).string = {min_bytes: 1}]; - - // Specifies the rate limit configurations to be applied with the same - // stage number. If not set, the default stage number is 0. - // - // .. note:: - // - // The filter supports a range of 0 - 10 inclusively for stage numbers. - uint32 stage = 2 [(validate.rules).uint32 = {lte: 10}]; - - // The type of requests the filter should apply to. The supported - // types are *internal*, *external* or *both*. A request is considered internal if - // :ref:`x-envoy-internal` is set to true. If - // :ref:`x-envoy-internal` is not set or false, a - // request is considered external. The filter defaults to *both*, and it will apply to all request - // types. - string request_type = 3 - [(validate.rules).string = {in: "internal" in: "external" in: "both" in: ""}]; - - // The timeout in milliseconds for the rate limit service RPC. If not - // set, this defaults to 20ms. - google.protobuf.Duration timeout = 4; - - // The filter's behaviour in case the rate limiting service does - // not respond back. When it is set to true, Envoy will not allow traffic in case of - // communication failure between rate limiting service and the proxy. - // Defaults to false. - bool failure_mode_deny = 5; - - // Specifies whether a `RESOURCE_EXHAUSTED` gRPC code must be returned instead - // of the default `UNAVAILABLE` gRPC code for a rate limited gRPC call. The - // HTTP code will be 200 for a gRPC response. - bool rate_limited_as_resource_exhausted = 6; - - // Configuration for an external rate limit service provider. If not - // specified, any calls to the rate limit service will immediately return - // success. 
- ratelimit.v2.RateLimitServiceConfig rate_limit_service = 7 - [(validate.rules).message = {required: true}]; -} diff --git a/generated_api_shadow/envoy/config/filter/http/rbac/v2/BUILD b/generated_api_shadow/envoy/config/filter/http/rbac/v2/BUILD deleted file mode 100644 index 90082d083a3f6..0000000000000 --- a/generated_api_shadow/envoy/config/filter/http/rbac/v2/BUILD +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/rbac/v2:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/config/filter/http/rbac/v2/rbac.proto b/generated_api_shadow/envoy/config/filter/http/rbac/v2/rbac.proto deleted file mode 100644 index 87d76a8f913ea..0000000000000 --- a/generated_api_shadow/envoy/config/filter/http/rbac/v2/rbac.proto +++ /dev/null @@ -1,38 +0,0 @@ -syntax = "proto3"; - -package envoy.config.filter.http.rbac.v2; - -import "envoy/config/rbac/v2/rbac.proto"; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.config.filter.http.rbac.v2"; -option java_outer_classname = "RbacProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.http.rbac.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: RBAC] -// Role-Based Access Control :ref:`configuration overview `. -// [#extension: envoy.filters.http.rbac] - -// RBAC filter config. -message RBAC { - // Specify the RBAC rules to be applied globally. - // If absent, no enforcing RBAC policy will be applied. - config.rbac.v2.RBAC rules = 1; - - // Shadow rules are not enforced by the filter (i.e., returning a 403) - // but will emit stats and logs and can be used for rule testing. 
- // If absent, no shadow RBAC policy will be applied. - config.rbac.v2.RBAC shadow_rules = 2; -} - -message RBACPerRoute { - reserved 1; - - // Override the global configuration of the filter with this new config. - // If absent, the global RBAC policy will be disabled for this route. - RBAC rbac = 2; -} diff --git a/generated_api_shadow/envoy/config/filter/http/router/v2/BUILD b/generated_api_shadow/envoy/config/filter/http/router/v2/BUILD deleted file mode 100644 index 4b7ccc42a6ca4..0000000000000 --- a/generated_api_shadow/envoy/config/filter/http/router/v2/BUILD +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/filter/accesslog/v2:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/config/filter/http/router/v2/router.proto b/generated_api_shadow/envoy/config/filter/http/router/v2/router.proto deleted file mode 100644 index c95500cf8168b..0000000000000 --- a/generated_api_shadow/envoy/config/filter/http/router/v2/router.proto +++ /dev/null @@ -1,79 +0,0 @@ -syntax = "proto3"; - -package envoy.config.filter.http.router.v2; - -import "envoy/config/filter/accesslog/v2/accesslog.proto"; - -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.filter.http.router.v2"; -option java_outer_classname = "RouterProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.http.router.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Router] -// Router :ref:`configuration overview `. 
-// [#extension: envoy.filters.http.router] - -// [#next-free-field: 7] -message Router { - // Whether the router generates dynamic cluster statistics. Defaults to - // true. Can be disabled in high performance scenarios. - google.protobuf.BoolValue dynamic_stats = 1; - - // Whether to start a child span for egress routed calls. This can be - // useful in scenarios where other filters (auth, ratelimit, etc.) make - // outbound calls and have child spans rooted at the same ingress - // parent. Defaults to false. - bool start_child_span = 2; - - // Configuration for HTTP upstream logs emitted by the router. Upstream logs - // are configured in the same way as access logs, but each log entry represents - // an upstream request. Presuming retries are configured, multiple upstream - // requests may be made for each downstream (inbound) request. - repeated accesslog.v2.AccessLog upstream_log = 3; - - // Do not add any additional *x-envoy-* headers to requests or responses. This - // only affects the :ref:`router filter generated *x-envoy-* headers - // `, other Envoy filters and the HTTP - // connection manager may continue to set *x-envoy-* headers. - bool suppress_envoy_headers = 4; - - // Specifies a list of HTTP headers to strictly validate. Envoy will reject a - // request and respond with HTTP status 400 if the request contains an invalid - // value for any of the headers listed in this field. Strict header checking - // is only supported for the following headers: - // - // Value must be a ','-delimited list (i.e. 
no spaces) of supported retry - // policy values: - // - // * :ref:`config_http_filters_router_x-envoy-retry-grpc-on` - // * :ref:`config_http_filters_router_x-envoy-retry-on` - // - // Value must be an integer: - // - // * :ref:`config_http_filters_router_x-envoy-max-retries` - // * :ref:`config_http_filters_router_x-envoy-upstream-rq-timeout-ms` - // * :ref:`config_http_filters_router_x-envoy-upstream-rq-per-try-timeout-ms` - repeated string strict_check_headers = 5 [(validate.rules).repeated = { - items { - string { - in: "x-envoy-upstream-rq-timeout-ms" - in: "x-envoy-upstream-rq-per-try-timeout-ms" - in: "x-envoy-max-retries" - in: "x-envoy-retry-grpc-on" - in: "x-envoy-retry-on" - } - } - }]; - - // If not set, ingress Envoy will ignore - // :ref:`config_http_filters_router_x-envoy-expected-rq-timeout-ms` header, populated by egress - // Envoy, when deriving timeout for upstream cluster. - bool respect_expected_rq_timeout = 6; -} diff --git a/generated_api_shadow/envoy/config/filter/http/squash/v2/BUILD b/generated_api_shadow/envoy/config/filter/http/squash/v2/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/config/filter/http/squash/v2/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/config/filter/http/squash/v2/squash.proto b/generated_api_shadow/envoy/config/filter/http/squash/v2/squash.proto deleted file mode 100644 index a7ae625d2ee37..0000000000000 --- a/generated_api_shadow/envoy/config/filter/http/squash/v2/squash.proto +++ /dev/null @@ -1,58 +0,0 @@ -syntax = "proto3"; - -package envoy.config.filter.http.squash.v2; - -import "google/protobuf/duration.proto"; -import "google/protobuf/struct.proto"; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.filter.http.squash.v2"; -option java_outer_classname = "SquashProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.http.squash.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Squash] -// Squash :ref:`configuration overview `. -// [#extension: envoy.filters.http.squash] - -// [#next-free-field: 6] -message Squash { - // The name of the cluster that hosts the Squash server. - string cluster = 1 [(validate.rules).string = {min_bytes: 1}]; - - // When the filter requests the Squash server to create a DebugAttachment, it will use this - // structure as template for the body of the request. It can contain reference to environment - // variables in the form of '{{ ENV_VAR_NAME }}'. These can be used to provide the Squash server - // with more information to find the process to attach the debugger to. For example, in a - // Istio/k8s environment, this will contain information on the pod: - // - // .. 
code-block:: json - // - // { - // "spec": { - // "attachment": { - // "pod": "{{ POD_NAME }}", - // "namespace": "{{ POD_NAMESPACE }}" - // }, - // "match_request": true - // } - // } - // - // (where POD_NAME, POD_NAMESPACE are configured in the pod via the Downward API) - google.protobuf.Struct attachment_template = 2; - - // The timeout for individual requests sent to the Squash cluster. Defaults to 1 second. - google.protobuf.Duration request_timeout = 3; - - // The total timeout Squash will delay a request and wait for it to be attached. Defaults to 60 - // seconds. - google.protobuf.Duration attachment_timeout = 4; - - // Amount of time to poll for the status of the attachment object in the Squash server - // (to check if has been attached). Defaults to 1 second. - google.protobuf.Duration attachment_poll_period = 5; -} diff --git a/generated_api_shadow/envoy/config/filter/http/tap/v2alpha/BUILD b/generated_api_shadow/envoy/config/filter/http/tap/v2alpha/BUILD deleted file mode 100644 index cf02fc6c0b1f9..0000000000000 --- a/generated_api_shadow/envoy/config/filter/http/tap/v2alpha/BUILD +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/common/tap/v2alpha:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/config/filter/http/tap/v2alpha/tap.proto b/generated_api_shadow/envoy/config/filter/http/tap/v2alpha/tap.proto deleted file mode 100644 index 3f984cec0d6c3..0000000000000 --- a/generated_api_shadow/envoy/config/filter/http/tap/v2alpha/tap.proto +++ /dev/null @@ -1,26 +0,0 @@ -syntax = "proto3"; - -package envoy.config.filter.http.tap.v2alpha; - -import "envoy/config/common/tap/v2alpha/common.proto"; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.filter.http.tap.v2alpha"; -option java_outer_classname = "TapProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.http.tap.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Tap] -// Tap :ref:`configuration overview `. -// [#extension: envoy.filters.http.tap] - -// Top level configuration for the tap filter. -message Tap { - // Common configuration for the HTTP tap filter. - common.tap.v2alpha.CommonExtensionConfig common_config = 1 - [(validate.rules).message = {required: true}]; -} diff --git a/generated_api_shadow/envoy/config/filter/http/transcoder/v2/BUILD b/generated_api_shadow/envoy/config/filter/http/transcoder/v2/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/config/filter/http/transcoder/v2/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/config/filter/http/transcoder/v2/transcoder.proto b/generated_api_shadow/envoy/config/filter/http/transcoder/v2/transcoder.proto deleted file mode 100644 index ac6d7eefa78a0..0000000000000 --- a/generated_api_shadow/envoy/config/filter/http/transcoder/v2/transcoder.proto +++ /dev/null @@ -1,159 +0,0 @@ -syntax = "proto3"; - -package envoy.config.filter.http.transcoder.v2; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.filter.http.transcoder.v2"; -option java_outer_classname = "TranscoderProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = - "envoy.extensions.filters.http.grpc_json_transcoder.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: gRPC-JSON transcoder] -// gRPC-JSON transcoder :ref:`configuration overview `. -// [#extension: envoy.filters.http.grpc_json_transcoder] - -// [#next-free-field: 10] -message GrpcJsonTranscoder { - message PrintOptions { - // Whether to add spaces, line breaks and indentation to make the JSON - // output easy to read. Defaults to false. - bool add_whitespace = 1; - - // Whether to always print primitive fields. By default primitive - // fields with default values will be omitted in JSON output. For - // example, an int32 field set to 0 will be omitted. Setting this flag to - // true will override the default behavior and print primitive fields - // regardless of their values. Defaults to false. - bool always_print_primitive_fields = 2; - - // Whether to always print enums as ints. By default they are rendered - // as strings. Defaults to false. 
- bool always_print_enums_as_ints = 3; - - // Whether to preserve proto field names. By default protobuf will - // generate JSON field names using the ``json_name`` option, or lower camel case, - // in that order. Setting this flag will preserve the original field names. Defaults to false. - bool preserve_proto_field_names = 4; - } - - oneof descriptor_set { - option (validate.required) = true; - - // Supplies the filename of - // :ref:`the proto descriptor set ` for the gRPC - // services. - string proto_descriptor = 1; - - // Supplies the binary content of - // :ref:`the proto descriptor set ` for the gRPC - // services. - bytes proto_descriptor_bin = 4; - } - - // A list of strings that - // supplies the fully qualified service names (i.e. "package_name.service_name") that - // the transcoder will translate. If the service name doesn't exist in ``proto_descriptor``, - // Envoy will fail at startup. The ``proto_descriptor`` may contain more services than - // the service names specified here, but they won't be translated. - repeated string services = 2 [(validate.rules).repeated = {min_items: 1}]; - - // Control options for response JSON. These options are passed directly to - // `JsonPrintOptions `_. - PrintOptions print_options = 3; - - // Whether to keep the incoming request route after the outgoing headers have been transformed to - // the match the upstream gRPC service. Note: This means that routes for gRPC services that are - // not transcoded cannot be used in combination with *match_incoming_request_route*. - bool match_incoming_request_route = 5; - - // A list of query parameters to be ignored for transcoding method mapping. - // By default, the transcoder filter will not transcode a request if there are any - // unknown/invalid query parameters. - // - // Example : - // - // .. 
code-block:: proto - // - // service Bookstore { - // rpc GetShelf(GetShelfRequest) returns (Shelf) { - // option (google.api.http) = { - // get: "/shelves/{shelf}" - // }; - // } - // } - // - // message GetShelfRequest { - // int64 shelf = 1; - // } - // - // message Shelf {} - // - // The request ``/shelves/100?foo=bar`` will not be mapped to ``GetShelf``` because variable - // binding for ``foo`` is not defined. Adding ``foo`` to ``ignored_query_parameters`` will allow - // the same request to be mapped to ``GetShelf``. - repeated string ignored_query_parameters = 6; - - // Whether to route methods without the ``google.api.http`` option. - // - // Example : - // - // .. code-block:: proto - // - // package bookstore; - // - // service Bookstore { - // rpc GetShelf(GetShelfRequest) returns (Shelf) {} - // } - // - // message GetShelfRequest { - // int64 shelf = 1; - // } - // - // message Shelf {} - // - // The client could ``post`` a json body ``{"shelf": 1234}`` with the path of - // ``/bookstore.Bookstore/GetShelfRequest`` to call ``GetShelfRequest``. - bool auto_mapping = 7; - - // Whether to ignore query parameters that cannot be mapped to a corresponding - // protobuf field. Use this if you cannot control the query parameters and do - // not know them beforehand. Otherwise use ``ignored_query_parameters``. - // Defaults to false. - bool ignore_unknown_query_parameters = 8; - - // Whether to convert gRPC status headers to JSON. - // When trailer indicates a gRPC error and there was no HTTP body, take ``google.rpc.Status`` - // from the ``grpc-status-details-bin`` header and use it as JSON body. - // If there was no such header, make ``google.rpc.Status`` out of the ``grpc-status`` and - // ``grpc-message`` headers. - // The error details types must be present in the ``proto_descriptor``. - // - // For example, if an upstream server replies with headers: - // - // .. 
code-block:: none - // - // grpc-status: 5 - // grpc-status-details-bin: - // CAUaMwoqdHlwZS5nb29nbGVhcGlzLmNvbS9nb29nbGUucnBjLlJlcXVlc3RJbmZvEgUKA3ItMQ - // - // The ``grpc-status-details-bin`` header contains a base64-encoded protobuf message - // ``google.rpc.Status``. It will be transcoded into: - // - // .. code-block:: none - // - // HTTP/1.1 404 Not Found - // content-type: application/json - // - // {"code":5,"details":[{"@type":"type.googleapis.com/google.rpc.RequestInfo","requestId":"r-1"}]} - // - // In order to transcode the message, the ``google.rpc.RequestInfo`` type from - // the ``google/rpc/error_details.proto`` should be included in the configured - // :ref:`proto descriptor set `. - bool convert_grpc_status = 9; -} diff --git a/generated_api_shadow/envoy/config/filter/listener/http_inspector/v2/BUILD b/generated_api_shadow/envoy/config/filter/listener/http_inspector/v2/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/config/filter/listener/http_inspector/v2/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/config/filter/listener/http_inspector/v2/http_inspector.proto b/generated_api_shadow/envoy/config/filter/listener/http_inspector/v2/http_inspector.proto deleted file mode 100644 index 0496207e09bcb..0000000000000 --- a/generated_api_shadow/envoy/config/filter/listener/http_inspector/v2/http_inspector.proto +++ /dev/null @@ -1,20 +0,0 @@ -syntax = "proto3"; - -package envoy.config.filter.listener.http_inspector.v2; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.config.filter.listener.http_inspector.v2"; -option java_outer_classname = "HttpInspectorProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = - "envoy.extensions.filters.listener.http_inspector.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: HTTP Inspector Filter] -// Detect whether the application protocol is HTTP. -// [#extension: envoy.filters.listener.http_inspector] - -message HttpInspector { -} diff --git a/generated_api_shadow/envoy/config/filter/listener/original_dst/v2/BUILD b/generated_api_shadow/envoy/config/filter/listener/original_dst/v2/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/config/filter/listener/original_dst/v2/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/config/filter/listener/original_dst/v2/original_dst.proto b/generated_api_shadow/envoy/config/filter/listener/original_dst/v2/original_dst.proto deleted file mode 100644 index fa4acee45fc11..0000000000000 --- a/generated_api_shadow/envoy/config/filter/listener/original_dst/v2/original_dst.proto +++ /dev/null @@ -1,20 +0,0 @@ -syntax = "proto3"; - -package envoy.config.filter.listener.original_dst.v2; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.config.filter.listener.original_dst.v2"; -option java_outer_classname = "OriginalDstProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = - "envoy.extensions.filters.listener.original_dst.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Original Dst Filter] -// Use the Original destination address on downstream connections. -// [#extension: envoy.filters.listener.original_dst] - -message OriginalDst { -} diff --git a/generated_api_shadow/envoy/config/filter/listener/original_src/v2alpha1/BUILD b/generated_api_shadow/envoy/config/filter/listener/original_src/v2alpha1/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/config/filter/listener/original_src/v2alpha1/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/config/filter/listener/original_src/v2alpha1/original_src.proto b/generated_api_shadow/envoy/config/filter/listener/original_src/v2alpha1/original_src.proto deleted file mode 100644 index f9ddb98e745c7..0000000000000 --- a/generated_api_shadow/envoy/config/filter/listener/original_src/v2alpha1/original_src.proto +++ /dev/null @@ -1,31 +0,0 @@ -syntax = "proto3"; - -package envoy.config.filter.listener.original_src.v2alpha1; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.config.filter.listener.original_src.v2alpha1"; -option java_outer_classname = "OriginalSrcProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = - "envoy.extensions.filters.listener.original_src.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Original Src Filter] -// Use the Original source address on upstream connections. -// [#extension: envoy.filters.listener.original_src] - -// The Original Src filter binds upstream connections to the original source address determined -// for the connection. This address could come from something like the Proxy Protocol filter, or it -// could come from trusted http headers. -message OriginalSrc { - // Whether to bind the port to the one used in the original downstream connection. - // [#not-implemented-hide:] - bool bind_port = 1; - - // Sets the SO_MARK option on the upstream connection's socket to the provided value. Used to - // ensure that non-local addresses may be routed back through envoy when binding to the original - // source address. The option will not be applied if the mark is 0. 
- uint32 mark = 2; -} diff --git a/generated_api_shadow/envoy/config/filter/listener/proxy_protocol/v2/BUILD b/generated_api_shadow/envoy/config/filter/listener/proxy_protocol/v2/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/config/filter/listener/proxy_protocol/v2/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/config/filter/listener/proxy_protocol/v2/proxy_protocol.proto b/generated_api_shadow/envoy/config/filter/listener/proxy_protocol/v2/proxy_protocol.proto deleted file mode 100644 index cabffb9fc0c05..0000000000000 --- a/generated_api_shadow/envoy/config/filter/listener/proxy_protocol/v2/proxy_protocol.proto +++ /dev/null @@ -1,20 +0,0 @@ -syntax = "proto3"; - -package envoy.config.filter.listener.proxy_protocol.v2; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.config.filter.listener.proxy_protocol.v2"; -option java_outer_classname = "ProxyProtocolProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = - "envoy.extensions.filters.listener.proxy_protocol.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Proxy Protocol Filter] -// PROXY protocol listener filter. 
-// [#extension: envoy.filters.listener.proxy_protocol] - -message ProxyProtocol { -} diff --git a/generated_api_shadow/envoy/config/filter/listener/tls_inspector/v2/BUILD b/generated_api_shadow/envoy/config/filter/listener/tls_inspector/v2/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/config/filter/listener/tls_inspector/v2/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/config/filter/listener/tls_inspector/v2/tls_inspector.proto b/generated_api_shadow/envoy/config/filter/listener/tls_inspector/v2/tls_inspector.proto deleted file mode 100644 index 7ab679c47dc57..0000000000000 --- a/generated_api_shadow/envoy/config/filter/listener/tls_inspector/v2/tls_inspector.proto +++ /dev/null @@ -1,20 +0,0 @@ -syntax = "proto3"; - -package envoy.config.filter.listener.tls_inspector.v2; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.config.filter.listener.tls_inspector.v2"; -option java_outer_classname = "TlsInspectorProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = - "envoy.extensions.filters.listener.tls_inspector.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: TLS Inspector Filter] -// Allows detecting whether the transport appears to be TLS or plaintext. 
-// [#extension: envoy.filters.listener.tls_inspector] - -message TlsInspector { -} diff --git a/generated_api_shadow/envoy/config/filter/network/client_ssl_auth/v2/BUILD b/generated_api_shadow/envoy/config/filter/network/client_ssl_auth/v2/BUILD deleted file mode 100644 index 83bc0ab960e74..0000000000000 --- a/generated_api_shadow/envoy/config/filter/network/client_ssl_auth/v2/BUILD +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/api/v2/core:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/config/filter/network/client_ssl_auth/v2/client_ssl_auth.proto b/generated_api_shadow/envoy/config/filter/network/client_ssl_auth/v2/client_ssl_auth.proto deleted file mode 100644 index 4da6d97ca2992..0000000000000 --- a/generated_api_shadow/envoy/config/filter/network/client_ssl_auth/v2/client_ssl_auth.proto +++ /dev/null @@ -1,46 +0,0 @@ -syntax = "proto3"; - -package envoy.config.filter.network.client_ssl_auth.v2; - -import "envoy/api/v2/core/address.proto"; - -import "google/protobuf/duration.proto"; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.filter.network.client_ssl_auth.v2"; -option java_outer_classname = "ClientSslAuthProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = - "envoy.extensions.filters.network.client_ssl_auth.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Client TLS authentication] -// Client TLS authentication -// :ref:`configuration overview `. 
-// [#extension: envoy.filters.network.client_ssl_auth] - -message ClientSSLAuth { - // The :ref:`cluster manager ` cluster that runs - // the authentication service. The filter will connect to the service every 60s to fetch the list - // of principals. The service must support the expected :ref:`REST API - // `. - string auth_api_cluster = 1 [(validate.rules).string = {min_bytes: 1}]; - - // The prefix to use when emitting :ref:`statistics - // `. - string stat_prefix = 2 [(validate.rules).string = {min_bytes: 1}]; - - // Time in milliseconds between principal refreshes from the - // authentication service. Default is 60000 (60s). The actual fetch time - // will be this value plus a random jittered value between - // 0-refresh_delay_ms milliseconds. - google.protobuf.Duration refresh_delay = 3; - - // An optional list of IP address and subnet masks that should be white - // listed for access by the filter. If no list is provided, there is no - // IP allowlist. - repeated api.v2.core.CidrRange ip_white_list = 4; -} diff --git a/generated_api_shadow/envoy/config/filter/network/direct_response/v2/BUILD b/generated_api_shadow/envoy/config/filter/network/direct_response/v2/BUILD deleted file mode 100644 index 83bc0ab960e74..0000000000000 --- a/generated_api_shadow/envoy/config/filter/network/direct_response/v2/BUILD +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/api/v2/core:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/config/filter/network/direct_response/v2/config.proto b/generated_api_shadow/envoy/config/filter/network/direct_response/v2/config.proto deleted file mode 100644 index 15de7e3b55379..0000000000000 --- a/generated_api_shadow/envoy/config/filter/network/direct_response/v2/config.proto +++ /dev/null @@ -1,24 +0,0 @@ -syntax = "proto3"; - -package envoy.config.filter.network.direct_response.v2; - -import "envoy/api/v2/core/base.proto"; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.config.filter.network.direct_response.v2"; -option java_outer_classname = "ConfigProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = - "envoy.extensions.filters.network.direct_response.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Direct response] -// Direct response :ref:`configuration overview `. -// [#extension: envoy.filters.network.direct_response] - -message Config { - // Response data as a data source. - api.v2.core.DataSource response = 1; -} diff --git a/generated_api_shadow/envoy/config/filter/network/dubbo_proxy/v2alpha1/BUILD b/generated_api_shadow/envoy/config/filter/network/dubbo_proxy/v2alpha1/BUILD deleted file mode 100644 index 5fe475a5dcf8d..0000000000000 --- a/generated_api_shadow/envoy/config/filter/network/dubbo_proxy/v2alpha1/BUILD +++ /dev/null @@ -1,14 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/api/v2/route:pkg", - "//envoy/type:pkg", - "//envoy/type/matcher:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/config/filter/network/dubbo_proxy/v2alpha1/README.md b/generated_api_shadow/envoy/config/filter/network/dubbo_proxy/v2alpha1/README.md deleted file mode 100644 index c83caca1f8f4d..0000000000000 --- a/generated_api_shadow/envoy/config/filter/network/dubbo_proxy/v2alpha1/README.md +++ /dev/null @@ -1 +0,0 @@ -Protocol buffer definitions for the Dubbo proxy. diff --git a/generated_api_shadow/envoy/config/filter/network/dubbo_proxy/v2alpha1/dubbo_proxy.proto b/generated_api_shadow/envoy/config/filter/network/dubbo_proxy/v2alpha1/dubbo_proxy.proto deleted file mode 100644 index 47248932f94ce..0000000000000 --- a/generated_api_shadow/envoy/config/filter/network/dubbo_proxy/v2alpha1/dubbo_proxy.proto +++ /dev/null @@ -1,66 +0,0 @@ -syntax = "proto3"; - -package envoy.config.filter.network.dubbo_proxy.v2alpha1; - -import "envoy/config/filter/network/dubbo_proxy/v2alpha1/route.proto"; - -import "google/protobuf/any.proto"; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.filter.network.dubbo_proxy.v2alpha1"; -option java_outer_classname = "DubboProxyProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = - "envoy.extensions.filters.network.dubbo_proxy.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Dubbo Proxy] -// Dubbo Proxy :ref:`configuration overview `. -// [#extension: envoy.filters.network.dubbo_proxy] - -// Dubbo Protocol types supported by Envoy. -enum ProtocolType { - // the default protocol. 
- Dubbo = 0; -} - -// Dubbo Serialization types supported by Envoy. -enum SerializationType { - // the default serialization protocol. - Hessian2 = 0; -} - -// [#next-free-field: 6] -message DubboProxy { - // The human readable prefix to use when emitting statistics. - string stat_prefix = 1 [(validate.rules).string = {min_bytes: 1}]; - - // Configure the protocol used. - ProtocolType protocol_type = 2 [(validate.rules).enum = {defined_only: true}]; - - // Configure the serialization protocol used. - SerializationType serialization_type = 3 [(validate.rules).enum = {defined_only: true}]; - - // The route table for the connection manager is static and is specified in this property. - repeated RouteConfiguration route_config = 4; - - // A list of individual Dubbo filters that make up the filter chain for requests made to the - // Dubbo proxy. Order matters as the filters are processed sequentially. For backwards - // compatibility, if no dubbo_filters are specified, a default Dubbo router filter - // (`envoy.filters.dubbo.router`) is used. - repeated DubboFilter dubbo_filters = 5; -} - -// DubboFilter configures a Dubbo filter. -message DubboFilter { - // The name of the filter to instantiate. The name must match a supported - // filter. - string name = 1 [(validate.rules).string = {min_bytes: 1}]; - - // Filter specific configuration which depends on the filter being - // instantiated. See the supported filters for further documentation. 
- google.protobuf.Any config = 2; -} diff --git a/generated_api_shadow/envoy/config/filter/network/dubbo_proxy/v2alpha1/route.proto b/generated_api_shadow/envoy/config/filter/network/dubbo_proxy/v2alpha1/route.proto deleted file mode 100644 index 9af461e3577cb..0000000000000 --- a/generated_api_shadow/envoy/config/filter/network/dubbo_proxy/v2alpha1/route.proto +++ /dev/null @@ -1,105 +0,0 @@ -syntax = "proto3"; - -package envoy.config.filter.network.dubbo_proxy.v2alpha1; - -import "envoy/api/v2/route/route_components.proto"; -import "envoy/type/matcher/string.proto"; -import "envoy/type/range.proto"; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.filter.network.dubbo_proxy.v2alpha1"; -option java_outer_classname = "RouteProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = - "envoy.extensions.filters.network.dubbo_proxy.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Dubbo Proxy Route Configuration] -// Dubbo Proxy :ref:`configuration overview `. - -// [#next-free-field: 6] -message RouteConfiguration { - // The name of the route configuration. Reserved for future use in asynchronous route discovery. - string name = 1; - - // The interface name of the service. - string interface = 2; - - // Which group does the interface belong to. - string group = 3; - - // The version number of the interface. - string version = 4; - - // The list of routes that will be matched, in order, against incoming requests. The first route - // that matches will be used. - repeated Route routes = 5; -} - -message Route { - // Route matching parameters. - RouteMatch match = 1 [(validate.rules).message = {required: true}]; - - // Route request to some upstream cluster. 
- RouteAction route = 2 [(validate.rules).message = {required: true}]; -} - -message RouteMatch { - // Method level routing matching. - MethodMatch method = 1; - - // Specifies a set of headers that the route should match on. The router will check the request’s - // headers against all the specified headers in the route config. A match will happen if all the - // headers in the route are present in the request with the same values (or based on presence if - // the value field is not in the config). - repeated api.v2.route.HeaderMatcher headers = 2; -} - -message RouteAction { - oneof cluster_specifier { - option (validate.required) = true; - - // Indicates the upstream cluster to which the request should be routed. - string cluster = 1; - - // Multiple upstream clusters can be specified for a given route. The - // request is routed to one of the upstream clusters based on weights - // assigned to each cluster. - // Currently ClusterWeight only supports the name and weight fields. - api.v2.route.WeightedCluster weighted_clusters = 2; - } -} - -message MethodMatch { - // The parameter matching type. - message ParameterMatchSpecifier { - oneof parameter_match_specifier { - // If specified, header match will be performed based on the value of the header. - string exact_match = 3; - - // If specified, header match will be performed based on range. - // The rule will match if the request header value is within this range. - // The entire request header value must represent an integer in base 10 notation: consisting - // of an optional plus or minus sign followed by a sequence of digits. The rule will not match - // if the header value does not represent an integer. Match will fail for empty values, - // floating point numbers or if only a subsequence of the header value is an integer. 
- // - // Examples: - // - // * For range [-10,0), route will match for header value -1, but not for 0, - // "somestring", 10.9, "-1somestring" - type.Int64Range range_match = 4; - } - } - - // The name of the method. - type.matcher.StringMatcher name = 1; - - // Method parameter definition. - // The key is the parameter index, starting from 0. - // The value is the parameter matching type. - map params_match = 2; -} diff --git a/generated_api_shadow/envoy/config/filter/network/echo/v2/BUILD b/generated_api_shadow/envoy/config/filter/network/echo/v2/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/config/filter/network/echo/v2/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/config/filter/network/echo/v2/echo.proto b/generated_api_shadow/envoy/config/filter/network/echo/v2/echo.proto deleted file mode 100644 index 2b51ce4e18c36..0000000000000 --- a/generated_api_shadow/envoy/config/filter/network/echo/v2/echo.proto +++ /dev/null @@ -1,19 +0,0 @@ -syntax = "proto3"; - -package envoy.config.filter.network.echo.v2; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.config.filter.network.echo.v2"; -option java_outer_classname = "EchoProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.network.echo.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Echo] -// Echo :ref:`configuration overview `. 
-// [#extension: envoy.filters.network.echo] - -message Echo { -} diff --git a/generated_api_shadow/envoy/config/filter/network/ext_authz/v2/BUILD b/generated_api_shadow/envoy/config/filter/network/ext_authz/v2/BUILD deleted file mode 100644 index 83bc0ab960e74..0000000000000 --- a/generated_api_shadow/envoy/config/filter/network/ext_authz/v2/BUILD +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/api/v2/core:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/config/filter/network/ext_authz/v2/ext_authz.proto b/generated_api_shadow/envoy/config/filter/network/ext_authz/v2/ext_authz.proto deleted file mode 100644 index 40cea7061868c..0000000000000 --- a/generated_api_shadow/envoy/config/filter/network/ext_authz/v2/ext_authz.proto +++ /dev/null @@ -1,46 +0,0 @@ -syntax = "proto3"; - -package envoy.config.filter.network.ext_authz.v2; - -import "envoy/api/v2/core/grpc_service.proto"; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.filter.network.ext_authz.v2"; -option java_outer_classname = "ExtAuthzProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = - "envoy.extensions.filters.network.ext_authz.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Network External Authorization ] -// The network layer external authorization service configuration -// :ref:`configuration overview `. -// [#extension: envoy.filters.network.ext_authz] - -// External Authorization filter calls out to an external service over the -// gRPC Authorization API defined by -// :ref:`CheckRequest `. 
-// A failed check will cause this filter to close the TCP connection. -message ExtAuthz { - // The prefix to use when emitting statistics. - string stat_prefix = 1 [(validate.rules).string = {min_bytes: 1}]; - - // The external authorization gRPC service configuration. - // The default timeout is set to 200ms by this filter. - api.v2.core.GrpcService grpc_service = 2; - - // The filter's behaviour in case the external authorization service does - // not respond back. When it is set to true, Envoy will also allow traffic in case of - // communication failure between authorization service and the proxy. - // Defaults to false. - bool failure_mode_allow = 3; - - // Specifies if the peer certificate is sent to the external service. - // - // When this field is true, Envoy will include the peer X.509 certificate, if available, in the - // :ref:`certificate`. - bool include_peer_certificate = 4; -} diff --git a/generated_api_shadow/envoy/config/filter/network/http_connection_manager/v2/BUILD b/generated_api_shadow/envoy/config/filter/network/http_connection_manager/v2/BUILD deleted file mode 100644 index b03bcd437c3df..0000000000000 --- a/generated_api_shadow/envoy/config/filter/network/http_connection_manager/v2/BUILD +++ /dev/null @@ -1,18 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/annotations:pkg", - "//envoy/api/v2:pkg", - "//envoy/api/v2/core:pkg", - "//envoy/config/filter/accesslog/v2:pkg", - "//envoy/config/trace/v2:pkg", - "//envoy/type:pkg", - "//envoy/type/tracing/v2:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.proto b/generated_api_shadow/envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.proto deleted file mode 100644 index 3e7a4dc17769c..0000000000000 --- a/generated_api_shadow/envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.proto +++ /dev/null @@ -1,679 +0,0 @@ -syntax = "proto3"; - -package envoy.config.filter.network.http_connection_manager.v2; - -import "envoy/api/v2/core/config_source.proto"; -import "envoy/api/v2/core/protocol.proto"; -import "envoy/api/v2/route.proto"; -import "envoy/api/v2/scoped_route.proto"; -import "envoy/config/filter/accesslog/v2/accesslog.proto"; -import "envoy/config/trace/v2/http_tracer.proto"; -import "envoy/type/percent.proto"; -import "envoy/type/tracing/v2/custom_tag.proto"; - -import "google/protobuf/any.proto"; -import "google/protobuf/duration.proto"; -import "google/protobuf/struct.proto"; -import "google/protobuf/wrappers.proto"; - -import "envoy/annotations/deprecation.proto"; -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.filter.network.http_connection_manager.v2"; -option java_outer_classname = "HttpConnectionManagerProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = - "envoy.extensions.filters.network.http_connection_manager.v3"; -option 
(udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: HTTP connection manager] -// HTTP connection manager :ref:`configuration overview `. -// [#extension: envoy.filters.network.http_connection_manager] - -// [#next-free-field: 37] -message HttpConnectionManager { - enum CodecType { - // For every new connection, the connection manager will determine which - // codec to use. This mode supports both ALPN for TLS listeners as well as - // protocol inference for plaintext listeners. If ALPN data is available, it - // is preferred, otherwise protocol inference is used. In almost all cases, - // this is the right option to choose for this setting. - AUTO = 0; - - // The connection manager will assume that the client is speaking HTTP/1.1. - HTTP1 = 1; - - // The connection manager will assume that the client is speaking HTTP/2 - // (Envoy does not require HTTP/2 to take place over TLS or to use ALPN. - // Prior knowledge is allowed). - HTTP2 = 2; - - // [#not-implemented-hide:] QUIC implementation is not production ready yet. Use this enum with - // caution to prevent accidental execution of QUIC code. I.e. `!= HTTP2` is no longer sufficient - // to distinguish HTTP1 and HTTP2 traffic. - HTTP3 = 3; - } - - enum ServerHeaderTransformation { - // Overwrite any Server header with the contents of server_name. - OVERWRITE = 0; - - // If no Server header is present, append Server server_name - // If a Server header is present, pass it through. - APPEND_IF_ABSENT = 1; - - // Pass through the value of the server header, and do not append a header - // if none is present. - PASS_THROUGH = 2; - } - - // How to handle the :ref:`config_http_conn_man_headers_x-forwarded-client-cert` (XFCC) HTTP - // header. - enum ForwardClientCertDetails { - // Do not send the XFCC header to the next hop. This is the default value. - SANITIZE = 0; - - // When the client connection is mTLS (Mutual TLS), forward the XFCC header - // in the request. 
- FORWARD_ONLY = 1; - - // When the client connection is mTLS, append the client certificate - // information to the request’s XFCC header and forward it. - APPEND_FORWARD = 2; - - // When the client connection is mTLS, reset the XFCC header with the client - // certificate information and send it to the next hop. - SANITIZE_SET = 3; - - // Always forward the XFCC header in the request, regardless of whether the - // client connection is mTLS. - ALWAYS_FORWARD_ONLY = 4; - } - - // [#next-free-field: 10] - message Tracing { - enum OperationName { - // The HTTP listener is used for ingress/incoming requests. - INGRESS = 0; - - // The HTTP listener is used for egress/outgoing requests. - EGRESS = 1; - } - - // The span name will be derived from this field. If - // :ref:`traffic_direction ` is - // specified on the parent listener, then it is used instead of this field. - // - // .. attention:: - // This field has been deprecated in favor of `traffic_direction`. - OperationName operation_name = 1 [ - deprecated = true, - (validate.rules).enum = {defined_only: true}, - (envoy.annotations.disallowed_by_default) = true - ]; - - // A list of header names used to create tags for the active span. The header name is used to - // populate the tag name, and the header value is used to populate the tag value. The tag is - // created if the specified header name is present in the request's headers. - // - // .. attention:: - // This field has been deprecated in favor of :ref:`custom_tags - // `. - repeated string request_headers_for_tags = 2 [deprecated = true]; - - // Target percentage of requests managed by this HTTP connection manager that will be force - // traced if the :ref:`x-client-trace-id ` - // header is set. This field is a direct analog for the runtime variable - // 'tracing.client_sampling' in the :ref:`HTTP Connection Manager - // `. 
- // Default: 100% - type.Percent client_sampling = 3; - - // Target percentage of requests managed by this HTTP connection manager that will be randomly - // selected for trace generation, if not requested by the client or not forced. This field is - // a direct analog for the runtime variable 'tracing.random_sampling' in the - // :ref:`HTTP Connection Manager `. - // Default: 100% - type.Percent random_sampling = 4; - - // Target percentage of requests managed by this HTTP connection manager that will be traced - // after all other sampling checks have been applied (client-directed, force tracing, random - // sampling). This field functions as an upper limit on the total configured sampling rate. For - // instance, setting client_sampling to 100% but overall_sampling to 1% will result in only 1% - // of client requests with the appropriate headers to be force traced. This field is a direct - // analog for the runtime variable 'tracing.global_enabled' in the - // :ref:`HTTP Connection Manager `. - // Default: 100% - type.Percent overall_sampling = 5; - - // Whether to annotate spans with additional data. If true, spans will include logs for stream - // events. - bool verbose = 6; - - // Maximum length of the request path to extract and include in the HttpUrl tag. Used to - // truncate lengthy request paths to meet the needs of a tracing backend. - // Default: 256 - google.protobuf.UInt32Value max_path_tag_length = 7; - - // A list of custom tags with unique tag name to create tags for the active span. - repeated type.tracing.v2.CustomTag custom_tags = 8; - - // Configuration for an external tracing provider. - // If not specified, no tracing will be performed. - // - // .. attention:: - // Please be aware that *envoy.tracers.opencensus* provider can only be configured once - // in Envoy lifetime. - // Any attempts to reconfigure it or to use different configurations for different HCM filters - // will be rejected. 
- // Such a constraint is inherent to OpenCensus itself. It cannot be overcome without changes - // on OpenCensus side. - trace.v2.Tracing.Http provider = 9; - } - - message InternalAddressConfig { - // Whether unix socket addresses should be considered internal. - bool unix_sockets = 1; - } - - // [#next-free-field: 7] - message SetCurrentClientCertDetails { - reserved 2; - - // Whether to forward the subject of the client cert. Defaults to false. - google.protobuf.BoolValue subject = 1; - - // Whether to forward the entire client cert in URL encoded PEM format. This will appear in the - // XFCC header comma separated from other values with the value Cert="PEM". - // Defaults to false. - bool cert = 3; - - // Whether to forward the entire client cert chain (including the leaf cert) in URL encoded PEM - // format. This will appear in the XFCC header comma separated from other values with the value - // Chain="PEM". - // Defaults to false. - bool chain = 6; - - // Whether to forward the DNS type Subject Alternative Names of the client cert. - // Defaults to false. - bool dns = 4; - - // Whether to forward the URI type Subject Alternative Name of the client cert. Defaults to - // false. - bool uri = 5; - } - - // The configuration for HTTP upgrades. - // For each upgrade type desired, an UpgradeConfig must be added. - // - // .. warning:: - // - // The current implementation of upgrade headers does not handle - // multi-valued upgrade headers. Support for multi-valued headers may be - // added in the future if needed. - // - // .. warning:: - // The current implementation of upgrade headers does not work with HTTP/2 - // upstreams. - message UpgradeConfig { - // The case-insensitive name of this upgrade, e.g. "websocket". - // For each upgrade type present in upgrade_configs, requests with - // Upgrade: [upgrade_type] - // will be proxied upstream. 
- string upgrade_type = 1; - - // If present, this represents the filter chain which will be created for - // this type of upgrade. If no filters are present, the filter chain for - // HTTP connections will be used for this upgrade type. - repeated HttpFilter filters = 2; - - // Determines if upgrades are enabled or disabled by default. Defaults to true. - // This can be overridden on a per-route basis with :ref:`cluster - // ` as documented in the - // :ref:`upgrade documentation `. - google.protobuf.BoolValue enabled = 3; - } - - reserved 27; - - // Supplies the type of codec that the connection manager should use. - CodecType codec_type = 1 [(validate.rules).enum = {defined_only: true}]; - - // The human readable prefix to use when emitting statistics for the - // connection manager. See the :ref:`statistics documentation ` for - // more information. - string stat_prefix = 2 [(validate.rules).string = {min_bytes: 1}]; - - oneof route_specifier { - option (validate.required) = true; - - // The connection manager’s route table will be dynamically loaded via the RDS API. - Rds rds = 3; - - // The route table for the connection manager is static and is specified in this property. - api.v2.RouteConfiguration route_config = 4; - - // A route table will be dynamically assigned to each request based on request attributes - // (e.g., the value of a header). The "routing scopes" (i.e., route tables) and "scope keys" are - // specified in this message. - ScopedRoutes scoped_routes = 31; - } - - // A list of individual HTTP filters that make up the filter chain for - // requests made to the connection manager. :ref:`Order matters ` - // as the filters are processed sequentially as request events happen. - repeated HttpFilter http_filters = 5; - - // Whether the connection manager manipulates the :ref:`config_http_conn_man_headers_user-agent` - // and :ref:`config_http_conn_man_headers_downstream-service-cluster` headers. 
See the linked - // documentation for more information. Defaults to false. - google.protobuf.BoolValue add_user_agent = 6; - - // Presence of the object defines whether the connection manager - // emits :ref:`tracing ` data to the :ref:`configured tracing provider - // `. - Tracing tracing = 7; - - // Additional settings for HTTP requests handled by the connection manager. These will be - // applicable to both HTTP1 and HTTP2 requests. - api.v2.core.HttpProtocolOptions common_http_protocol_options = 35; - - // Additional HTTP/1 settings that are passed to the HTTP/1 codec. - api.v2.core.Http1ProtocolOptions http_protocol_options = 8; - - // Additional HTTP/2 settings that are passed directly to the HTTP/2 codec. - api.v2.core.Http2ProtocolOptions http2_protocol_options = 9; - - // An optional override that the connection manager will write to the server - // header in responses. If not set, the default is *envoy*. - string server_name = 10; - - // Defines the action to be applied to the Server header on the response path. - // By default, Envoy will overwrite the header with the value specified in - // server_name. - ServerHeaderTransformation server_header_transformation = 34 - [(validate.rules).enum = {defined_only: true}]; - - // The maximum request headers size for incoming connections. - // If unconfigured, the default max request headers allowed is 60 KiB. - // Requests that exceed this limit will receive a 431 response. - google.protobuf.UInt32Value max_request_headers_kb = 29 - [(validate.rules).uint32 = {lte: 8192 gt: 0}]; - - // The idle timeout for connections managed by the connection manager. The - // idle timeout is defined as the period in which there are no active - // requests. If not set, there is no idle timeout. When the idle timeout is - // reached the connection will be closed. If the connection is an HTTP/2 - // connection a drain sequence will occur prior to closing the connection. - // This field is deprecated. 
Use :ref:`idle_timeout - // ` - // instead. - google.protobuf.Duration idle_timeout = 11 - [deprecated = true, (envoy.annotations.disallowed_by_default) = true]; - - // The stream idle timeout for connections managed by the connection manager. - // If not specified, this defaults to 5 minutes. The default value was selected - // so as not to interfere with any smaller configured timeouts that may have - // existed in configurations prior to the introduction of this feature, while - // introducing robustness to TCP connections that terminate without a FIN. - // - // This idle timeout applies to new streams and is overridable by the - // :ref:`route-level idle_timeout - // `. Even on a stream in - // which the override applies, prior to receipt of the initial request - // headers, the :ref:`stream_idle_timeout - // ` - // applies. Each time an encode/decode event for headers or data is processed - // for the stream, the timer will be reset. If the timeout fires, the stream - // is terminated with a 408 Request Timeout error code if no upstream response - // header has been received, otherwise a stream reset occurs. - // - // This timeout also specifies the amount of time that Envoy will wait for the peer to open enough - // window to write any remaining stream data once the entirety of stream data (local end stream is - // true) has been buffered pending available window. In other words, this timeout defends against - // a peer that does not release enough window to completely write the stream, even though all - // data has been proxied within available flow control windows. If the timeout is hit in this - // case, the :ref:`tx_flush_timeout ` counter will be - // incremented. Note that :ref:`max_stream_duration - // ` does not apply to this corner - // case. - // - // Note that it is possible to idle timeout even if the wire traffic for a stream is non-idle, due - // to the granularity of events presented to the connection manager. 
For example, while receiving - // very large request headers, it may be the case that there is traffic regularly arriving on the - // wire while the connection manage is only able to observe the end-of-headers event, hence the - // stream may still idle timeout. - // - // A value of 0 will completely disable the connection manager stream idle - // timeout, although per-route idle timeout overrides will continue to apply. - google.protobuf.Duration stream_idle_timeout = 24; - - // The amount of time that Envoy will wait for the entire request to be received. - // The timer is activated when the request is initiated, and is disarmed when the last byte of the - // request is sent upstream (i.e. all decoding filters have processed the request), OR when the - // response is initiated. If not specified or set to 0, this timeout is disabled. - google.protobuf.Duration request_timeout = 28; - - // The time that Envoy will wait between sending an HTTP/2 “shutdown - // notification” (GOAWAY frame with max stream ID) and a final GOAWAY frame. - // This is used so that Envoy provides a grace period for new streams that - // race with the final GOAWAY frame. During this grace period, Envoy will - // continue to accept new streams. After the grace period, a final GOAWAY - // frame is sent and Envoy will start refusing new streams. Draining occurs - // both when a connection hits the idle timeout or during general server - // draining. The default grace period is 5000 milliseconds (5 seconds) if this - // option is not specified. - google.protobuf.Duration drain_timeout = 12; - - // The delayed close timeout is for downstream connections managed by the HTTP connection manager. - // It is defined as a grace period after connection close processing has been locally initiated - // during which Envoy will wait for the peer to close (i.e., a TCP FIN/RST is received by Envoy - // from the downstream connection) prior to Envoy closing the socket associated with that - // connection. 
- // NOTE: This timeout is enforced even when the socket associated with the downstream connection - // is pending a flush of the write buffer. However, any progress made writing data to the socket - // will restart the timer associated with this timeout. This means that the total grace period for - // a socket in this state will be - // +. - // - // Delaying Envoy's connection close and giving the peer the opportunity to initiate the close - // sequence mitigates a race condition that exists when downstream clients do not drain/process - // data in a connection's receive buffer after a remote close has been detected via a socket - // write(). This race leads to such clients failing to process the response code sent by Envoy, - // which could result in erroneous downstream processing. - // - // If the timeout triggers, Envoy will close the connection's socket. - // - // The default timeout is 1000 ms if this option is not specified. - // - // .. NOTE:: - // To be useful in avoiding the race condition described above, this timeout must be set - // to *at least* +<100ms to account for - // a reasonable "worst" case processing time for a full iteration of Envoy's event loop>. - // - // .. WARNING:: - // A value of 0 will completely disable delayed close processing. When disabled, the downstream - // connection's socket will be closed immediately after the write flush is completed or will - // never close if the write flush does not complete. - google.protobuf.Duration delayed_close_timeout = 26; - - // Configuration for :ref:`HTTP access logs ` - // emitted by the connection manager. - repeated accesslog.v2.AccessLog access_log = 13; - - // If set to true, the connection manager will use the real remote address - // of the client connection when determining internal versus external origin and manipulating - // various headers. If set to false or absent, the connection manager will use the - // :ref:`config_http_conn_man_headers_x-forwarded-for` HTTP header. 
See the documentation for - // :ref:`config_http_conn_man_headers_x-forwarded-for`, - // :ref:`config_http_conn_man_headers_x-envoy-internal`, and - // :ref:`config_http_conn_man_headers_x-envoy-external-address` for more information. - google.protobuf.BoolValue use_remote_address = 14; - - // The number of additional ingress proxy hops from the right side of the - // :ref:`config_http_conn_man_headers_x-forwarded-for` HTTP header to trust when - // determining the origin client's IP address. The default is zero if this option - // is not specified. See the documentation for - // :ref:`config_http_conn_man_headers_x-forwarded-for` for more information. - uint32 xff_num_trusted_hops = 19; - - // Configures what network addresses are considered internal for stats and header sanitation - // purposes. If unspecified, only RFC1918 IP addresses will be considered internal. - // See the documentation for :ref:`config_http_conn_man_headers_x-envoy-internal` for more - // information about internal/external addresses. - InternalAddressConfig internal_address_config = 25; - - // If set, Envoy will not append the remote address to the - // :ref:`config_http_conn_man_headers_x-forwarded-for` HTTP header. This may be used in - // conjunction with HTTP filters that explicitly manipulate XFF after the HTTP connection manager - // has mutated the request headers. While :ref:`use_remote_address - // ` - // will also suppress XFF addition, it has consequences for logging and other - // Envoy uses of the remote address, so *skip_xff_append* should be used - // when only an elision of XFF addition is intended. - bool skip_xff_append = 21; - - // Via header value to append to request and response headers. If this is - // empty, no via header will be appended. - string via = 22; - - // Whether the connection manager will generate the :ref:`x-request-id - // ` header if it does not exist. This defaults to - // true. 
Generating a random UUID4 is expensive so in high throughput scenarios where this feature - // is not desired it can be disabled. - google.protobuf.BoolValue generate_request_id = 15; - - // Whether the connection manager will keep the :ref:`x-request-id - // ` header if passed for a request that is edge - // (Edge request is the request from external clients to front Envoy) and not reset it, which - // is the current Envoy behaviour. This defaults to false. - bool preserve_external_request_id = 32; - - // How to handle the :ref:`config_http_conn_man_headers_x-forwarded-client-cert` (XFCC) HTTP - // header. - ForwardClientCertDetails forward_client_cert_details = 16 - [(validate.rules).enum = {defined_only: true}]; - - // This field is valid only when :ref:`forward_client_cert_details - // ` - // is APPEND_FORWARD or SANITIZE_SET and the client connection is mTLS. It specifies the fields in - // the client certificate to be forwarded. Note that in the - // :ref:`config_http_conn_man_headers_x-forwarded-client-cert` header, *Hash* is always set, and - // *By* is always set when the client certificate presents the URI type Subject Alternative Name - // value. - SetCurrentClientCertDetails set_current_client_cert_details = 17; - - // If proxy_100_continue is true, Envoy will proxy incoming "Expect: - // 100-continue" headers upstream, and forward "100 Continue" responses - // downstream. If this is false or not set, Envoy will instead strip the - // "Expect: 100-continue" header, and send a "100 Continue" response itself. - bool proxy_100_continue = 18; - - // If - // :ref:`use_remote_address - // ` - // is true and represent_ipv4_remote_address_as_ipv4_mapped_ipv6 is true and the remote address is - // an IPv4 address, the address will be mapped to IPv6 before it is appended to *x-forwarded-for*. - // This is useful for testing compatibility of upstream services that parse the header value. For - // example, 50.0.0.1 is represented as ::FFFF:50.0.0.1. 
See `IPv4-Mapped IPv6 Addresses - // `_ for details. This will also affect the - // :ref:`config_http_conn_man_headers_x-envoy-external-address` header. See - // :ref:`http_connection_manager.represent_ipv4_remote_address_as_ipv4_mapped_ipv6 - // ` for runtime - // control. - // [#not-implemented-hide:] - bool represent_ipv4_remote_address_as_ipv4_mapped_ipv6 = 20; - - repeated UpgradeConfig upgrade_configs = 23; - - // Should paths be normalized according to RFC 3986 before any processing of - // requests by HTTP filters or routing? This affects the upstream *:path* header - // as well. For paths that fail this check, Envoy will respond with 400 to - // paths that are malformed. This defaults to false currently but will default - // true in the future. When not specified, this value may be overridden by the - // runtime variable - // :ref:`http_connection_manager.normalize_path`. - // See `Normalization and Comparison `_ - // for details of normalization. - // Note that Envoy does not perform - // `case normalization `_ - google.protobuf.BoolValue normalize_path = 30; - - // Determines if adjacent slashes in the path are merged into one before any processing of - // requests by HTTP filters or routing. This affects the upstream *:path* header as well. Without - // setting this option, incoming requests with path `//dir///file` will not match against route - // with `prefix` match set to `/dir`. Defaults to `false`. Note that slash merging is not part of - // `HTTP spec `_ and is provided for convenience. - bool merge_slashes = 33; - - // The configuration of the request ID extension. This includes operations such as - // generation, validation, and associated tracing operations. - // - // If not set, Envoy uses the default UUID-based behavior: - // - // 1. Request ID is propagated using *x-request-id* header. - // - // 2. Request ID is a universally unique identifier (UUID). - // - // 3. Tracing decision (sampled, forced, etc) is set in 14th byte of the UUID. 
- RequestIDExtension request_id_extension = 36; -} - -message Rds { - // Configuration source specifier for RDS. - api.v2.core.ConfigSource config_source = 1 [(validate.rules).message = {required: true}]; - - // The name of the route configuration. This name will be passed to the RDS - // API. This allows an Envoy configuration with multiple HTTP listeners (and - // associated HTTP connection manager filters) to use different route - // configurations. - string route_config_name = 2 [(validate.rules).string = {min_bytes: 1}]; -} - -// This message is used to work around the limitations with 'oneof' and repeated fields. -message ScopedRouteConfigurationsList { - repeated api.v2.ScopedRouteConfiguration scoped_route_configurations = 1 - [(validate.rules).repeated = {min_items: 1}]; -} - -// [#next-free-field: 6] -message ScopedRoutes { - // Specifies the mechanism for constructing "scope keys" based on HTTP request attributes. These - // keys are matched against a set of :ref:`Key` - // objects assembled from :ref:`ScopedRouteConfiguration` - // messages distributed via SRDS (the Scoped Route Discovery Service) or assigned statically via - // :ref:`scoped_route_configurations_list`. - // - // Upon receiving a request's headers, the Router will build a key using the algorithm specified - // by this message. This key will be used to look up the routing table (i.e., the - // :ref:`RouteConfiguration`) to use for the request. - message ScopeKeyBuilder { - // Specifies the mechanism for constructing key fragments which are composed into scope keys. - message FragmentBuilder { - // Specifies how the value of a header should be extracted. - // The following example maps the structure of a header to the fields in this message. - // - // .. code:: - // - // <0> <1> <-- index - // X-Header: a=b;c=d - // | || | - // | || \----> - // | || - // | |\----> - // | | - // | \----> - // | - // \----> - // - // Each 'a=b' key-value pair constitutes an 'element' of the header field. 
- message HeaderValueExtractor { - // Specifies a header field's key value pair to match on. - message KvElement { - // The separator between key and value (e.g., '=' separates 'k=v;...'). - // If an element is an empty string, the element is ignored. - // If an element contains no separator, the whole element is parsed as key and the - // fragment value is an empty string. - // If there are multiple values for a matched key, the first value is returned. - string separator = 1 [(validate.rules).string = {min_bytes: 1}]; - - // The key to match on. - string key = 2 [(validate.rules).string = {min_bytes: 1}]; - } - - // The name of the header field to extract the value from. - // - // .. note:: - // - // If the header appears multiple times only the first value is used. - string name = 1 [(validate.rules).string = {min_bytes: 1}]; - - // The element separator (e.g., ';' separates 'a;b;c;d'). - // Default: empty string. This causes the entirety of the header field to be extracted. - // If this field is set to an empty string and 'index' is used in the oneof below, 'index' - // must be set to 0. - string element_separator = 2; - - oneof extract_type { - // Specifies the zero based index of the element to extract. - // Note Envoy concatenates multiple values of the same header key into a comma separated - // string, the splitting always happens after the concatenation. - uint32 index = 3; - - // Specifies the key value pair to extract the value from. - KvElement element = 4; - } - } - - oneof type { - option (validate.required) = true; - - // Specifies how a header field's value should be extracted. - HeaderValueExtractor header_value_extractor = 1; - } - } - - // The final(built) scope key consists of the ordered union of these fragments, which are compared in order with the - // fragments of a :ref:`ScopedRouteConfiguration`. - // A missing fragment during comparison will make the key invalid, i.e., the computed key doesn't match any key. 
- repeated FragmentBuilder fragments = 1 [(validate.rules).repeated = {min_items: 1}]; - } - - // The name assigned to the scoped routing configuration. - string name = 1 [(validate.rules).string = {min_bytes: 1}]; - - // The algorithm to use for constructing a scope key for each request. - ScopeKeyBuilder scope_key_builder = 2 [(validate.rules).message = {required: true}]; - - // Configuration source specifier for RDS. - // This config source is used to subscribe to RouteConfiguration resources specified in - // ScopedRouteConfiguration messages. - api.v2.core.ConfigSource rds_config_source = 3 [(validate.rules).message = {required: true}]; - - oneof config_specifier { - option (validate.required) = true; - - // The set of routing scopes corresponding to the HCM. A scope is assigned to a request by - // matching a key constructed from the request's attributes according to the algorithm specified - // by the - // :ref:`ScopeKeyBuilder` - // in this message. - ScopedRouteConfigurationsList scoped_route_configurations_list = 4; - - // The set of routing scopes associated with the HCM will be dynamically loaded via the SRDS - // API. A scope is assigned to a request by matching a key constructed from the request's - // attributes according to the algorithm specified by the - // :ref:`ScopeKeyBuilder` - // in this message. - ScopedRds scoped_rds = 5; - } -} - -message ScopedRds { - // Configuration source specifier for scoped RDS. - api.v2.core.ConfigSource scoped_rds_config_source = 1 - [(validate.rules).message = {required: true}]; -} - -message HttpFilter { - reserved 3; - - // The name of the filter to instantiate. The name must match a - // :ref:`supported filter `. - string name = 1 [(validate.rules).string = {min_bytes: 1}]; - - // Filter specific configuration which depends on the filter being instantiated. See the supported - // filters for further documentation. 
- oneof config_type { - google.protobuf.Struct config = 2 [deprecated = true]; - - google.protobuf.Any typed_config = 4; - } -} - -message RequestIDExtension { - // Request ID extension specific configuration. - google.protobuf.Any typed_config = 1; -} diff --git a/generated_api_shadow/envoy/config/filter/network/kafka_broker/v2alpha1/BUILD b/generated_api_shadow/envoy/config/filter/network/kafka_broker/v2alpha1/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/config/filter/network/kafka_broker/v2alpha1/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/config/filter/network/kafka_broker/v2alpha1/kafka_broker.proto b/generated_api_shadow/envoy/config/filter/network/kafka_broker/v2alpha1/kafka_broker.proto deleted file mode 100644 index ea2f60e71eed3..0000000000000 --- a/generated_api_shadow/envoy/config/filter/network/kafka_broker/v2alpha1/kafka_broker.proto +++ /dev/null @@ -1,23 +0,0 @@ -syntax = "proto3"; - -package envoy.config.filter.network.kafka_broker.v2alpha1; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.filter.network.kafka_broker.v2alpha1"; -option java_outer_classname = "KafkaBrokerProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = - "envoy.extensions.filters.network.kafka_broker.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Kafka Broker] -// Kafka Broker :ref:`configuration overview `. 
-// [#extension: envoy.filters.network.kafka_broker] - -message KafkaBroker { - // The prefix to use when emitting :ref:`statistics `. - string stat_prefix = 1 [(validate.rules).string = {min_bytes: 1}]; -} diff --git a/generated_api_shadow/envoy/config/filter/network/local_rate_limit/v2alpha/BUILD b/generated_api_shadow/envoy/config/filter/network/local_rate_limit/v2alpha/BUILD deleted file mode 100644 index 2ffbc958786b3..0000000000000 --- a/generated_api_shadow/envoy/config/filter/network/local_rate_limit/v2alpha/BUILD +++ /dev/null @@ -1,13 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/api/v2/core:pkg", - "//envoy/type:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/config/filter/network/local_rate_limit/v2alpha/local_rate_limit.proto b/generated_api_shadow/envoy/config/filter/network/local_rate_limit/v2alpha/local_rate_limit.proto deleted file mode 100644 index 791b767f3e6aa..0000000000000 --- a/generated_api_shadow/envoy/config/filter/network/local_rate_limit/v2alpha/local_rate_limit.proto +++ /dev/null @@ -1,45 +0,0 @@ -syntax = "proto3"; - -package envoy.config.filter.network.local_rate_limit.v2alpha; - -import "envoy/api/v2/core/base.proto"; -import "envoy/type/token_bucket.proto"; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.filter.network.local_rate_limit.v2alpha"; -option java_outer_classname = "LocalRateLimitProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = - "envoy.extensions.filters.network.local_ratelimit.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Local rate limit] -// Local 
rate limit :ref:`configuration overview `. -// [#extension: envoy.filters.network.local_ratelimit] - -message LocalRateLimit { - // The prefix to use when emitting :ref:`statistics - // `. - string stat_prefix = 1 [(validate.rules).string = {min_bytes: 1}]; - - // The token bucket configuration to use for rate limiting connections that are processed by the - // filter's filter chain. Each incoming connection processed by the filter consumes a single - // token. If the token is available, the connection will be allowed. If no tokens are available, - // the connection will be immediately closed. - // - // .. note:: - // In the current implementation each filter and filter chain has an independent rate limit. - // - // .. note:: - // In the current implementation the token bucket's :ref:`fill_interval - // ` must be >= 50ms to avoid too aggressive - // refills. - type.TokenBucket token_bucket = 2 [(validate.rules).message = {required: true}]; - - // Runtime flag that controls whether the filter is enabled or not. If not specified, defaults - // to enabled. - api.v2.core.RuntimeFeatureFlag runtime_enabled = 3; -} diff --git a/generated_api_shadow/envoy/config/filter/network/mongo_proxy/v2/BUILD b/generated_api_shadow/envoy/config/filter/network/mongo_proxy/v2/BUILD deleted file mode 100644 index b4f275ad5f870..0000000000000 --- a/generated_api_shadow/envoy/config/filter/network/mongo_proxy/v2/BUILD +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/filter/fault/v2:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/config/filter/network/mongo_proxy/v2/mongo_proxy.proto b/generated_api_shadow/envoy/config/filter/network/mongo_proxy/v2/mongo_proxy.proto deleted file mode 100644 index b261897858e21..0000000000000 --- a/generated_api_shadow/envoy/config/filter/network/mongo_proxy/v2/mongo_proxy.proto +++ /dev/null @@ -1,41 +0,0 @@ -syntax = "proto3"; - -package envoy.config.filter.network.mongo_proxy.v2; - -import "envoy/config/filter/fault/v2/fault.proto"; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.filter.network.mongo_proxy.v2"; -option java_outer_classname = "MongoProxyProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = - "envoy.extensions.filters.network.mongo_proxy.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Mongo proxy] -// MongoDB :ref:`configuration overview `. -// [#extension: envoy.filters.network.mongo_proxy] - -message MongoProxy { - // The human readable prefix to use when emitting :ref:`statistics - // `. - string stat_prefix = 1 [(validate.rules).string = {min_bytes: 1}]; - - // The optional path to use for writing Mongo access logs. If not access log - // path is specified no access logs will be written. Note that access log is - // also gated :ref:`runtime `. - string access_log = 2; - - // Inject a fixed delay before proxying a Mongo operation. Delays are - // applied to the following MongoDB operations: Query, Insert, GetMore, - // and KillCursors. 
Once an active delay is in progress, all incoming - // data up until the timer event fires will be a part of the delay. - fault.v2.FaultDelay delay = 3; - - // Flag to specify whether :ref:`dynamic metadata - // ` should be emitted. Defaults to false. - bool emit_dynamic_metadata = 4; -} diff --git a/generated_api_shadow/envoy/config/filter/network/mysql_proxy/v1alpha1/BUILD b/generated_api_shadow/envoy/config/filter/network/mysql_proxy/v1alpha1/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/config/filter/network/mysql_proxy/v1alpha1/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/config/filter/network/mysql_proxy/v1alpha1/mysql_proxy.proto b/generated_api_shadow/envoy/config/filter/network/mysql_proxy/v1alpha1/mysql_proxy.proto deleted file mode 100644 index 78c6b7e971df4..0000000000000 --- a/generated_api_shadow/envoy/config/filter/network/mysql_proxy/v1alpha1/mysql_proxy.proto +++ /dev/null @@ -1,28 +0,0 @@ -syntax = "proto3"; - -package envoy.config.filter.network.mysql_proxy.v1alpha1; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.filter.network.mysql_proxy.v1alpha1"; -option java_outer_classname = "MysqlProxyProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = - "envoy.extensions.filters.network.mysql_proxy.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: MySQL proxy] -// MySQL Proxy :ref:`configuration overview `. 
-// [#extension: envoy.filters.network.mysql_proxy] - -message MySQLProxy { - // The human readable prefix to use when emitting :ref:`statistics - // `. - string stat_prefix = 1 [(validate.rules).string = {min_bytes: 1}]; - - // [#not-implemented-hide:] The optional path to use for writing MySQL access logs. - // If the access log field is empty, access logs will not be written. - string access_log = 2; -} diff --git a/generated_api_shadow/envoy/config/filter/network/rate_limit/v2/BUILD b/generated_api_shadow/envoy/config/filter/network/rate_limit/v2/BUILD deleted file mode 100644 index 6d29e84c421c9..0000000000000 --- a/generated_api_shadow/envoy/config/filter/network/rate_limit/v2/BUILD +++ /dev/null @@ -1,13 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/api/v2/ratelimit:pkg", - "//envoy/config/ratelimit/v2:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/config/filter/network/rate_limit/v2/rate_limit.proto b/generated_api_shadow/envoy/config/filter/network/rate_limit/v2/rate_limit.proto deleted file mode 100644 index aed56c9af6292..0000000000000 --- a/generated_api_shadow/envoy/config/filter/network/rate_limit/v2/rate_limit.proto +++ /dev/null @@ -1,52 +0,0 @@ -syntax = "proto3"; - -package envoy.config.filter.network.rate_limit.v2; - -import "envoy/api/v2/ratelimit/ratelimit.proto"; -import "envoy/config/ratelimit/v2/rls.proto"; - -import "google/protobuf/duration.proto"; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.filter.network.rate_limit.v2"; -option java_outer_classname = "RateLimitProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = - 
"envoy.extensions.filters.network.ratelimit.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Rate limit] -// Rate limit :ref:`configuration overview `. -// [#extension: envoy.filters.network.ratelimit] - -// [#next-free-field: 7] -message RateLimit { - // The prefix to use when emitting :ref:`statistics `. - string stat_prefix = 1 [(validate.rules).string = {min_bytes: 1}]; - - // The rate limit domain to use in the rate limit service request. - string domain = 2 [(validate.rules).string = {min_bytes: 1}]; - - // The rate limit descriptor list to use in the rate limit service request. - repeated api.v2.ratelimit.RateLimitDescriptor descriptors = 3 - [(validate.rules).repeated = {min_items: 1}]; - - // The timeout in milliseconds for the rate limit service RPC. If not - // set, this defaults to 20ms. - google.protobuf.Duration timeout = 4; - - // The filter's behaviour in case the rate limiting service does - // not respond back. When it is set to true, Envoy will not allow traffic in case of - // communication failure between rate limiting service and the proxy. - // Defaults to false. - bool failure_mode_deny = 5; - - // Configuration for an external rate limit service provider. If not - // specified, any calls to the rate limit service will immediately return - // success. - ratelimit.v2.RateLimitServiceConfig rate_limit_service = 6 - [(validate.rules).message = {required: true}]; -} diff --git a/generated_api_shadow/envoy/config/filter/network/rbac/v2/BUILD b/generated_api_shadow/envoy/config/filter/network/rbac/v2/BUILD deleted file mode 100644 index 90082d083a3f6..0000000000000 --- a/generated_api_shadow/envoy/config/filter/network/rbac/v2/BUILD +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/rbac/v2:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/config/filter/network/rbac/v2/rbac.proto b/generated_api_shadow/envoy/config/filter/network/rbac/v2/rbac.proto deleted file mode 100644 index ce86794c71cc3..0000000000000 --- a/generated_api_shadow/envoy/config/filter/network/rbac/v2/rbac.proto +++ /dev/null @@ -1,55 +0,0 @@ -syntax = "proto3"; - -package envoy.config.filter.network.rbac.v2; - -import "envoy/config/rbac/v2/rbac.proto"; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.filter.network.rbac.v2"; -option java_outer_classname = "RbacProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.network.rbac.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: RBAC] -// Role-Based Access Control :ref:`configuration overview `. -// [#extension: envoy.filters.network.rbac] - -// RBAC network filter config. -// -// Header should not be used in rules/shadow_rules in RBAC network filter as -// this information is only available in :ref:`RBAC http filter `. -message RBAC { - enum EnforcementType { - // Apply RBAC policies when the first byte of data arrives on the connection. - ONE_TIME_ON_FIRST_BYTE = 0; - - // Continuously apply RBAC policies as data arrives. Use this mode when - // using RBAC with message oriented protocols such as Mongo, MySQL, Kafka, - // etc. when the protocol decoders emit dynamic metadata such as the - // resources being accessed and the operations on the resources. - CONTINUOUS = 1; - } - - // Specify the RBAC rules to be applied globally. 
- // If absent, no enforcing RBAC policy will be applied. - config.rbac.v2.RBAC rules = 1; - - // Shadow rules are not enforced by the filter but will emit stats and logs - // and can be used for rule testing. - // If absent, no shadow RBAC policy will be applied. - config.rbac.v2.RBAC shadow_rules = 2; - - // The prefix to use when emitting statistics. - string stat_prefix = 3 [(validate.rules).string = {min_bytes: 1}]; - - // RBAC enforcement strategy. By default RBAC will be enforced only once - // when the first byte of data arrives from the downstream. When used in - // conjunction with filters that emit dynamic metadata after decoding - // every payload (e.g., Mongo, MySQL, Kafka) set the enforcement type to - // CONTINUOUS to enforce RBAC policies on every message boundary. - EnforcementType enforcement_type = 4; -} diff --git a/generated_api_shadow/envoy/config/filter/network/redis_proxy/v2/BUILD b/generated_api_shadow/envoy/config/filter/network/redis_proxy/v2/BUILD deleted file mode 100644 index f91701518907a..0000000000000 --- a/generated_api_shadow/envoy/config/filter/network/redis_proxy/v2/BUILD +++ /dev/null @@ -1,13 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/annotations:pkg", - "//envoy/api/v2/core:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/config/filter/network/redis_proxy/v2/redis_proxy.proto b/generated_api_shadow/envoy/config/filter/network/redis_proxy/v2/redis_proxy.proto deleted file mode 100644 index 948d7c349ff00..0000000000000 --- a/generated_api_shadow/envoy/config/filter/network/redis_proxy/v2/redis_proxy.proto +++ /dev/null @@ -1,245 +0,0 @@ -syntax = "proto3"; - -package envoy.config.filter.network.redis_proxy.v2; - -import "envoy/api/v2/core/base.proto"; - -import "google/protobuf/duration.proto"; -import "google/protobuf/wrappers.proto"; - -import "envoy/annotations/deprecation.proto"; -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/sensitive.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.filter.network.redis_proxy.v2"; -option java_outer_classname = "RedisProxyProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = - "envoy.extensions.filters.network.redis_proxy.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Redis Proxy] -// Redis Proxy :ref:`configuration overview `. -// [#extension: envoy.filters.network.redis_proxy] - -// [#next-free-field: 7] -message RedisProxy { - // Redis connection pool settings. - // [#next-free-field: 9] - message ConnPoolSettings { - // ReadPolicy controls how Envoy routes read commands to Redis nodes. This is currently - // supported for Redis Cluster. All ReadPolicy settings except MASTER may return stale data - // because replication is asynchronous and requires some delay. You need to ensure that your - // application can tolerate stale data. 
- enum ReadPolicy { - // Default mode. Read from the current primary node. - MASTER = 0; - - // Read from the primary, but if it is unavailable, read from replica nodes. - PREFER_MASTER = 1; - - // Read from replica nodes. If multiple replica nodes are present within a shard, a random - // node is selected. Healthy nodes have precedent over unhealthy nodes. - REPLICA = 2; - - // Read from the replica nodes (similar to REPLICA), but if all replicas are unavailable (not - // present or unhealthy), read from the primary. - PREFER_REPLICA = 3; - - // Read from any node of the cluster. A random node is selected among the primary and - // replicas, healthy nodes have precedent over unhealthy nodes. - ANY = 4; - } - - // Per-operation timeout in milliseconds. The timer starts when the first - // command of a pipeline is written to the backend connection. Each response received from Redis - // resets the timer since it signifies that the next command is being processed by the backend. - // The only exception to this behavior is when a connection to a backend is not yet established. - // In that case, the connect timeout on the cluster will govern the timeout until the connection - // is ready. - google.protobuf.Duration op_timeout = 1 [(validate.rules).duration = {required: true}]; - - // Use hash tagging on every redis key to guarantee that keys with the same hash tag will be - // forwarded to the same upstream. The hash key used for determining the upstream in a - // consistent hash ring configuration will be computed from the hash tagged key instead of the - // whole key. The algorithm used to compute the hash tag is identical to the `redis-cluster - // implementation `_. 
- // - // Examples: - // - // * '{user1000}.following' and '{user1000}.followers' **will** be sent to the same upstream - // * '{user1000}.following' and '{user1001}.following' **might** be sent to the same upstream - bool enable_hashtagging = 2; - - // Accept `moved and ask redirection - // `_ errors from upstream - // redis servers, and retry commands to the specified target server. The target server does not - // need to be known to the cluster manager. If the command cannot be redirected, then the - // original error is passed downstream unchanged. By default, this support is not enabled. - bool enable_redirection = 3; - - // Maximum size of encoded request buffer before flush is triggered and encoded requests - // are sent upstream. If this is unset, the buffer flushes whenever it receives data - // and performs no batching. - // This feature makes it possible for multiple clients to send requests to Envoy and have - // them batched- for example if one is running several worker processes, each with its own - // Redis connection. There is no benefit to using this with a single downstream process. - // Recommended size (if enabled) is 1024 bytes. - uint32 max_buffer_size_before_flush = 4; - - // The encoded request buffer is flushed N milliseconds after the first request has been - // encoded, unless the buffer size has already exceeded `max_buffer_size_before_flush`. - // If `max_buffer_size_before_flush` is not set, this flush timer is not used. Otherwise, - // the timer should be set according to the number of clients, overall request rate and - // desired maximum latency for a single command. For example, if there are many requests - // being batched together at a high rate, the buffer will likely be filled before the timer - // fires. Alternatively, if the request rate is lower the buffer will not be filled as often - // before the timer fires. - // If `max_buffer_size_before_flush` is set, but `buffer_flush_timeout` is not, the latter - // defaults to 3ms. 
- google.protobuf.Duration buffer_flush_timeout = 5; - - // `max_upstream_unknown_connections` controls how many upstream connections to unknown hosts - // can be created at any given time by any given worker thread (see `enable_redirection` for - // more details). If the host is unknown and a connection cannot be created due to enforcing - // this limit, then redirection will fail and the original redirection error will be passed - // downstream unchanged. This limit defaults to 100. - google.protobuf.UInt32Value max_upstream_unknown_connections = 6; - - // Enable per-command statistics per upstream cluster, in addition to the filter level aggregate - // count. - bool enable_command_stats = 8; - - // Read policy. The default is to read from the primary. - ReadPolicy read_policy = 7 [(validate.rules).enum = {defined_only: true}]; - } - - message PrefixRoutes { - message Route { - // The router is capable of shadowing traffic from one cluster to another. The current - // implementation is "fire and forget," meaning Envoy will not wait for the shadow cluster to - // respond before returning the response from the primary cluster. All normal statistics are - // collected for the shadow cluster making this feature useful for testing. - message RequestMirrorPolicy { - // Specifies the cluster that requests will be mirrored to. The cluster must - // exist in the cluster manager configuration. - string cluster = 1 [(validate.rules).string = {min_bytes: 1}]; - - // If not specified or the runtime key is not present, all requests to the target cluster - // will be mirrored. - // - // If specified, Envoy will lookup the runtime key to get the percentage of requests to the - // mirror. - api.v2.core.RuntimeFractionalPercent runtime_fraction = 2; - - // Set this to TRUE to only mirror write commands, this is effectively replicating the - // writes in a "fire and forget" manner. - bool exclude_read_commands = 3; - } - - // String prefix that must match the beginning of the keys. 
Envoy will always favor the - // longest match. - string prefix = 1; - - // Indicates if the prefix needs to be removed from the key when forwarded. - bool remove_prefix = 2; - - // Upstream cluster to forward the command to. - string cluster = 3 [(validate.rules).string = {min_bytes: 1}]; - - // Indicates that the route has a request mirroring policy. - repeated RequestMirrorPolicy request_mirror_policy = 4; - } - - // List of prefix routes. - repeated Route routes = 1; - - // Indicates that prefix matching should be case insensitive. - bool case_insensitive = 2; - - // Optional catch-all route to forward commands that doesn't match any of the routes. The - // catch-all route becomes required when no routes are specified. - // .. attention:: - // - // This field is deprecated. Use a :ref:`catch_all - // route` - // instead. - string catch_all_cluster = 3 - [deprecated = true, (envoy.annotations.disallowed_by_default) = true]; - - // Optional catch-all route to forward commands that doesn't match any of the routes. The - // catch-all route becomes required when no routes are specified. - Route catch_all_route = 4; - } - - // The prefix to use when emitting :ref:`statistics `. - string stat_prefix = 1 [(validate.rules).string = {min_bytes: 1}]; - - // Name of cluster from cluster manager. See the :ref:`configuration section - // ` of the architecture overview for recommendations on - // configuring the backing cluster. - // - // .. attention:: - // - // This field is deprecated. Use a :ref:`catch_all - // route` - // instead. - string cluster = 2 [deprecated = true, (envoy.annotations.disallowed_by_default) = true]; - - // Network settings for the connection pool to the upstream clusters. - ConnPoolSettings settings = 3 [(validate.rules).message = {required: true}]; - - // Indicates that latency stat should be computed in microseconds. By default it is computed in - // milliseconds. 
- bool latency_in_micros = 4; - - // List of **unique** prefixes used to separate keys from different workloads to different - // clusters. Envoy will always favor the longest match first in case of overlap. A catch-all - // cluster can be used to forward commands when there is no match. Time complexity of the - // lookups are in O(min(longest key prefix, key length)). - // - // Example: - // - // .. code-block:: yaml - // - // prefix_routes: - // routes: - // - prefix: "ab" - // cluster: "cluster_a" - // - prefix: "abc" - // cluster: "cluster_b" - // - // When using the above routes, the following prefixes would be sent to: - // - // * ``get abc:users`` would retrieve the key 'abc:users' from cluster_b. - // * ``get ab:users`` would retrieve the key 'ab:users' from cluster_a. - // * ``get z:users`` would return a NoUpstreamHost error. A :ref:`catch-all - // route` - // would have retrieved the key from that cluster instead. - // - // See the :ref:`configuration section - // ` of the architecture overview for recommendations on - // configuring the backing clusters. - PrefixRoutes prefix_routes = 5; - - // Authenticate Redis client connections locally by forcing downstream clients to issue a `Redis - // AUTH command `_ with this password before enabling any other - // command. If an AUTH command's password matches this password, an "OK" response will be returned - // to the client. If the AUTH command password does not match this password, then an "ERR invalid - // password" error will be returned. If any other command is received before AUTH when this - // password is set, then a "NOAUTH Authentication required." error response will be sent to the - // client. If an AUTH command is received when the password is not set, then an "ERR Client sent - // AUTH, but no password is set" error will be returned. 
- api.v2.core.DataSource downstream_auth_password = 6 [(udpa.annotations.sensitive) = true]; -} - -// RedisProtocolOptions specifies Redis upstream protocol options. This object is used in -// :ref:`typed_extension_protocol_options`, -// keyed by the name `envoy.filters.network.redis_proxy`. -message RedisProtocolOptions { - // Upstream server password as defined by the `requirepass` directive - // `_ in the server's configuration file. - api.v2.core.DataSource auth_password = 1 [(udpa.annotations.sensitive) = true]; -} diff --git a/generated_api_shadow/envoy/config/filter/network/sni_cluster/v2/BUILD b/generated_api_shadow/envoy/config/filter/network/sni_cluster/v2/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/config/filter/network/sni_cluster/v2/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/config/filter/network/sni_cluster/v2/sni_cluster.proto b/generated_api_shadow/envoy/config/filter/network/sni_cluster/v2/sni_cluster.proto deleted file mode 100644 index 71c161fc48f69..0000000000000 --- a/generated_api_shadow/envoy/config/filter/network/sni_cluster/v2/sni_cluster.proto +++ /dev/null @@ -1,20 +0,0 @@ -syntax = "proto3"; - -package envoy.config.filter.network.sni_cluster.v2; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.config.filter.network.sni_cluster.v2"; -option java_outer_classname = "SniClusterProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = - "envoy.extensions.filters.network.sni_cluster.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// 
[#protodoc-title: SNI Cluster Filter] -// Set the upstream cluster name from the SNI field in the TLS connection. -// [#extension: envoy.filters.network.sni_cluster] - -message SniCluster { -} diff --git a/generated_api_shadow/envoy/config/filter/network/tcp_proxy/v2/BUILD b/generated_api_shadow/envoy/config/filter/network/tcp_proxy/v2/BUILD deleted file mode 100644 index c02167a174de9..0000000000000 --- a/generated_api_shadow/envoy/config/filter/network/tcp_proxy/v2/BUILD +++ /dev/null @@ -1,14 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/api/v2/core:pkg", - "//envoy/config/filter/accesslog/v2:pkg", - "//envoy/type:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/config/filter/network/tcp_proxy/v2/tcp_proxy.proto b/generated_api_shadow/envoy/config/filter/network/tcp_proxy/v2/tcp_proxy.proto deleted file mode 100644 index 4ec68f320eed7..0000000000000 --- a/generated_api_shadow/envoy/config/filter/network/tcp_proxy/v2/tcp_proxy.proto +++ /dev/null @@ -1,184 +0,0 @@ -syntax = "proto3"; - -package envoy.config.filter.network.tcp_proxy.v2; - -import "envoy/api/v2/core/address.proto"; -import "envoy/api/v2/core/base.proto"; -import "envoy/config/filter/accesslog/v2/accesslog.proto"; -import "envoy/type/hash_policy.proto"; - -import "google/protobuf/duration.proto"; -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.filter.network.tcp_proxy.v2"; -option java_outer_classname = "TcpProxyProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = - "envoy.extensions.filters.network.tcp_proxy.v3"; -option 
(udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: TCP Proxy] -// TCP Proxy :ref:`configuration overview `. -// [#extension: envoy.filters.network.tcp_proxy] - -// [#next-free-field: 13] -message TcpProxy { - // [#not-implemented-hide:] Deprecated. - // TCP Proxy filter configuration using V1 format. - message DeprecatedV1 { - option deprecated = true; - - // A TCP proxy route consists of a set of optional L4 criteria and the - // name of a cluster. If a downstream connection matches all the - // specified criteria, the cluster in the route is used for the - // corresponding upstream connection. Routes are tried in the order - // specified until a match is found. If no match is found, the connection - // is closed. A route with no criteria is valid and always produces a - // match. - // [#next-free-field: 6] - message TCPRoute { - // The cluster to connect to when a the downstream network connection - // matches the specified criteria. - string cluster = 1 [(validate.rules).string = {min_bytes: 1}]; - - // An optional list of IP address subnets in the form - // “ip_address/xx”. The criteria is satisfied if the destination IP - // address of the downstream connection is contained in at least one of - // the specified subnets. If the parameter is not specified or the list - // is empty, the destination IP address is ignored. The destination IP - // address of the downstream connection might be different from the - // addresses on which the proxy is listening if the connection has been - // redirected. - repeated api.v2.core.CidrRange destination_ip_list = 2; - - // An optional string containing a comma-separated list of port numbers - // or ranges. The criteria is satisfied if the destination port of the - // downstream connection is contained in at least one of the specified - // ranges. If the parameter is not specified, the destination port is - // ignored. 
The destination port address of the downstream connection - // might be different from the port on which the proxy is listening if - // the connection has been redirected. - string destination_ports = 3; - - // An optional list of IP address subnets in the form - // “ip_address/xx”. The criteria is satisfied if the source IP address - // of the downstream connection is contained in at least one of the - // specified subnets. If the parameter is not specified or the list is - // empty, the source IP address is ignored. - repeated api.v2.core.CidrRange source_ip_list = 4; - - // An optional string containing a comma-separated list of port numbers - // or ranges. The criteria is satisfied if the source port of the - // downstream connection is contained in at least one of the specified - // ranges. If the parameter is not specified, the source port is - // ignored. - string source_ports = 5; - } - - // The route table for the filter. All filter instances must have a route - // table, even if it is empty. - repeated TCPRoute routes = 1 [(validate.rules).repeated = {min_items: 1}]; - } - - // Allows for specification of multiple upstream clusters along with weights - // that indicate the percentage of traffic to be forwarded to each cluster. - // The router selects an upstream cluster based on these weights. - message WeightedCluster { - message ClusterWeight { - // Name of the upstream cluster. - string name = 1 [(validate.rules).string = {min_bytes: 1}]; - - // When a request matches the route, the choice of an upstream cluster is - // determined by its weight. The sum of weights across all entries in the - // clusters array determines the total weight. - uint32 weight = 2 [(validate.rules).uint32 = {gte: 1}]; - - // Optional endpoint metadata match criteria used by the subset load balancer. Only endpoints - // in the upstream cluster with metadata matching what is set in this field will be considered - // for load balancing. 
Note that this will be merged with what's provided in - // :ref:`TcpProxy.metadata_match - // `, with values - // here taking precedence. The filter name should be specified as *envoy.lb*. - api.v2.core.Metadata metadata_match = 3; - } - - // Specifies one or more upstream clusters associated with the route. - repeated ClusterWeight clusters = 1 [(validate.rules).repeated = {min_items: 1}]; - } - - // Configuration for tunneling TCP over other transports or application layers. - // Currently, only HTTP/2 is supported. When other options exist, HTTP/2 will - // remain the default. - message TunnelingConfig { - // The hostname to send in the synthesized CONNECT headers to the upstream proxy. - string hostname = 1 [(validate.rules).string = {min_bytes: 1}]; - } - - // The prefix to use when emitting :ref:`statistics - // `. - string stat_prefix = 1 [(validate.rules).string = {min_bytes: 1}]; - - oneof cluster_specifier { - option (validate.required) = true; - - // The upstream cluster to connect to. - string cluster = 2; - - // Multiple upstream clusters can be specified for a given route. The - // request is routed to one of the upstream clusters based on weights - // assigned to each cluster. - WeightedCluster weighted_clusters = 10; - } - - // Optional endpoint metadata match criteria. Only endpoints in the upstream - // cluster with metadata matching that set in metadata_match will be - // considered. The filter name should be specified as *envoy.lb*. - api.v2.core.Metadata metadata_match = 9; - - // The idle timeout for connections managed by the TCP proxy filter. The idle timeout - // is defined as the period in which there are no bytes sent or received on either - // the upstream or downstream connection. If not set, the default idle timeout is 1 hour. If set - // to 0s, the timeout will be disabled. - // - // .. warning:: - // Disabling this timeout has a highly likelihood of yielding connection leaks due to lost TCP - // FIN packets, etc. 
- google.protobuf.Duration idle_timeout = 8; - - // [#not-implemented-hide:] The idle timeout for connections managed by the TCP proxy - // filter. The idle timeout is defined as the period in which there is no - // active traffic. If not set, there is no idle timeout. When the idle timeout - // is reached the connection will be closed. The distinction between - // downstream_idle_timeout/upstream_idle_timeout provides a means to set - // timeout based on the last byte sent on the downstream/upstream connection. - google.protobuf.Duration downstream_idle_timeout = 3; - - // [#not-implemented-hide:] - google.protobuf.Duration upstream_idle_timeout = 4; - - // Configuration for :ref:`access logs ` - // emitted by the this tcp_proxy. - repeated accesslog.v2.AccessLog access_log = 5; - - // [#not-implemented-hide:] Deprecated. - DeprecatedV1 deprecated_v1 = 6 [deprecated = true]; - - // The maximum number of unsuccessful connection attempts that will be made before - // giving up. If the parameter is not specified, 1 connection attempt will be made. - google.protobuf.UInt32Value max_connect_attempts = 7 [(validate.rules).uint32 = {gte: 1}]; - - // Optional configuration for TCP proxy hash policy. If hash_policy is not set, the hash-based - // load balancing algorithms will select a host randomly. Currently the number of hash policies is - // limited to 1. - repeated type.HashPolicy hash_policy = 11 [(validate.rules).repeated = {max_items: 1}]; - - // [#not-implemented-hide:] feature in progress - // If set, this configures tunneling, e.g. configuration options to tunnel multiple TCP - // payloads over a shared HTTP/2 tunnel. If this message is absent, the payload - // will be proxied upstream as per usual. 
- TunnelingConfig tunneling_config = 12; -} diff --git a/generated_api_shadow/envoy/config/filter/network/thrift_proxy/v2alpha1/BUILD b/generated_api_shadow/envoy/config/filter/network/thrift_proxy/v2alpha1/BUILD deleted file mode 100644 index 1e485f4e158ab..0000000000000 --- a/generated_api_shadow/envoy/config/filter/network/thrift_proxy/v2alpha1/BUILD +++ /dev/null @@ -1,13 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/api/v2/core:pkg", - "//envoy/api/v2/route:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/config/filter/network/thrift_proxy/v2alpha1/README.md b/generated_api_shadow/envoy/config/filter/network/thrift_proxy/v2alpha1/README.md deleted file mode 100644 index a7d95c0d47640..0000000000000 --- a/generated_api_shadow/envoy/config/filter/network/thrift_proxy/v2alpha1/README.md +++ /dev/null @@ -1 +0,0 @@ -Protocol buffer definitions for the Thrift proxy. 
diff --git a/generated_api_shadow/envoy/config/filter/network/thrift_proxy/v2alpha1/route.proto b/generated_api_shadow/envoy/config/filter/network/thrift_proxy/v2alpha1/route.proto deleted file mode 100644 index 8230a52e341e7..0000000000000 --- a/generated_api_shadow/envoy/config/filter/network/thrift_proxy/v2alpha1/route.proto +++ /dev/null @@ -1,141 +0,0 @@ -syntax = "proto3"; - -package envoy.config.filter.network.thrift_proxy.v2alpha1; - -import "envoy/api/v2/core/base.proto"; -import "envoy/api/v2/route/route_components.proto"; - -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.filter.network.thrift_proxy.v2alpha1"; -option java_outer_classname = "RouteProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = - "envoy.extensions.filters.network.thrift_proxy.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Thrift Proxy Route Configuration] -// Thrift Proxy :ref:`configuration overview `. - -message RouteConfiguration { - // The name of the route configuration. Reserved for future use in asynchronous route discovery. - string name = 1; - - // The list of routes that will be matched, in order, against incoming requests. The first route - // that matches will be used. - repeated Route routes = 2; -} - -message Route { - // Route matching parameters. - RouteMatch match = 1 [(validate.rules).message = {required: true}]; - - // Route request to some upstream cluster. - RouteAction route = 2 [(validate.rules).message = {required: true}]; -} - -message RouteMatch { - oneof match_specifier { - option (validate.required) = true; - - // If specified, the route must exactly match the request method name. As a special case, an - // empty string matches any request method name. 
- string method_name = 1; - - // If specified, the route must have the service name as the request method name prefix. As a - // special case, an empty string matches any service name. Only relevant when service - // multiplexing. - string service_name = 2; - } - - // Inverts whatever matching is done in the :ref:`method_name - // ` or - // :ref:`service_name - // ` fields. - // Cannot be combined with wildcard matching as that would result in routes never being matched. - // - // .. note:: - // - // This does not invert matching done as part of the :ref:`headers field - // ` field. To - // invert header matching, see :ref:`invert_match - // `. - bool invert = 3; - - // Specifies a set of headers that the route should match on. The router will check the request’s - // headers against all the specified headers in the route config. A match will happen if all the - // headers in the route are present in the request with the same values (or based on presence if - // the value field is not in the config). Note that this only applies for Thrift transports and/or - // protocols that support headers. - repeated api.v2.route.HeaderMatcher headers = 4; -} - -// [#next-free-field: 7] -message RouteAction { - oneof cluster_specifier { - option (validate.required) = true; - - // Indicates a single upstream cluster to which the request should be routed - // to. - string cluster = 1 [(validate.rules).string = {min_bytes: 1}]; - - // Multiple upstream clusters can be specified for a given route. The - // request is routed to one of the upstream clusters based on weights - // assigned to each cluster. - WeightedCluster weighted_clusters = 2; - - // Envoy will determine the cluster to route to by reading the value of the - // Thrift header named by cluster_header from the request headers. If the - // header is not found or the referenced cluster does not exist Envoy will - // respond with an unknown method exception or an internal error exception, - // respectively. 
- string cluster_header = 6 [(validate.rules).string = {min_bytes: 1}]; - } - - // Optional endpoint metadata match criteria used by the subset load balancer. Only endpoints in - // the upstream cluster with metadata matching what is set in this field will be considered. - // Note that this will be merged with what's provided in :ref:`WeightedCluster.metadata_match - // `, - // with values there taking precedence. Keys and values should be provided under the "envoy.lb" - // metadata key. - api.v2.core.Metadata metadata_match = 3; - - // Specifies a set of rate limit configurations that could be applied to the route. - // N.B. Thrift service or method name matching can be achieved by specifying a RequestHeaders - // action with the header name ":method-name". - repeated api.v2.route.RateLimit rate_limits = 4; - - // Strip the service prefix from the method name, if there's a prefix. For - // example, the method call Service:method would end up being just method. - bool strip_service_name = 5; -} - -// Allows for specification of multiple upstream clusters along with weights that indicate the -// percentage of traffic to be forwarded to each cluster. The router selects an upstream cluster -// based on these weights. -message WeightedCluster { - message ClusterWeight { - // Name of the upstream cluster. - string name = 1 [(validate.rules).string = {min_bytes: 1}]; - - // When a request matches the route, the choice of an upstream cluster is determined by its - // weight. The sum of weights across all entries in the clusters array determines the total - // weight. - google.protobuf.UInt32Value weight = 2 [(validate.rules).uint32 = {gte: 1}]; - - // Optional endpoint metadata match criteria used by the subset load balancer. Only endpoints in - // the upstream cluster with metadata matching what is set in this field, combined with what's - // provided in :ref:`RouteAction's metadata_match - // `, - // will be considered. Values here will take precedence. 
Keys and values should be provided - // under the "envoy.lb" metadata key. - api.v2.core.Metadata metadata_match = 3; - } - - // Specifies one or more upstream clusters associated with the route. - repeated ClusterWeight clusters = 1 [(validate.rules).repeated = {min_items: 1}]; -} diff --git a/generated_api_shadow/envoy/config/filter/network/thrift_proxy/v2alpha1/thrift_proxy.proto b/generated_api_shadow/envoy/config/filter/network/thrift_proxy/v2alpha1/thrift_proxy.proto deleted file mode 100644 index 96e750ef310d7..0000000000000 --- a/generated_api_shadow/envoy/config/filter/network/thrift_proxy/v2alpha1/thrift_proxy.proto +++ /dev/null @@ -1,121 +0,0 @@ -syntax = "proto3"; - -package envoy.config.filter.network.thrift_proxy.v2alpha1; - -import "envoy/config/filter/network/thrift_proxy/v2alpha1/route.proto"; - -import "google/protobuf/any.proto"; -import "google/protobuf/struct.proto"; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.filter.network.thrift_proxy.v2alpha1"; -option java_outer_classname = "ThriftProxyProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = - "envoy.extensions.filters.network.thrift_proxy.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Thrift Proxy] -// Thrift Proxy :ref:`configuration overview `. -// [#extension: envoy.filters.network.thrift_proxy] - -// Thrift transport types supported by Envoy. -enum TransportType { - // For downstream connections, the Thrift proxy will attempt to determine which transport to use. - // For upstream connections, the Thrift proxy will use same transport as the downstream - // connection. - AUTO_TRANSPORT = 0; - - // The Thrift proxy will use the Thrift framed transport. - FRAMED = 1; - - // The Thrift proxy will use the Thrift unframed transport. 
- UNFRAMED = 2; - - // The Thrift proxy will assume the client is using the Thrift header transport. - HEADER = 3; -} - -// Thrift Protocol types supported by Envoy. -enum ProtocolType { - // For downstream connections, the Thrift proxy will attempt to determine which protocol to use. - // Note that the older, non-strict (or lax) binary protocol is not included in automatic protocol - // detection. For upstream connections, the Thrift proxy will use the same protocol as the - // downstream connection. - AUTO_PROTOCOL = 0; - - // The Thrift proxy will use the Thrift binary protocol. - BINARY = 1; - - // The Thrift proxy will use Thrift non-strict binary protocol. - LAX_BINARY = 2; - - // The Thrift proxy will use the Thrift compact protocol. - COMPACT = 3; - - // The Thrift proxy will use the Thrift "Twitter" protocol implemented by the finagle library. - TWITTER = 4; -} - -// [#next-free-field: 6] -message ThriftProxy { - // Supplies the type of transport that the Thrift proxy should use. Defaults to - // :ref:`AUTO_TRANSPORT`. - TransportType transport = 2 [(validate.rules).enum = {defined_only: true}]; - - // Supplies the type of protocol that the Thrift proxy should use. Defaults to - // :ref:`AUTO_PROTOCOL`. - ProtocolType protocol = 3 [(validate.rules).enum = {defined_only: true}]; - - // The human readable prefix to use when emitting statistics. - string stat_prefix = 1 [(validate.rules).string = {min_bytes: 1}]; - - // The route table for the connection manager is static and is specified in this property. - RouteConfiguration route_config = 4; - - // A list of individual Thrift filters that make up the filter chain for requests made to the - // Thrift proxy. Order matters as the filters are processed sequentially. For backwards - // compatibility, if no thrift_filters are specified, a default Thrift router filter - // (`envoy.filters.thrift.router`) is used. - repeated ThriftFilter thrift_filters = 5; -} - -// ThriftFilter configures a Thrift filter. 
-message ThriftFilter { - // The name of the filter to instantiate. The name must match a supported - // filter. The built-in filters are: - // - // [#comment:TODO(zuercher): Auto generate the following list] - // * :ref:`envoy.filters.thrift.router ` - // * :ref:`envoy.filters.thrift.rate_limit ` - string name = 1 [(validate.rules).string = {min_bytes: 1}]; - - // Filter specific configuration which depends on the filter being instantiated. See the supported - // filters for further documentation. - oneof config_type { - google.protobuf.Struct config = 2 [deprecated = true]; - - google.protobuf.Any typed_config = 3; - } -} - -// ThriftProtocolOptions specifies Thrift upstream protocol options. This object is used in -// in -// :ref:`typed_extension_protocol_options`, -// keyed by the name `envoy.filters.network.thrift_proxy`. -message ThriftProtocolOptions { - // Supplies the type of transport that the Thrift proxy should use for upstream connections. - // Selecting - // :ref:`AUTO_TRANSPORT`, - // which is the default, causes the proxy to use the same transport as the downstream connection. - TransportType transport = 1 [(validate.rules).enum = {defined_only: true}]; - - // Supplies the type of protocol that the Thrift proxy should use for upstream connections. - // Selecting - // :ref:`AUTO_PROTOCOL`, - // which is the default, causes the proxy to use the same protocol as the downstream connection. - ProtocolType protocol = 2 [(validate.rules).enum = {defined_only: true}]; -} diff --git a/generated_api_shadow/envoy/config/filter/network/zookeeper_proxy/v1alpha1/BUILD b/generated_api_shadow/envoy/config/filter/network/zookeeper_proxy/v1alpha1/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/config/filter/network/zookeeper_proxy/v1alpha1/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/config/filter/network/zookeeper_proxy/v1alpha1/zookeeper_proxy.proto b/generated_api_shadow/envoy/config/filter/network/zookeeper_proxy/v1alpha1/zookeeper_proxy.proto deleted file mode 100644 index cae622cecc34e..0000000000000 --- a/generated_api_shadow/envoy/config/filter/network/zookeeper_proxy/v1alpha1/zookeeper_proxy.proto +++ /dev/null @@ -1,40 +0,0 @@ -syntax = "proto3"; - -package envoy.config.filter.network.zookeeper_proxy.v1alpha1; - -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.filter.network.zookeeper_proxy.v1alpha1"; -option java_outer_classname = "ZookeeperProxyProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = - "envoy.extensions.filters.network.zookeeper_proxy.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: ZooKeeper proxy] -// ZooKeeper Proxy :ref:`configuration overview `. -// [#extension: envoy.filters.network.zookeeper_proxy] - -message ZooKeeperProxy { - // The human readable prefix to use when emitting :ref:`statistics - // `. - string stat_prefix = 1 [(validate.rules).string = {min_bytes: 1}]; - - // [#not-implemented-hide:] The optional path to use for writing ZooKeeper access logs. - // If the access log field is empty, access logs will not be written. - string access_log = 2; - - // Messages — requests, responses and events — that are bigger than this value will - // be ignored. If it is not set, the default value is 1Mb. 
- // - // The value here should match the jute.maxbuffer property in your cluster configuration: - // - // https://zookeeper.apache.org/doc/r3.4.10/zookeeperAdmin.html#Unsafe+Options - // - // if that is set. If it isn't, ZooKeeper's default is also 1Mb. - google.protobuf.UInt32Value max_packet_bytes = 3; -} diff --git a/generated_api_shadow/envoy/config/filter/thrift/rate_limit/v2alpha1/BUILD b/generated_api_shadow/envoy/config/filter/thrift/rate_limit/v2alpha1/BUILD deleted file mode 100644 index 5b66057a82cd7..0000000000000 --- a/generated_api_shadow/envoy/config/filter/thrift/rate_limit/v2alpha1/BUILD +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/ratelimit/v2:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/config/filter/thrift/rate_limit/v2alpha1/rate_limit.proto b/generated_api_shadow/envoy/config/filter/thrift/rate_limit/v2alpha1/rate_limit.proto deleted file mode 100644 index 389ddf35990ed..0000000000000 --- a/generated_api_shadow/envoy/config/filter/thrift/rate_limit/v2alpha1/rate_limit.proto +++ /dev/null @@ -1,55 +0,0 @@ -syntax = "proto3"; - -package envoy.config.filter.thrift.rate_limit.v2alpha1; - -import "envoy/config/ratelimit/v2/rls.proto"; - -import "google/protobuf/duration.proto"; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.filter.thrift.rate_limit.v2alpha1"; -option java_outer_classname = "RateLimitProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = - "envoy.extensions.filters.network.thrift_proxy.filters.ratelimit.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// 
[#protodoc-title: Rate limit] -// Rate limit :ref:`configuration overview `. -// [#extension: envoy.filters.thrift.ratelimit] - -// [#next-free-field: 6] -message RateLimit { - // The rate limit domain to use in the rate limit service request. - string domain = 1 [(validate.rules).string = {min_bytes: 1}]; - - // Specifies the rate limit configuration stage. Each configured rate limit filter performs a - // rate limit check using descriptors configured in the - // :ref:`envoy_api_msg_config.filter.network.thrift_proxy.v2alpha1.RouteAction` for the request. - // Only those entries with a matching stage number are used for a given filter. If not set, the - // default stage number is 0. - // - // .. note:: - // - // The filter supports a range of 0 - 10 inclusively for stage numbers. - uint32 stage = 2 [(validate.rules).uint32 = {lte: 10}]; - - // The timeout in milliseconds for the rate limit service RPC. If not - // set, this defaults to 20ms. - google.protobuf.Duration timeout = 3; - - // The filter's behaviour in case the rate limiting service does - // not respond back. When it is set to true, Envoy will not allow traffic in case of - // communication failure between rate limiting service and the proxy. - // Defaults to false. - bool failure_mode_deny = 4; - - // Configuration for an external rate limit service provider. If not - // specified, any calls to the rate limit service will immediately return - // success. - ratelimit.v2.RateLimitServiceConfig rate_limit_service = 5 - [(validate.rules).message = {required: true}]; -} diff --git a/generated_api_shadow/envoy/config/filter/thrift/router/v2alpha1/BUILD b/generated_api_shadow/envoy/config/filter/thrift/router/v2alpha1/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/config/filter/thrift/router/v2alpha1/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/config/filter/thrift/router/v2alpha1/router.proto b/generated_api_shadow/envoy/config/filter/thrift/router/v2alpha1/router.proto deleted file mode 100644 index 5463ab6513bee..0000000000000 --- a/generated_api_shadow/envoy/config/filter/thrift/router/v2alpha1/router.proto +++ /dev/null @@ -1,17 +0,0 @@ -syntax = "proto3"; - -package envoy.config.filter.thrift.router.v2alpha1; - -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.config.filter.thrift.router.v2alpha1"; -option java_outer_classname = "RouterProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Router] -// Thrift router :ref:`configuration overview `. -// [#extension: envoy.filters.thrift.router] - -message Router { -} diff --git a/generated_api_shadow/envoy/config/filter/udp/udp_proxy/v2alpha/BUILD b/generated_api_shadow/envoy/config/filter/udp/udp_proxy/v2alpha/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/config/filter/udp/udp_proxy/v2alpha/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/config/filter/udp/udp_proxy/v2alpha/udp_proxy.proto b/generated_api_shadow/envoy/config/filter/udp/udp_proxy/v2alpha/udp_proxy.proto deleted file mode 100644 index 06dc150d5c70b..0000000000000 --- a/generated_api_shadow/envoy/config/filter/udp/udp_proxy/v2alpha/udp_proxy.proto +++ /dev/null @@ -1,37 +0,0 @@ -syntax = "proto3"; - -package envoy.config.filter.udp.udp_proxy.v2alpha; - -import "google/protobuf/duration.proto"; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.filter.udp.udp_proxy.v2alpha"; -option java_outer_classname = "UdpProxyProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = - "envoy.extensions.filters.udp.udp_proxy.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: UDP proxy] -// UDP proxy :ref:`configuration overview `. -// [#extension: envoy.filters.udp_listener.udp_proxy] - -// Configuration for the UDP proxy filter. -message UdpProxyConfig { - // The stat prefix used when emitting UDP proxy filter stats. - string stat_prefix = 1 [(validate.rules).string = {min_bytes: 1}]; - - oneof route_specifier { - option (validate.required) = true; - - // The upstream cluster to connect to. - string cluster = 2 [(validate.rules).string = {min_bytes: 1}]; - } - - // The idle timeout for sessions. Idle is defined as no datagrams between received or sent by - // the session. The default if not specified is 1 minute. 
- google.protobuf.Duration idle_timeout = 3; -} diff --git a/generated_api_shadow/envoy/config/grpc_credential/v2alpha/BUILD b/generated_api_shadow/envoy/config/grpc_credential/v2alpha/BUILD deleted file mode 100644 index 83bc0ab960e74..0000000000000 --- a/generated_api_shadow/envoy/config/grpc_credential/v2alpha/BUILD +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/api/v2/core:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/config/grpc_credential/v2alpha/aws_iam.proto b/generated_api_shadow/envoy/config/grpc_credential/v2alpha/aws_iam.proto deleted file mode 100644 index b63d35af4018b..0000000000000 --- a/generated_api_shadow/envoy/config/grpc_credential/v2alpha/aws_iam.proto +++ /dev/null @@ -1,31 +0,0 @@ -syntax = "proto3"; - -package envoy.config.grpc_credential.v2alpha; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.grpc_credential.v2alpha"; -option java_outer_classname = "AwsIamProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Grpc Credentials AWS IAM] -// Configuration for AWS IAM Grpc Credentials Plugin -// [#extension: envoy.grpc_credentials.aws_iam] - -message AwsIamConfig { - // The `service namespace - // `_ - // of the Grpc endpoint. - // - // Example: appmesh - string service_name = 1 [(validate.rules).string = {min_bytes: 1}]; - - // The `region `_ hosting the Grpc - // endpoint. If unspecified, the extension will use the value in the ``AWS_REGION`` environment - // variable. 
- // - // Example: us-west-2 - string region = 2; -} diff --git a/generated_api_shadow/envoy/config/grpc_credential/v2alpha/file_based_metadata.proto b/generated_api_shadow/envoy/config/grpc_credential/v2alpha/file_based_metadata.proto deleted file mode 100644 index 41e67f0bf24b3..0000000000000 --- a/generated_api_shadow/envoy/config/grpc_credential/v2alpha/file_based_metadata.proto +++ /dev/null @@ -1,31 +0,0 @@ -syntax = "proto3"; - -package envoy.config.grpc_credential.v2alpha; - -import "envoy/api/v2/core/base.proto"; - -import "udpa/annotations/sensitive.proto"; -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.config.grpc_credential.v2alpha"; -option java_outer_classname = "FileBasedMetadataProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Grpc Credentials File Based Metadata] -// Configuration for File Based Metadata Grpc Credentials Plugin -// [#extension: envoy.grpc_credentials.file_based_metadata] - -message FileBasedMetadataConfig { - // Location or inline data of secret to use for authentication of the Google gRPC connection - // this secret will be attached to a header of the gRPC connection - api.v2.core.DataSource secret_data = 1 [(udpa.annotations.sensitive) = true]; - - // Metadata header key to use for sending the secret data - // if no header key is set, "authorization" header will be used - string header_key = 2; - - // Prefix to prepend to the secret in the metadata header - // if no prefix is set, the default is to use no prefix - string header_prefix = 3; -} diff --git a/generated_api_shadow/envoy/config/grpc_credential/v3/BUILD b/generated_api_shadow/envoy/config/grpc_credential/v3/BUILD deleted file mode 100644 index 1c1a6f6b44235..0000000000000 --- a/generated_api_shadow/envoy/config/grpc_credential/v3/BUILD +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/core/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/config/grpc_credential/v3/aws_iam.proto b/generated_api_shadow/envoy/config/grpc_credential/v3/aws_iam.proto deleted file mode 100644 index e2e9c7da48331..0000000000000 --- a/generated_api_shadow/envoy/config/grpc_credential/v3/aws_iam.proto +++ /dev/null @@ -1,35 +0,0 @@ -syntax = "proto3"; - -package envoy.config.grpc_credential.v3; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.grpc_credential.v3"; -option java_outer_classname = "AwsIamProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Grpc Credentials AWS IAM] -// Configuration for AWS IAM Grpc Credentials Plugin -// [#extension: envoy.grpc_credentials.aws_iam] - -message AwsIamConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.grpc_credential.v2alpha.AwsIamConfig"; - - // The `service namespace - // `_ - // of the Grpc endpoint. - // - // Example: appmesh - string service_name = 1 [(validate.rules).string = {min_len: 1}]; - - // The `region `_ hosting the Grpc - // endpoint. If unspecified, the extension will use the value in the ``AWS_REGION`` environment - // variable. 
- // - // Example: us-west-2 - string region = 2; -} diff --git a/generated_api_shadow/envoy/config/grpc_credential/v3/file_based_metadata.proto b/generated_api_shadow/envoy/config/grpc_credential/v3/file_based_metadata.proto deleted file mode 100644 index b364d2917099b..0000000000000 --- a/generated_api_shadow/envoy/config/grpc_credential/v3/file_based_metadata.proto +++ /dev/null @@ -1,35 +0,0 @@ -syntax = "proto3"; - -package envoy.config.grpc_credential.v3; - -import "envoy/config/core/v3/base.proto"; - -import "udpa/annotations/sensitive.proto"; -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; - -option java_package = "io.envoyproxy.envoy.config.grpc_credential.v3"; -option java_outer_classname = "FileBasedMetadataProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Grpc Credentials File Based Metadata] -// Configuration for File Based Metadata Grpc Credentials Plugin -// [#extension: envoy.grpc_credentials.file_based_metadata] - -message FileBasedMetadataConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.grpc_credential.v2alpha.FileBasedMetadataConfig"; - - // Location or inline data of secret to use for authentication of the Google gRPC connection - // this secret will be attached to a header of the gRPC connection - core.v3.DataSource secret_data = 1 [(udpa.annotations.sensitive) = true]; - - // Metadata header key to use for sending the secret data - // if no header key is set, "authorization" header will be used - string header_key = 2; - - // Prefix to prepend to the secret in the metadata header - // if no prefix is set, the default is to use no prefix - string header_prefix = 3; -} diff --git a/generated_api_shadow/envoy/config/health_checker/redis/v2/BUILD b/generated_api_shadow/envoy/config/health_checker/redis/v2/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- 
a/generated_api_shadow/envoy/config/health_checker/redis/v2/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/config/health_checker/redis/v2/redis.proto b/generated_api_shadow/envoy/config/health_checker/redis/v2/redis.proto deleted file mode 100644 index 0c569f5c75e8c..0000000000000 --- a/generated_api_shadow/envoy/config/health_checker/redis/v2/redis.proto +++ /dev/null @@ -1,22 +0,0 @@ -syntax = "proto3"; - -package envoy.config.health_checker.redis.v2; - -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.config.health_checker.redis.v2"; -option java_outer_classname = "RedisProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Redis] -// Redis health checker :ref:`configuration overview `. -// [#extension: envoy.health_checkers.redis] - -message Redis { - // If set, optionally perform ``EXISTS `` instead of ``PING``. A return value - // from Redis of 0 (does not exist) is considered a passing healthcheck. A return value other - // than 0 is considered a failure. This allows the user to mark a Redis instance for maintenance - // by setting the specified key to any value and waiting for traffic to drain. - string key = 1; -} diff --git a/generated_api_shadow/envoy/config/listener/v2/BUILD b/generated_api_shadow/envoy/config/listener/v2/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/config/listener/v2/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/config/listener/v2/api_listener.proto b/generated_api_shadow/envoy/config/listener/v2/api_listener.proto deleted file mode 100644 index 6709d5fe0b524..0000000000000 --- a/generated_api_shadow/envoy/config/listener/v2/api_listener.proto +++ /dev/null @@ -1,30 +0,0 @@ -syntax = "proto3"; - -package envoy.config.listener.v2; - -import "google/protobuf/any.proto"; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.config.listener.v2"; -option java_outer_classname = "ApiListenerProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = "envoy.config.listener.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: API listener] - -// Describes a type of API listener, which is used in non-proxy clients. The type of API -// exposed to the non-proxy application depends on the type of API listener. -message ApiListener { - // The type in this field determines the type of API listener. At present, the following - // types are supported: - // envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager (HTTP) - // [#next-major-version: In the v3 API, replace this Any field with a oneof containing the - // specific config message for each type of API listener. We could not do this in v2 because - // it would have caused circular dependencies for go protos: lds.proto depends on this file, - // and http_connection_manager.proto depends on rds.proto, which is in the same directory as - // lds.proto, so lds.proto cannot depend on this file.] 
- google.protobuf.Any api_listener = 1; -} diff --git a/generated_api_shadow/envoy/config/listener/v3/BUILD b/generated_api_shadow/envoy/config/listener/v3/BUILD deleted file mode 100644 index 3367a7bd5c595..0000000000000 --- a/generated_api_shadow/envoy/config/listener/v3/BUILD +++ /dev/null @@ -1,20 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/annotations:pkg", - "//envoy/api/v2:pkg", - "//envoy/api/v2/listener:pkg", - "//envoy/config/accesslog/v3:pkg", - "//envoy/config/core/v3:pkg", - "//envoy/config/listener/v2:pkg", - "//envoy/extensions/transport_sockets/tls/v3:pkg", - "//envoy/type/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - "@com_github_cncf_udpa//xds/core/v3:pkg", - ], -) diff --git a/generated_api_shadow/envoy/config/listener/v3/api_listener.proto b/generated_api_shadow/envoy/config/listener/v3/api_listener.proto deleted file mode 100644 index 77db7caaff5c0..0000000000000 --- a/generated_api_shadow/envoy/config/listener/v3/api_listener.proto +++ /dev/null @@ -1,33 +0,0 @@ -syntax = "proto3"; - -package envoy.config.listener.v3; - -import "google/protobuf/any.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; - -option java_package = "io.envoyproxy.envoy.config.listener.v3"; -option java_outer_classname = "ApiListenerProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: API listener] - -// Describes a type of API listener, which is used in non-proxy clients. The type of API -// exposed to the non-proxy application depends on the type of API listener. 
-message ApiListener { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.listener.v2.ApiListener"; - - // The type in this field determines the type of API listener. At present, the following - // types are supported: - // envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager (HTTP) - // envoy.extensions.filters.network.http_connection_manager.v3.EnvoyMobileHttpConnectionManager (HTTP) - // [#next-major-version: In the v3 API, replace this Any field with a oneof containing the - // specific config message for each type of API listener. We could not do this in v2 because - // it would have caused circular dependencies for go protos: lds.proto depends on this file, - // and http_connection_manager.proto depends on rds.proto, which is in the same directory as - // lds.proto, so lds.proto cannot depend on this file.] - google.protobuf.Any api_listener = 1; -} diff --git a/generated_api_shadow/envoy/config/listener/v3/listener.proto b/generated_api_shadow/envoy/config/listener/v3/listener.proto deleted file mode 100644 index a5cd4bfe976f7..0000000000000 --- a/generated_api_shadow/envoy/config/listener/v3/listener.proto +++ /dev/null @@ -1,318 +0,0 @@ -syntax = "proto3"; - -package envoy.config.listener.v3; - -import "envoy/config/accesslog/v3/accesslog.proto"; -import "envoy/config/core/v3/address.proto"; -import "envoy/config/core/v3/base.proto"; -import "envoy/config/core/v3/socket_option.proto"; -import "envoy/config/listener/v3/api_listener.proto"; -import "envoy/config/listener/v3/listener_components.proto"; -import "envoy/config/listener/v3/udp_listener_config.proto"; - -import "google/protobuf/duration.proto"; -import "google/protobuf/wrappers.proto"; - -import "xds/core/v3/collection_entry.proto"; - -import "envoy/annotations/deprecation.proto"; -import "udpa/annotations/security.proto"; -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; 
- -option java_package = "io.envoyproxy.envoy.config.listener.v3"; -option java_outer_classname = "ListenerProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Listener configuration] -// Listener :ref:`configuration overview ` - -// Listener list collections. Entries are *Listener* resources or references. -// [#not-implemented-hide:] -message ListenerCollection { - repeated xds.core.v3.CollectionEntry entries = 1; -} - -// [#next-free-field: 30] -message Listener { - option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.Listener"; - - enum DrainType { - // Drain in response to calling /healthcheck/fail admin endpoint (along with the health check - // filter), listener removal/modification, and hot restart. - DEFAULT = 0; - - // Drain in response to listener removal/modification and hot restart. This setting does not - // include /healthcheck/fail. This setting may be desirable if Envoy is hosting both ingress - // and egress listeners. - MODIFY_ONLY = 1; - } - - // [#not-implemented-hide:] - message DeprecatedV1 { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.Listener.DeprecatedV1"; - - // Whether the listener should bind to the port. A listener that doesn't - // bind can only receive connections redirected from other listeners that - // set use_original_dst parameter to true. Default is true. - // - // This is deprecated. Use :ref:`Listener.bind_to_port - // ` - google.protobuf.BoolValue bind_to_port = 1; - } - - // Configuration for listener connection balancing. - message ConnectionBalanceConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.Listener.ConnectionBalanceConfig"; - - // A connection balancer implementation that does exact balancing. This means that a lock is - // held during balancing so that connection counts are nearly exactly balanced between worker - // threads. 
This is "nearly" exact in the sense that a connection might close in parallel thus - // making the counts incorrect, but this should be rectified on the next accept. This balancer - // sacrifices accept throughput for accuracy and should be used when there are a small number of - // connections that rarely cycle (e.g., service mesh gRPC egress). - message ExactBalance { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.Listener.ConnectionBalanceConfig.ExactBalance"; - } - - oneof balance_type { - option (validate.required) = true; - - // If specified, the listener will use the exact connection balancer. - ExactBalance exact_balance = 1; - } - } - - // Configuration for envoy internal listener. All the future internal listener features should be added here. - // [#not-implemented-hide:] - message InternalListenerConfig { - } - - reserved 14, 23; - - // The unique name by which this listener is known. If no name is provided, - // Envoy will allocate an internal UUID for the listener. If the listener is to be dynamically - // updated or removed via :ref:`LDS ` a unique name must be provided. - string name = 1; - - // The address that the listener should listen on. In general, the address must be unique, though - // that is governed by the bind rules of the OS. E.g., multiple listeners can listen on port 0 on - // Linux as the actual port will be allocated by the OS. - core.v3.Address address = 2 [(validate.rules).message = {required: true}]; - - // Optional prefix to use on listener stats. If empty, the stats will be rooted at - // `listener.
.`. If non-empty, stats will be rooted at - // `listener..`. - string stat_prefix = 28; - - // A list of filter chains to consider for this listener. The - // :ref:`FilterChain ` with the most specific - // :ref:`FilterChainMatch ` criteria is used on a - // connection. - // - // Example using SNI for filter chain selection can be found in the - // :ref:`FAQ entry `. - repeated FilterChain filter_chains = 3; - - // If a connection is redirected using *iptables*, the port on which the proxy - // receives it might be different from the original destination address. When this flag is set to - // true, the listener hands off redirected connections to the listener associated with the - // original destination address. If there is no listener associated with the original destination - // address, the connection is handled by the listener that receives it. Defaults to false. - google.protobuf.BoolValue use_original_dst = 4; - - // The default filter chain if none of the filter chain matches. If no default filter chain is supplied, - // the connection will be closed. The filter chain match is ignored in this field. - FilterChain default_filter_chain = 25; - - // Soft limit on size of the listener’s new connection read and write buffers. - // If unspecified, an implementation defined default is applied (1MiB). - google.protobuf.UInt32Value per_connection_buffer_limit_bytes = 5 - [(udpa.annotations.security).configure_for_untrusted_downstream = true]; - - // Listener metadata. - core.v3.Metadata metadata = 6; - - // [#not-implemented-hide:] - DeprecatedV1 deprecated_v1 = 7 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - - // The type of draining to perform at a listener-wide level. - DrainType drain_type = 8; - - // Listener filters have the opportunity to manipulate and augment the connection metadata that - // is used in connection filter chain matching, for example. These filters are run before any in - // :ref:`filter_chains `. 
Order matters as the - // filters are processed sequentially right after a socket has been accepted by the listener, and - // before a connection is created. - // UDP Listener filters can be specified when the protocol in the listener socket address in - // :ref:`protocol ` is :ref:`UDP - // `. - // UDP listeners currently support a single filter. - repeated ListenerFilter listener_filters = 9; - - // The timeout to wait for all listener filters to complete operation. If the timeout is reached, - // the accepted socket is closed without a connection being created unless - // `continue_on_listener_filters_timeout` is set to true. Specify 0 to disable the - // timeout. If not specified, a default timeout of 15s is used. - google.protobuf.Duration listener_filters_timeout = 15; - - // Whether a connection should be created when listener filters timeout. Default is false. - // - // .. attention:: - // - // Some listener filters, such as :ref:`Proxy Protocol filter - // `, should not be used with this option. It will cause - // unexpected behavior when a connection is created. - bool continue_on_listener_filters_timeout = 17; - - // Whether the listener should be set as a transparent socket. - // When this flag is set to true, connections can be redirected to the listener using an - // *iptables* *TPROXY* target, in which case the original source and destination addresses and - // ports are preserved on accepted connections. This flag should be used in combination with - // :ref:`an original_dst ` :ref:`listener filter - // ` to mark the connections' local addresses as - // "restored." This can be used to hand off each redirected connection to another listener - // associated with the connection's destination address. Direct connections to the socket without - // using *TPROXY* cannot be distinguished from connections redirected using *TPROXY* and are - // therefore treated as if they were redirected. 
- // When this flag is set to false, the listener's socket is explicitly reset as non-transparent. - // Setting this flag requires Envoy to run with the *CAP_NET_ADMIN* capability. - // When this flag is not set (default), the socket is not modified, i.e. the transparent option - // is neither set nor reset. - google.protobuf.BoolValue transparent = 10; - - // Whether the listener should set the *IP_FREEBIND* socket option. When this - // flag is set to true, listeners can be bound to an IP address that is not - // configured on the system running Envoy. When this flag is set to false, the - // option *IP_FREEBIND* is disabled on the socket. When this flag is not set - // (default), the socket is not modified, i.e. the option is neither enabled - // nor disabled. - google.protobuf.BoolValue freebind = 11; - - // Additional socket options that may not be present in Envoy source code or - // precompiled binaries. - repeated core.v3.SocketOption socket_options = 13; - - // Whether the listener should accept TCP Fast Open (TFO) connections. - // When this flag is set to a value greater than 0, the option TCP_FASTOPEN is enabled on - // the socket, with a queue length of the specified size - // (see `details in RFC7413 `_). - // When this flag is set to 0, the option TCP_FASTOPEN is disabled on the socket. - // When this flag is not set (default), the socket is not modified, - // i.e. the option is neither enabled nor disabled. - // - // On Linux, the net.ipv4.tcp_fastopen kernel parameter must include flag 0x2 to enable - // TCP_FASTOPEN. - // See `ip-sysctl.txt `_. - // - // On macOS, only values of 0, 1, and unset are valid; other values may result in an error. - // To set the queue length on macOS, set the net.inet.tcp.fastopen_backlog kernel parameter. - google.protobuf.UInt32Value tcp_fast_open_queue_length = 12; - - // Specifies the intended direction of the traffic relative to the local Envoy. 
- // This property is required on Windows for listeners using the original destination filter, - // see :ref:`Original Destination `. - core.v3.TrafficDirection traffic_direction = 16; - - // If the protocol in the listener socket address in :ref:`protocol - // ` is :ref:`UDP - // `, this field specifies UDP - // listener specific configuration. - UdpListenerConfig udp_listener_config = 18; - - // Used to represent an API listener, which is used in non-proxy clients. The type of API - // exposed to the non-proxy application depends on the type of API listener. - // When this field is set, no other field except for :ref:`name` - // should be set. - // - // .. note:: - // - // Currently only one ApiListener can be installed; and it can only be done via bootstrap config, - // not LDS. - // - // [#next-major-version: In the v3 API, instead of this messy approach where the socket - // listener fields are directly in the top-level Listener message and the API listener types - // are in the ApiListener message, the socket listener messages should be in their own message, - // and the top-level Listener should essentially be a oneof that selects between the - // socket listener and the various types of API listener. That way, a given Listener message - // can structurally only contain the fields of the relevant type.] - ApiListener api_listener = 19; - - // The listener's connection balancer configuration, currently only applicable to TCP listeners. - // If no configuration is specified, Envoy will not attempt to balance active connections between - // worker threads. - // - // In the scenario that the listener X redirects all the connections to the listeners Y1 and Y2 - // by setting :ref:`use_original_dst ` in X - // and :ref:`bind_to_port ` to false in Y1 and Y2, - // it is recommended to disable the balance config in listener X to avoid the cost of balancing, and - // enable the balance config in Y1 and Y2 to balance the connections among the workers. 
- ConnectionBalanceConfig connection_balance_config = 20; - - // Deprecated. Use `enable_reuse_port` instead. - bool reuse_port = 21 [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - - // When this flag is set to true, listeners set the *SO_REUSEPORT* socket option and - // create one socket for each worker thread. This makes inbound connections - // distribute among worker threads roughly evenly in cases where there are a high number - // of connections. When this flag is set to false, all worker threads share one socket. This field - // defaults to true. - // - // .. attention:: - // - // Although this field defaults to true, it has different behavior on different platforms. See - // the following text for more information. - // - // * On Linux, reuse_port is respected for both TCP and UDP listeners. It also works correctly - // with hot restart. - // * On macOS, reuse_port for TCP does not do what it does on Linux. Instead of load balancing, - // the last socket wins and receives all connections/packets. For TCP, reuse_port is force - // disabled and the user is warned. For UDP, it is enabled, but only one worker will receive - // packets. For QUIC/H3, SW routing will send packets to other workers. For "raw" UDP, only - // a single worker will currently receive packets. - // * On Windows, reuse_port for TCP has undefined behavior. It is force disabled and the user - // is warned similar to macOS. It is left enabled for UDP with undefined behavior currently. - google.protobuf.BoolValue enable_reuse_port = 29; - - // Configuration for :ref:`access logs ` - // emitted by this listener. - repeated accesslog.v3.AccessLog access_log = 22; - - // The maximum length a tcp listener's pending connections queue can grow to. If no value is - // provided net.core.somaxconn will be used on Linux and 128 otherwise. - google.protobuf.UInt32Value tcp_backlog_size = 24; - - // Whether the listener should bind to the port. 
A listener that doesn't - // bind can only receive connections redirected from other listeners that set - // :ref:`use_original_dst ` - // to true. Default is true. - google.protobuf.BoolValue bind_to_port = 26; - - // The exclusive listener type and the corresponding config. - // TODO(lambdai): https://github.com/envoyproxy/envoy/issues/15372 - // Will create and add TcpListenerConfig. Will add UdpListenerConfig and ApiListener. - // [#not-implemented-hide:] - oneof listener_specifier { - // Used to represent an internal listener which does not listen on OSI L4 address but can be used by the - // :ref:`envoy cluster ` to create a user space connection to. - // The internal listener acts as a tcp listener. It supports listener filters and network filter chains. - // The internal listener require :ref:`address ` has - // field `envoy_internal_address`. - // - // There are some limitations are derived from the implementation. The known limitations include - // - // * :ref:`ConnectionBalanceConfig ` is not - // allowed because both cluster connection and listener connection must be owned by the same dispatcher. 
- // * :ref:`tcp_backlog_size ` - // * :ref:`freebind ` - // * :ref:`transparent ` - // [#not-implemented-hide:] - InternalListenerConfig internal_listener = 27; - } -} diff --git a/generated_api_shadow/envoy/config/listener/v3/listener_components.proto b/generated_api_shadow/envoy/config/listener/v3/listener_components.proto deleted file mode 100644 index 1e7e205bfded9..0000000000000 --- a/generated_api_shadow/envoy/config/listener/v3/listener_components.proto +++ /dev/null @@ -1,361 +0,0 @@ -syntax = "proto3"; - -package envoy.config.listener.v3; - -import "envoy/config/core/v3/address.proto"; -import "envoy/config/core/v3/base.proto"; -import "envoy/config/core/v3/extension.proto"; -import "envoy/extensions/transport_sockets/tls/v3/tls.proto"; -import "envoy/type/v3/range.proto"; - -import "google/protobuf/any.proto"; -import "google/protobuf/duration.proto"; -import "google/protobuf/struct.proto"; -import "google/protobuf/wrappers.proto"; - -import "envoy/annotations/deprecation.proto"; -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.listener.v3"; -option java_outer_classname = "ListenerComponentsProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Listener components] -// Listener :ref:`configuration overview ` - -// [#next-free-field: 6] -message Filter { - option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.listener.Filter"; - - reserved 3; - - // The name of the filter to instantiate. The name must match a - // :ref:`supported filter `. - string name = 1 [(validate.rules).string = {min_len: 1}]; - - oneof config_type { - // Filter specific configuration which depends on the filter being - // instantiated. See the supported filters for further documentation. 
- // [#extension-category: envoy.filters.network] - google.protobuf.Any typed_config = 4; - - // Configuration source specifier for an extension configuration discovery - // service. In case of a failure and without the default configuration, the - // listener closes the connections. - // [#not-implemented-hide:] - core.v3.ExtensionConfigSource config_discovery = 5; - - google.protobuf.Struct hidden_envoy_deprecated_config = 2 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - } -} - -// Specifies the match criteria for selecting a specific filter chain for a -// listener. -// -// In order for a filter chain to be selected, *ALL* of its criteria must be -// fulfilled by the incoming connection, properties of which are set by the -// networking stack and/or listener filters. -// -// The following order applies: -// -// 1. Destination port. -// 2. Destination IP address. -// 3. Server name (e.g. SNI for TLS protocol), -// 4. Transport protocol. -// 5. Application protocols (e.g. ALPN for TLS protocol). -// 6. Directly connected source IP address (this will only be different from the source IP address -// when using a listener filter that overrides the source address, such as the :ref:`Proxy Protocol -// listener filter `). -// 7. Source type (e.g. any, local or external network). -// 8. Source IP address. -// 9. Source port. -// -// For criteria that allow ranges or wildcards, the most specific value in any -// of the configured filter chains that matches the incoming connection is going -// to be used (e.g. for SNI ``www.example.com`` the most specific match would be -// ``www.example.com``, then ``*.example.com``, then ``*.com``, then any filter -// chain without ``server_names`` requirements). -// -// A different way to reason about the filter chain matches: -// Suppose there exists N filter chains. Prune the filter chain set using the above 8 steps. 
-// In each step, filter chains which most specifically matches the attributes continue to the next step. -// The listener guarantees at most 1 filter chain is left after all of the steps. -// -// Example: -// -// For destination port, filter chains specifying the destination port of incoming traffic are the -// most specific match. If none of the filter chains specifies the exact destination port, the filter -// chains which do not specify ports are the most specific match. Filter chains specifying the -// wrong port can never be the most specific match. -// -// [#comment: Implemented rules are kept in the preference order, with deprecated fields -// listed at the end, because that's how we want to list them in the docs. -// -// [#comment:TODO(PiotrSikora): Add support for configurable precedence of the rules] -// [#next-free-field: 14] -message FilterChainMatch { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.listener.FilterChainMatch"; - - enum ConnectionSourceType { - // Any connection source matches. - ANY = 0; - - // Match a connection originating from the same host. - SAME_IP_OR_LOOPBACK = 1; - - // Match a connection originating from a different host. - EXTERNAL = 2; - } - - reserved 1; - - // Optional destination port to consider when use_original_dst is set on the - // listener in determining a filter chain match. - google.protobuf.UInt32Value destination_port = 8 [(validate.rules).uint32 = {lte: 65535 gte: 1}]; - - // If non-empty, an IP address and prefix length to match addresses when the - // listener is bound to 0.0.0.0/:: or when use_original_dst is specified. - repeated core.v3.CidrRange prefix_ranges = 3; - - // If non-empty, an IP address and suffix length to match addresses when the - // listener is bound to 0.0.0.0/:: or when use_original_dst is specified. 
- // [#not-implemented-hide:] - string address_suffix = 4; - - // [#not-implemented-hide:] - google.protobuf.UInt32Value suffix_len = 5; - - // The criteria is satisfied if the directly connected source IP address of the downstream - // connection is contained in at least one of the specified subnets. If the parameter is not - // specified or the list is empty, the directly connected source IP address is ignored. - repeated core.v3.CidrRange direct_source_prefix_ranges = 13; - - // Specifies the connection source IP match type. Can be any, local or external network. - ConnectionSourceType source_type = 12 [(validate.rules).enum = {defined_only: true}]; - - // The criteria is satisfied if the source IP address of the downstream - // connection is contained in at least one of the specified subnets. If the - // parameter is not specified or the list is empty, the source IP address is - // ignored. - repeated core.v3.CidrRange source_prefix_ranges = 6; - - // The criteria is satisfied if the source port of the downstream connection - // is contained in at least one of the specified ports. If the parameter is - // not specified, the source port is ignored. - repeated uint32 source_ports = 7 - [(validate.rules).repeated = {items {uint32 {lte: 65535 gte: 1}}}]; - - // If non-empty, a list of server names (e.g. SNI for TLS protocol) to consider when determining - // a filter chain match. Those values will be compared against the server names of a new - // connection, when detected by one of the listener filters. - // - // The server name will be matched against all wildcard domains, i.e. ``www.example.com`` - // will be first matched against ``www.example.com``, then ``*.example.com``, then ``*.com``. - // - // Note that partial wildcards are not supported, and values like ``*w.example.com`` are invalid. - // - // .. attention:: - // - // See the :ref:`FAQ entry ` on how to configure SNI for more - // information. 
- repeated string server_names = 11; - - // If non-empty, a transport protocol to consider when determining a filter chain match. - // This value will be compared against the transport protocol of a new connection, when - // it's detected by one of the listener filters. - // - // Suggested values include: - // - // * ``raw_buffer`` - default, used when no transport protocol is detected, - // * ``tls`` - set by :ref:`envoy.filters.listener.tls_inspector ` - // when TLS protocol is detected. - string transport_protocol = 9; - - // If non-empty, a list of application protocols (e.g. ALPN for TLS protocol) to consider when - // determining a filter chain match. Those values will be compared against the application - // protocols of a new connection, when detected by one of the listener filters. - // - // Suggested values include: - // - // * ``http/1.1`` - set by :ref:`envoy.filters.listener.tls_inspector - // `, - // * ``h2`` - set by :ref:`envoy.filters.listener.tls_inspector ` - // - // .. attention:: - // - // Currently, only :ref:`TLS Inspector ` provides - // application protocol detection based on the requested - // `ALPN `_ values. - // - // However, the use of ALPN is pretty much limited to the HTTP/2 traffic on the Internet, - // and matching on values other than ``h2`` is going to lead to a lot of false negatives, - // unless all connecting clients are known to use ALPN. - repeated string application_protocols = 10; -} - -// A filter chain wraps a set of match criteria, an option TLS context, a set of filters, and -// various other parameters. -// [#next-free-field: 10] -message FilterChain { - option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.listener.FilterChain"; - - // The configuration for on-demand filter chain. If this field is not empty in FilterChain message, - // a filter chain will be built on-demand. 
- // On-demand filter chains help speedup the warming up of listeners since the building and initialization of - // an on-demand filter chain will be postponed to the arrival of new connection requests that require this filter chain. - // Filter chains that are not often used can be set as on-demand. - message OnDemandConfiguration { - // The timeout to wait for filter chain placeholders to complete rebuilding. - // 1. If this field is set to 0, timeout is disabled. - // 2. If not specified, a default timeout of 15s is used. - // Rebuilding will wait until dependencies are ready, have failed, or this timeout is reached. - // Upon failure or timeout, all connections related to this filter chain will be closed. - // Rebuilding will start again on the next new connection. - google.protobuf.Duration rebuild_timeout = 1; - } - - // The criteria to use when matching a connection to this filter chain. - FilterChainMatch filter_chain_match = 1; - - // A list of individual network filters that make up the filter chain for - // connections established with the listener. Order matters as the filters are - // processed sequentially as connection events happen. Note: If the filter - // list is empty, the connection will close by default. - repeated Filter filters = 3; - - // Whether the listener should expect a PROXY protocol V1 header on new - // connections. If this option is enabled, the listener will assume that that - // remote address of the connection is the one specified in the header. Some - // load balancers including the AWS ELB support this option. If the option is - // absent or set to false, Envoy will use the physical peer address of the - // connection as the remote address. - // - // This field is deprecated. Add a - // :ref:`PROXY protocol listener filter ` - // explicitly instead. 
- google.protobuf.BoolValue use_proxy_proto = 4 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - - // [#not-implemented-hide:] filter chain metadata. - core.v3.Metadata metadata = 5; - - // Optional custom transport socket implementation to use for downstream connections. - // To setup TLS, set a transport socket with name `envoy.transport_sockets.tls` and - // :ref:`DownstreamTlsContext ` in the `typed_config`. - // If no transport socket configuration is specified, new connections - // will be set up with plaintext. - // [#extension-category: envoy.transport_sockets.downstream] - core.v3.TransportSocket transport_socket = 6; - - // If present and nonzero, the amount of time to allow incoming connections to complete any - // transport socket negotiations. If this expires before the transport reports connection - // establishment, the connection is summarily closed. - google.protobuf.Duration transport_socket_connect_timeout = 9; - - // [#not-implemented-hide:] The unique name (or empty) by which this filter chain is known. If no - // name is provided, Envoy will allocate an internal UUID for the filter chain. If the filter - // chain is to be dynamically updated or removed via FCDS a unique name must be provided. - string name = 7; - - // [#not-implemented-hide:] The configuration to specify whether the filter chain will be built on-demand. - // If this field is not empty, the filter chain will be built on-demand. - // Otherwise, the filter chain will be built normally and block listener warming. - OnDemandConfiguration on_demand_configuration = 8; - - envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext - hidden_envoy_deprecated_tls_context = 2 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; -} - -// Listener filter chain match configuration. This is a recursive structure which allows complex -// nested match configurations to be built using various logical operators. 
-// -// Examples: -// -// * Matches if the destination port is 3306. -// -// .. code-block:: yaml -// -// destination_port_range: -// start: 3306 -// end: 3307 -// -// * Matches if the destination port is 3306 or 15000. -// -// .. code-block:: yaml -// -// or_match: -// rules: -// - destination_port_range: -// start: 3306 -// end: 3307 -// - destination_port_range: -// start: 15000 -// end: 15001 -// -// [#next-free-field: 6] -message ListenerFilterChainMatchPredicate { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.listener.ListenerFilterChainMatchPredicate"; - - // A set of match configurations used for logical operations. - message MatchSet { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.listener.ListenerFilterChainMatchPredicate.MatchSet"; - - // The list of rules that make up the set. - repeated ListenerFilterChainMatchPredicate rules = 1 - [(validate.rules).repeated = {min_items: 2}]; - } - - oneof rule { - option (validate.required) = true; - - // A set that describes a logical OR. If any member of the set matches, the match configuration - // matches. - MatchSet or_match = 1; - - // A set that describes a logical AND. If all members of the set match, the match configuration - // matches. - MatchSet and_match = 2; - - // A negation match. The match configuration will match if the negated match condition matches. - ListenerFilterChainMatchPredicate not_match = 3; - - // The match configuration will always match. - bool any_match = 4 [(validate.rules).bool = {const: true}]; - - // Match destination port. Particularly, the match evaluation must use the recovered local port if - // the owning listener filter is after :ref:`an original_dst listener filter `. - type.v3.Int32Range destination_port_range = 5; - } -} - -message ListenerFilter { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.listener.ListenerFilter"; - - // The name of the filter to instantiate. 
The name must match a - // :ref:`supported filter `. - string name = 1 [(validate.rules).string = {min_len: 1}]; - - oneof config_type { - // Filter specific configuration which depends on the filter being - // instantiated. See the supported filters for further documentation. - // [#extension-category: envoy.filters.listener,envoy.filters.udp_listener] - google.protobuf.Any typed_config = 3; - - google.protobuf.Struct hidden_envoy_deprecated_config = 2 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - } - - // Optional match predicate used to disable the filter. The filter is enabled when this field is empty. - // See :ref:`ListenerFilterChainMatchPredicate ` - // for further examples. - ListenerFilterChainMatchPredicate filter_disabled = 4; -} diff --git a/generated_api_shadow/envoy/config/listener/v3/quic_config.proto b/generated_api_shadow/envoy/config/listener/v3/quic_config.proto deleted file mode 100644 index 1432e1911b5d0..0000000000000 --- a/generated_api_shadow/envoy/config/listener/v3/quic_config.proto +++ /dev/null @@ -1,62 +0,0 @@ -syntax = "proto3"; - -package envoy.config.listener.v3; - -import "envoy/config/core/v3/base.proto"; -import "envoy/config/core/v3/extension.proto"; -import "envoy/config/core/v3/protocol.proto"; - -import "google/protobuf/duration.proto"; -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.listener.v3"; -option java_outer_classname = "QuicConfigProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: QUIC listener config] - -// Configuration specific to the UDP QUIC listener. 
-// [#next-free-field: 8] -message QuicProtocolOptions { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.listener.QuicProtocolOptions"; - - core.v3.QuicProtocolOptions quic_protocol_options = 1; - - // Maximum number of milliseconds that connection will be alive when there is - // no network activity. 300000ms if not specified. - google.protobuf.Duration idle_timeout = 2; - - // Connection timeout in milliseconds before the crypto handshake is finished. - // 20000ms if not specified. - google.protobuf.Duration crypto_handshake_timeout = 3; - - // Runtime flag that controls whether the listener is enabled or not. If not specified, defaults - // to enabled. - core.v3.RuntimeFeatureFlag enabled = 4; - - // A multiplier to number of connections which is used to determine how many packets to read per - // event loop. A reasonable number should allow the listener to process enough payload but not - // starve TCP and other UDP sockets and also prevent long event loop duration. - // The default value is 32. This means if there are N QUIC connections, the total number of - // packets to read in each read event will be 32 * N. - // The actual number of packets to read in total by the UDP listener is also - // bound by 6000, regardless of this field or how many connections there are. - google.protobuf.UInt32Value packets_to_read_to_connection_count_ratio = 5 - [(validate.rules).uint32 = {gte: 1}]; - - // Configure which implementation of `quic::QuicCryptoClientStreamBase` to be used for this listener. - // If not specified the :ref:`QUICHE default one configured by ` will be used. - // [#extension-category: envoy.quic.server.crypto_stream] - core.v3.TypedExtensionConfig crypto_stream_config = 6; - - // Configure which implementation of `quic::ProofSource` to be used for this listener. - // If not specified the :ref:`default one configured by ` will be used. 
- // [#extension-category: envoy.quic.proof_source] - core.v3.TypedExtensionConfig proof_source_config = 7; -} diff --git a/generated_api_shadow/envoy/config/listener/v3/udp_listener_config.proto b/generated_api_shadow/envoy/config/listener/v3/udp_listener_config.proto deleted file mode 100644 index 276e98153aeb5..0000000000000 --- a/generated_api_shadow/envoy/config/listener/v3/udp_listener_config.proto +++ /dev/null @@ -1,52 +0,0 @@ -syntax = "proto3"; - -package envoy.config.listener.v3; - -import "envoy/config/core/v3/udp_socket_config.proto"; -import "envoy/config/listener/v3/quic_config.proto"; - -import "google/protobuf/struct.proto"; - -import "envoy/annotations/deprecation.proto"; -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; - -option java_package = "io.envoyproxy.envoy.config.listener.v3"; -option java_outer_classname = "UdpListenerConfigProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: UDP listener config] -// Listener :ref:`configuration overview ` - -// [#next-free-field: 8] -message UdpListenerConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.listener.UdpListenerConfig"; - - reserved 1, 3, 4, 6; - - // UDP socket configuration for the listener. The default for - // :ref:`prefer_gro ` is false for - // listener sockets. If receiving a large amount of datagrams from a small number of sources, it - // may be worthwhile to enable this option after performance testing. - core.v3.UdpSocketConfig downstream_socket_config = 5; - - // Configuration for QUIC protocol. If empty, QUIC will not be enabled on this listener. Set - // to the default object to enable QUIC without modifying any additional options. - // - // .. warning:: - // QUIC support is currently alpha and should be used with caution. Please - // see :ref:`here ` for details. 
- QuicProtocolOptions quic_options = 7; - - oneof config_type { - google.protobuf.Struct hidden_envoy_deprecated_config = 2 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - } -} - -message ActiveRawUdpListenerConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.listener.ActiveRawUdpListenerConfig"; -} diff --git a/generated_api_shadow/envoy/config/metrics/v2/BUILD b/generated_api_shadow/envoy/config/metrics/v2/BUILD deleted file mode 100644 index aaab1df155473..0000000000000 --- a/generated_api_shadow/envoy/config/metrics/v2/BUILD +++ /dev/null @@ -1,13 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/api/v2/core:pkg", - "//envoy/type/matcher:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/config/metrics/v2/metrics_service.proto b/generated_api_shadow/envoy/config/metrics/v2/metrics_service.proto deleted file mode 100644 index f1f8662f0750d..0000000000000 --- a/generated_api_shadow/envoy/config/metrics/v2/metrics_service.proto +++ /dev/null @@ -1,24 +0,0 @@ -syntax = "proto3"; - -package envoy.config.metrics.v2; - -import "envoy/api/v2/core/grpc_service.proto"; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.metrics.v2"; -option java_outer_classname = "MetricsServiceProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Metrics service] - -// Metrics Service is configured as a built-in *envoy.stat_sinks.metrics_service* :ref:`StatsSink -// `. This opaque configuration will be used to create -// Metrics Service. 
-// [#extension: envoy.stat_sinks.metrics_service] -message MetricsServiceConfig { - // The upstream gRPC cluster that hosts the metrics service. - api.v2.core.GrpcService grpc_service = 1 [(validate.rules).message = {required: true}]; -} diff --git a/generated_api_shadow/envoy/config/metrics/v2/stats.proto b/generated_api_shadow/envoy/config/metrics/v2/stats.proto deleted file mode 100644 index 62afcf56e4e71..0000000000000 --- a/generated_api_shadow/envoy/config/metrics/v2/stats.proto +++ /dev/null @@ -1,339 +0,0 @@ -syntax = "proto3"; - -package envoy.config.metrics.v2; - -import "envoy/api/v2/core/address.proto"; -import "envoy/type/matcher/string.proto"; - -import "google/protobuf/any.proto"; -import "google/protobuf/struct.proto"; -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.metrics.v2"; -option java_outer_classname = "StatsProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Stats] -// Statistics :ref:`architecture overview `. - -// Configuration for pluggable stats sinks. -message StatsSink { - // The name of the stats sink to instantiate. The name must match a supported - // stats sink. The built-in stats sinks are: - // - // * :ref:`envoy.stat_sinks.statsd ` - // * :ref:`envoy.stat_sinks.dog_statsd ` - // * :ref:`envoy.stat_sinks.metrics_service ` - // * :ref:`envoy.stat_sinks.hystrix ` - // - // Sinks optionally support tagged/multiple dimensional metrics. - string name = 1; - - // Stats sink specific configuration which depends on the sink being instantiated. See - // :ref:`StatsdSink ` for an example. - oneof config_type { - google.protobuf.Struct config = 2 [deprecated = true]; - - google.protobuf.Any typed_config = 3; - } -} - -// Statistics configuration such as tagging. 
-message StatsConfig { - // Each stat name is iteratively processed through these tag specifiers. - // When a tag is matched, the first capture group is removed from the name so - // later :ref:`TagSpecifiers ` cannot match that - // same portion of the match. - repeated TagSpecifier stats_tags = 1; - - // Use all default tag regexes specified in Envoy. These can be combined with - // custom tags specified in :ref:`stats_tags - // `. They will be processed before - // the custom tags. - // - // .. note:: - // - // If any default tags are specified twice, the config will be considered - // invalid. - // - // See :repo:`well_known_names.h ` for a list of the - // default tags in Envoy. - // - // If not provided, the value is assumed to be true. - google.protobuf.BoolValue use_all_default_tags = 2; - - // Inclusion/exclusion matcher for stat name creation. If not provided, all stats are instantiated - // as normal. Preventing the instantiation of certain families of stats can improve memory - // performance for Envoys running especially large configs. - // - // .. warning:: - // Excluding stats may affect Envoy's behavior in undocumented ways. See - // `issue #8771 `_ for more information. - // If any unexpected behavior changes are observed, please open a new issue immediately. - StatsMatcher stats_matcher = 3; -} - -// Configuration for disabling stat instantiation. -message StatsMatcher { - // The instantiation of stats is unrestricted by default. If the goal is to configure Envoy to - // instantiate all stats, there is no need to construct a StatsMatcher. - // - // However, StatsMatcher can be used to limit the creation of families of stats in order to - // conserve memory. Stats can either be disabled entirely, or they can be - // limited by either an exclusion or an inclusion list of :ref:`StringMatcher - // ` protos: - // - // * If `reject_all` is set to `true`, no stats will be instantiated. 
If `reject_all` is set to - // `false`, all stats will be instantiated. - // - // * If an exclusion list is supplied, any stat name matching *any* of the StringMatchers in the - // list will not instantiate. - // - // * If an inclusion list is supplied, no stats will instantiate, except those matching *any* of - // the StringMatchers in the list. - // - // - // A StringMatcher can be used to match against an exact string, a suffix / prefix, or a regex. - // **NB:** For performance reasons, it is highly recommended to use a prefix- or suffix-based - // matcher rather than a regex-based matcher. - // - // Example 1. Excluding all stats. - // - // .. code-block:: json - // - // { - // "statsMatcher": { - // "rejectAll": "true" - // } - // } - // - // Example 2. Excluding all cluster-specific stats, but not cluster-manager stats: - // - // .. code-block:: json - // - // { - // "statsMatcher": { - // "exclusionList": { - // "patterns": [ - // { - // "prefix": "cluster." - // } - // ] - // } - // } - // } - // - // Example 3. Including only manager-related stats: - // - // .. code-block:: json - // - // { - // "statsMatcher": { - // "inclusionList": { - // "patterns": [ - // { - // "prefix": "cluster_manager." - // }, - // { - // "prefix": "listener_manager." - // } - // ] - // } - // } - // } - // - - oneof stats_matcher { - option (validate.required) = true; - - // If `reject_all` is true, then all stats are disabled. If `reject_all` is false, then all - // stats are enabled. - bool reject_all = 1; - - // Exclusive match. All stats are enabled except for those matching one of the supplied - // StringMatcher protos. - type.matcher.ListStringMatcher exclusion_list = 2; - - // Inclusive match. No stats are enabled except for those matching one of the supplied - // StringMatcher protos. - type.matcher.ListStringMatcher inclusion_list = 3; - } -} - -// Designates a tag name and value pair. 
The value may be either a fixed value -// or a regex providing the value via capture groups. The specified tag will be -// unconditionally set if a fixed value, otherwise it will only be set if one -// or more capture groups in the regex match. -message TagSpecifier { - // Attaches an identifier to the tag values to identify the tag being in the - // sink. Envoy has a set of default names and regexes to extract dynamic - // portions of existing stats, which can be found in :repo:`well_known_names.h - // ` in the Envoy repository. If a :ref:`tag_name - // ` is provided in the config and - // neither :ref:`regex ` or - // :ref:`fixed_value ` were specified, - // Envoy will attempt to find that name in its set of defaults and use the accompanying regex. - // - // .. note:: - // - // It is invalid to specify the same tag name twice in a config. - string tag_name = 1; - - oneof tag_value { - // Designates a tag to strip from the tag extracted name and provide as a named - // tag value for all statistics. This will only occur if any part of the name - // matches the regex provided with one or more capture groups. - // - // The first capture group identifies the portion of the name to remove. The - // second capture group (which will normally be nested inside the first) will - // designate the value of the tag for the statistic. If no second capture - // group is provided, the first will also be used to set the value of the tag. - // All other capture groups will be ignored. - // - // Example 1. a stat name ``cluster.foo_cluster.upstream_rq_timeout`` and - // one tag specifier: - // - // .. 
code-block:: json - // - // { - // "tag_name": "envoy.cluster_name", - // "regex": "^cluster\\.((.+?)\\.)" - // } - // - // Note that the regex will remove ``foo_cluster.`` making the tag extracted - // name ``cluster.upstream_rq_timeout`` and the tag value for - // ``envoy.cluster_name`` will be ``foo_cluster`` (note: there will be no - // ``.`` character because of the second capture group). - // - // Example 2. a stat name - // ``http.connection_manager_1.user_agent.ios.downstream_cx_total`` and two - // tag specifiers: - // - // .. code-block:: json - // - // [ - // { - // "tag_name": "envoy.http_user_agent", - // "regex": "^http(?=\\.).*?\\.user_agent\\.((.+?)\\.)\\w+?$" - // }, - // { - // "tag_name": "envoy.http_conn_manager_prefix", - // "regex": "^http\\.((.*?)\\.)" - // } - // ] - // - // The two regexes of the specifiers will be processed in the definition order. - // - // The first regex will remove ``ios.``, leaving the tag extracted name - // ``http.connection_manager_1.user_agent.downstream_cx_total``. The tag - // ``envoy.http_user_agent`` will be added with tag value ``ios``. - // - // The second regex will remove ``connection_manager_1.`` from the tag - // extracted name produced by the first regex - // ``http.connection_manager_1.user_agent.downstream_cx_total``, leaving - // ``http.user_agent.downstream_cx_total`` as the tag extracted name. The tag - // ``envoy.http_conn_manager_prefix`` will be added with the tag value - // ``connection_manager_1``. - string regex = 2 [(validate.rules).string = {max_bytes: 1024}]; - - // Specifies a fixed tag value for the ``tag_name``. - string fixed_value = 3; - } -} - -// Stats configuration proto schema for built-in *envoy.stat_sinks.statsd* sink. This sink does not support -// tagged metrics. -// [#extension: envoy.stat_sinks.statsd] -message StatsdSink { - oneof statsd_specifier { - option (validate.required) = true; - - // The UDP address of a running `statsd `_ - // compliant listener. 
If specified, statistics will be flushed to this - // address. - api.v2.core.Address address = 1; - - // The name of a cluster that is running a TCP `statsd - // `_ compliant listener. If specified, - // Envoy will connect to this cluster to flush statistics. - string tcp_cluster_name = 2; - } - - // Optional custom prefix for StatsdSink. If - // specified, this will override the default prefix. - // For example: - // - // .. code-block:: json - // - // { - // "prefix" : "envoy-prod" - // } - // - // will change emitted stats to - // - // .. code-block:: cpp - // - // envoy-prod.test_counter:1|c - // envoy-prod.test_timer:5|ms - // - // Note that the default prefix, "envoy", will be used if a prefix is not - // specified. - // - // Stats with default prefix: - // - // .. code-block:: cpp - // - // envoy.test_counter:1|c - // envoy.test_timer:5|ms - string prefix = 3; -} - -// Stats configuration proto schema for built-in *envoy.stat_sinks.dog_statsd* sink. -// The sink emits stats with `DogStatsD `_ -// compatible tags. Tags are configurable via :ref:`StatsConfig -// `. -// [#extension: envoy.stat_sinks.dog_statsd] -message DogStatsdSink { - reserved 2; - - oneof dog_statsd_specifier { - option (validate.required) = true; - - // The UDP address of a running DogStatsD compliant listener. If specified, - // statistics will be flushed to this address. - api.v2.core.Address address = 1; - } - - // Optional custom metric name prefix. See :ref:`StatsdSink's prefix field - // ` for more details. - string prefix = 3; -} - -// Stats configuration proto schema for built-in *envoy.stat_sinks.hystrix* sink. -// The sink emits stats in `text/event-stream -// `_ -// formatted stream for use by `Hystrix dashboard -// `_. -// -// Note that only a single HystrixSink should be configured. -// -// Streaming is started through an admin endpoint :http:get:`/hystrix_event_stream`. 
-// [#extension: envoy.stat_sinks.hystrix] -message HystrixSink { - // The number of buckets the rolling statistical window is divided into. - // - // Each time the sink is flushed, all relevant Envoy statistics are sampled and - // added to the rolling window (removing the oldest samples in the window - // in the process). The sink then outputs the aggregate statistics across the - // current rolling window to the event stream(s). - // - // rolling_window(ms) = stats_flush_interval(ms) * num_of_buckets - // - // More detailed explanation can be found in `Hystrix wiki - // `_. - int64 num_buckets = 1; -} diff --git a/generated_api_shadow/envoy/config/metrics/v3/BUILD b/generated_api_shadow/envoy/config/metrics/v3/BUILD deleted file mode 100644 index 8e9c73c09e118..0000000000000 --- a/generated_api_shadow/envoy/config/metrics/v3/BUILD +++ /dev/null @@ -1,15 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/annotations:pkg", - "//envoy/config/core/v3:pkg", - "//envoy/config/metrics/v2:pkg", - "//envoy/type/matcher/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/config/metrics/v3/metrics_service.proto b/generated_api_shadow/envoy/config/metrics/v3/metrics_service.proto deleted file mode 100644 index df3c71e6a6308..0000000000000 --- a/generated_api_shadow/envoy/config/metrics/v3/metrics_service.proto +++ /dev/null @@ -1,57 +0,0 @@ -syntax = "proto3"; - -package envoy.config.metrics.v3; - -import "envoy/config/core/v3/config_source.proto"; -import "envoy/config/core/v3/grpc_service.proto"; - -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.metrics.v3"; -option 
java_outer_classname = "MetricsServiceProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Metrics service] - -// Metrics Service is configured as a built-in *envoy.stat_sinks.metrics_service* :ref:`StatsSink -// `. This opaque configuration will be used to create -// Metrics Service. -// -// Example: -// -// .. code-block:: yaml -// -// stats_sinks: -// - name: envoy.stat_sinks.metrics_service -// typed_config: -// "@type": type.googleapis.com/envoy.config.metrics.v3.MetricsServiceConfig -// transport_api_version: V3 -// -// [#extension: envoy.stat_sinks.metrics_service] -message MetricsServiceConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.metrics.v2.MetricsServiceConfig"; - - // The upstream gRPC cluster that hosts the metrics service. - core.v3.GrpcService grpc_service = 1 [(validate.rules).message = {required: true}]; - - // API version for metric service transport protocol. This describes the metric service gRPC - // endpoint and version of messages used on the wire. - core.v3.ApiVersion transport_api_version = 3 [(validate.rules).enum = {defined_only: true}]; - - // If true, counters are reported as the delta between flushing intervals. Otherwise, the current - // counter value is reported. Defaults to false. - // Eventually (https://github.com/envoyproxy/envoy/issues/10968) if this value is not set, the - // sink will take updates from the :ref:`MetricsResponse `. - google.protobuf.BoolValue report_counters_as_deltas = 2; - - // If true, metrics will have their tags emitted as labels on the metrics objects sent to the MetricsService, - // and the tag extracted name will be used instead of the full name, which may contain values used by the tag - // extractor or additional tags added during stats creation. 
- bool emit_tags_as_labels = 4; -} diff --git a/generated_api_shadow/envoy/config/metrics/v3/stats.proto b/generated_api_shadow/envoy/config/metrics/v3/stats.proto deleted file mode 100644 index 1b5e833e2bede..0000000000000 --- a/generated_api_shadow/envoy/config/metrics/v3/stats.proto +++ /dev/null @@ -1,409 +0,0 @@ -syntax = "proto3"; - -package envoy.config.metrics.v3; - -import "envoy/config/core/v3/address.proto"; -import "envoy/type/matcher/v3/string.proto"; - -import "google/protobuf/any.proto"; -import "google/protobuf/struct.proto"; -import "google/protobuf/wrappers.proto"; - -import "envoy/annotations/deprecation.proto"; -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.metrics.v3"; -option java_outer_classname = "StatsProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Stats] -// Statistics :ref:`architecture overview `. - -// Configuration for pluggable stats sinks. -message StatsSink { - option (udpa.annotations.versioning).previous_message_type = "envoy.config.metrics.v2.StatsSink"; - - // The name of the stats sink to instantiate. The name must match a supported - // stats sink. - // See the :ref:`extensions listed in typed_config below ` for the default list of available stats sink. - // Sinks optionally support tagged/multiple dimensional metrics. - string name = 1; - - // Stats sink specific configuration which depends on the sink being instantiated. See - // :ref:`StatsdSink ` for an example. - // [#extension-category: envoy.stats_sinks] - oneof config_type { - google.protobuf.Any typed_config = 3; - - google.protobuf.Struct hidden_envoy_deprecated_config = 2 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - } -} - -// Statistics configuration such as tagging. 
-message StatsConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.metrics.v2.StatsConfig"; - - // Each stat name is iteratively processed through these tag specifiers. - // When a tag is matched, the first capture group is removed from the name so - // later :ref:`TagSpecifiers ` cannot match that - // same portion of the match. - repeated TagSpecifier stats_tags = 1; - - // Use all default tag regexes specified in Envoy. These can be combined with - // custom tags specified in :ref:`stats_tags - // `. They will be processed before - // the custom tags. - // - // .. note:: - // - // If any default tags are specified twice, the config will be considered - // invalid. - // - // See :repo:`well_known_names.h ` for a list of the - // default tags in Envoy. - // - // If not provided, the value is assumed to be true. - google.protobuf.BoolValue use_all_default_tags = 2; - - // Inclusion/exclusion matcher for stat name creation. If not provided, all stats are instantiated - // as normal. Preventing the instantiation of certain families of stats can improve memory - // performance for Envoys running especially large configs. - // - // .. warning:: - // Excluding stats may affect Envoy's behavior in undocumented ways. See - // `issue #8771 `_ for more information. - // If any unexpected behavior changes are observed, please open a new issue immediately. - StatsMatcher stats_matcher = 3; - - // Defines rules for setting the histogram buckets. Rules are evaluated in order, and the first - // match is applied. If no match is found (or if no rules are set), the following default buckets - // are used: - // - // .. 
code-block:: json - // - // [ - // 0.5, - // 1, - // 5, - // 10, - // 25, - // 50, - // 100, - // 250, - // 500, - // 1000, - // 2500, - // 5000, - // 10000, - // 30000, - // 60000, - // 300000, - // 600000, - // 1800000, - // 3600000 - // ] - repeated HistogramBucketSettings histogram_bucket_settings = 4; -} - -// Configuration for disabling stat instantiation. -message StatsMatcher { - // The instantiation of stats is unrestricted by default. If the goal is to configure Envoy to - // instantiate all stats, there is no need to construct a StatsMatcher. - // - // However, StatsMatcher can be used to limit the creation of families of stats in order to - // conserve memory. Stats can either be disabled entirely, or they can be - // limited by either an exclusion or an inclusion list of :ref:`StringMatcher - // ` protos: - // - // * If `reject_all` is set to `true`, no stats will be instantiated. If `reject_all` is set to - // `false`, all stats will be instantiated. - // - // * If an exclusion list is supplied, any stat name matching *any* of the StringMatchers in the - // list will not instantiate. - // - // * If an inclusion list is supplied, no stats will instantiate, except those matching *any* of - // the StringMatchers in the list. - // - // - // A StringMatcher can be used to match against an exact string, a suffix / prefix, or a regex. - // **NB:** For performance reasons, it is highly recommended to use a prefix- or suffix-based - // matcher rather than a regex-based matcher. - // - // Example 1. Excluding all stats. - // - // .. code-block:: json - // - // { - // "statsMatcher": { - // "rejectAll": "true" - // } - // } - // - // Example 2. Excluding all cluster-specific stats, but not cluster-manager stats: - // - // .. code-block:: json - // - // { - // "statsMatcher": { - // "exclusionList": { - // "patterns": [ - // { - // "prefix": "cluster." - // } - // ] - // } - // } - // } - // - // Example 3. Including only manager-related stats: - // - // .. 
code-block:: json - // - // { - // "statsMatcher": { - // "inclusionList": { - // "patterns": [ - // { - // "prefix": "cluster_manager." - // }, - // { - // "prefix": "listener_manager." - // } - // ] - // } - // } - // } - // - - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.metrics.v2.StatsMatcher"; - - oneof stats_matcher { - option (validate.required) = true; - - // If `reject_all` is true, then all stats are disabled. If `reject_all` is false, then all - // stats are enabled. - bool reject_all = 1; - - // Exclusive match. All stats are enabled except for those matching one of the supplied - // StringMatcher protos. - type.matcher.v3.ListStringMatcher exclusion_list = 2; - - // Inclusive match. No stats are enabled except for those matching one of the supplied - // StringMatcher protos. - type.matcher.v3.ListStringMatcher inclusion_list = 3; - } -} - -// Designates a tag name and value pair. The value may be either a fixed value -// or a regex providing the value via capture groups. The specified tag will be -// unconditionally set if a fixed value, otherwise it will only be set if one -// or more capture groups in the regex match. -message TagSpecifier { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.metrics.v2.TagSpecifier"; - - // Attaches an identifier to the tag values to identify the tag being in the - // sink. Envoy has a set of default names and regexes to extract dynamic - // portions of existing stats, which can be found in :repo:`well_known_names.h - // ` in the Envoy repository. If a :ref:`tag_name - // ` is provided in the config and - // neither :ref:`regex ` or - // :ref:`fixed_value ` were specified, - // Envoy will attempt to find that name in its set of defaults and use the accompanying regex. - // - // .. note:: - // - // It is invalid to specify the same tag name twice in a config. 
- string tag_name = 1; - - oneof tag_value { - // Designates a tag to strip from the tag extracted name and provide as a named - // tag value for all statistics. This will only occur if any part of the name - // matches the regex provided with one or more capture groups. - // - // The first capture group identifies the portion of the name to remove. The - // second capture group (which will normally be nested inside the first) will - // designate the value of the tag for the statistic. If no second capture - // group is provided, the first will also be used to set the value of the tag. - // All other capture groups will be ignored. - // - // Example 1. a stat name ``cluster.foo_cluster.upstream_rq_timeout`` and - // one tag specifier: - // - // .. code-block:: json - // - // { - // "tag_name": "envoy.cluster_name", - // "regex": "^cluster\\.((.+?)\\.)" - // } - // - // Note that the regex will remove ``foo_cluster.`` making the tag extracted - // name ``cluster.upstream_rq_timeout`` and the tag value for - // ``envoy.cluster_name`` will be ``foo_cluster`` (note: there will be no - // ``.`` character because of the second capture group). - // - // Example 2. a stat name - // ``http.connection_manager_1.user_agent.ios.downstream_cx_total`` and two - // tag specifiers: - // - // .. code-block:: json - // - // [ - // { - // "tag_name": "envoy.http_user_agent", - // "regex": "^http(?=\\.).*?\\.user_agent\\.((.+?)\\.)\\w+?$" - // }, - // { - // "tag_name": "envoy.http_conn_manager_prefix", - // "regex": "^http\\.((.*?)\\.)" - // } - // ] - // - // The two regexes of the specifiers will be processed in the definition order. - // - // The first regex will remove ``ios.``, leaving the tag extracted name - // ``http.connection_manager_1.user_agent.downstream_cx_total``. The tag - // ``envoy.http_user_agent`` will be added with tag value ``ios``. 
- // - // The second regex will remove ``connection_manager_1.`` from the tag - // extracted name produced by the first regex - // ``http.connection_manager_1.user_agent.downstream_cx_total``, leaving - // ``http.user_agent.downstream_cx_total`` as the tag extracted name. The tag - // ``envoy.http_conn_manager_prefix`` will be added with the tag value - // ``connection_manager_1``. - string regex = 2 [(validate.rules).string = {max_bytes: 1024}]; - - // Specifies a fixed tag value for the ``tag_name``. - string fixed_value = 3; - } -} - -// Specifies a matcher for stats and the buckets that matching stats should use. -message HistogramBucketSettings { - // The stats that this rule applies to. The match is applied to the original stat name - // before tag-extraction, for example `cluster.exampleclustername.upstream_cx_length_ms`. - type.matcher.v3.StringMatcher match = 1 [(validate.rules).message = {required: true}]; - - // Each value is the upper bound of a bucket. Each bucket must be greater than 0 and unique. - // The order of the buckets does not matter. - repeated double buckets = 2 [(validate.rules).repeated = { - min_items: 1 - unique: true - items {double {gt: 0.0}} - }]; -} - -// Stats configuration proto schema for built-in *envoy.stat_sinks.statsd* sink. This sink does not support -// tagged metrics. -// [#extension: envoy.stat_sinks.statsd] -message StatsdSink { - option (udpa.annotations.versioning).previous_message_type = "envoy.config.metrics.v2.StatsdSink"; - - oneof statsd_specifier { - option (validate.required) = true; - - // The UDP address of a running `statsd `_ - // compliant listener. If specified, statistics will be flushed to this - // address. - core.v3.Address address = 1; - - // The name of a cluster that is running a TCP `statsd - // `_ compliant listener. If specified, - // Envoy will connect to this cluster to flush statistics. - string tcp_cluster_name = 2; - } - - // Optional custom prefix for StatsdSink. 
If - // specified, this will override the default prefix. - // For example: - // - // .. code-block:: json - // - // { - // "prefix" : "envoy-prod" - // } - // - // will change emitted stats to - // - // .. code-block:: cpp - // - // envoy-prod.test_counter:1|c - // envoy-prod.test_timer:5|ms - // - // Note that the default prefix, "envoy", will be used if a prefix is not - // specified. - // - // Stats with default prefix: - // - // .. code-block:: cpp - // - // envoy.test_counter:1|c - // envoy.test_timer:5|ms - string prefix = 3; -} - -// Stats configuration proto schema for built-in *envoy.stat_sinks.dog_statsd* sink. -// The sink emits stats with `DogStatsD `_ -// compatible tags. Tags are configurable via :ref:`StatsConfig -// `. -// [#extension: envoy.stat_sinks.dog_statsd] -message DogStatsdSink { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.metrics.v2.DogStatsdSink"; - - reserved 2; - - oneof dog_statsd_specifier { - option (validate.required) = true; - - // The UDP address of a running DogStatsD compliant listener. If specified, - // statistics will be flushed to this address. - core.v3.Address address = 1; - } - - // Optional custom metric name prefix. See :ref:`StatsdSink's prefix field - // ` for more details. - string prefix = 3; - - // Optional max datagram size to use when sending UDP messages. By default Envoy - // will emit one metric per datagram. By specifying a max-size larger than a single - // metric, Envoy will emit multiple, new-line separated metrics. The max datagram - // size should not exceed your network's MTU. - // - // Note that this value may not be respected if smaller than a single metric. - google.protobuf.UInt64Value max_bytes_per_datagram = 4 [(validate.rules).uint64 = {gt: 0}]; -} - -// Stats configuration proto schema for built-in *envoy.stat_sinks.hystrix* sink. -// The sink emits stats in `text/event-stream -// `_ -// formatted stream for use by `Hystrix dashboard -// `_. 
-// -// Note that only a single HystrixSink should be configured. -// -// Streaming is started through an admin endpoint :http:get:`/hystrix_event_stream`. -// [#extension: envoy.stat_sinks.hystrix] -message HystrixSink { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.metrics.v2.HystrixSink"; - - // The number of buckets the rolling statistical window is divided into. - // - // Each time the sink is flushed, all relevant Envoy statistics are sampled and - // added to the rolling window (removing the oldest samples in the window - // in the process). The sink then outputs the aggregate statistics across the - // current rolling window to the event stream(s). - // - // rolling_window(ms) = stats_flush_interval(ms) * num_of_buckets - // - // More detailed explanation can be found in `Hystrix wiki - // `_. - int64 num_buckets = 1; -} diff --git a/generated_api_shadow/envoy/config/overload/v2alpha/BUILD b/generated_api_shadow/envoy/config/overload/v2alpha/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/config/overload/v2alpha/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/config/overload/v2alpha/overload.proto b/generated_api_shadow/envoy/config/overload/v2alpha/overload.proto deleted file mode 100644 index 03886cdee6d6e..0000000000000 --- a/generated_api_shadow/envoy/config/overload/v2alpha/overload.proto +++ /dev/null @@ -1,80 +0,0 @@ -syntax = "proto3"; - -package envoy.config.overload.v2alpha; - -import "google/protobuf/any.proto"; -import "google/protobuf/duration.proto"; -import "google/protobuf/struct.proto"; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.overload.v2alpha"; -option java_outer_classname = "OverloadProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Overload Manager] - -// The Overload Manager provides an extensible framework to protect Envoy instances -// from overload of various resources (memory, cpu, file descriptors, etc). -// It monitors a configurable set of resources and notifies registered listeners -// when triggers related to those resources fire. - -message ResourceMonitor { - // The name of the resource monitor to instantiate. Must match a registered - // resource monitor type. The built-in resource monitors are: - // - // * :ref:`envoy.resource_monitors.fixed_heap - // ` - // * :ref:`envoy.resource_monitors.injected_resource - // ` - string name = 1 [(validate.rules).string = {min_bytes: 1}]; - - // Configuration for the resource monitor being instantiated. 
- oneof config_type { - google.protobuf.Struct config = 2 [deprecated = true]; - - google.protobuf.Any typed_config = 3; - } -} - -message ThresholdTrigger { - // If the resource pressure is greater than or equal to this value, the trigger - // will fire. - double value = 1 [(validate.rules).double = {lte: 1.0 gte: 0.0}]; -} - -message Trigger { - // The name of the resource this is a trigger for. - string name = 1 [(validate.rules).string = {min_bytes: 1}]; - - oneof trigger_oneof { - option (validate.required) = true; - - ThresholdTrigger threshold = 2; - } -} - -message OverloadAction { - // The name of the overload action. This is just a well-known string that listeners can - // use for registering callbacks. Custom overload actions should be named using reverse - // DNS to ensure uniqueness. - string name = 1 [(validate.rules).string = {min_bytes: 1}]; - - // A set of triggers for this action. If any of these triggers fire the overload action - // is activated. Listeners are notified when the overload action transitions from - // inactivated to activated, or vice versa. - repeated Trigger triggers = 2 [(validate.rules).repeated = {min_items: 1}]; -} - -message OverloadManager { - // The interval for refreshing resource usage. - google.protobuf.Duration refresh_interval = 1; - - // The set of resources to monitor. - repeated ResourceMonitor resource_monitors = 2 [(validate.rules).repeated = {min_items: 1}]; - - // The set of overload actions. - repeated OverloadAction actions = 3; -} diff --git a/generated_api_shadow/envoy/config/overload/v3/BUILD b/generated_api_shadow/envoy/config/overload/v3/BUILD deleted file mode 100644 index 9a222edfc8e6a..0000000000000 --- a/generated_api_shadow/envoy/config/overload/v3/BUILD +++ /dev/null @@ -1,14 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/annotations:pkg", - "//envoy/config/overload/v2alpha:pkg", - "//envoy/type/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/config/overload/v3/overload.proto b/generated_api_shadow/envoy/config/overload/v3/overload.proto deleted file mode 100644 index 5ff2222987f6f..0000000000000 --- a/generated_api_shadow/envoy/config/overload/v3/overload.proto +++ /dev/null @@ -1,180 +0,0 @@ -syntax = "proto3"; - -package envoy.config.overload.v3; - -import "envoy/type/v3/percent.proto"; - -import "google/protobuf/any.proto"; -import "google/protobuf/duration.proto"; -import "google/protobuf/struct.proto"; - -import "envoy/annotations/deprecation.proto"; -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.overload.v3"; -option java_outer_classname = "OverloadProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Overload Manager] - -// The Overload Manager provides an extensible framework to protect Envoy instances -// from overload of various resources (memory, cpu, file descriptors, etc). -// It monitors a configurable set of resources and notifies registered listeners -// when triggers related to those resources fire. - -message ResourceMonitor { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.overload.v2alpha.ResourceMonitor"; - - // The name of the resource monitor to instantiate. Must match a registered - // resource monitor type. - // See the :ref:`extensions listed in typed_config below ` for the default list of available resource monitor. 
- string name = 1 [(validate.rules).string = {min_len: 1}]; - - // Configuration for the resource monitor being instantiated. - // [#extension-category: envoy.resource_monitors] - oneof config_type { - google.protobuf.Any typed_config = 3; - - google.protobuf.Struct hidden_envoy_deprecated_config = 2 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - } -} - -message ThresholdTrigger { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.overload.v2alpha.ThresholdTrigger"; - - // If the resource pressure is greater than or equal to this value, the trigger - // will enter saturation. - double value = 1 [(validate.rules).double = {lte: 1.0 gte: 0.0}]; -} - -message ScaledTrigger { - // If the resource pressure is greater than this value, the trigger will be in the - // :ref:`scaling ` state with value - // `(pressure - scaling_threshold) / (saturation_threshold - scaling_threshold)`. - double scaling_threshold = 1 [(validate.rules).double = {lte: 1.0 gte: 0.0}]; - - // If the resource pressure is greater than this value, the trigger will enter saturation. - double saturation_threshold = 2 [(validate.rules).double = {lte: 1.0 gte: 0.0}]; -} - -message Trigger { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.overload.v2alpha.Trigger"; - - // The name of the resource this is a trigger for. - string name = 1 [(validate.rules).string = {min_len: 1}]; - - oneof trigger_oneof { - option (validate.required) = true; - - ThresholdTrigger threshold = 2; - - ScaledTrigger scaled = 3; - } -} - -// Typed configuration for the "envoy.overload_actions.reduce_timeouts" action. See -// :ref:`the docs ` for an example of how to configure -// the action with different timeouts and minimum values. -message ScaleTimersOverloadActionConfig { - enum TimerType { - // Unsupported value; users must explicitly specify the timer they want scaled. 
- UNSPECIFIED = 0; - - // Adjusts the idle timer for downstream HTTP connections that takes effect when there are no active streams. - // This affects the value of :ref:`HttpConnectionManager.common_http_protocol_options.idle_timeout - // ` - HTTP_DOWNSTREAM_CONNECTION_IDLE = 1; - - // Adjusts the idle timer for HTTP streams initiated by downstream clients. - // This affects the value of :ref:`RouteAction.idle_timeout ` and - // :ref:`HttpConnectionManager.stream_idle_timeout - // ` - HTTP_DOWNSTREAM_STREAM_IDLE = 2; - - // Adjusts the timer for how long downstream clients have to finish transport-level negotiations - // before the connection is closed. - // This affects the value of - // :ref:`FilterChain.transport_socket_connect_timeout `. - TRANSPORT_SOCKET_CONNECT = 3; - } - - message ScaleTimer { - // The type of timer this minimum applies to. - TimerType timer = 1 [(validate.rules).enum = {defined_only: true not_in: 0}]; - - oneof overload_adjust { - option (validate.required) = true; - - // Sets the minimum duration as an absolute value. - google.protobuf.Duration min_timeout = 2; - - // Sets the minimum duration as a percentage of the maximum value. - type.v3.Percent min_scale = 3; - } - } - - // A set of timer scaling rules to be applied. - repeated ScaleTimer timer_scale_factors = 1 [(validate.rules).repeated = {min_items: 1}]; -} - -message OverloadAction { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.overload.v2alpha.OverloadAction"; - - // The name of the overload action. This is just a well-known string that listeners can - // use for registering callbacks. Custom overload actions should be named using reverse - // DNS to ensure uniqueness. - string name = 1 [(validate.rules).string = {min_len: 1}]; - - // A set of triggers for this action. The state of the action is the maximum - // state of all triggers, which can be scaling between 0 and 1 or saturated. 
Listeners - // are notified when the overload action changes state. - repeated Trigger triggers = 2 [(validate.rules).repeated = {min_items: 1}]; - - // Configuration for the action being instantiated. - google.protobuf.Any typed_config = 3; -} - -// Configuration for which accounts the WatermarkBuffer Factories should -// track. -message BufferFactoryConfig { - // The minimum power of two at which Envoy starts tracking an account. - // - // Envoy has 8 power of two buckets starting with the provided exponent below. - // Concretely the 1st bucket contains accounts for streams that use - // [2^minimum_account_to_track_power_of_two, - // 2^(minimum_account_to_track_power_of_two + 1)) bytes. - // With the 8th bucket tracking accounts - // >= 128 * 2^minimum_account_to_track_power_of_two. - // - // The maximum value is 56, since we're using uint64_t for bytes counting, - // and that's the last value that would use the 8 buckets. In practice, - // we don't expect the proxy to be holding 2^56 bytes. - // - // If omitted, Envoy should not do any tracking. - uint32 minimum_account_to_track_power_of_two = 1 [(validate.rules).uint32 = {lte: 56 gte: 10}]; -} - -message OverloadManager { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.overload.v2alpha.OverloadManager"; - - // The interval for refreshing resource usage. - google.protobuf.Duration refresh_interval = 1; - - // The set of resources to monitor. - repeated ResourceMonitor resource_monitors = 2 [(validate.rules).repeated = {min_items: 1}]; - - // The set of overload actions. - repeated OverloadAction actions = 3; - - // Configuration for buffer factory. - BufferFactoryConfig buffer_factory_config = 4; -} diff --git a/generated_api_shadow/envoy/config/ratelimit/v2/BUILD b/generated_api_shadow/envoy/config/ratelimit/v2/BUILD deleted file mode 100644 index 83bc0ab960e74..0000000000000 --- a/generated_api_shadow/envoy/config/ratelimit/v2/BUILD +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT EDIT. 
This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/api/v2/core:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/config/ratelimit/v2/rls.proto b/generated_api_shadow/envoy/config/ratelimit/v2/rls.proto deleted file mode 100644 index 92801ea7b9689..0000000000000 --- a/generated_api_shadow/envoy/config/ratelimit/v2/rls.proto +++ /dev/null @@ -1,25 +0,0 @@ -syntax = "proto3"; - -package envoy.config.ratelimit.v2; - -import "envoy/api/v2/core/grpc_service.proto"; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.ratelimit.v2"; -option java_outer_classname = "RlsProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Rate limit service] - -// Rate limit :ref:`configuration overview `. -message RateLimitServiceConfig { - reserved 1, 3; - - // Specifies the gRPC service that hosts the rate limit service. The client - // will connect to this cluster when it needs to make rate limit service - // requests. - api.v2.core.GrpcService grpc_service = 2 [(validate.rules).message = {required: true}]; -} diff --git a/generated_api_shadow/envoy/config/ratelimit/v3/BUILD b/generated_api_shadow/envoy/config/ratelimit/v3/BUILD deleted file mode 100644 index 1c1a6f6b44235..0000000000000 --- a/generated_api_shadow/envoy/config/ratelimit/v3/BUILD +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/core/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/config/ratelimit/v3/rls.proto b/generated_api_shadow/envoy/config/ratelimit/v3/rls.proto deleted file mode 100644 index 98889b1e28825..0000000000000 --- a/generated_api_shadow/envoy/config/ratelimit/v3/rls.proto +++ /dev/null @@ -1,34 +0,0 @@ -syntax = "proto3"; - -package envoy.config.ratelimit.v3; - -import "envoy/config/core/v3/config_source.proto"; -import "envoy/config/core/v3/grpc_service.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.ratelimit.v3"; -option java_outer_classname = "RlsProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Rate limit service] - -// Rate limit :ref:`configuration overview `. -message RateLimitServiceConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.ratelimit.v2.RateLimitServiceConfig"; - - reserved 1, 3; - - // Specifies the gRPC service that hosts the rate limit service. The client - // will connect to this cluster when it needs to make rate limit service - // requests. - core.v3.GrpcService grpc_service = 2 [(validate.rules).message = {required: true}]; - - // API version for rate limit transport protocol. This describes the rate limit gRPC endpoint and - // version of messages used on the wire. 
- core.v3.ApiVersion transport_api_version = 4 [(validate.rules).enum = {defined_only: true}]; -} diff --git a/generated_api_shadow/envoy/config/rbac/v2/BUILD b/generated_api_shadow/envoy/config/rbac/v2/BUILD deleted file mode 100644 index 4bce7466dddf7..0000000000000 --- a/generated_api_shadow/envoy/config/rbac/v2/BUILD +++ /dev/null @@ -1,15 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/api/v2/core:pkg", - "//envoy/api/v2/route:pkg", - "//envoy/type/matcher:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - "@com_google_googleapis//google/api/expr/v1alpha1:syntax_proto", - ], -) diff --git a/generated_api_shadow/envoy/config/rbac/v2/rbac.proto b/generated_api_shadow/envoy/config/rbac/v2/rbac.proto deleted file mode 100644 index 943ac33e08590..0000000000000 --- a/generated_api_shadow/envoy/config/rbac/v2/rbac.proto +++ /dev/null @@ -1,240 +0,0 @@ -syntax = "proto3"; - -package envoy.config.rbac.v2; - -import "envoy/api/v2/core/address.proto"; -import "envoy/api/v2/route/route_components.proto"; -import "envoy/type/matcher/metadata.proto"; -import "envoy/type/matcher/path.proto"; -import "envoy/type/matcher/string.proto"; - -import "google/api/expr/v1alpha1/syntax.proto"; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.rbac.v2"; -option java_outer_classname = "RbacProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Role Based Access Control (RBAC)] - -// Role Based Access Control (RBAC) provides service-level and method-level access control for a -// service. RBAC policies are additive. The policies are examined in order. 
A request is allowed -// once a matching policy is found (suppose the `action` is ALLOW). -// -// Here is an example of RBAC configuration. It has two policies: -// -// * Service account "cluster.local/ns/default/sa/admin" has full access to the service, and so -// does "cluster.local/ns/default/sa/superuser". -// -// * Any user can read ("GET") the service at paths with prefix "/products", so long as the -// destination port is either 80 or 443. -// -// .. code-block:: yaml -// -// action: ALLOW -// policies: -// "service-admin": -// permissions: -// - any: true -// principals: -// - authenticated: -// principal_name: -// exact: "cluster.local/ns/default/sa/admin" -// - authenticated: -// principal_name: -// exact: "cluster.local/ns/default/sa/superuser" -// "product-viewer": -// permissions: -// - and_rules: -// rules: -// - header: { name: ":method", exact_match: "GET" } -// - url_path: -// path: { prefix: "/products" } -// - or_rules: -// rules: -// - destination_port: 80 -// - destination_port: 443 -// principals: -// - any: true -// -message RBAC { - // Should we do safe-list or block-list style access control? - enum Action { - // The policies grant access to principals. The rest is denied. This is safe-list style - // access control. This is the default type. - ALLOW = 0; - - // The policies deny access to principals. The rest is allowed. This is block-list style - // access control. - DENY = 1; - } - - // The action to take if a policy matches. The request is allowed if and only if: - // - // * `action` is "ALLOWED" and at least one policy matches - // * `action` is "DENY" and none of the policies match - Action action = 1; - - // Maps from policy name to policy. A match occurs when at least one policy matches the request. - map policies = 2; -} - -// Policy specifies a role and the principals that are assigned/denied the role. 
A policy matches if -// and only if at least one of its permissions match the action taking place AND at least one of its -// principals match the downstream AND the condition is true if specified. -message Policy { - // Required. The set of permissions that define a role. Each permission is matched with OR - // semantics. To match all actions for this policy, a single Permission with the `any` field set - // to true should be used. - repeated Permission permissions = 1 [(validate.rules).repeated = {min_items: 1}]; - - // Required. The set of principals that are assigned/denied the role based on “action”. Each - // principal is matched with OR semantics. To match all downstreams for this policy, a single - // Principal with the `any` field set to true should be used. - repeated Principal principals = 2 [(validate.rules).repeated = {min_items: 1}]; - - // An optional symbolic expression specifying an access control - // :ref:`condition `. The condition is combined - // with the permissions and the principals as a clause with AND semantics. - google.api.expr.v1alpha1.Expr condition = 3; -} - -// Permission defines an action (or actions) that a principal can take. -// [#next-free-field: 11] -message Permission { - // Used in the `and_rules` and `or_rules` fields in the `rule` oneof. Depending on the context, - // each are applied with the associated behavior. - message Set { - repeated Permission rules = 1 [(validate.rules).repeated = {min_items: 1}]; - } - - oneof rule { - option (validate.required) = true; - - // A set of rules that all must match in order to define the action. - Set and_rules = 1; - - // A set of rules where at least one must match in order to define the action. - Set or_rules = 2; - - // When any is set, it matches any action. - bool any = 3 [(validate.rules).bool = {const: true}]; - - // A header (or pseudo-header such as :path or :method) on the incoming HTTP request. Only - // available for HTTP request. 
- // Note: the pseudo-header :path includes the query and fragment string. Use the `url_path` - // field if you want to match the URL path without the query and fragment string. - api.v2.route.HeaderMatcher header = 4; - - // A URL path on the incoming HTTP request. Only available for HTTP. - type.matcher.PathMatcher url_path = 10; - - // A CIDR block that describes the destination IP. - api.v2.core.CidrRange destination_ip = 5; - - // A port number that describes the destination port connecting to. - uint32 destination_port = 6 [(validate.rules).uint32 = {lte: 65535}]; - - // Metadata that describes additional information about the action. - type.matcher.MetadataMatcher metadata = 7; - - // Negates matching the provided permission. For instance, if the value of `not_rule` would - // match, this permission would not match. Conversely, if the value of `not_rule` would not - // match, this permission would match. - Permission not_rule = 8; - - // The request server from the client's connection request. This is - // typically TLS SNI. - // - // .. attention:: - // - // The behavior of this field may be affected by how Envoy is configured - // as explained below. - // - // * If the :ref:`TLS Inspector ` - // filter is not added, and if a `FilterChainMatch` is not defined for - // the :ref:`server name `, - // a TLS connection's requested SNI server name will be treated as if it - // wasn't present. - // - // * A :ref:`listener filter ` may - // overwrite a connection's requested server name within Envoy. - // - // Please refer to :ref:`this FAQ entry ` to learn to - // setup SNI. - type.matcher.StringMatcher requested_server_name = 9; - } -} - -// Principal defines an identity or a group of identities for a downstream subject. -// [#next-free-field: 12] -message Principal { - // Used in the `and_ids` and `or_ids` fields in the `identifier` oneof. Depending on the context, - // each are applied with the associated behavior. 
- message Set { - repeated Principal ids = 1 [(validate.rules).repeated = {min_items: 1}]; - } - - // Authentication attributes for a downstream. - message Authenticated { - reserved 1; - - // The name of the principal. If set, The URI SAN or DNS SAN in that order is used from the - // certificate, otherwise the subject field is used. If unset, it applies to any user that is - // authenticated. - type.matcher.StringMatcher principal_name = 2; - } - - oneof identifier { - option (validate.required) = true; - - // A set of identifiers that all must match in order to define the downstream. - Set and_ids = 1; - - // A set of identifiers at least one must match in order to define the downstream. - Set or_ids = 2; - - // When any is set, it matches any downstream. - bool any = 3 [(validate.rules).bool = {const: true}]; - - // Authenticated attributes that identify the downstream. - Authenticated authenticated = 4; - - // A CIDR block that describes the downstream IP. - // This address will honor proxy protocol, but will not honor XFF. - api.v2.core.CidrRange source_ip = 5 [deprecated = true]; - - // A CIDR block that describes the downstream remote/origin address. - // Note: This is always the physical peer even if the - // :ref:`remote_ip ` is inferred - // from for example the x-forwarder-for header, proxy protocol, etc. - api.v2.core.CidrRange direct_remote_ip = 10; - - // A CIDR block that describes the downstream remote/origin address. - // Note: This may not be the physical peer and could be different from the - // :ref:`direct_remote_ip `. - // E.g, if the remote ip is inferred from for example the x-forwarder-for header, - // proxy protocol, etc. - api.v2.core.CidrRange remote_ip = 11; - - // A header (or pseudo-header such as :path or :method) on the incoming HTTP request. Only - // available for HTTP request. - // Note: the pseudo-header :path includes the query and fragment string. 
Use the `url_path` - // field if you want to match the URL path without the query and fragment string. - api.v2.route.HeaderMatcher header = 6; - - // A URL path on the incoming HTTP request. Only available for HTTP. - type.matcher.PathMatcher url_path = 9; - - // Metadata that describes additional information about the principal. - type.matcher.MetadataMatcher metadata = 7; - - // Negates matching the provided principal. For instance, if the value of `not_id` would match, - // this principal would not match. Conversely, if the value of `not_id` would not match, this - // principal would match. - Principal not_id = 8; - } -} diff --git a/generated_api_shadow/envoy/config/rbac/v3/BUILD b/generated_api_shadow/envoy/config/rbac/v3/BUILD deleted file mode 100644 index c289def1f11d2..0000000000000 --- a/generated_api_shadow/envoy/config/rbac/v3/BUILD +++ /dev/null @@ -1,18 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/annotations:pkg", - "//envoy/config/core/v3:pkg", - "//envoy/config/route/v3:pkg", - "//envoy/type/matcher/v3:pkg", - "//envoy/type/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - "@com_google_googleapis//google/api/expr/v1alpha1:checked_proto", - "@com_google_googleapis//google/api/expr/v1alpha1:syntax_proto", - ], -) diff --git a/generated_api_shadow/envoy/config/rbac/v3/rbac.proto b/generated_api_shadow/envoy/config/rbac/v3/rbac.proto deleted file mode 100644 index d66f9be2b4981..0000000000000 --- a/generated_api_shadow/envoy/config/rbac/v3/rbac.proto +++ /dev/null @@ -1,306 +0,0 @@ -syntax = "proto3"; - -package envoy.config.rbac.v3; - -import "envoy/config/core/v3/address.proto"; -import "envoy/config/route/v3/route_components.proto"; -import "envoy/type/matcher/v3/metadata.proto"; -import "envoy/type/matcher/v3/path.proto"; -import 
"envoy/type/matcher/v3/string.proto"; -import "envoy/type/v3/range.proto"; - -import "google/api/expr/v1alpha1/checked.proto"; -import "google/api/expr/v1alpha1/syntax.proto"; - -import "envoy/annotations/deprecation.proto"; -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.rbac.v3"; -option java_outer_classname = "RbacProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Role Based Access Control (RBAC)] - -// Role Based Access Control (RBAC) provides service-level and method-level access control for a -// service. Requests are allowed or denied based on the `action` and whether a matching policy is -// found. For instance, if the action is ALLOW and a matching policy is found the request should be -// allowed. -// -// RBAC can also be used to make access logging decisions by communicating with access loggers -// through dynamic metadata. When the action is LOG and at least one policy matches, the -// `access_log_hint` value in the shared key namespace 'envoy.common' is set to `true` indicating -// the request should be logged. -// -// Here is an example of RBAC configuration. It has two policies: -// -// * Service account "cluster.local/ns/default/sa/admin" has full access to the service, and so -// does "cluster.local/ns/default/sa/superuser". -// -// * Any user can read ("GET") the service at paths with prefix "/products", so long as the -// destination port is either 80 or 443. -// -// .. 
code-block:: yaml -// -// action: ALLOW -// policies: -// "service-admin": -// permissions: -// - any: true -// principals: -// - authenticated: -// principal_name: -// exact: "cluster.local/ns/default/sa/admin" -// - authenticated: -// principal_name: -// exact: "cluster.local/ns/default/sa/superuser" -// "product-viewer": -// permissions: -// - and_rules: -// rules: -// - header: -// name: ":method" -// string_match: -// exact: "GET" -// - url_path: -// path: { prefix: "/products" } -// - or_rules: -// rules: -// - destination_port: 80 -// - destination_port: 443 -// principals: -// - any: true -// -message RBAC { - option (udpa.annotations.versioning).previous_message_type = "envoy.config.rbac.v2.RBAC"; - - // Should we do safe-list or block-list style access control? - enum Action { - // The policies grant access to principals. The rest are denied. This is safe-list style - // access control. This is the default type. - ALLOW = 0; - - // The policies deny access to principals. The rest are allowed. This is block-list style - // access control. - DENY = 1; - - // The policies set the `access_log_hint` dynamic metadata key based on if requests match. - // All requests are allowed. - LOG = 2; - } - - // The action to take if a policy matches. Every action either allows or denies a request, - // and can also carry out action-specific operations. - // - // Actions: - // - // * ALLOW: Allows the request if and only if there is a policy that matches - // the request. - // * DENY: Allows the request if and only if there are no policies that - // match the request. - // * LOG: Allows all requests. If at least one policy matches, the dynamic - // metadata key `access_log_hint` is set to the value `true` under the shared - // key namespace 'envoy.common'. If no policies match, it is set to `false`. - // Other actions do not modify this key. - // - Action action = 1 [(validate.rules).enum = {defined_only: true}]; - - // Maps from policy name to policy. 
A match occurs when at least one policy matches the request. - // The policies are evaluated in lexicographic order of the policy name. - map policies = 2; -} - -// Policy specifies a role and the principals that are assigned/denied the role. -// A policy matches if and only if at least one of its permissions match the -// action taking place AND at least one of its principals match the downstream -// AND the condition is true if specified. -message Policy { - option (udpa.annotations.versioning).previous_message_type = "envoy.config.rbac.v2.Policy"; - - // Required. The set of permissions that define a role. Each permission is - // matched with OR semantics. To match all actions for this policy, a single - // Permission with the `any` field set to true should be used. - repeated Permission permissions = 1 [(validate.rules).repeated = {min_items: 1}]; - - // Required. The set of principals that are assigned/denied the role based on - // “action”. Each principal is matched with OR semantics. To match all - // downstreams for this policy, a single Principal with the `any` field set to - // true should be used. - repeated Principal principals = 2 [(validate.rules).repeated = {min_items: 1}]; - - // An optional symbolic expression specifying an access control - // :ref:`condition `. The condition is combined - // with the permissions and the principals as a clause with AND semantics. - // Only be used when checked_condition is not used. - google.api.expr.v1alpha1.Expr condition = 3 - [(udpa.annotations.field_migrate).oneof_promotion = "expression_specifier"]; - - // [#not-implemented-hide:] - // An optional symbolic expression that has been successfully type checked. - // Only be used when condition is not used. - google.api.expr.v1alpha1.CheckedExpr checked_condition = 4 - [(udpa.annotations.field_migrate).oneof_promotion = "expression_specifier"]; -} - -// Permission defines an action (or actions) that a principal can take. 
-// [#next-free-field: 12] -message Permission { - option (udpa.annotations.versioning).previous_message_type = "envoy.config.rbac.v2.Permission"; - - // Used in the `and_rules` and `or_rules` fields in the `rule` oneof. Depending on the context, - // each are applied with the associated behavior. - message Set { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.rbac.v2.Permission.Set"; - - repeated Permission rules = 1 [(validate.rules).repeated = {min_items: 1}]; - } - - oneof rule { - option (validate.required) = true; - - // A set of rules that all must match in order to define the action. - Set and_rules = 1; - - // A set of rules where at least one must match in order to define the action. - Set or_rules = 2; - - // When any is set, it matches any action. - bool any = 3 [(validate.rules).bool = {const: true}]; - - // A header (or pseudo-header such as :path or :method) on the incoming HTTP request. Only - // available for HTTP request. - // Note: the pseudo-header :path includes the query and fragment string. Use the `url_path` - // field if you want to match the URL path without the query and fragment string. - route.v3.HeaderMatcher header = 4; - - // A URL path on the incoming HTTP request. Only available for HTTP. - type.matcher.v3.PathMatcher url_path = 10; - - // A CIDR block that describes the destination IP. - core.v3.CidrRange destination_ip = 5; - - // A port number that describes the destination port connecting to. - uint32 destination_port = 6 [(validate.rules).uint32 = {lte: 65535}]; - - // A port number range that describes a range of destination ports connecting to. - type.v3.Int32Range destination_port_range = 11; - - // Metadata that describes additional information about the action. - type.matcher.v3.MetadataMatcher metadata = 7; - - // Negates matching the provided permission. For instance, if the value of - // `not_rule` would match, this permission would not match. 
Conversely, if - // the value of `not_rule` would not match, this permission would match. - Permission not_rule = 8; - - // The request server from the client's connection request. This is - // typically TLS SNI. - // - // .. attention:: - // - // The behavior of this field may be affected by how Envoy is configured - // as explained below. - // - // * If the :ref:`TLS Inspector ` - // filter is not added, and if a `FilterChainMatch` is not defined for - // the :ref:`server name - // `, - // a TLS connection's requested SNI server name will be treated as if it - // wasn't present. - // - // * A :ref:`listener filter ` may - // overwrite a connection's requested server name within Envoy. - // - // Please refer to :ref:`this FAQ entry ` to learn to - // setup SNI. - type.matcher.v3.StringMatcher requested_server_name = 9; - } -} - -// Principal defines an identity or a group of identities for a downstream -// subject. -// [#next-free-field: 12] -message Principal { - option (udpa.annotations.versioning).previous_message_type = "envoy.config.rbac.v2.Principal"; - - // Used in the `and_ids` and `or_ids` fields in the `identifier` oneof. - // Depending on the context, each are applied with the associated behavior. - message Set { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.rbac.v2.Principal.Set"; - - repeated Principal ids = 1 [(validate.rules).repeated = {min_items: 1}]; - } - - // Authentication attributes for a downstream. - message Authenticated { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.rbac.v2.Principal.Authenticated"; - - reserved 1; - - // The name of the principal. If set, The URI SAN or DNS SAN in that order - // is used from the certificate, otherwise the subject field is used. If - // unset, it applies to any user that is authenticated. 
- type.matcher.v3.StringMatcher principal_name = 2; - } - - oneof identifier { - option (validate.required) = true; - - // A set of identifiers that all must match in order to define the - // downstream. - Set and_ids = 1; - - // A set of identifiers at least one must match in order to define the - // downstream. - Set or_ids = 2; - - // When any is set, it matches any downstream. - bool any = 3 [(validate.rules).bool = {const: true}]; - - // Authenticated attributes that identify the downstream. - Authenticated authenticated = 4; - - // A CIDR block that describes the downstream IP. - // This address will honor proxy protocol, but will not honor XFF. - core.v3.CidrRange source_ip = 5 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - - // A CIDR block that describes the downstream remote/origin address. - // Note: This is always the physical peer even if the - // :ref:`remote_ip ` is - // inferred from for example the x-forwarder-for header, proxy protocol, - // etc. - core.v3.CidrRange direct_remote_ip = 10; - - // A CIDR block that describes the downstream remote/origin address. - // Note: This may not be the physical peer and could be different from the - // :ref:`direct_remote_ip - // `. E.g, if the - // remote ip is inferred from for example the x-forwarder-for header, proxy - // protocol, etc. - core.v3.CidrRange remote_ip = 11; - - // A header (or pseudo-header such as :path or :method) on the incoming HTTP - // request. Only available for HTTP request. Note: the pseudo-header :path - // includes the query and fragment string. Use the `url_path` field if you - // want to match the URL path without the query and fragment string. - route.v3.HeaderMatcher header = 6; - - // A URL path on the incoming HTTP request. Only available for HTTP. - type.matcher.v3.PathMatcher url_path = 9; - - // Metadata that describes additional information about the principal. 
- type.matcher.v3.MetadataMatcher metadata = 7; - - // Negates matching the provided principal. For instance, if the value of - // `not_id` would match, this principal would not match. Conversely, if the - // value of `not_id` would not match, this principal would match. - Principal not_id = 8; - } -} diff --git a/generated_api_shadow/envoy/config/resource_monitor/fixed_heap/v2alpha/BUILD b/generated_api_shadow/envoy/config/resource_monitor/fixed_heap/v2alpha/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/config/resource_monitor/fixed_heap/v2alpha/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/config/resource_monitor/fixed_heap/v2alpha/fixed_heap.proto b/generated_api_shadow/envoy/config/resource_monitor/fixed_heap/v2alpha/fixed_heap.proto deleted file mode 100644 index 529622a071e77..0000000000000 --- a/generated_api_shadow/envoy/config/resource_monitor/fixed_heap/v2alpha/fixed_heap.proto +++ /dev/null @@ -1,21 +0,0 @@ -syntax = "proto3"; - -package envoy.config.resource_monitor.fixed_heap.v2alpha; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.resource_monitor.fixed_heap.v2alpha"; -option java_outer_classname = "FixedHeapProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Fixed heap] -// [#extension: envoy.resource_monitors.fixed_heap] - -// The fixed heap resource monitor reports the Envoy process memory pressure, computed as a -// fraction of currently reserved heap memory divided by a statically configured maximum -// specified in the FixedHeapConfig. 
-message FixedHeapConfig { - uint64 max_heap_size_bytes = 1 [(validate.rules).uint64 = {gt: 0}]; -} diff --git a/generated_api_shadow/envoy/config/resource_monitor/injected_resource/v2alpha/BUILD b/generated_api_shadow/envoy/config/resource_monitor/injected_resource/v2alpha/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/config/resource_monitor/injected_resource/v2alpha/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/config/resource_monitor/injected_resource/v2alpha/injected_resource.proto b/generated_api_shadow/envoy/config/resource_monitor/injected_resource/v2alpha/injected_resource.proto deleted file mode 100644 index a9f056d2d29aa..0000000000000 --- a/generated_api_shadow/envoy/config/resource_monitor/injected_resource/v2alpha/injected_resource.proto +++ /dev/null @@ -1,22 +0,0 @@ -syntax = "proto3"; - -package envoy.config.resource_monitor.injected_resource.v2alpha; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.resource_monitor.injected_resource.v2alpha"; -option java_outer_classname = "InjectedResourceProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Injected resource] -// [#extension: envoy.resource_monitors.injected_resource] - -// The injected resource monitor allows injecting a synthetic resource pressure into Envoy -// via a text file, which must contain a floating-point number in the range [0..1] representing -// the resource pressure and be updated atomically by a symbolic link swap. 
-// This is intended primarily for integration tests to force Envoy into an overloaded state. -message InjectedResourceConfig { - string filename = 1 [(validate.rules).string = {min_bytes: 1}]; -} diff --git a/generated_api_shadow/envoy/config/retry/omit_canary_hosts/v2/BUILD b/generated_api_shadow/envoy/config/retry/omit_canary_hosts/v2/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/config/retry/omit_canary_hosts/v2/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/config/retry/omit_canary_hosts/v2/omit_canary_hosts.proto b/generated_api_shadow/envoy/config/retry/omit_canary_hosts/v2/omit_canary_hosts.proto deleted file mode 100644 index c2b2e58a1823d..0000000000000 --- a/generated_api_shadow/envoy/config/retry/omit_canary_hosts/v2/omit_canary_hosts.proto +++ /dev/null @@ -1,19 +0,0 @@ -syntax = "proto3"; - -package envoy.config.retry.omit_canary_hosts.v2; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.config.retry.omit_canary_hosts.v2"; -option java_outer_classname = "OmitCanaryHostsProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = - "envoy.extensions.retry.host.omit_canary_hosts.v3"; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Omit Canary Hosts Predicate] -// [#extension: envoy.retry_host_predicates.omit_canary_hosts] - -message OmitCanaryHostsPredicate { -} diff --git a/generated_api_shadow/envoy/config/retry/omit_host_metadata/v2/BUILD b/generated_api_shadow/envoy/config/retry/omit_host_metadata/v2/BUILD deleted file mode 100644 index 
83bc0ab960e74..0000000000000 --- a/generated_api_shadow/envoy/config/retry/omit_host_metadata/v2/BUILD +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/api/v2/core:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/config/retry/omit_host_metadata/v2/omit_host_metadata_config.proto b/generated_api_shadow/envoy/config/retry/omit_host_metadata/v2/omit_host_metadata_config.proto deleted file mode 100644 index d229cffef8ca9..0000000000000 --- a/generated_api_shadow/envoy/config/retry/omit_host_metadata/v2/omit_host_metadata_config.proto +++ /dev/null @@ -1,28 +0,0 @@ -syntax = "proto3"; - -package envoy.config.retry.omit_host_metadata.v2; - -import "envoy/api/v2/core/base.proto"; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.config.retry.omit_host_metadata.v2"; -option java_outer_classname = "OmitHostMetadataConfigProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = - "envoy.extensions.retry.host.omit_host_metadata.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Omit host metadata retry predicate] - -// A retry host predicate that can be used to reject a host based on -// predefined metadata match criteria. -// [#extension: envoy.retry_host_predicates.omit_host_metadata] -message OmitHostMetadataConfig { - // Retry host predicate metadata match criteria. The hosts in - // the upstream cluster with matching metadata will be omitted while - // attempting a retry of a failed request. The metadata should be specified - // under the *envoy.lb* key. 
- api.v2.core.Metadata metadata_match = 1; -} diff --git a/generated_api_shadow/envoy/config/retry/previous_hosts/v2/BUILD b/generated_api_shadow/envoy/config/retry/previous_hosts/v2/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/config/retry/previous_hosts/v2/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/config/retry/previous_hosts/v2/previous_hosts.proto b/generated_api_shadow/envoy/config/retry/previous_hosts/v2/previous_hosts.proto deleted file mode 100644 index f69c5054f9c9b..0000000000000 --- a/generated_api_shadow/envoy/config/retry/previous_hosts/v2/previous_hosts.proto +++ /dev/null @@ -1,19 +0,0 @@ -syntax = "proto3"; - -package envoy.config.retry.previous_hosts.v2; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.config.retry.previous_hosts.v2"; -option java_outer_classname = "PreviousHostsProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = - "envoy.extensions.retry.host.previous_hosts.v3"; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Previous Hosts Predicate] -// [#extension: envoy.retry_host_predicates.previous_hosts] - -message PreviousHostsPredicate { -} diff --git a/generated_api_shadow/envoy/config/retry/previous_priorities/BUILD b/generated_api_shadow/envoy/config/retry/previous_priorities/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/config/retry/previous_priorities/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. 
This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/config/retry/previous_priorities/previous_priorities_config.proto b/generated_api_shadow/envoy/config/retry/previous_priorities/previous_priorities_config.proto deleted file mode 100644 index 3fc400c053a7f..0000000000000 --- a/generated_api_shadow/envoy/config/retry/previous_priorities/previous_priorities_config.proto +++ /dev/null @@ -1,56 +0,0 @@ -syntax = "proto3"; - -package envoy.config.retry.previous_priorities; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.retry.previous_priorities"; -option java_outer_classname = "PreviousPrioritiesConfigProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = - "envoy.extensions.retry.priority.previous_priorities.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Previous priorities retry selector] - -// A retry host selector that attempts to spread retries between priorities, even if certain -// priorities would not normally be attempted due to higher priorities being available. -// -// As priorities get excluded, load will be distributed amongst the remaining healthy priorities -// based on the relative health of the priorities, matching how load is distributed during regular -// host selection. For example, given priority healths of {100, 50, 50}, the original load will be -// {100, 0, 0} (since P0 has capacity to handle 100% of the traffic). If P0 is excluded, the load -// changes to {0, 50, 50}, because P1 is only able to handle 50% of the traffic, causing the -// remaining to spill over to P2. 
-// -// Each priority attempted will be excluded until there are no healthy priorities left, at which -// point the list of attempted priorities will be reset, essentially starting from the beginning. -// For example, given three priorities P0, P1, P2 with healthy % of 100, 0 and 50 respectively, the -// following sequence of priorities would be selected (assuming update_frequency = 1): -// Attempt 1: P0 (P0 is 100% healthy) -// Attempt 2: P2 (P0 already attempted, P2 only healthy priority) -// Attempt 3: P0 (no healthy priorities, reset) -// Attempt 4: P2 -// -// In the case of all upstream hosts being unhealthy, no adjustments will be made to the original -// priority load, so behavior should be identical to not using this plugin. -// -// Using this PriorityFilter requires rebuilding the priority load, which runs in O(# of -// priorities), which might incur significant overhead for clusters with many priorities. -// [#extension: envoy.retry_priorities.previous_priorities] -message PreviousPrioritiesConfig { - // How often the priority load should be updated based on previously attempted priorities. Useful - // to allow each priorities to receive more than one request before being excluded or to reduce - // the number of times that the priority load has to be recomputed. - // - // For example, by setting this to 2, then the first two attempts (initial attempt and first - // retry) will use the unmodified priority load. The third and fourth attempt will use priority - // load which excludes the priorities routed to with the first two attempts, and the fifth and - // sixth attempt will use the priority load excluding the priorities used for the first four - // attempts. - // - // Must be greater than 0. 
- int32 update_frequency = 1 [(validate.rules).int32 = {gt: 0}]; -} diff --git a/generated_api_shadow/envoy/config/route/v3/BUILD b/generated_api_shadow/envoy/config/route/v3/BUILD deleted file mode 100644 index 81cdfdf8a93a6..0000000000000 --- a/generated_api_shadow/envoy/config/route/v3/BUILD +++ /dev/null @@ -1,19 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/annotations:pkg", - "//envoy/api/v2:pkg", - "//envoy/api/v2/route:pkg", - "//envoy/config/core/v3:pkg", - "//envoy/type/matcher/v3:pkg", - "//envoy/type/metadata/v3:pkg", - "//envoy/type/tracing/v3:pkg", - "//envoy/type/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/config/route/v3/route.proto b/generated_api_shadow/envoy/config/route/v3/route.proto deleted file mode 100644 index e2bf52165be92..0000000000000 --- a/generated_api_shadow/envoy/config/route/v3/route.proto +++ /dev/null @@ -1,142 +0,0 @@ -syntax = "proto3"; - -package envoy.config.route.v3; - -import "envoy/config/core/v3/base.proto"; -import "envoy/config/core/v3/config_source.proto"; -import "envoy/config/core/v3/extension.proto"; -import "envoy/config/route/v3/route_components.proto"; - -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.route.v3"; -option java_outer_classname = "RouteProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: HTTP route configuration] -// * Routing :ref:`architecture overview ` -// * HTTP :ref:`router filter ` - -// [#next-free-field: 13] -message RouteConfiguration { - option 
(udpa.annotations.versioning).previous_message_type = "envoy.api.v2.RouteConfiguration"; - - // The name of the route configuration. For example, it might match - // :ref:`route_config_name - // ` in - // :ref:`envoy_v3_api_msg_extensions.filters.network.http_connection_manager.v3.Rds`. - string name = 1; - - // An array of virtual hosts that make up the route table. - repeated VirtualHost virtual_hosts = 2; - - // An array of virtual hosts will be dynamically loaded via the VHDS API. - // Both *virtual_hosts* and *vhds* fields will be used when present. *virtual_hosts* can be used - // for a base routing table or for infrequently changing virtual hosts. *vhds* is used for - // on-demand discovery of virtual hosts. The contents of these two fields will be merged to - // generate a routing table for a given RouteConfiguration, with *vhds* derived configuration - // taking precedence. - Vhds vhds = 9; - - // Optionally specifies a list of HTTP headers that the connection manager - // will consider to be internal only. If they are found on external requests they will be cleaned - // prior to filter invocation. See :ref:`config_http_conn_man_headers_x-envoy-internal` for more - // information. - repeated string internal_only_headers = 3 [ - (validate.rules).repeated = {items {string {well_known_regex: HTTP_HEADER_NAME strict: false}}} - ]; - - // Specifies a list of HTTP headers that should be added to each response that - // the connection manager encodes. Headers specified at this level are applied - // after headers from any enclosed :ref:`envoy_v3_api_msg_config.route.v3.VirtualHost` or - // :ref:`envoy_v3_api_msg_config.route.v3.RouteAction`. For more information, including details on - // header value syntax, see the documentation on :ref:`custom request headers - // `. 
- repeated core.v3.HeaderValueOption response_headers_to_add = 4 - [(validate.rules).repeated = {max_items: 1000}]; - - // Specifies a list of HTTP headers that should be removed from each response - // that the connection manager encodes. - repeated string response_headers_to_remove = 5 [ - (validate.rules).repeated = {items {string {well_known_regex: HTTP_HEADER_NAME strict: false}}} - ]; - - // Specifies a list of HTTP headers that should be added to each request - // routed by the HTTP connection manager. Headers specified at this level are - // applied after headers from any enclosed :ref:`envoy_v3_api_msg_config.route.v3.VirtualHost` or - // :ref:`envoy_v3_api_msg_config.route.v3.RouteAction`. For more information, including details on - // header value syntax, see the documentation on :ref:`custom request headers - // `. - repeated core.v3.HeaderValueOption request_headers_to_add = 6 - [(validate.rules).repeated = {max_items: 1000}]; - - // Specifies a list of HTTP headers that should be removed from each request - // routed by the HTTP connection manager. - repeated string request_headers_to_remove = 8 [ - (validate.rules).repeated = {items {string {well_known_regex: HTTP_HEADER_NAME strict: false}}} - ]; - - // By default, headers that should be added/removed are evaluated from most to least specific: - // - // * route level - // * virtual host level - // * connection manager level - // - // To allow setting overrides at the route or virtual host level, this order can be reversed - // by setting this option to true. Defaults to false. - // - // [#next-major-version: In the v3 API, this will default to true.] - bool most_specific_header_mutations_wins = 10; - - // An optional boolean that specifies whether the clusters that the route - // table refers to will be validated by the cluster manager. If set to true - // and a route refers to a non-existent cluster, the route table will not - // load. 
If set to false and a route refers to a non-existent cluster, the - // route table will load and the router filter will return a 404 if the route - // is selected at runtime. This setting defaults to true if the route table - // is statically defined via the :ref:`route_config - // ` - // option. This setting default to false if the route table is loaded dynamically via the - // :ref:`rds - // ` - // option. Users may wish to override the default behavior in certain cases (for example when - // using CDS with a static route table). - google.protobuf.BoolValue validate_clusters = 7; - - // The maximum bytes of the response :ref:`direct response body - // ` size. If not specified the default - // is 4096. - // - // .. warning:: - // - // Envoy currently holds the content of :ref:`direct response body - // ` in memory. Be careful setting - // this to be larger than the default 4KB, since the allocated memory for direct response body - // is not subject to data plane buffering controls. - // - google.protobuf.UInt32Value max_direct_response_body_size_bytes = 11; - - // [#not-implemented-hide:] - // A list of plugins and their configurations which may be used by a - // :ref:`envoy_v3_api_field_config.route.v3.RouteAction.cluster_specifier_plugin` - // within the route. All *extension.name* fields in this list must be unique. - repeated ClusterSpecifierPlugin cluster_specifier_plugins = 12; -} - -// Configuration for a cluster specifier plugin. -message ClusterSpecifierPlugin { - // The name of the plugin and its opaque configuration. - core.v3.TypedExtensionConfig extension = 1; -} - -message Vhds { - option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.Vhds"; - - // Configuration source specifier for VHDS. 
- core.v3.ConfigSource config_source = 1 [(validate.rules).message = {required: true}]; -} diff --git a/generated_api_shadow/envoy/config/route/v3/route_components.proto b/generated_api_shadow/envoy/config/route/v3/route_components.proto deleted file mode 100644 index 8930f9ec8dff3..0000000000000 --- a/generated_api_shadow/envoy/config/route/v3/route_components.proto +++ /dev/null @@ -1,2106 +0,0 @@ -syntax = "proto3"; - -package envoy.config.route.v3; - -import "envoy/config/core/v3/base.proto"; -import "envoy/config/core/v3/extension.proto"; -import "envoy/config/core/v3/proxy_protocol.proto"; -import "envoy/type/matcher/v3/metadata.proto"; -import "envoy/type/matcher/v3/regex.proto"; -import "envoy/type/matcher/v3/string.proto"; -import "envoy/type/metadata/v3/metadata.proto"; -import "envoy/type/tracing/v3/custom_tag.proto"; -import "envoy/type/v3/percent.proto"; -import "envoy/type/v3/range.proto"; - -import "google/protobuf/any.proto"; -import "google/protobuf/duration.proto"; -import "google/protobuf/struct.proto"; -import "google/protobuf/wrappers.proto"; - -import "envoy/annotations/deprecation.proto"; -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.route.v3"; -option java_outer_classname = "RouteComponentsProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: HTTP route components] -// * Routing :ref:`architecture overview ` -// * HTTP :ref:`router filter ` - -// The top level element in the routing configuration is a virtual host. Each virtual host has -// a logical name as well as a set of domains that get routed to it based on the incoming request's -// host header. This allows a single listener to service multiple top level domain path trees. 
Once -// a virtual host is selected based on the domain, the routes are processed in order to see which -// upstream cluster to route to or whether to perform a redirect. -// [#next-free-field: 21] -message VirtualHost { - option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.route.VirtualHost"; - - enum TlsRequirementType { - // No TLS requirement for the virtual host. - NONE = 0; - - // External requests must use TLS. If a request is external and it is not - // using TLS, a 301 redirect will be sent telling the client to use HTTPS. - EXTERNAL_ONLY = 1; - - // All requests must use TLS. If a request is not using TLS, a 301 redirect - // will be sent telling the client to use HTTPS. - ALL = 2; - } - - reserved 9; - - // The logical name of the virtual host. This is used when emitting certain - // statistics but is not relevant for routing. - string name = 1 [(validate.rules).string = {min_len: 1}]; - - // A list of domains (host/authority header) that will be matched to this - // virtual host. Wildcard hosts are supported in the suffix or prefix form. - // - // Domain search order: - // 1. Exact domain names: ``www.foo.com``. - // 2. Suffix domain wildcards: ``*.foo.com`` or ``*-bar.foo.com``. - // 3. Prefix domain wildcards: ``foo.*`` or ``foo-*``. - // 4. Special wildcard ``*`` matching any domain. - // - // .. note:: - // - // The wildcard will not match the empty string. - // e.g. ``*-bar.foo.com`` will match ``baz-bar.foo.com`` but not ``-bar.foo.com``. - // The longest wildcards match first. - // Only a single virtual host in the entire route configuration can match on ``*``. A domain - // must be unique across all virtual hosts or the config will fail to load. - // - // Domains cannot contain control characters. This is validated by the well_known_regex HTTP_HEADER_VALUE. 
- repeated string domains = 2 [(validate.rules).repeated = { - min_items: 1 - items {string {well_known_regex: HTTP_HEADER_VALUE strict: false}} - }]; - - // The list of routes that will be matched, in order, for incoming requests. - // The first route that matches will be used. - repeated Route routes = 3; - - // Specifies the type of TLS enforcement the virtual host expects. If this option is not - // specified, there is no TLS requirement for the virtual host. - TlsRequirementType require_tls = 4 [(validate.rules).enum = {defined_only: true}]; - - // A list of virtual clusters defined for this virtual host. Virtual clusters - // are used for additional statistics gathering. - repeated VirtualCluster virtual_clusters = 5; - - // Specifies a set of rate limit configurations that will be applied to the - // virtual host. - repeated RateLimit rate_limits = 6; - - // Specifies a list of HTTP headers that should be added to each request - // handled by this virtual host. Headers specified at this level are applied - // after headers from enclosed :ref:`envoy_v3_api_msg_config.route.v3.Route` and before headers from the - // enclosing :ref:`envoy_v3_api_msg_config.route.v3.RouteConfiguration`. For more information, including - // details on header value syntax, see the documentation on :ref:`custom request headers - // `. - repeated core.v3.HeaderValueOption request_headers_to_add = 7 - [(validate.rules).repeated = {max_items: 1000}]; - - // Specifies a list of HTTP headers that should be removed from each request - // handled by this virtual host. - repeated string request_headers_to_remove = 13 [(validate.rules).repeated = { - items {string {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}} - }]; - - // Specifies a list of HTTP headers that should be added to each response - // handled by this virtual host. 
Headers specified at this level are applied - // after headers from enclosed :ref:`envoy_v3_api_msg_config.route.v3.Route` and before headers from the - // enclosing :ref:`envoy_v3_api_msg_config.route.v3.RouteConfiguration`. For more information, including - // details on header value syntax, see the documentation on :ref:`custom request headers - // `. - repeated core.v3.HeaderValueOption response_headers_to_add = 10 - [(validate.rules).repeated = {max_items: 1000}]; - - // Specifies a list of HTTP headers that should be removed from each response - // handled by this virtual host. - repeated string response_headers_to_remove = 11 [(validate.rules).repeated = { - items {string {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}} - }]; - - // Indicates that the virtual host has a CORS policy. - CorsPolicy cors = 8; - - // The per_filter_config field can be used to provide virtual host-specific - // configurations for filters. The key should match the filter name, such as - // *envoy.filters.http.buffer* for the HTTP buffer filter. Use of this field is filter - // specific; see the :ref:`HTTP filter documentation ` - // for if and how it is utilized. - // [#comment: An entry's value may be wrapped in a - // :ref:`FilterConfig` - // message to specify additional options.] - map typed_per_filter_config = 15; - - // Decides whether the :ref:`x-envoy-attempt-count - // ` header should be included - // in the upstream request. Setting this option will cause it to override any existing header - // value, so in the case of two Envoys on the request path with this option enabled, the upstream - // will see the attempt count as perceived by the second Envoy. Defaults to false. - // This header is unaffected by the - // :ref:`suppress_envoy_headers - // ` flag. - // - // [#next-major-version: rename to include_attempt_count_in_request.] 
- bool include_request_attempt_count = 14; - - // Decides whether the :ref:`x-envoy-attempt-count - // ` header should be included - // in the downstream response. Setting this option will cause the router to override any existing header - // value, so in the case of two Envoys on the request path with this option enabled, the downstream - // will see the attempt count as perceived by the Envoy closest upstream from itself. Defaults to false. - // This header is unaffected by the - // :ref:`suppress_envoy_headers - // ` flag. - bool include_attempt_count_in_response = 19; - - // Indicates the retry policy for all routes in this virtual host. Note that setting a - // route level entry will take precedence over this config and it'll be treated - // independently (e.g.: values are not inherited). - RetryPolicy retry_policy = 16; - - // [#not-implemented-hide:] - // Specifies the configuration for retry policy extension. Note that setting a route level entry - // will take precedence over this config and it'll be treated independently (e.g.: values are not - // inherited). :ref:`Retry policy ` should not be - // set if this field is used. - google.protobuf.Any retry_policy_typed_config = 20; - - // Indicates the hedge policy for all routes in this virtual host. Note that setting a - // route level entry will take precedence over this config and it'll be treated - // independently (e.g.: values are not inherited). - HedgePolicy hedge_policy = 17; - - // The maximum bytes which will be buffered for retries and shadowing. - // If set and a route-specific limit is not set, the bytes actually buffered will be the minimum - // value of this and the listener per_connection_buffer_limit_bytes. - google.protobuf.UInt32Value per_request_buffer_limit_bytes = 18; - - map hidden_envoy_deprecated_per_filter_config = 12 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; -} - -// A filter-defined action type. 
-message FilterAction { - option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.route.FilterAction"; - - google.protobuf.Any action = 1; -} - -// A route is both a specification of how to match a request as well as an indication of what to do -// next (e.g., redirect, forward, rewrite, etc.). -// -// .. attention:: -// -// Envoy supports routing on HTTP method via :ref:`header matching -// `. -// [#next-free-field: 19] -message Route { - option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.route.Route"; - - reserved 6; - - // Name for the route. - string name = 14; - - // Route matching parameters. - RouteMatch match = 1 [(validate.rules).message = {required: true}]; - - oneof action { - option (validate.required) = true; - - // Route request to some upstream cluster. - RouteAction route = 2; - - // Return a redirect. - RedirectAction redirect = 3; - - // Return an arbitrary HTTP response directly, without proxying. - DirectResponseAction direct_response = 7; - - // [#not-implemented-hide:] - // A filter-defined action (e.g., it could dynamically generate the RouteAction). - // [#comment: TODO(samflattery): Remove cleanup in route_fuzz_test.cc when - // implemented] - FilterAction filter_action = 17; - - // [#not-implemented-hide:] - // An action used when the route will generate a response directly, - // without forwarding to an upstream host. This will be used in non-proxy - // xDS clients like the gRPC server. It could also be used in the future - // in Envoy for a filter that directly generates responses for requests. - NonForwardingAction non_forwarding_action = 18; - } - - // The Metadata field can be used to provide additional information - // about the route. It can be used for configuration, stats, and logging. - // The metadata should go under the filter namespace that will need it. 
- // For instance, if the metadata is intended for the Router filter, - // the filter name should be specified as *envoy.filters.http.router*. - core.v3.Metadata metadata = 4; - - // Decorator for the matched route. - Decorator decorator = 5; - - // The typed_per_filter_config field can be used to provide route-specific - // configurations for filters. The key should match the filter name, such as - // *envoy.filters.http.buffer* for the HTTP buffer filter. Use of this field is filter - // specific; see the :ref:`HTTP filter documentation ` for - // if and how it is utilized. - // [#comment: An entry's value may be wrapped in a - // :ref:`FilterConfig` - // message to specify additional options.] - map typed_per_filter_config = 13; - - // Specifies a set of headers that will be added to requests matching this - // route. Headers specified at this level are applied before headers from the - // enclosing :ref:`envoy_v3_api_msg_config.route.v3.VirtualHost` and - // :ref:`envoy_v3_api_msg_config.route.v3.RouteConfiguration`. For more information, including details on - // header value syntax, see the documentation on :ref:`custom request headers - // `. - repeated core.v3.HeaderValueOption request_headers_to_add = 9 - [(validate.rules).repeated = {max_items: 1000}]; - - // Specifies a list of HTTP headers that should be removed from each request - // matching this route. - repeated string request_headers_to_remove = 12 [(validate.rules).repeated = { - items {string {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}} - }]; - - // Specifies a set of headers that will be added to responses to requests - // matching this route. Headers specified at this level are applied before - // headers from the enclosing :ref:`envoy_v3_api_msg_config.route.v3.VirtualHost` and - // :ref:`envoy_v3_api_msg_config.route.v3.RouteConfiguration`. For more information, including - // details on header value syntax, see the documentation on - // :ref:`custom request headers `. 
- repeated core.v3.HeaderValueOption response_headers_to_add = 10 - [(validate.rules).repeated = {max_items: 1000}]; - - // Specifies a list of HTTP headers that should be removed from each response - // to requests matching this route. - repeated string response_headers_to_remove = 11 [(validate.rules).repeated = { - items {string {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}} - }]; - - // Presence of the object defines whether the connection manager's tracing configuration - // is overridden by this route specific instance. - Tracing tracing = 15; - - // The maximum bytes which will be buffered for retries and shadowing. - // If set, the bytes actually buffered will be the minimum value of this and the - // listener per_connection_buffer_limit_bytes. - google.protobuf.UInt32Value per_request_buffer_limit_bytes = 16; - - map hidden_envoy_deprecated_per_filter_config = 8 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; -} - -// Compared to the :ref:`cluster ` field that specifies a -// single upstream cluster as the target of a request, the :ref:`weighted_clusters -// ` option allows for specification of -// multiple upstream clusters along with weights that indicate the percentage of -// traffic to be forwarded to each cluster. The router selects an upstream cluster based on the -// weights. -message WeightedCluster { - option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.route.WeightedCluster"; - - // [#next-free-field: 13] - message ClusterWeight { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.route.WeightedCluster.ClusterWeight"; - - reserved 7; - - // Only one of *name* and *cluster_header* may be specified. - // [#next-major-version: Need to add back the validation rule: (validate.rules).string = {min_len: 1}] - // Name of the upstream cluster. The cluster must exist in the - // :ref:`cluster manager configuration `. 
- string name = 1 [(udpa.annotations.field_migrate).oneof_promotion = "cluster_specifier"]; - - // Only one of *name* and *cluster_header* may be specified. - // [#next-major-version: Need to add back the validation rule: (validate.rules).string = {min_len: 1 }] - // Envoy will determine the cluster to route to by reading the value of the - // HTTP header named by cluster_header from the request headers. If the - // header is not found or the referenced cluster does not exist, Envoy will - // return a 404 response. - // - // .. attention:: - // - // Internally, Envoy always uses the HTTP/2 *:authority* header to represent the HTTP/1 - // *Host* header. Thus, if attempting to match on *Host*, match on *:authority* instead. - // - // .. note:: - // - // If the header appears multiple times only the first value is used. - string cluster_header = 12 [ - (validate.rules).string = {well_known_regex: HTTP_HEADER_NAME strict: false}, - (udpa.annotations.field_migrate).oneof_promotion = "cluster_specifier" - ]; - - // An integer between 0 and :ref:`total_weight - // `. When a request matches the route, - // the choice of an upstream cluster is determined by its weight. The sum of weights across all - // entries in the clusters array must add up to the total_weight, which defaults to 100. - google.protobuf.UInt32Value weight = 2; - - // Optional endpoint metadata match criteria used by the subset load balancer. Only endpoints in - // the upstream cluster with metadata matching what is set in this field will be considered for - // load balancing. Note that this will be merged with what's provided in - // :ref:`RouteAction.metadata_match `, with - // values here taking precedence. The filter name should be specified as *envoy.lb*. - core.v3.Metadata metadata_match = 3; - - // Specifies a list of headers to be added to requests when this cluster is selected - // through the enclosing :ref:`envoy_v3_api_msg_config.route.v3.RouteAction`. 
- // Headers specified at this level are applied before headers from the enclosing - // :ref:`envoy_v3_api_msg_config.route.v3.Route`, :ref:`envoy_v3_api_msg_config.route.v3.VirtualHost`, and - // :ref:`envoy_v3_api_msg_config.route.v3.RouteConfiguration`. For more information, including details on - // header value syntax, see the documentation on :ref:`custom request headers - // `. - repeated core.v3.HeaderValueOption request_headers_to_add = 4 - [(validate.rules).repeated = {max_items: 1000}]; - - // Specifies a list of HTTP headers that should be removed from each request when - // this cluster is selected through the enclosing :ref:`envoy_v3_api_msg_config.route.v3.RouteAction`. - repeated string request_headers_to_remove = 9 [(validate.rules).repeated = { - items {string {well_known_regex: HTTP_HEADER_NAME strict: false}} - }]; - - // Specifies a list of headers to be added to responses when this cluster is selected - // through the enclosing :ref:`envoy_v3_api_msg_config.route.v3.RouteAction`. - // Headers specified at this level are applied before headers from the enclosing - // :ref:`envoy_v3_api_msg_config.route.v3.Route`, :ref:`envoy_v3_api_msg_config.route.v3.VirtualHost`, and - // :ref:`envoy_v3_api_msg_config.route.v3.RouteConfiguration`. For more information, including details on - // header value syntax, see the documentation on :ref:`custom request headers - // `. - repeated core.v3.HeaderValueOption response_headers_to_add = 5 - [(validate.rules).repeated = {max_items: 1000}]; - - // Specifies a list of headers to be removed from responses when this cluster is selected - // through the enclosing :ref:`envoy_v3_api_msg_config.route.v3.RouteAction`. - repeated string response_headers_to_remove = 6 [(validate.rules).repeated = { - items {string {well_known_regex: HTTP_HEADER_NAME strict: false}} - }]; - - // The per_filter_config field can be used to provide weighted cluster-specific - // configurations for filters. 
The key should match the filter name, such as - // *envoy.filters.http.buffer* for the HTTP buffer filter. Use of this field is filter - // specific; see the :ref:`HTTP filter documentation ` - // for if and how it is utilized. - // [#comment: An entry's value may be wrapped in a - // :ref:`FilterConfig` - // message to specify additional options.] - map typed_per_filter_config = 10; - - oneof host_rewrite_specifier { - // Indicates that during forwarding, the host header will be swapped with - // this value. - string host_rewrite_literal = 11 - [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}]; - } - - map hidden_envoy_deprecated_per_filter_config = 8 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - } - - // Specifies one or more upstream clusters associated with the route. - repeated ClusterWeight clusters = 1 [(validate.rules).repeated = {min_items: 1}]; - - // Specifies the total weight across all clusters. The sum of all cluster weights must equal this - // value, which must be greater than 0. Defaults to 100. - google.protobuf.UInt32Value total_weight = 3 [(validate.rules).uint32 = {gte: 1}]; - - // Specifies the runtime key prefix that should be used to construct the - // runtime keys associated with each cluster. When the *runtime_key_prefix* is - // specified, the router will look for weights associated with each upstream - // cluster under the key *runtime_key_prefix* + "." + *cluster[i].name* where - // *cluster[i]* denotes an entry in the clusters array field. If the runtime - // key for the cluster does not exist, the value specified in the - // configuration file will be used as the default weight. See the :ref:`runtime documentation - // ` for how key names map to the underlying implementation. 
- string runtime_key_prefix = 2; -} - -// [#next-free-field: 14] -message RouteMatch { - option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.route.RouteMatch"; - - message GrpcRouteMatchOptions { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.route.RouteMatch.GrpcRouteMatchOptions"; - } - - message TlsContextMatchOptions { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.route.RouteMatch.TlsContextMatchOptions"; - - // If specified, the route will match against whether or not a certificate is presented. - // If not specified, certificate presentation status (true or false) will not be considered when route matching. - google.protobuf.BoolValue presented = 1; - - // If specified, the route will match against whether or not a certificate is validated. - // If not specified, certificate validation status (true or false) will not be considered when route matching. - google.protobuf.BoolValue validated = 2; - } - - // An extensible message for matching CONNECT requests. - message ConnectMatcher { - } - - reserved 5; - - oneof path_specifier { - option (validate.required) = true; - - // If specified, the route is a prefix rule meaning that the prefix must - // match the beginning of the *:path* header. - string prefix = 1; - - // If specified, the route is an exact path rule meaning that the path must - // exactly match the *:path* header once the query string is removed. - string path = 2; - - // If specified, the route is a regular expression rule meaning that the - // regex must match the *:path* header once the query string is removed. The entire path - // (without the query string) must match the regex. The rule will not match if only a - // subsequence of the *:path* header matches the regex. 
- // - // [#next-major-version: In the v3 API we should redo how path specification works such - // that we utilize StringMatcher, and additionally have consistent options around whether we - // strip query strings, do a case sensitive match, etc. In the interim it will be too disruptive - // to deprecate the existing options. We should even consider whether we want to do away with - // path_specifier entirely and just rely on a set of header matchers which can already match - // on :path, etc. The issue with that is it is unclear how to generically deal with query string - // stripping. This needs more thought.] - type.matcher.v3.RegexMatcher safe_regex = 10 [(validate.rules).message = {required: true}]; - - // If this is used as the matcher, the matcher will only match CONNECT requests. - // Note that this will not match HTTP/2 upgrade-style CONNECT requests - // (WebSocket and the like) as they are normalized in Envoy as HTTP/1.1 style - // upgrades. - // This is the only way to match CONNECT requests for HTTP/1.1. For HTTP/2, - // where Extended CONNECT requests may have a path, the path matchers will work if - // there is a path present. - // Note that CONNECT support is currently considered alpha in Envoy. - // [#comment: TODO(htuch): Replace the above comment with an alpha tag.] - ConnectMatcher connect_matcher = 12; - - string hidden_envoy_deprecated_regex = 3 [ - deprecated = true, - (validate.rules).string = {max_bytes: 1024}, - (envoy.annotations.deprecated_at_minor_version) = "3.0", - (envoy.annotations.disallowed_by_default) = true - ]; - } - - // Indicates that prefix/path matching should be case sensitive. The default - // is true. Ignored for safe_regex matching. - google.protobuf.BoolValue case_sensitive = 4; - - // Indicates that the route should additionally match on a runtime key. Every time the route - // is considered for a match, it must also fall under the percentage of matches indicated by - // this field. 
For some fraction N/D, a random number in the range [0,D) is selected. If the - // number is <= the value of the numerator N, or if the key is not present, the default - // value, the router continues to evaluate the remaining match criteria. A runtime_fraction - // route configuration can be used to roll out route changes in a gradual manner without full - // code/config deploys. Refer to the :ref:`traffic shifting - // ` docs for additional documentation. - // - // .. note:: - // - // Parsing this field is implemented such that the runtime key's data may be represented - // as a FractionalPercent proto represented as JSON/YAML and may also be represented as an - // integer with the assumption that the value is an integral percentage out of 100. For - // instance, a runtime key lookup returning the value "42" would parse as a FractionalPercent - // whose numerator is 42 and denominator is HUNDRED. This preserves legacy semantics. - core.v3.RuntimeFractionalPercent runtime_fraction = 9; - - // Specifies a set of headers that the route should match on. The router will - // check the request’s headers against all the specified headers in the route - // config. A match will happen if all the headers in the route are present in - // the request with the same values (or based on presence if the value field - // is not in the config). - repeated HeaderMatcher headers = 6; - - // Specifies a set of URL query parameters on which the route should - // match. The router will check the query string from the *path* header - // against all the specified query parameters. If the number of specified - // query parameters is nonzero, they all must match the *path* header's - // query string for a match to occur. - repeated QueryParameterMatcher query_parameters = 7; - - // If specified, only gRPC requests will be matched. The router will check - // that the content-type header has a application/grpc or one of the various - // application/grpc+ values. 
- GrpcRouteMatchOptions grpc = 8; - - // If specified, the client tls context will be matched against the defined - // match options. - // - // [#next-major-version: unify with RBAC] - TlsContextMatchOptions tls_context = 11; - - // Specifies a set of dynamic metadata matchers on which the route should match. - // The router will check the dynamic metadata against all the specified dynamic metadata matchers. - // If the number of specified dynamic metadata matchers is nonzero, they all must match the - // dynamic metadata for a match to occur. - repeated type.matcher.v3.MetadataMatcher dynamic_metadata = 13; -} - -// [#next-free-field: 12] -message CorsPolicy { - option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.route.CorsPolicy"; - - // Specifies string patterns that match allowed origins. An origin is allowed if any of the - // string matchers match. - repeated type.matcher.v3.StringMatcher allow_origin_string_match = 11; - - // Specifies the content for the *access-control-allow-methods* header. - string allow_methods = 2; - - // Specifies the content for the *access-control-allow-headers* header. - string allow_headers = 3; - - // Specifies the content for the *access-control-expose-headers* header. - string expose_headers = 4; - - // Specifies the content for the *access-control-max-age* header. - string max_age = 5; - - // Specifies whether the resource allows credentials. - google.protobuf.BoolValue allow_credentials = 6; - - oneof enabled_specifier { - // Specifies the % of requests for which the CORS filter is enabled. - // - // If neither ``enabled``, ``filter_enabled``, nor ``shadow_enabled`` are specified, the CORS - // filter will be enabled for 100% of the requests. - // - // If :ref:`runtime_key ` is - // specified, Envoy will lookup the runtime key to get the percentage of requests to filter. 
- core.v3.RuntimeFractionalPercent filter_enabled = 9; - - google.protobuf.BoolValue hidden_envoy_deprecated_enabled = 7 [ - deprecated = true, - (envoy.annotations.deprecated_at_minor_version) = "3.0", - (envoy.annotations.disallowed_by_default) = true - ]; - } - - // Specifies the % of requests for which the CORS policies will be evaluated and tracked, but not - // enforced. - // - // This field is intended to be used when ``filter_enabled`` and ``enabled`` are off. One of those - // fields have to explicitly disable the filter in order for this setting to take effect. - // - // If :ref:`runtime_key ` is specified, - // Envoy will lookup the runtime key to get the percentage of requests for which it will evaluate - // and track the request's *Origin* to determine if it's valid but will not enforce any policies. - core.v3.RuntimeFractionalPercent shadow_enabled = 10; - - repeated string hidden_envoy_deprecated_allow_origin = 1 [ - deprecated = true, - (envoy.annotations.deprecated_at_minor_version) = "3.0", - (envoy.annotations.disallowed_by_default) = true - ]; - - repeated string hidden_envoy_deprecated_allow_origin_regex = 8 [ - deprecated = true, - (validate.rules).repeated = {items {string {max_bytes: 1024}}}, - (envoy.annotations.deprecated_at_minor_version) = "3.0" - ]; -} - -// [#next-free-field: 38] -message RouteAction { - option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.route.RouteAction"; - - enum ClusterNotFoundResponseCode { - // HTTP status code - 503 Service Unavailable. - SERVICE_UNAVAILABLE = 0; - - // HTTP status code - 404 Not Found. - NOT_FOUND = 1; - } - - // Configures :ref:`internal redirect ` behavior. - // [#next-major-version: remove this definition - it's defined in the InternalRedirectPolicy message.] 
- enum InternalRedirectAction { - option deprecated = true; - - PASS_THROUGH_INTERNAL_REDIRECT = 0; - HANDLE_INTERNAL_REDIRECT = 1; - } - - // The router is capable of shadowing traffic from one cluster to another. The current - // implementation is "fire and forget," meaning Envoy will not wait for the shadow cluster to - // respond before returning the response from the primary cluster. All normal statistics are - // collected for the shadow cluster making this feature useful for testing. - // - // During shadowing, the host/authority header is altered such that *-shadow* is appended. This is - // useful for logging. For example, *cluster1* becomes *cluster1-shadow*. - // - // .. note:: - // - // Shadowing will not be triggered if the primary cluster does not exist. - message RequestMirrorPolicy { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.route.RouteAction.RequestMirrorPolicy"; - - // Specifies the cluster that requests will be mirrored to. The cluster must - // exist in the cluster manager configuration. - string cluster = 1 [(validate.rules).string = {min_len: 1}]; - - // If not specified, all requests to the target cluster will be mirrored. - // - // If specified, this field takes precedence over the `runtime_key` field and requests must also - // fall under the percentage of matches indicated by this field. - // - // For some fraction N/D, a random number in the range [0,D) is selected. If the - // number is <= the value of the numerator N, or if the key is not present, the default - // value, the request will be mirrored. - core.v3.RuntimeFractionalPercent runtime_fraction = 3; - - // Determines if the trace span should be sampled. Defaults to true. 
- google.protobuf.BoolValue trace_sampled = 4; - - string hidden_envoy_deprecated_runtime_key = 2 [ - deprecated = true, - (envoy.annotations.deprecated_at_minor_version) = "3.0", - (envoy.annotations.disallowed_by_default) = true - ]; - } - - // Specifies the route's hashing policy if the upstream cluster uses a hashing :ref:`load balancer - // `. - // [#next-free-field: 7] - message HashPolicy { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.route.RouteAction.HashPolicy"; - - message Header { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.route.RouteAction.HashPolicy.Header"; - - // The name of the request header that will be used to obtain the hash - // key. If the request header is not present, no hash will be produced. - string header_name = 1 - [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}]; - - // If specified, the request header value will be rewritten and used - // to produce the hash key. - type.matcher.v3.RegexMatchAndSubstitute regex_rewrite = 2; - } - - // Envoy supports two types of cookie affinity: - // - // 1. Passive. Envoy takes a cookie that's present in the cookies header and - // hashes on its value. - // - // 2. Generated. Envoy generates and sets a cookie with an expiration (TTL) - // on the first request from the client in its response to the client, - // based on the endpoint the request gets sent to. The client then - // presents this on the next and all subsequent requests. The hash of - // this is sufficient to ensure these requests get sent to the same - // endpoint. The cookie is generated by hashing the source and - // destination ports and addresses so that multiple independent HTTP2 - // streams on the same connection will independently receive the same - // cookie, even if they arrive at the Envoy simultaneously. 
- message Cookie { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.route.RouteAction.HashPolicy.Cookie"; - - // The name of the cookie that will be used to obtain the hash key. If the - // cookie is not present and ttl below is not set, no hash will be - // produced. - string name = 1 [(validate.rules).string = {min_len: 1}]; - - // If specified, a cookie with the TTL will be generated if the cookie is - // not present. If the TTL is present and zero, the generated cookie will - // be a session cookie. - google.protobuf.Duration ttl = 2; - - // The name of the path for the cookie. If no path is specified here, no path - // will be set for the cookie. - string path = 3; - } - - message ConnectionProperties { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.route.RouteAction.HashPolicy.ConnectionProperties"; - - // Hash on source IP address. - bool source_ip = 1; - } - - message QueryParameter { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.route.RouteAction.HashPolicy.QueryParameter"; - - // The name of the URL query parameter that will be used to obtain the hash - // key. If the parameter is not present, no hash will be produced. Query - // parameter names are case-sensitive. - string name = 1 [(validate.rules).string = {min_len: 1}]; - } - - message FilterState { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.route.RouteAction.HashPolicy.FilterState"; - - // The name of the Object in the per-request filterState, which is an - // Envoy::Http::Hashable object. If there is no data associated with the key, - // or the stored object is not Envoy::Http::Hashable, no hash will be produced. - string key = 1 [(validate.rules).string = {min_len: 1}]; - } - - oneof policy_specifier { - option (validate.required) = true; - - // Header hash policy. - Header header = 1; - - // Cookie hash policy. 
- Cookie cookie = 2; - - // Connection properties hash policy. - ConnectionProperties connection_properties = 3; - - // Query parameter hash policy. - QueryParameter query_parameter = 5; - - // Filter state hash policy. - FilterState filter_state = 6; - } - - // The flag that short-circuits the hash computing. This field provides a - // 'fallback' style of configuration: "if a terminal policy doesn't work, - // fallback to rest of the policy list", it saves time when the terminal - // policy works. - // - // If true, and there is already a hash computed, ignore rest of the - // list of hash polices. - // For example, if the following hash methods are configured: - // - // ========= ======== - // specifier terminal - // ========= ======== - // Header A true - // Header B false - // Header C false - // ========= ======== - // - // The generateHash process ends if policy "header A" generates a hash, as - // it's a terminal policy. - bool terminal = 4; - } - - // Allows enabling and disabling upgrades on a per-route basis. - // This overrides any enabled/disabled upgrade filter chain specified in the - // HttpConnectionManager - // :ref:`upgrade_configs - // ` - // but does not affect any custom filter chain specified there. - message UpgradeConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.route.RouteAction.UpgradeConfig"; - - // Configuration for sending data upstream as a raw data payload. This is used for - // CONNECT or POST requests, when forwarding request payload as raw TCP. - message ConnectConfig { - // If present, the proxy protocol header will be prepended to the CONNECT payload sent upstream. - core.v3.ProxyProtocolConfig proxy_protocol_config = 1; - - // If set, the route will also allow forwarding POST payload as raw TCP. - bool allow_post = 2; - } - - // The case-insensitive name of this upgrade, e.g. "websocket". 
- // For each upgrade type present in upgrade_configs, requests with - // Upgrade: [upgrade_type] will be proxied upstream. - string upgrade_type = 1 - [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_VALUE strict: false}]; - - // Determines if upgrades are available on this route. Defaults to true. - google.protobuf.BoolValue enabled = 2; - - // Configuration for sending data upstream as a raw data payload. This is used for - // CONNECT requests, when forwarding CONNECT payload as raw TCP. - // Note that CONNECT support is currently considered alpha in Envoy. - // [#comment: TODO(htuch): Replace the above comment with an alpha tag.] - ConnectConfig connect_config = 3; - } - - message MaxStreamDuration { - // Specifies the maximum duration allowed for streams on the route. If not specified, the value - // from the :ref:`max_stream_duration - // ` field in - // :ref:`HttpConnectionManager.common_http_protocol_options - // ` - // is used. If this field is set explicitly to zero, any - // HttpConnectionManager max_stream_duration timeout will be disabled for - // this route. - google.protobuf.Duration max_stream_duration = 1; - - // If present, and the request contains a `grpc-timeout header - // `_, use that value as the - // *max_stream_duration*, but limit the applied timeout to the maximum value specified here. - // If set to 0, the `grpc-timeout` header is used without modification. - google.protobuf.Duration grpc_timeout_header_max = 2; - - // If present, Envoy will adjust the timeout provided by the `grpc-timeout` header by - // subtracting the provided duration from the header. This is useful for allowing Envoy to set - // its global timeout to be less than that of the deadline imposed by the calling client, which - // makes it more likely that Envoy will handle the timeout instead of having the call canceled - // by the client. 
If, after applying the offset, the resulting timeout is zero or negative, - // the stream will timeout immediately. - google.protobuf.Duration grpc_timeout_header_offset = 3; - } - - reserved 12, 18, 19, 16, 22, 21; - - oneof cluster_specifier { - option (validate.required) = true; - - // Indicates the upstream cluster to which the request should be routed - // to. - string cluster = 1 [(validate.rules).string = {min_len: 1}]; - - // Envoy will determine the cluster to route to by reading the value of the - // HTTP header named by cluster_header from the request headers. If the - // header is not found or the referenced cluster does not exist, Envoy will - // return a 404 response. - // - // .. attention:: - // - // Internally, Envoy always uses the HTTP/2 *:authority* header to represent the HTTP/1 - // *Host* header. Thus, if attempting to match on *Host*, match on *:authority* instead. - // - // .. note:: - // - // If the header appears multiple times only the first value is used. - string cluster_header = 2 - [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}]; - - // Multiple upstream clusters can be specified for a given route. The - // request is routed to one of the upstream clusters based on weights - // assigned to each cluster. See - // :ref:`traffic splitting ` - // for additional documentation. - WeightedCluster weighted_clusters = 3; - - // [#not-implemented-hide:] - // Name of the cluster specifier plugin to use to determine the cluster for - // requests on this route. The plugin name must be defined in the associated - // :ref:`envoy_v3_api_field_config.route.v3.RouteConfiguration.cluster_specifier_plugins` - // in the - // :ref:`envoy_v3_api_field_config.core.v3.TypedExtensionConfig.name` field. - string cluster_specifier_plugin = 37; - } - - // The HTTP status code to use when configured cluster is not found. - // The default response code is 503 Service Unavailable. 
- ClusterNotFoundResponseCode cluster_not_found_response_code = 20 - [(validate.rules).enum = {defined_only: true}]; - - // Optional endpoint metadata match criteria used by the subset load balancer. Only endpoints - // in the upstream cluster with metadata matching what's set in this field will be considered - // for load balancing. If using :ref:`weighted_clusters - // `, metadata will be merged, with values - // provided there taking precedence. The filter name should be specified as *envoy.lb*. - core.v3.Metadata metadata_match = 4; - - // Indicates that during forwarding, the matched prefix (or path) should be - // swapped with this value. This option allows application URLs to be rooted - // at a different path from those exposed at the reverse proxy layer. The router filter will - // place the original path before rewrite into the :ref:`x-envoy-original-path - // ` header. - // - // Only one of *prefix_rewrite* or - // :ref:`regex_rewrite ` - // may be specified. - // - // .. attention:: - // - // Pay careful attention to the use of trailing slashes in the - // :ref:`route's match ` prefix value. - // Stripping a prefix from a path requires multiple Routes to handle all cases. For example, - // rewriting */prefix* to */* and */prefix/etc* to */etc* cannot be done in a single - // :ref:`Route `, as shown by the below config entries: - // - // .. code-block:: yaml - // - // - match: - // prefix: "/prefix/" - // route: - // prefix_rewrite: "/" - // - match: - // prefix: "/prefix" - // route: - // prefix_rewrite: "/" - // - // Having above entries in the config, requests to */prefix* will be stripped to */*, while - // requests to */prefix/etc* will be stripped to */etc*. 
- string prefix_rewrite = 5 - [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}]; - - // Indicates that during forwarding, portions of the path that match the - // pattern should be rewritten, even allowing the substitution of capture - // groups from the pattern into the new path as specified by the rewrite - // substitution string. This is useful to allow application paths to be - // rewritten in a way that is aware of segments with variable content like - // identifiers. The router filter will place the original path as it was - // before the rewrite into the :ref:`x-envoy-original-path - // ` header. - // - // Only one of :ref:`prefix_rewrite ` - // or *regex_rewrite* may be specified. - // - // Examples using Google's `RE2 `_ engine: - // - // * The path pattern ``^/service/([^/]+)(/.*)$`` paired with a substitution - // string of ``\2/instance/\1`` would transform ``/service/foo/v1/api`` - // into ``/v1/api/instance/foo``. - // - // * The pattern ``one`` paired with a substitution string of ``two`` would - // transform ``/xxx/one/yyy/one/zzz`` into ``/xxx/two/yyy/two/zzz``. - // - // * The pattern ``^(.*?)one(.*)$`` paired with a substitution string of - // ``\1two\2`` would replace only the first occurrence of ``one``, - // transforming path ``/xxx/one/yyy/one/zzz`` into ``/xxx/two/yyy/one/zzz``. - // - // * The pattern ``(?i)/xxx/`` paired with a substitution string of ``/yyy/`` - // would do a case-insensitive match and transform path ``/aaa/XxX/bbb`` to - // ``/aaa/yyy/bbb``. - type.matcher.v3.RegexMatchAndSubstitute regex_rewrite = 32; - - oneof host_rewrite_specifier { - // Indicates that during forwarding, the host header will be swapped with - // this value. - string host_rewrite_literal = 6 - [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}]; - - // Indicates that during forwarding, the host header will be swapped with - // the hostname of the upstream host chosen by the cluster manager. 
This - // option is applicable only when the destination cluster for a route is of - // type *strict_dns* or *logical_dns*. Setting this to true with other cluster - // types has no effect. - google.protobuf.BoolValue auto_host_rewrite = 7; - - // Indicates that during forwarding, the host header will be swapped with the content of given - // downstream or :ref:`custom ` header. - // If header value is empty, host header is left intact. - // - // .. attention:: - // - // Pay attention to the potential security implications of using this option. Provided header - // must come from trusted source. - // - // .. note:: - // - // If the header appears multiple times only the first value is used. - string host_rewrite_header = 29 - [(validate.rules).string = {well_known_regex: HTTP_HEADER_NAME strict: false}]; - - // Indicates that during forwarding, the host header will be swapped with - // the result of the regex substitution executed on path value with query and fragment removed. - // This is useful for transitioning variable content between path segment and subdomain. - // - // For example with the following config: - // - // .. code-block:: yaml - // - // host_rewrite_path_regex: - // pattern: - // google_re2: {} - // regex: "^/(.+)/.+$" - // substitution: \1 - // - // Would rewrite the host header to `envoyproxy.io` given the path `/envoyproxy.io/some/path`. - type.matcher.v3.RegexMatchAndSubstitute host_rewrite_path_regex = 35; - } - - // Specifies the upstream timeout for the route. If not specified, the default is 15s. This - // spans between the point at which the entire downstream request (i.e. end-of-stream) has been - // processed and when the upstream response has been completely processed. A value of 0 will - // disable the route's timeout. - // - // .. note:: - // - // This timeout includes all retries. 
See also - // :ref:`config_http_filters_router_x-envoy-upstream-rq-timeout-ms`, - // :ref:`config_http_filters_router_x-envoy-upstream-rq-per-try-timeout-ms`, and the - // :ref:`retry overview `. - google.protobuf.Duration timeout = 8; - - // Specifies the idle timeout for the route. If not specified, there is no per-route idle timeout, - // although the connection manager wide :ref:`stream_idle_timeout - // ` - // will still apply. A value of 0 will completely disable the route's idle timeout, even if a - // connection manager stream idle timeout is configured. - // - // The idle timeout is distinct to :ref:`timeout - // `, which provides an upper bound - // on the upstream response time; :ref:`idle_timeout - // ` instead bounds the amount - // of time the request's stream may be idle. - // - // After header decoding, the idle timeout will apply on downstream and - // upstream request events. Each time an encode/decode event for headers or - // data is processed for the stream, the timer will be reset. If the timeout - // fires, the stream is terminated with a 408 Request Timeout error code if no - // upstream response header has been received, otherwise a stream reset - // occurs. - // - // If the :ref:`overload action ` "envoy.overload_actions.reduce_timeouts" - // is configured, this timeout is scaled according to the value for - // :ref:`HTTP_DOWNSTREAM_STREAM_IDLE `. - google.protobuf.Duration idle_timeout = 24; - - // Indicates that the route has a retry policy. Note that if this is set, - // it'll take precedence over the virtual host level retry policy entirely - // (e.g.: policies are not merged, most internal one becomes the enforced policy). - RetryPolicy retry_policy = 9; - - // [#not-implemented-hide:] - // Specifies the configuration for retry policy extension. Note that if this is set, it'll take - // precedence over the virtual host level retry policy entirely (e.g.: policies are not merged, - // most internal one becomes the enforced policy). 
:ref:`Retry policy ` - // should not be set if this field is used. - google.protobuf.Any retry_policy_typed_config = 33; - - // Indicates that the route has request mirroring policies. - repeated RequestMirrorPolicy request_mirror_policies = 30; - - // Optionally specifies the :ref:`routing priority `. - core.v3.RoutingPriority priority = 11 [(validate.rules).enum = {defined_only: true}]; - - // Specifies a set of rate limit configurations that could be applied to the - // route. - repeated RateLimit rate_limits = 13; - - // Specifies if the rate limit filter should include the virtual host rate - // limits. By default, if the route configured rate limits, the virtual host - // :ref:`rate_limits ` are not applied to the - // request. - // - // This field is deprecated. Please use :ref:`vh_rate_limits ` - google.protobuf.BoolValue include_vh_rate_limits = 14 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - - // Specifies a list of hash policies to use for ring hash load balancing. Each - // hash policy is evaluated individually and the combined result is used to - // route the request. The method of combination is deterministic such that - // identical lists of hash policies will produce the same hash. Since a hash - // policy examines specific parts of a request, it can fail to produce a hash - // (i.e. if the hashed header is not present). If (and only if) all configured - // hash policies fail to generate a hash, no hash will be produced for - // the route. In this case, the behavior is the same as if no hash policies - // were specified (i.e. the ring hash load balancer will choose a random - // backend). If a hash policy has the "terminal" attribute set to true, and - // there is already a hash generated, the hash is returned immediately, - // ignoring the rest of the hash policy list. - repeated HashPolicy hash_policy = 15; - - // Indicates that the route has a CORS policy. 
- CorsPolicy cors = 17; - - // Deprecated by :ref:`grpc_timeout_header_max ` - // If present, and the request is a gRPC request, use the - // `grpc-timeout header `_, - // or its default value (infinity) instead of - // :ref:`timeout `, but limit the applied timeout - // to the maximum value specified here. If configured as 0, the maximum allowed timeout for - // gRPC requests is infinity. If not configured at all, the `grpc-timeout` header is not used - // and gRPC requests time out like any other requests using - // :ref:`timeout ` or its default. - // This can be used to prevent unexpected upstream request timeouts due to potentially long - // time gaps between gRPC request and response in gRPC streaming mode. - // - // .. note:: - // - // If a timeout is specified using :ref:`config_http_filters_router_x-envoy-upstream-rq-timeout-ms`, it takes - // precedence over `grpc-timeout header `_, when - // both are present. See also - // :ref:`config_http_filters_router_x-envoy-upstream-rq-timeout-ms`, - // :ref:`config_http_filters_router_x-envoy-upstream-rq-per-try-timeout-ms`, and the - // :ref:`retry overview `. - google.protobuf.Duration max_grpc_timeout = 23 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - - // Deprecated by :ref:`grpc_timeout_header_offset `. - // If present, Envoy will adjust the timeout provided by the `grpc-timeout` header by subtracting - // the provided duration from the header. This is useful in allowing Envoy to set its global - // timeout to be less than that of the deadline imposed by the calling client, which makes it more - // likely that Envoy will handle the timeout instead of having the call canceled by the client. - // The offset will only be applied if the provided grpc_timeout is greater than the offset. This - // ensures that the offset will only ever decrease the timeout and never set it to 0 (meaning - // infinity). 
- google.protobuf.Duration grpc_timeout_offset = 28 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - - repeated UpgradeConfig upgrade_configs = 25; - - // If present, Envoy will try to follow an upstream redirect response instead of proxying the - // response back to the downstream. An upstream redirect response is defined - // by :ref:`redirect_response_codes - // `. - InternalRedirectPolicy internal_redirect_policy = 34; - - InternalRedirectAction internal_redirect_action = 26 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - - // An internal redirect is handled, iff the number of previous internal redirects that a - // downstream request has encountered is lower than this value, and - // :ref:`internal_redirect_action ` - // is set to :ref:`HANDLE_INTERNAL_REDIRECT - // ` - // In the case where a downstream request is bounced among multiple routes by internal redirect, - // the first route that hits this threshold, or has - // :ref:`internal_redirect_action ` - // set to - // :ref:`PASS_THROUGH_INTERNAL_REDIRECT - // ` - // will pass the redirect back to downstream. - // - // If not specified, at most one redirect will be followed. - google.protobuf.UInt32Value max_internal_redirects = 31 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - - // Indicates that the route has a hedge policy. Note that if this is set, - // it'll take precedence over the virtual host level hedge policy entirely - // (e.g.: policies are not merged, most internal one becomes the enforced policy). - HedgePolicy hedge_policy = 27; - - // Specifies the maximum stream duration for this route. - MaxStreamDuration max_stream_duration = 36; - - RequestMirrorPolicy hidden_envoy_deprecated_request_mirror_policy = 10 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; -} - -// HTTP retry :ref:`architecture overview `. 
-// [#next-free-field: 12] -message RetryPolicy { - option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.route.RetryPolicy"; - - enum ResetHeaderFormat { - SECONDS = 0; - UNIX_TIMESTAMP = 1; - } - - message RetryPriority { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.route.RetryPolicy.RetryPriority"; - - string name = 1 [(validate.rules).string = {min_len: 1}]; - - // [#extension-category: envoy.retry_priorities] - oneof config_type { - google.protobuf.Any typed_config = 3; - - google.protobuf.Struct hidden_envoy_deprecated_config = 2 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - } - } - - message RetryHostPredicate { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.route.RetryPolicy.RetryHostPredicate"; - - string name = 1 [(validate.rules).string = {min_len: 1}]; - - // [#extension-category: envoy.retry_host_predicates] - oneof config_type { - google.protobuf.Any typed_config = 3; - - google.protobuf.Struct hidden_envoy_deprecated_config = 2 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - } - } - - message RetryBackOff { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.route.RetryPolicy.RetryBackOff"; - - // Specifies the base interval between retries. This parameter is required and must be greater - // than zero. Values less than 1 ms are rounded up to 1 ms. - // See :ref:`config_http_filters_router_x-envoy-max-retries` for a discussion of Envoy's - // back-off algorithm. - google.protobuf.Duration base_interval = 1 [(validate.rules).duration = { - required: true - gt {} - }]; - - // Specifies the maximum interval between retries. This parameter is optional, but must be - // greater than or equal to the `base_interval` if set. The default is 10 times the - // `base_interval`. 
See :ref:`config_http_filters_router_x-envoy-max-retries` for a discussion - // of Envoy's back-off algorithm. - google.protobuf.Duration max_interval = 2 [(validate.rules).duration = {gt {}}]; - } - - message ResetHeader { - // The name of the reset header. - // - // .. note:: - // - // If the header appears multiple times only the first value is used. - string name = 1 - [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}]; - - // The format of the reset header. - ResetHeaderFormat format = 2 [(validate.rules).enum = {defined_only: true}]; - } - - // A retry back-off strategy that applies when the upstream server rate limits - // the request. - // - // Given this configuration: - // - // .. code-block:: yaml - // - // rate_limited_retry_back_off: - // reset_headers: - // - name: Retry-After - // format: SECONDS - // - name: X-RateLimit-Reset - // format: UNIX_TIMESTAMP - // max_interval: "300s" - // - // The following algorithm will apply: - // - // 1. If the response contains the header ``Retry-After`` its value must be on - // the form ``120`` (an integer that represents the number of seconds to - // wait before retrying). If so, this value is used as the back-off interval. - // 2. Otherwise, if the response contains the header ``X-RateLimit-Reset`` its - // value must be on the form ``1595320702`` (an integer that represents the - // point in time at which to retry, as a Unix timestamp in seconds). If so, - // the current time is subtracted from this value and the result is used as - // the back-off interval. - // 3. Otherwise, Envoy will use the default - // :ref:`exponential back-off ` - // strategy. - // - // No matter which format is used, if the resulting back-off interval exceeds - // ``max_interval`` it is discarded and the next header in ``reset_headers`` - // is tried. If a request timeout is configured for the route it will further - // limit how long the request will be allowed to run. 
- // - // To prevent many clients retrying at the same point in time jitter is added - // to the back-off interval, so the resulting interval is decided by taking: - // ``random(interval, interval * 1.5)``. - // - // .. attention:: - // - // Configuring ``rate_limited_retry_back_off`` will not by itself cause a request - // to be retried. You will still need to configure the right retry policy to match - // the responses from the upstream server. - message RateLimitedRetryBackOff { - // Specifies the reset headers (like ``Retry-After`` or ``X-RateLimit-Reset``) - // to match against the response. Headers are tried in order, and matched case - // insensitive. The first header to be parsed successfully is used. If no headers - // match the default exponential back-off is used instead. - repeated ResetHeader reset_headers = 1 [(validate.rules).repeated = {min_items: 1}]; - - // Specifies the maximum back off interval that Envoy will allow. If a reset - // header contains an interval longer than this then it will be discarded and - // the next header will be tried. Defaults to 300 seconds. - google.protobuf.Duration max_interval = 2 [(validate.rules).duration = {gt {}}]; - } - - // Specifies the conditions under which retry takes place. These are the same - // conditions documented for :ref:`config_http_filters_router_x-envoy-retry-on` and - // :ref:`config_http_filters_router_x-envoy-retry-grpc-on`. - string retry_on = 1; - - // Specifies the allowed number of retries. This parameter is optional and - // defaults to 1. These are the same conditions documented for - // :ref:`config_http_filters_router_x-envoy-max-retries`. - google.protobuf.UInt32Value num_retries = 2 - [(udpa.annotations.field_migrate).rename = "max_retries"]; - - // Specifies a non-zero upstream timeout per retry attempt. This parameter is optional. The - // same conditions documented for - // :ref:`config_http_filters_router_x-envoy-upstream-rq-per-try-timeout-ms` apply. - // - // .. 
note:: - // - // If left unspecified, Envoy will use the global - // :ref:`route timeout ` for the request. - // Consequently, when using a :ref:`5xx ` based - // retry policy, a request that times out will not be retried as the total timeout budget - // would have been exhausted. - google.protobuf.Duration per_try_timeout = 3; - - // Specifies an implementation of a RetryPriority which is used to determine the - // distribution of load across priorities used for retries. Refer to - // :ref:`retry plugin configuration ` for more details. - RetryPriority retry_priority = 4; - - // Specifies a collection of RetryHostPredicates that will be consulted when selecting a host - // for retries. If any of the predicates reject the host, host selection will be reattempted. - // Refer to :ref:`retry plugin configuration ` for more - // details. - repeated RetryHostPredicate retry_host_predicate = 5; - - // The maximum number of times host selection will be reattempted before giving up, at which - // point the host that was last selected will be routed to. If unspecified, this will default to - // retrying once. - int64 host_selection_retry_max_attempts = 6; - - // HTTP status codes that should trigger a retry in addition to those specified by retry_on. - repeated uint32 retriable_status_codes = 7; - - // Specifies parameters that control exponential retry back off. This parameter is optional, in which case the - // default base interval is 25 milliseconds or, if set, the current value of the - // `upstream.base_retry_backoff_ms` runtime parameter. The default maximum interval is 10 times - // the base interval. The documentation for :ref:`config_http_filters_router_x-envoy-max-retries` - // describes Envoy's back-off algorithm. - RetryBackOff retry_back_off = 8; - - // Specifies parameters that control a retry back-off strategy that is used - // when the request is rate limited by the upstream server. 
The server may - // return a response header like ``Retry-After`` or ``X-RateLimit-Reset`` to - // provide feedback to the client on how long to wait before retrying. If - // configured, this back-off strategy will be used instead of the - // default exponential back off strategy (configured using `retry_back_off`) - // whenever a response includes the matching headers. - RateLimitedRetryBackOff rate_limited_retry_back_off = 11; - - // HTTP response headers that trigger a retry if present in the response. A retry will be - // triggered if any of the header matches match the upstream response headers. - // The field is only consulted if 'retriable-headers' retry policy is active. - repeated HeaderMatcher retriable_headers = 9; - - // HTTP headers which must be present in the request for retries to be attempted. - repeated HeaderMatcher retriable_request_headers = 10; -} - -// HTTP request hedging :ref:`architecture overview `. -message HedgePolicy { - option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.route.HedgePolicy"; - - // Specifies the number of initial requests that should be sent upstream. - // Must be at least 1. - // Defaults to 1. - // [#not-implemented-hide:] - google.protobuf.UInt32Value initial_requests = 1 [(validate.rules).uint32 = {gte: 1}]; - - // Specifies a probability that an additional upstream request should be sent - // on top of what is specified by initial_requests. - // Defaults to 0. - // [#not-implemented-hide:] - type.v3.FractionalPercent additional_request_chance = 2; - - // Indicates that a hedged request should be sent when the per-try timeout is hit. - // This means that a retry will be issued without resetting the original request, leaving multiple upstream requests in flight. - // The first request to complete successfully will be the one returned to the caller. - // - // * At any time, a successful response (i.e. not triggering any of the retry-on conditions) would be returned to the client. 
- // * Before per-try timeout, an error response (per retry-on conditions) would be retried immediately or returned ot the client - // if there are no more retries left. - // * After per-try timeout, an error response would be discarded, as a retry in the form of a hedged request is already in progress. - // - // Note: For this to have effect, you must have a :ref:`RetryPolicy ` that retries at least - // one error code and specifies a maximum number of retries. - // - // Defaults to false. - bool hedge_on_per_try_timeout = 3; -} - -// [#next-free-field: 10] -message RedirectAction { - option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.route.RedirectAction"; - - enum RedirectResponseCode { - // Moved Permanently HTTP Status Code - 301. - MOVED_PERMANENTLY = 0; - - // Found HTTP Status Code - 302. - FOUND = 1; - - // See Other HTTP Status Code - 303. - SEE_OTHER = 2; - - // Temporary Redirect HTTP Status Code - 307. - TEMPORARY_REDIRECT = 3; - - // Permanent Redirect HTTP Status Code - 308. - PERMANENT_REDIRECT = 4; - } - - // When the scheme redirection take place, the following rules apply: - // 1. If the source URI scheme is `http` and the port is explicitly - // set to `:80`, the port will be removed after the redirection - // 2. If the source URI scheme is `https` and the port is explicitly - // set to `:443`, the port will be removed after the redirection - oneof scheme_rewrite_specifier { - // The scheme portion of the URL will be swapped with "https". - bool https_redirect = 4; - - // The scheme portion of the URL will be swapped with this value. - string scheme_redirect = 7; - } - - // The host portion of the URL will be swapped with this value. - string host_redirect = 1 - [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}]; - - // The port value of the URL will be swapped with this value. 
- uint32 port_redirect = 8; - - oneof path_rewrite_specifier { - // The path portion of the URL will be swapped with this value. - // Please note that query string in path_redirect will override the - // request's query string and will not be stripped. - // - // For example, let's say we have the following routes: - // - // - match: { path: "/old-path-1" } - // redirect: { path_redirect: "/new-path-1" } - // - match: { path: "/old-path-2" } - // redirect: { path_redirect: "/new-path-2", strip-query: "true" } - // - match: { path: "/old-path-3" } - // redirect: { path_redirect: "/new-path-3?foo=1", strip_query: "true" } - // - // 1. if request uri is "/old-path-1?bar=1", users will be redirected to "/new-path-1?bar=1" - // 2. if request uri is "/old-path-2?bar=1", users will be redirected to "/new-path-2" - // 3. if request uri is "/old-path-3?bar=1", users will be redirected to "/new-path-3?foo=1" - string path_redirect = 2 - [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}]; - - // Indicates that during redirection, the matched prefix (or path) - // should be swapped with this value. This option allows redirect URLs be dynamically created - // based on the request. - // - // .. attention:: - // - // Pay attention to the use of trailing slashes as mentioned in - // :ref:`RouteAction's prefix_rewrite `. - string prefix_rewrite = 5 - [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}]; - - // Indicates that during redirect, portions of the path that match the - // pattern should be rewritten, even allowing the substitution of capture - // groups from the pattern into the new path as specified by the rewrite - // substitution string. This is useful to allow application paths to be - // rewritten in a way that is aware of segments with variable content like - // identifiers. 
- // - // Examples using Google's `RE2 `_ engine: - // - // * The path pattern ``^/service/([^/]+)(/.*)$`` paired with a substitution - // string of ``\2/instance/\1`` would transform ``/service/foo/v1/api`` - // into ``/v1/api/instance/foo``. - // - // * The pattern ``one`` paired with a substitution string of ``two`` would - // transform ``/xxx/one/yyy/one/zzz`` into ``/xxx/two/yyy/two/zzz``. - // - // * The pattern ``^(.*?)one(.*)$`` paired with a substitution string of - // ``\1two\2`` would replace only the first occurrence of ``one``, - // transforming path ``/xxx/one/yyy/one/zzz`` into ``/xxx/two/yyy/one/zzz``. - // - // * The pattern ``(?i)/xxx/`` paired with a substitution string of ``/yyy/`` - // would do a case-insensitive match and transform path ``/aaa/XxX/bbb`` to - // ``/aaa/yyy/bbb``. - type.matcher.v3.RegexMatchAndSubstitute regex_rewrite = 9; - } - - // The HTTP status code to use in the redirect response. The default response - // code is MOVED_PERMANENTLY (301). - RedirectResponseCode response_code = 3 [(validate.rules).enum = {defined_only: true}]; - - // Indicates that during redirection, the query portion of the URL will - // be removed. Default value is false. - bool strip_query = 6; -} - -message DirectResponseAction { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.route.DirectResponseAction"; - - // Specifies the HTTP response status to be returned. - uint32 status = 1 [(validate.rules).uint32 = {lt: 600 gte: 100}]; - - // Specifies the content of the response body. If this setting is omitted, - // no body is included in the generated response. - // - // .. note:: - // - // Headers can be specified using *response_headers_to_add* in the enclosing - // :ref:`envoy_v3_api_msg_config.route.v3.Route`, :ref:`envoy_v3_api_msg_config.route.v3.RouteConfiguration` or - // :ref:`envoy_v3_api_msg_config.route.v3.VirtualHost`. 
- core.v3.DataSource body = 2; -} - -// [#not-implemented-hide:] -message NonForwardingAction { -} - -message Decorator { - option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.route.Decorator"; - - // The operation name associated with the request matched to this route. If tracing is - // enabled, this information will be used as the span name reported for this request. - // - // .. note:: - // - // For ingress (inbound) requests, or egress (outbound) responses, this value may be overridden - // by the :ref:`x-envoy-decorator-operation - // ` header. - string operation = 1 [(validate.rules).string = {min_len: 1}]; - - // Whether the decorated details should be propagated to the other party. The default is true. - google.protobuf.BoolValue propagate = 2; -} - -message Tracing { - option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.route.Tracing"; - - // Target percentage of requests managed by this HTTP connection manager that will be force - // traced if the :ref:`x-client-trace-id ` - // header is set. This field is a direct analog for the runtime variable - // 'tracing.client_sampling' in the :ref:`HTTP Connection Manager - // `. - // Default: 100% - type.v3.FractionalPercent client_sampling = 1; - - // Target percentage of requests managed by this HTTP connection manager that will be randomly - // selected for trace generation, if not requested by the client or not forced. This field is - // a direct analog for the runtime variable 'tracing.random_sampling' in the - // :ref:`HTTP Connection Manager `. - // Default: 100% - type.v3.FractionalPercent random_sampling = 2; - - // Target percentage of requests managed by this HTTP connection manager that will be traced - // after all other sampling checks have been applied (client-directed, force tracing, random - // sampling). This field functions as an upper limit on the total configured sampling rate. 
For - // instance, setting client_sampling to 100% but overall_sampling to 1% will result in only 1% - // of client requests with the appropriate headers to be force traced. This field is a direct - // analog for the runtime variable 'tracing.global_enabled' in the - // :ref:`HTTP Connection Manager `. - // Default: 100% - type.v3.FractionalPercent overall_sampling = 3; - - // A list of custom tags with unique tag name to create tags for the active span. - // It will take effect after merging with the :ref:`corresponding configuration - // ` - // configured in the HTTP connection manager. If two tags with the same name are configured - // each in the HTTP connection manager and the route level, the one configured here takes - // priority. - repeated type.tracing.v3.CustomTag custom_tags = 4; -} - -// A virtual cluster is a way of specifying a regex matching rule against -// certain important endpoints such that statistics are generated explicitly for -// the matched requests. The reason this is useful is that when doing -// prefix/path matching Envoy does not always know what the application -// considers to be an endpoint. Thus, it’s impossible for Envoy to generically -// emit per endpoint statistics. However, often systems have highly critical -// endpoints that they wish to get “perfect” statistics on. Virtual cluster -// statistics are perfect in the sense that they are emitted on the downstream -// side such that they include network level failures. -// -// Documentation for :ref:`virtual cluster statistics `. -// -// .. note:: -// -// Virtual clusters are a useful tool, but we do not recommend setting up a virtual cluster for -// every application endpoint. This is both not easily maintainable and as well the matching and -// statistics output are not free. -message VirtualCluster { - option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.route.VirtualCluster"; - - // Specifies a list of header matchers to use for matching requests. 
Each specified header must - // match. The pseudo-headers `:path` and `:method` can be used to match the request path and - // method, respectively. - repeated HeaderMatcher headers = 4; - - // Specifies the name of the virtual cluster. The virtual cluster name as well - // as the virtual host name are used when emitting statistics. The statistics are emitted by the - // router filter and are documented :ref:`here `. - string name = 2 [(validate.rules).string = {min_len: 1}]; - - string hidden_envoy_deprecated_pattern = 1 [ - deprecated = true, - (validate.rules).string = {max_bytes: 1024}, - (envoy.annotations.deprecated_at_minor_version) = "3.0", - (envoy.annotations.disallowed_by_default) = true - ]; - - core.v3.RequestMethod hidden_envoy_deprecated_method = 3 [ - deprecated = true, - (envoy.annotations.deprecated_at_minor_version) = "3.0", - (envoy.annotations.disallowed_by_default) = true - ]; -} - -// Global rate limiting :ref:`architecture overview `. -// Also applies to Local rate limiting :ref:`using descriptors `. -message RateLimit { - option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.route.RateLimit"; - - // [#next-free-field: 10] - message Action { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.route.RateLimit.Action"; - - // The following descriptor entry is appended to the descriptor: - // - // .. code-block:: cpp - // - // ("source_cluster", "") - // - // is derived from the :option:`--service-cluster` option. - message SourceCluster { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.route.RateLimit.Action.SourceCluster"; - } - - // The following descriptor entry is appended to the descriptor: - // - // .. 
code-block:: cpp - // - // ("destination_cluster", "") - // - // Once a request matches against a route table rule, a routed cluster is determined by one of - // the following :ref:`route table configuration ` - // settings: - // - // * :ref:`cluster ` indicates the upstream cluster - // to route to. - // * :ref:`weighted_clusters ` - // chooses a cluster randomly from a set of clusters with attributed weight. - // * :ref:`cluster_header ` indicates which - // header in the request contains the target cluster. - message DestinationCluster { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.route.RateLimit.Action.DestinationCluster"; - } - - // The following descriptor entry is appended when a header contains a key that matches the - // *header_name*: - // - // .. code-block:: cpp - // - // ("", "") - message RequestHeaders { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.route.RateLimit.Action.RequestHeaders"; - - // The header name to be queried from the request headers. The header’s - // value is used to populate the value of the descriptor entry for the - // descriptor_key. - string header_name = 1 - [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}]; - - // The key to use in the descriptor entry. - string descriptor_key = 2 [(validate.rules).string = {min_len: 1}]; - - // If set to true, Envoy skips the descriptor while calling rate limiting service - // when header is not present in the request. By default it skips calling the - // rate limiting service if this header is not present in the request. - bool skip_if_absent = 3; - } - - // The following descriptor entry is appended to the descriptor and is populated using the - // trusted address from :ref:`x-forwarded-for `: - // - // .. 
code-block:: cpp - // - // ("remote_address", "") - message RemoteAddress { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.route.RateLimit.Action.RemoteAddress"; - } - - // The following descriptor entry is appended to the descriptor: - // - // .. code-block:: cpp - // - // ("generic_key", "") - message GenericKey { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.route.RateLimit.Action.GenericKey"; - - // The value to use in the descriptor entry. - string descriptor_value = 1 [(validate.rules).string = {min_len: 1}]; - - // An optional key to use in the descriptor entry. If not set it defaults - // to 'generic_key' as the descriptor key. - string descriptor_key = 2; - } - - // The following descriptor entry is appended to the descriptor: - // - // .. code-block:: cpp - // - // ("header_match", "") - message HeaderValueMatch { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.route.RateLimit.Action.HeaderValueMatch"; - - // The value to use in the descriptor entry. - string descriptor_value = 1 [(validate.rules).string = {min_len: 1}]; - - // If set to true, the action will append a descriptor entry when the - // request matches the headers. If set to false, the action will append a - // descriptor entry when the request does not match the headers. The - // default value is true. - google.protobuf.BoolValue expect_match = 2; - - // Specifies a set of headers that the rate limit action should match - // on. The action will check the request’s headers against all the - // specified headers in the config. A match will happen if all the - // headers in the config are present in the request with the same values - // (or based on presence if the value field is not in the config). 
- repeated HeaderMatcher headers = 3 [(validate.rules).repeated = {min_items: 1}]; - } - - // The following descriptor entry is appended when the - // :ref:`dynamic metadata ` contains a key value: - // - // .. code-block:: cpp - // - // ("", "") - // - // .. attention:: - // This action has been deprecated in favor of the :ref:`metadata ` action - message DynamicMetaData { - // The key to use in the descriptor entry. - string descriptor_key = 1 [(validate.rules).string = {min_len: 1}]; - - // Metadata struct that defines the key and path to retrieve the string value. A match will - // only happen if the value in the dynamic metadata is of type string. - type.metadata.v3.MetadataKey metadata_key = 2 [(validate.rules).message = {required: true}]; - - // An optional value to use if *metadata_key* is empty. If not set and - // no value is present under the metadata_key then no descriptor is generated. - string default_value = 3; - } - - // The following descriptor entry is appended when the metadata contains a key value: - // - // .. code-block:: cpp - // - // ("", "") - message MetaData { - enum Source { - // Query :ref:`dynamic metadata ` - DYNAMIC = 0; - - // Query :ref:`route entry metadata ` - ROUTE_ENTRY = 1; - } - - // The key to use in the descriptor entry. - string descriptor_key = 1 [(validate.rules).string = {min_len: 1}]; - - // Metadata struct that defines the key and path to retrieve the string value. A match will - // only happen if the value in the metadata is of type string. - type.metadata.v3.MetadataKey metadata_key = 2 [(validate.rules).message = {required: true}]; - - // An optional value to use if *metadata_key* is empty. If not set and - // no value is present under the metadata_key then no descriptor is generated. - string default_value = 3; - - // Source of metadata - Source source = 4 [(validate.rules).enum = {defined_only: true}]; - } - - oneof action_specifier { - option (validate.required) = true; - - // Rate limit on source cluster. 
- SourceCluster source_cluster = 1; - - // Rate limit on destination cluster. - DestinationCluster destination_cluster = 2; - - // Rate limit on request headers. - RequestHeaders request_headers = 3; - - // Rate limit on remote address. - RemoteAddress remote_address = 4; - - // Rate limit on a generic key. - GenericKey generic_key = 5; - - // Rate limit on the existence of request headers. - HeaderValueMatch header_value_match = 6; - - // Rate limit on dynamic metadata. - // - // .. attention:: - // This field has been deprecated in favor of the :ref:`metadata ` field - DynamicMetaData dynamic_metadata = 7 [ - deprecated = true, - (envoy.annotations.deprecated_at_minor_version) = "3.0", - (envoy.annotations.disallowed_by_default) = true - ]; - - // Rate limit on metadata. - MetaData metadata = 8; - - // Rate limit descriptor extension. See the rate limit descriptor extensions documentation. - // [#extension-category: envoy.rate_limit_descriptors] - core.v3.TypedExtensionConfig extension = 9; - } - } - - message Override { - // Fetches the override from the dynamic metadata. - message DynamicMetadata { - // Metadata struct that defines the key and path to retrieve the struct value. - // The value must be a struct containing an integer "requests_per_unit" property - // and a "unit" property with a value parseable to :ref:`RateLimitUnit - // enum ` - type.metadata.v3.MetadataKey metadata_key = 1 [(validate.rules).message = {required: true}]; - } - - oneof override_specifier { - option (validate.required) = true; - - // Limit override from dynamic metadata. - DynamicMetadata dynamic_metadata = 1; - } - } - - // Refers to the stage set in the filter. The rate limit configuration only - // applies to filters with the same stage number. The default stage number is - // 0. - // - // .. note:: - // - // The filter supports a range of 0 - 10 inclusively for stage numbers. 
- google.protobuf.UInt32Value stage = 1 [(validate.rules).uint32 = {lte: 10}]; - - // The key to be set in runtime to disable this rate limit configuration. - string disable_key = 2; - - // A list of actions that are to be applied for this rate limit configuration. - // Order matters as the actions are processed sequentially and the descriptor - // is composed by appending descriptor entries in that sequence. If an action - // cannot append a descriptor entry, no descriptor is generated for the - // configuration. See :ref:`composing actions - // ` for additional documentation. - repeated Action actions = 3 [(validate.rules).repeated = {min_items: 1}]; - - // An optional limit override to be appended to the descriptor produced by this - // rate limit configuration. If the override value is invalid or cannot be resolved - // from metadata, no override is provided. See :ref:`rate limit override - // ` for more information. - Override limit = 4; -} - -// .. attention:: -// -// Internally, Envoy always uses the HTTP/2 *:authority* header to represent the HTTP/1 *Host* -// header. Thus, if attempting to match on *Host*, match on *:authority* instead. -// -// .. attention:: -// -// To route on HTTP method, use the special HTTP/2 *:method* header. This works for both -// HTTP/1 and HTTP/2 as Envoy normalizes headers. E.g., -// -// .. code-block:: json -// -// { -// "name": ":method", -// "exact_match": "POST" -// } -// -// .. attention:: -// In the absence of any header match specifier, match will default to :ref:`present_match -// `. i.e, a request that has the :ref:`name -// ` header will match, regardless of the header's -// value. -// -// [#next-major-version: HeaderMatcher should be refactored to use StringMatcher.] -// [#next-free-field: 14] -message HeaderMatcher { - option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.route.HeaderMatcher"; - - reserved 2, 3; - - // Specifies the name of the header in the request. 
- string name = 1 - [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}]; - - // Specifies how the header match will be performed to route the request. - oneof header_match_specifier { - // If specified, header match will be performed based on the value of the header. - // This field is deprecated. Please use :ref:`string_match `. - string exact_match = 4 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - - // If specified, this regex string is a regular expression rule which implies the entire request - // header value must match the regex. The rule will not match if only a subsequence of the - // request header value matches the regex. - // This field is deprecated. Please use :ref:`string_match `. - type.matcher.v3.RegexMatcher safe_regex_match = 11 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - - // If specified, header match will be performed based on range. - // The rule will match if the request header value is within this range. - // The entire request header value must represent an integer in base 10 notation: consisting of - // an optional plus or minus sign followed by a sequence of digits. The rule will not match if - // the header value does not represent an integer. Match will fail for empty values, floating - // point numbers or if only a subsequence of the header value is an integer. - // - // Examples: - // - // * For range [-10,0), route will match for header value -1, but not for 0, "somestring", 10.9, - // "-1somestring" - type.v3.Int64Range range_match = 6; - - // If specified as true, header match will be performed based on whether the header is in the - // request. If specified as false, header match will be performed based on whether the header is absent. - bool present_match = 7; - - // If specified, header match will be performed based on the prefix of the header value. 
- // Note: empty prefix is not allowed, please use present_match instead. - // This field is deprecated. Please use :ref:`string_match `. - // - // Examples: - // - // * The prefix *abcd* matches the value *abcdxyz*, but not for *abcxyz*. - string prefix_match = 9 [ - deprecated = true, - (validate.rules).string = {min_len: 1}, - (envoy.annotations.deprecated_at_minor_version) = "3.0" - ]; - - // If specified, header match will be performed based on the suffix of the header value. - // Note: empty suffix is not allowed, please use present_match instead. - // This field is deprecated. Please use :ref:`string_match `. - // - // Examples: - // - // * The suffix *abcd* matches the value *xyzabcd*, but not for *xyzbcd*. - string suffix_match = 10 [ - deprecated = true, - (validate.rules).string = {min_len: 1}, - (envoy.annotations.deprecated_at_minor_version) = "3.0" - ]; - - // If specified, header match will be performed based on whether the header value contains - // the given value or not. - // Note: empty contains match is not allowed, please use present_match instead. - // This field is deprecated. Please use :ref:`string_match `. - // - // Examples: - // - // * The value *abcd* matches the value *xyzabcdpqr*, but not for *xyzbcdpqr*. - string contains_match = 12 [ - deprecated = true, - (validate.rules).string = {min_len: 1}, - (envoy.annotations.deprecated_at_minor_version) = "3.0" - ]; - - // If specified, header match will be performed based on the string match of the header value. - type.matcher.v3.StringMatcher string_match = 13; - - string hidden_envoy_deprecated_regex_match = 5 [ - deprecated = true, - (validate.rules).string = {max_bytes: 1024}, - (envoy.annotations.deprecated_at_minor_version) = "3.0", - (envoy.annotations.disallowed_by_default) = true - ]; - } - - // If specified, the match result will be inverted before checking. Defaults to false. 
- // - // Examples: - // - // * The regex ``\d{3}`` does not match the value *1234*, so it will match when inverted. - // * The range [-10,0) will match the value -1, so it will not match when inverted. - bool invert_match = 8; -} - -// Query parameter matching treats the query string of a request's :path header -// as an ampersand-separated list of keys and/or key=value elements. -// [#next-free-field: 7] -message QueryParameterMatcher { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.route.QueryParameterMatcher"; - - // Specifies the name of a key that must be present in the requested - // *path*'s query string. - string name = 1 [(validate.rules).string = {min_len: 1 max_bytes: 1024}]; - - oneof query_parameter_match_specifier { - // Specifies whether a query parameter value should match against a string. - type.matcher.v3.StringMatcher string_match = 5 [(validate.rules).message = {required: true}]; - - // Specifies whether a query parameter should be present. - bool present_match = 6; - } - - string hidden_envoy_deprecated_value = 3 [ - deprecated = true, - (envoy.annotations.deprecated_at_minor_version) = "3.0", - (envoy.annotations.disallowed_by_default) = true - ]; - - google.protobuf.BoolValue hidden_envoy_deprecated_regex = 4 [ - deprecated = true, - (envoy.annotations.deprecated_at_minor_version) = "3.0", - (envoy.annotations.disallowed_by_default) = true - ]; -} - -// HTTP Internal Redirect :ref:`architecture overview `. -message InternalRedirectPolicy { - // An internal redirect is not handled, unless the number of previous internal redirects that a - // downstream request has encountered is lower than this value. - // In the case where a downstream request is bounced among multiple routes by internal redirect, - // the first route that hits this threshold, or does not set :ref:`internal_redirect_policy - // ` - // will pass the redirect back to downstream. - // - // If not specified, at most one redirect will be followed. 
- google.protobuf.UInt32Value max_internal_redirects = 1; - - // Defines what upstream response codes are allowed to trigger internal redirect. If unspecified, - // only 302 will be treated as internal redirect. - // Only 301, 302, 303, 307 and 308 are valid values. Any other codes will be ignored. - repeated uint32 redirect_response_codes = 2 [(validate.rules).repeated = {max_items: 5}]; - - // Specifies a list of predicates that are queried when an upstream response is deemed - // to trigger an internal redirect by all other criteria. Any predicate in the list can reject - // the redirect, causing the response to be proxied to downstream. - // [#extension-category: envoy.internal_redirect_predicates] - repeated core.v3.TypedExtensionConfig predicates = 3; - - // Allow internal redirect to follow a target URI with a different scheme than the value of - // x-forwarded-proto. The default is false. - bool allow_cross_scheme_redirect = 4; -} - -// A simple wrapper for an HTTP filter config. This is intended to be used as a wrapper for the -// map value in -// :ref:`VirtualHost.typed_per_filter_config`, -// :ref:`Route.typed_per_filter_config`, -// or :ref:`WeightedCluster.ClusterWeight.typed_per_filter_config` -// to add additional flags to the filter. -// [#not-implemented-hide:] -message FilterConfig { - // The filter config. - google.protobuf.Any config = 1; - - // If true, the filter is optional, meaning that if the client does - // not support the specified filter, it may ignore the map entry rather - // than rejecting the config. 
- bool is_optional = 2; -} diff --git a/generated_api_shadow/envoy/config/route/v3/scoped_route.proto b/generated_api_shadow/envoy/config/route/v3/scoped_route.proto deleted file mode 100644 index eb47d7e10898d..0000000000000 --- a/generated_api_shadow/envoy/config/route/v3/scoped_route.proto +++ /dev/null @@ -1,120 +0,0 @@ -syntax = "proto3"; - -package envoy.config.route.v3; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.route.v3"; -option java_outer_classname = "ScopedRouteProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: HTTP scoped routing configuration] -// * Routing :ref:`architecture overview ` - -// Specifies a routing scope, which associates a -// :ref:`Key` to a -// :ref:`envoy_v3_api_msg_config.route.v3.RouteConfiguration` (identified by its resource name). -// -// The HTTP connection manager builds up a table consisting of these Key to -// RouteConfiguration mappings, and looks up the RouteConfiguration to use per -// request according to the algorithm specified in the -// :ref:`scope_key_builder` -// assigned to the HttpConnectionManager. -// -// For example, with the following configurations (in YAML): -// -// HttpConnectionManager config: -// -// .. code:: -// -// ... -// scoped_routes: -// name: foo-scoped-routes -// scope_key_builder: -// fragments: -// - header_value_extractor: -// name: X-Route-Selector -// element_separator: , -// element: -// separator: = -// key: vip -// -// ScopedRouteConfiguration resources (specified statically via -// :ref:`scoped_route_configurations_list` -// or obtained dynamically via SRDS): -// -// .. 
code:: -// -// (1) -// name: route-scope1 -// route_configuration_name: route-config1 -// key: -// fragments: -// - string_key: 172.10.10.20 -// -// (2) -// name: route-scope2 -// route_configuration_name: route-config2 -// key: -// fragments: -// - string_key: 172.20.20.30 -// -// A request from a client such as: -// -// .. code:: -// -// GET / HTTP/1.1 -// Host: foo.com -// X-Route-Selector: vip=172.10.10.20 -// -// would result in the routing table defined by the `route-config1` -// RouteConfiguration being assigned to the HTTP request/stream. -// -message ScopedRouteConfiguration { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.ScopedRouteConfiguration"; - - // Specifies a key which is matched against the output of the - // :ref:`scope_key_builder` - // specified in the HttpConnectionManager. The matching is done per HTTP - // request and is dependent on the order of the fragments contained in the - // Key. - message Key { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.ScopedRouteConfiguration.Key"; - - message Fragment { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.ScopedRouteConfiguration.Key.Fragment"; - - oneof type { - option (validate.required) = true; - - // A string to match against. - string string_key = 1; - } - } - - // The ordered set of fragments to match against. The order must match the - // fragments in the corresponding - // :ref:`scope_key_builder`. - repeated Fragment fragments = 1 [(validate.rules).repeated = {min_items: 1}]; - } - - // Whether the RouteConfiguration should be loaded on demand. - bool on_demand = 4; - - // The name assigned to the routing scope. - string name = 1 [(validate.rules).string = {min_len: 1}]; - - // The resource name to use for a :ref:`envoy_v3_api_msg_service.discovery.v3.DiscoveryRequest` to an - // RDS server to fetch the :ref:`envoy_v3_api_msg_config.route.v3.RouteConfiguration` associated - // with this scope. 
- string route_configuration_name = 2 [(validate.rules).string = {min_len: 1}]; - - // The key to match against. - Key key = 3 [(validate.rules).message = {required: true}]; -} diff --git a/generated_api_shadow/envoy/config/tap/v3/BUILD b/generated_api_shadow/envoy/config/tap/v3/BUILD deleted file mode 100644 index 416ccc0f9403c..0000000000000 --- a/generated_api_shadow/envoy/config/tap/v3/BUILD +++ /dev/null @@ -1,16 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/annotations:pkg", - "//envoy/config/common/matcher/v3:pkg", - "//envoy/config/core/v3:pkg", - "//envoy/config/route/v3:pkg", - "//envoy/service/tap/v2alpha:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/config/tap/v3/common.proto b/generated_api_shadow/envoy/config/tap/v3/common.proto deleted file mode 100644 index c25a2af5a3b51..0000000000000 --- a/generated_api_shadow/envoy/config/tap/v3/common.proto +++ /dev/null @@ -1,280 +0,0 @@ -syntax = "proto3"; - -package envoy.config.tap.v3; - -import "envoy/config/common/matcher/v3/matcher.proto"; -import "envoy/config/core/v3/base.proto"; -import "envoy/config/core/v3/grpc_service.proto"; -import "envoy/config/route/v3/route_components.proto"; - -import "google/protobuf/wrappers.proto"; - -import "envoy/annotations/deprecation.proto"; -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.tap.v3"; -option java_outer_classname = "CommonProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Common tap configuration] - -// Tap configuration. 
-message TapConfig { - // [#comment:TODO(mattklein123): Rate limiting] - - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.tap.v2alpha.TapConfig"; - - // The match configuration. If the configuration matches the data source being tapped, a tap will - // occur, with the result written to the configured output. - // Exactly one of :ref:`match ` and - // :ref:`match_config ` must be set. If both - // are set, the :ref:`match ` will be used. - MatchPredicate match_config = 1 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - - // The match configuration. If the configuration matches the data source being tapped, a tap will - // occur, with the result written to the configured output. - // Exactly one of :ref:`match ` and - // :ref:`match_config ` must be set. If both - // are set, the :ref:`match ` will be used. - common.matcher.v3.MatchPredicate match = 4; - - // The tap output configuration. If a match configuration matches a data source being tapped, - // a tap will occur and the data will be written to the configured output. - OutputConfig output_config = 2 [(validate.rules).message = {required: true}]; - - // [#not-implemented-hide:] Specify if Tap matching is enabled. The % of requests\connections for - // which the tap matching is enabled. When not enabled, the request\connection will not be - // recorded. - // - // .. note:: - // - // This field defaults to 100/:ref:`HUNDRED - // `. - core.v3.RuntimeFractionalPercent tap_enabled = 3; -} - -// Tap match configuration. This is a recursive structure which allows complex nested match -// configurations to be built using various logical operators. -// [#next-free-field: 11] -message MatchPredicate { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.tap.v2alpha.MatchPredicate"; - - // A set of match configurations used for logical operations. 
- message MatchSet { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.tap.v2alpha.MatchPredicate.MatchSet"; - - // The list of rules that make up the set. - repeated MatchPredicate rules = 1 [(validate.rules).repeated = {min_items: 2}]; - } - - oneof rule { - option (validate.required) = true; - - // A set that describes a logical OR. If any member of the set matches, the match configuration - // matches. - MatchSet or_match = 1; - - // A set that describes a logical AND. If all members of the set match, the match configuration - // matches. - MatchSet and_match = 2; - - // A negation match. The match configuration will match if the negated match condition matches. - MatchPredicate not_match = 3; - - // The match configuration will always match. - bool any_match = 4 [(validate.rules).bool = {const: true}]; - - // HTTP request headers match configuration. - HttpHeadersMatch http_request_headers_match = 5; - - // HTTP request trailers match configuration. - HttpHeadersMatch http_request_trailers_match = 6; - - // HTTP response headers match configuration. - HttpHeadersMatch http_response_headers_match = 7; - - // HTTP response trailers match configuration. - HttpHeadersMatch http_response_trailers_match = 8; - - // HTTP request generic body match configuration. - HttpGenericBodyMatch http_request_generic_body_match = 9; - - // HTTP response generic body match configuration. - HttpGenericBodyMatch http_response_generic_body_match = 10; - } -} - -// HTTP headers match configuration. -message HttpHeadersMatch { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.tap.v2alpha.HttpHeadersMatch"; - - // HTTP headers to match. - repeated route.v3.HeaderMatcher headers = 1; -} - -// HTTP generic body match configuration. -// List of text strings and hex strings to be located in HTTP body. -// All specified strings must be found in the HTTP body for positive match. 
-// The search may be limited to specified number of bytes from the body start. -// -// .. attention:: -// -// Searching for patterns in HTTP body is potentially cpu intensive. For each specified pattern, http body is scanned byte by byte to find a match. -// If multiple patterns are specified, the process is repeated for each pattern. If location of a pattern is known, ``bytes_limit`` should be specified -// to scan only part of the http body. -message HttpGenericBodyMatch { - message GenericTextMatch { - oneof rule { - option (validate.required) = true; - - // Text string to be located in HTTP body. - string string_match = 1 [(validate.rules).string = {min_len: 1}]; - - // Sequence of bytes to be located in HTTP body. - bytes binary_match = 2 [(validate.rules).bytes = {min_len: 1}]; - } - } - - // Limits search to specified number of bytes - default zero (no limit - match entire captured buffer). - uint32 bytes_limit = 1; - - // List of patterns to match. - repeated GenericTextMatch patterns = 2 [(validate.rules).repeated = {min_items: 1}]; -} - -// Tap output configuration. -message OutputConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.tap.v2alpha.OutputConfig"; - - // Output sinks for tap data. Currently a single sink is allowed in the list. Once multiple - // sink types are supported this constraint will be relaxed. - repeated OutputSink sinks = 1 [(validate.rules).repeated = {min_items: 1 max_items: 1}]; - - // For buffered tapping, the maximum amount of received body that will be buffered prior to - // truncation. If truncation occurs, the :ref:`truncated - // ` field will be set. If not specified, the - // default is 1KiB. - google.protobuf.UInt32Value max_buffered_rx_bytes = 2; - - // For buffered tapping, the maximum amount of transmitted body that will be buffered prior to - // truncation. If truncation occurs, the :ref:`truncated - // ` field will be set. If not specified, the - // default is 1KiB. 
- google.protobuf.UInt32Value max_buffered_tx_bytes = 3; - - // Indicates whether taps produce a single buffered message per tap, or multiple streamed - // messages per tap in the emitted :ref:`TraceWrapper - // ` messages. Note that streamed tapping does not - // mean that no buffering takes place. Buffering may be required if data is processed before a - // match can be determined. See the HTTP tap filter :ref:`streaming - // ` documentation for more information. - bool streaming = 4; -} - -// Tap output sink configuration. -message OutputSink { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.tap.v2alpha.OutputSink"; - - // Output format. All output is in the form of one or more :ref:`TraceWrapper - // ` messages. This enumeration indicates - // how those messages are written. Note that not all sinks support all output formats. See - // individual sink documentation for more information. - enum Format { - // Each message will be written as JSON. Any :ref:`body ` - // data will be present in the :ref:`as_bytes - // ` field. This means that body data will be - // base64 encoded as per the `proto3 JSON mappings - // `_. - JSON_BODY_AS_BYTES = 0; - - // Each message will be written as JSON. Any :ref:`body ` - // data will be present in the :ref:`as_string - // ` field. This means that body data will be - // string encoded as per the `proto3 JSON mappings - // `_. This format type is - // useful when it is known that that body is human readable (e.g., JSON over HTTP) and the - // user wishes to view it directly without being forced to base64 decode the body. - JSON_BODY_AS_STRING = 1; - - // Binary proto format. Note that binary proto is not self-delimiting. If a sink writes - // multiple binary messages without any length information the data stream will not be - // useful. However, for certain sinks that are self-delimiting (e.g., one message per file) - // this output format makes consumption simpler. 
- PROTO_BINARY = 2; - - // Messages are written as a sequence tuples, where each tuple is the message length encoded - // as a `protobuf 32-bit varint - // `_ - // followed by the binary message. The messages can be read back using the language specific - // protobuf coded stream implementation to obtain the message length and the message. - PROTO_BINARY_LENGTH_DELIMITED = 3; - - // Text proto format. - PROTO_TEXT = 4; - } - - // Sink output format. - Format format = 1 [(validate.rules).enum = {defined_only: true}]; - - oneof output_sink_type { - option (validate.required) = true; - - // Tap output will be streamed out the :http:post:`/tap` admin endpoint. - // - // .. attention:: - // - // It is only allowed to specify the streaming admin output sink if the tap is being - // configured from the :http:post:`/tap` admin endpoint. Thus, if an extension has - // been configured to receive tap configuration from some other source (e.g., static - // file, XDS, etc.) configuring the streaming admin output type will fail. - StreamingAdminSink streaming_admin = 2; - - // Tap output will be written to a file per tap sink. - FilePerTapSink file_per_tap = 3; - - // [#not-implemented-hide:] - // GrpcService to stream data to. The format argument must be PROTO_BINARY. - // [#comment: TODO(samflattery): remove cleanup in uber_per_filter.cc once implemented] - StreamingGrpcSink streaming_grpc = 4; - } -} - -// Streaming admin sink configuration. -message StreamingAdminSink { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.tap.v2alpha.StreamingAdminSink"; -} - -// The file per tap sink outputs a discrete file for every tapped stream. -message FilePerTapSink { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.tap.v2alpha.FilePerTapSink"; - - // Path prefix. 
The output file will be of the form _.pb, where is an - // identifier distinguishing the recorded trace for stream instances (the Envoy - // connection ID, HTTP stream ID, etc.). - string path_prefix = 1 [(validate.rules).string = {min_len: 1}]; -} - -// [#not-implemented-hide:] Streaming gRPC sink configuration sends the taps to an external gRPC -// server. -message StreamingGrpcSink { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.tap.v2alpha.StreamingGrpcSink"; - - // Opaque identifier, that will be sent back to the streaming grpc server. - string tap_id = 1; - - // The gRPC server that hosts the Tap Sink Service. - core.v3.GrpcService grpc_service = 2 [(validate.rules).message = {required: true}]; -} diff --git a/generated_api_shadow/envoy/config/trace/v2/BUILD b/generated_api_shadow/envoy/config/trace/v2/BUILD deleted file mode 100644 index e6505e4f15d01..0000000000000 --- a/generated_api_shadow/envoy/config/trace/v2/BUILD +++ /dev/null @@ -1,14 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/annotations:pkg", - "//envoy/api/v2/core:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - "@opencensus_proto//opencensus/proto/trace/v1:trace_config_proto", - ], -) diff --git a/generated_api_shadow/envoy/config/trace/v2/datadog.proto b/generated_api_shadow/envoy/config/trace/v2/datadog.proto deleted file mode 100644 index 0992601a8acc4..0000000000000 --- a/generated_api_shadow/envoy/config/trace/v2/datadog.proto +++ /dev/null @@ -1,23 +0,0 @@ -syntax = "proto3"; - -package envoy.config.trace.v2; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.trace.v2"; -option java_outer_classname = "DatadogProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Datadog tracer] - -// Configuration for the Datadog tracer. -// [#extension: envoy.tracers.datadog] -message DatadogConfig { - // The cluster to use for submitting traces to the Datadog agent. - string collector_cluster = 1 [(validate.rules).string = {min_bytes: 1}]; - - // The name used for the service when traces are generated by envoy. 
- string service_name = 2 [(validate.rules).string = {min_bytes: 1}]; -} diff --git a/generated_api_shadow/envoy/config/trace/v2/dynamic_ot.proto b/generated_api_shadow/envoy/config/trace/v2/dynamic_ot.proto deleted file mode 100644 index 55c6d401b335f..0000000000000 --- a/generated_api_shadow/envoy/config/trace/v2/dynamic_ot.proto +++ /dev/null @@ -1,29 +0,0 @@ -syntax = "proto3"; - -package envoy.config.trace.v2; - -import "google/protobuf/struct.proto"; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.trace.v2"; -option java_outer_classname = "DynamicOtProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Dynamically loadable OpenTracing tracer] - -// DynamicOtConfig is used to dynamically load a tracer from a shared library -// that implements the `OpenTracing dynamic loading API -// `_. -// [#extension: envoy.tracers.dynamic_ot] -message DynamicOtConfig { - // Dynamic library implementing the `OpenTracing API - // `_. - string library = 1 [(validate.rules).string = {min_bytes: 1}]; - - // The configuration to use when creating a tracer from the given dynamic - // library. 
- google.protobuf.Struct config = 2; -} diff --git a/generated_api_shadow/envoy/config/trace/v2/http_tracer.proto b/generated_api_shadow/envoy/config/trace/v2/http_tracer.proto deleted file mode 100644 index fba830b987b6d..0000000000000 --- a/generated_api_shadow/envoy/config/trace/v2/http_tracer.proto +++ /dev/null @@ -1,65 +0,0 @@ -syntax = "proto3"; - -package envoy.config.trace.v2; - -import "google/protobuf/any.proto"; -import "google/protobuf/struct.proto"; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.trace.v2"; -option java_outer_classname = "HttpTracerProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Tracing] -// Tracing :ref:`architecture overview `. - -// The tracing configuration specifies settings for an HTTP tracer provider used by Envoy. -// -// Envoy may support other tracers in the future, but right now the HTTP tracer is the only one -// supported. -// -// .. attention:: -// -// Use of this message type has been deprecated in favor of direct use of -// :ref:`Tracing.Http `. -message Tracing { - // Configuration for an HTTP tracer provider used by Envoy. - // - // The configuration is defined by the - // :ref:`HttpConnectionManager.Tracing ` - // :ref:`provider ` - // field. - message Http { - // The name of the HTTP trace driver to instantiate. The name must match a - // supported HTTP trace driver. Built-in trace drivers: - // - // - *envoy.tracers.lightstep* - // - *envoy.tracers.zipkin* - // - *envoy.tracers.dynamic_ot* - // - *envoy.tracers.datadog* - // - *envoy.tracers.opencensus* - // - *envoy.tracers.xray* - string name = 1 [(validate.rules).string = {min_bytes: 1}]; - - // Trace driver specific configuration which depends on the driver being instantiated. 
- // See the trace drivers for examples: - // - // - :ref:`LightstepConfig ` - // - :ref:`ZipkinConfig ` - // - :ref:`DynamicOtConfig ` - // - :ref:`DatadogConfig ` - // - :ref:`OpenCensusConfig ` - // - :ref:`AWS X-Ray ` - oneof config_type { - google.protobuf.Struct config = 2 [deprecated = true]; - - google.protobuf.Any typed_config = 3; - } - } - - // Provides configuration for the HTTP tracer. - Http http = 1; -} diff --git a/generated_api_shadow/envoy/config/trace/v2/lightstep.proto b/generated_api_shadow/envoy/config/trace/v2/lightstep.proto deleted file mode 100644 index 849749baaa0d9..0000000000000 --- a/generated_api_shadow/envoy/config/trace/v2/lightstep.proto +++ /dev/null @@ -1,43 +0,0 @@ -syntax = "proto3"; - -package envoy.config.trace.v2; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.trace.v2"; -option java_outer_classname = "LightstepProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: LightStep tracer] - -// Configuration for the LightStep tracer. -// [#extension: envoy.tracers.lightstep] -message LightstepConfig { - // Available propagation modes - enum PropagationMode { - // Propagate trace context in the single header x-ot-span-context. - ENVOY = 0; - - // Propagate trace context using LightStep's native format. - LIGHTSTEP = 1; - - // Propagate trace context using the b3 format. - B3 = 2; - - // Propagation trace context using the w3 trace-context standard. - TRACE_CONTEXT = 3; - } - - // The cluster manager cluster that hosts the LightStep collectors. - string collector_cluster = 1 [(validate.rules).string = {min_bytes: 1}]; - - // File containing the access token to the `LightStep - // `_ API. - string access_token_file = 2 [(validate.rules).string = {min_bytes: 1}]; - - // Propagation modes to use by LightStep's tracer. 
- repeated PropagationMode propagation_modes = 3 - [(validate.rules).repeated = {items {enum {defined_only: true}}}]; -} diff --git a/generated_api_shadow/envoy/config/trace/v2/opencensus.proto b/generated_api_shadow/envoy/config/trace/v2/opencensus.proto deleted file mode 100644 index 1a9a879b21e43..0000000000000 --- a/generated_api_shadow/envoy/config/trace/v2/opencensus.proto +++ /dev/null @@ -1,92 +0,0 @@ -syntax = "proto3"; - -package envoy.config.trace.v2; - -import "envoy/api/v2/core/grpc_service.proto"; - -import "opencensus/proto/trace/v1/trace_config.proto"; - -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.config.trace.v2"; -option java_outer_classname = "OpencensusProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: OpenCensus tracer] - -// Configuration for the OpenCensus tracer. -// [#next-free-field: 15] -// [#extension: envoy.tracers.opencensus] -message OpenCensusConfig { - enum TraceContext { - // No-op default, no trace context is utilized. - NONE = 0; - - // W3C Trace-Context format "traceparent:" header. - TRACE_CONTEXT = 1; - - // Binary "grpc-trace-bin:" header. - GRPC_TRACE_BIN = 2; - - // "X-Cloud-Trace-Context:" header. - CLOUD_TRACE_CONTEXT = 3; - - // X-B3-* headers. - B3 = 4; - } - - reserved 7; - - // Configures tracing, e.g. the sampler, max number of annotations, etc. - opencensus.proto.trace.v1.TraceConfig trace_config = 1; - - // Enables the stdout exporter if set to true. This is intended for debugging - // purposes. - bool stdout_exporter_enabled = 2; - - // Enables the Stackdriver exporter if set to true. The project_id must also - // be set. - bool stackdriver_exporter_enabled = 3; - - // The Cloud project_id to use for Stackdriver tracing. - string stackdriver_project_id = 4; - - // (optional) By default, the Stackdriver exporter will connect to production - // Stackdriver. 
If stackdriver_address is non-empty, it will instead connect - // to this address, which is in the gRPC format: - // https://github.com/grpc/grpc/blob/master/doc/naming.md - string stackdriver_address = 10; - - // (optional) The gRPC server that hosts Stackdriver tracing service. Only - // Google gRPC is supported. If :ref:`target_uri ` - // is not provided, the default production Stackdriver address will be used. - api.v2.core.GrpcService stackdriver_grpc_service = 13; - - // Enables the Zipkin exporter if set to true. The url and service name must - // also be set. - bool zipkin_exporter_enabled = 5; - - // The URL to Zipkin, e.g. "http://127.0.0.1:9411/api/v2/spans" - string zipkin_url = 6; - - // Enables the OpenCensus Agent exporter if set to true. The ocagent_address or - // ocagent_grpc_service must also be set. - bool ocagent_exporter_enabled = 11; - - // The address of the OpenCensus Agent, if its exporter is enabled, in gRPC - // format: https://github.com/grpc/grpc/blob/master/doc/naming.md - // [#comment:TODO: deprecate this field] - string ocagent_address = 12; - - // (optional) The gRPC server hosted by the OpenCensus Agent. Only Google gRPC is supported. - // This is only used if the ocagent_address is left empty. - api.v2.core.GrpcService ocagent_grpc_service = 14; - - // List of incoming trace context headers we will accept. First one found - // wins. - repeated TraceContext incoming_trace_context = 8; - - // List of outgoing trace context headers we will produce. 
- repeated TraceContext outgoing_trace_context = 9; -} diff --git a/generated_api_shadow/envoy/config/trace/v2/service.proto b/generated_api_shadow/envoy/config/trace/v2/service.proto deleted file mode 100644 index d102499b6261a..0000000000000 --- a/generated_api_shadow/envoy/config/trace/v2/service.proto +++ /dev/null @@ -1,21 +0,0 @@ -syntax = "proto3"; - -package envoy.config.trace.v2; - -import "envoy/api/v2/core/grpc_service.proto"; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.trace.v2"; -option java_outer_classname = "ServiceProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Trace Service] - -// Configuration structure. -message TraceServiceConfig { - // The upstream gRPC cluster that hosts the metrics service. - api.v2.core.GrpcService grpc_service = 1 [(validate.rules).message = {required: true}]; -} diff --git a/generated_api_shadow/envoy/config/trace/v2/trace.proto b/generated_api_shadow/envoy/config/trace/v2/trace.proto deleted file mode 100644 index 6ed394147db10..0000000000000 --- a/generated_api_shadow/envoy/config/trace/v2/trace.proto +++ /dev/null @@ -1,15 +0,0 @@ -syntax = "proto3"; - -package envoy.config.trace.v2; - -import public "envoy/config/trace/v2/datadog.proto"; -import public "envoy/config/trace/v2/dynamic_ot.proto"; -import public "envoy/config/trace/v2/http_tracer.proto"; -import public "envoy/config/trace/v2/lightstep.proto"; -import public "envoy/config/trace/v2/opencensus.proto"; -import public "envoy/config/trace/v2/service.proto"; -import public "envoy/config/trace/v2/zipkin.proto"; - -option java_package = "io.envoyproxy.envoy.config.trace.v2"; -option java_outer_classname = "TraceProto"; -option java_multiple_files = true; diff --git a/generated_api_shadow/envoy/config/trace/v2/zipkin.proto b/generated_api_shadow/envoy/config/trace/v2/zipkin.proto deleted 
file mode 100644 index a825d85bb7f94..0000000000000 --- a/generated_api_shadow/envoy/config/trace/v2/zipkin.proto +++ /dev/null @@ -1,64 +0,0 @@ -syntax = "proto3"; - -package envoy.config.trace.v2; - -import "google/protobuf/wrappers.proto"; - -import "envoy/annotations/deprecation.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.trace.v2"; -option java_outer_classname = "ZipkinProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Zipkin tracer] - -// Configuration for the Zipkin tracer. -// [#extension: envoy.tracers.zipkin] -// [#next-free-field: 6] -message ZipkinConfig { - // Available Zipkin collector endpoint versions. - enum CollectorEndpointVersion { - // Zipkin API v1, JSON over HTTP. - // [#comment: The default implementation of Zipkin client before this field is added was only v1 - // and the way user configure this was by not explicitly specifying the version. Consequently, - // before this is added, the corresponding Zipkin collector expected to receive v1 payload. - // Hence the motivation of adding HTTP_JSON_V1 as the default is to avoid a breaking change when - // user upgrading Envoy with this change. Furthermore, we also immediately deprecate this field, - // since in Zipkin realm this v1 version is considered to be not preferable anymore.] - HTTP_JSON_V1 = 0 [deprecated = true, (envoy.annotations.disallowed_by_default_enum) = true]; - - // Zipkin API v2, JSON over HTTP. - HTTP_JSON = 1; - - // Zipkin API v2, protobuf over HTTP. - HTTP_PROTO = 2; - - // [#not-implemented-hide:] - GRPC = 3; - } - - // The cluster manager cluster that hosts the Zipkin collectors. Note that the - // Zipkin cluster must be defined in the :ref:`Bootstrap static cluster - // resources `. 
- string collector_cluster = 1 [(validate.rules).string = {min_bytes: 1}]; - - // The API endpoint of the Zipkin service where the spans will be sent. When - // using a standard Zipkin installation, the API endpoint is typically - // /api/v1/spans, which is the default value. - string collector_endpoint = 2 [(validate.rules).string = {min_bytes: 1}]; - - // Determines whether a 128bit trace id will be used when creating a new - // trace instance. The default value is false, which will result in a 64 bit trace id being used. - bool trace_id_128bit = 3; - - // Determines whether client and server spans will share the same span context. - // The default value is true. - google.protobuf.BoolValue shared_span_context = 4; - - // Determines the selected collector endpoint version. By default, the ``HTTP_JSON_V1`` will be - // used. - CollectorEndpointVersion collector_endpoint_version = 5; -} diff --git a/generated_api_shadow/envoy/config/trace/v2alpha/BUILD b/generated_api_shadow/envoy/config/trace/v2alpha/BUILD deleted file mode 100644 index 83bc0ab960e74..0000000000000 --- a/generated_api_shadow/envoy/config/trace/v2alpha/BUILD +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/api/v2/core:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/config/trace/v2alpha/xray.proto b/generated_api_shadow/envoy/config/trace/v2alpha/xray.proto deleted file mode 100644 index 27db3ba40b724..0000000000000 --- a/generated_api_shadow/envoy/config/trace/v2alpha/xray.proto +++ /dev/null @@ -1,32 +0,0 @@ -syntax = "proto3"; - -package envoy.config.trace.v2alpha; - -import "envoy/api/v2/core/address.proto"; -import "envoy/api/v2/core/base.proto"; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.trace.v2alpha"; -option java_outer_classname = "XrayProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: AWS X-Ray Tracer Configuration] -// Configuration for AWS X-Ray tracer - -message XRayConfig { - // The UDP endpoint of the X-Ray Daemon where the spans will be sent. - // If this value is not set, the default value of 127.0.0.1:2000 will be used. - api.v2.core.SocketAddress daemon_endpoint = 1; - - // The name of the X-Ray segment. - string segment_name = 2 [(validate.rules).string = {min_len: 1}]; - - // The location of a local custom sampling rules JSON file. - // For an example of the sampling rules see: - // `X-Ray SDK documentation - // `_ - api.v2.core.DataSource sampling_rule_manifest = 3; -} diff --git a/generated_api_shadow/envoy/config/trace/v3/BUILD b/generated_api_shadow/envoy/config/trace/v3/BUILD deleted file mode 100644 index ec0d9dd6a65ba..0000000000000 --- a/generated_api_shadow/envoy/config/trace/v3/BUILD +++ /dev/null @@ -1,16 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/annotations:pkg", - "//envoy/config/core/v3:pkg", - "//envoy/config/trace/v2:pkg", - "//envoy/config/trace/v2alpha:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - "@opencensus_proto//opencensus/proto/trace/v1:trace_config_proto", - ], -) diff --git a/generated_api_shadow/envoy/config/trace/v3/datadog.proto b/generated_api_shadow/envoy/config/trace/v3/datadog.proto deleted file mode 100644 index c101ab2f03c9a..0000000000000 --- a/generated_api_shadow/envoy/config/trace/v3/datadog.proto +++ /dev/null @@ -1,29 +0,0 @@ -syntax = "proto3"; - -package envoy.config.trace.v3; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.trace.v3"; -option java_outer_classname = "DatadogProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.tracers.datadog.v4alpha"; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Datadog tracer] - -// Configuration for the Datadog tracer. -// [#extension: envoy.tracers.datadog] -message DatadogConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.trace.v2.DatadogConfig"; - - // The cluster to use for submitting traces to the Datadog agent. - string collector_cluster = 1 [(validate.rules).string = {min_len: 1}]; - - // The name used for the service when traces are generated by envoy. 
- string service_name = 2 [(validate.rules).string = {min_len: 1}]; -} diff --git a/generated_api_shadow/envoy/config/trace/v3/dynamic_ot.proto b/generated_api_shadow/envoy/config/trace/v3/dynamic_ot.proto deleted file mode 100644 index c281068715428..0000000000000 --- a/generated_api_shadow/envoy/config/trace/v3/dynamic_ot.proto +++ /dev/null @@ -1,36 +0,0 @@ -syntax = "proto3"; - -package envoy.config.trace.v3; - -import "google/protobuf/struct.proto"; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.trace.v3"; -option java_outer_classname = "DynamicOtProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = - "envoy.extensions.tracers.dynamic_ot.v4alpha"; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Dynamically loadable OpenTracing tracer] - -// DynamicOtConfig is used to dynamically load a tracer from a shared library -// that implements the `OpenTracing dynamic loading API -// `_. -// [#extension: envoy.tracers.dynamic_ot] -message DynamicOtConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.trace.v2.DynamicOtConfig"; - - // Dynamic library implementing the `OpenTracing API - // `_. - string library = 1 [(validate.rules).string = {min_len: 1}]; - - // The configuration to use when creating a tracer from the given dynamic - // library. 
- google.protobuf.Struct config = 2; -} diff --git a/generated_api_shadow/envoy/config/trace/v3/http_tracer.proto b/generated_api_shadow/envoy/config/trace/v3/http_tracer.proto deleted file mode 100644 index 5ec74646e79be..0000000000000 --- a/generated_api_shadow/envoy/config/trace/v3/http_tracer.proto +++ /dev/null @@ -1,60 +0,0 @@ -syntax = "proto3"; - -package envoy.config.trace.v3; - -import "google/protobuf/any.proto"; -import "google/protobuf/struct.proto"; - -import "envoy/annotations/deprecation.proto"; -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.trace.v3"; -option java_outer_classname = "HttpTracerProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Tracing] -// Tracing :ref:`architecture overview `. - -// The tracing configuration specifies settings for an HTTP tracer provider used by Envoy. -// -// Envoy may support other tracers in the future, but right now the HTTP tracer is the only one -// supported. -// -// .. attention:: -// -// Use of this message type has been deprecated in favor of direct use of -// :ref:`Tracing.Http `. -message Tracing { - option (udpa.annotations.versioning).previous_message_type = "envoy.config.trace.v2.Tracing"; - - // Configuration for an HTTP tracer provider used by Envoy. - // - // The configuration is defined by the - // :ref:`HttpConnectionManager.Tracing ` - // :ref:`provider ` - // field. - message Http { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.trace.v2.Tracing.Http"; - - // The name of the HTTP trace driver to instantiate. The name must match a - // supported HTTP trace driver. - // See the :ref:`extensions listed in typed_config below ` for the default list of the HTTP trace driver. 
- string name = 1 [(validate.rules).string = {min_len: 1}]; - - // Trace driver specific configuration which must be set according to the driver being instantiated. - // [#extension-category: envoy.tracers] - oneof config_type { - google.protobuf.Any typed_config = 3; - - google.protobuf.Struct hidden_envoy_deprecated_config = 2 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - } - } - - // Provides configuration for the HTTP tracer. - Http http = 1; -} diff --git a/generated_api_shadow/envoy/config/trace/v3/lightstep.proto b/generated_api_shadow/envoy/config/trace/v3/lightstep.proto deleted file mode 100644 index b5cff53fea96a..0000000000000 --- a/generated_api_shadow/envoy/config/trace/v3/lightstep.proto +++ /dev/null @@ -1,57 +0,0 @@ -syntax = "proto3"; - -package envoy.config.trace.v3; - -import "envoy/config/core/v3/base.proto"; - -import "envoy/annotations/deprecation.proto"; -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.trace.v3"; -option java_outer_classname = "LightstepProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = - "envoy.extensions.tracers.lightstep.v4alpha"; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: LightStep tracer] - -// Configuration for the LightStep tracer. -// [#extension: envoy.tracers.lightstep] -message LightstepConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.trace.v2.LightstepConfig"; - - // Available propagation modes - enum PropagationMode { - // Propagate trace context in the single header x-ot-span-context. - ENVOY = 0; - - // Propagate trace context using LightStep's native format. - LIGHTSTEP = 1; - - // Propagate trace context using the b3 format. 
- B3 = 2; - - // Propagation trace context using the w3 trace-context standard. - TRACE_CONTEXT = 3; - } - - // The cluster manager cluster that hosts the LightStep collectors. - string collector_cluster = 1 [(validate.rules).string = {min_len: 1}]; - - // File containing the access token to the `LightStep - // `_ API. - string access_token_file = 2 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - - // Access token to the `LightStep `_ API. - core.v3.DataSource access_token = 4; - - // Propagation modes to use by LightStep's tracer. - repeated PropagationMode propagation_modes = 3 - [(validate.rules).repeated = {items {enum {defined_only: true}}}]; -} diff --git a/generated_api_shadow/envoy/config/trace/v3/opencensus.proto b/generated_api_shadow/envoy/config/trace/v3/opencensus.proto deleted file mode 100644 index ee2241e729a81..0000000000000 --- a/generated_api_shadow/envoy/config/trace/v3/opencensus.proto +++ /dev/null @@ -1,105 +0,0 @@ -syntax = "proto3"; - -package envoy.config.trace.v3; - -import "envoy/config/core/v3/grpc_service.proto"; - -import "opencensus/proto/trace/v1/trace_config.proto"; - -import "envoy/annotations/deprecation.proto"; -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; - -option java_package = "io.envoyproxy.envoy.config.trace.v3"; -option java_outer_classname = "OpencensusProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = - "envoy.extensions.tracers.opencensus.v4alpha"; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: OpenCensus tracer] - -// Configuration for the OpenCensus tracer. 
-// [#next-free-field: 15] -// [#extension: envoy.tracers.opencensus] -message OpenCensusConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.trace.v2.OpenCensusConfig"; - - enum TraceContext { - // No-op default, no trace context is utilized. - NONE = 0; - - // W3C Trace-Context format "traceparent:" header. - TRACE_CONTEXT = 1; - - // Binary "grpc-trace-bin:" header. - GRPC_TRACE_BIN = 2; - - // "X-Cloud-Trace-Context:" header. - CLOUD_TRACE_CONTEXT = 3; - - // X-B3-* headers. - B3 = 4; - } - - reserved 7; - - // Configures tracing, e.g. the sampler, max number of annotations, etc. - opencensus.proto.trace.v1.TraceConfig trace_config = 1; - - // Enables the stdout exporter if set to true. This is intended for debugging - // purposes. - bool stdout_exporter_enabled = 2; - - // Enables the Stackdriver exporter if set to true. The project_id must also - // be set. - bool stackdriver_exporter_enabled = 3; - - // The Cloud project_id to use for Stackdriver tracing. - string stackdriver_project_id = 4; - - // (optional) By default, the Stackdriver exporter will connect to production - // Stackdriver. If stackdriver_address is non-empty, it will instead connect - // to this address, which is in the gRPC format: - // https://github.com/grpc/grpc/blob/master/doc/naming.md - string stackdriver_address = 10; - - // (optional) The gRPC server that hosts Stackdriver tracing service. Only - // Google gRPC is supported. If :ref:`target_uri ` - // is not provided, the default production Stackdriver address will be used. - core.v3.GrpcService stackdriver_grpc_service = 13; - - // Enables the Zipkin exporter if set to true. The url and service name must - // also be set. This is deprecated, prefer to use Envoy's :ref:`native Zipkin - // tracer `. - bool zipkin_exporter_enabled = 5 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - - // The URL to Zipkin, e.g. "http://127.0.0.1:9411/api/v2/spans". 
This is - // deprecated, prefer to use Envoy's :ref:`native Zipkin tracer - // `. - string zipkin_url = 6 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - - // Enables the OpenCensus Agent exporter if set to true. The ocagent_address or - // ocagent_grpc_service must also be set. - bool ocagent_exporter_enabled = 11; - - // The address of the OpenCensus Agent, if its exporter is enabled, in gRPC - // format: https://github.com/grpc/grpc/blob/master/doc/naming.md - // [#comment:TODO: deprecate this field] - string ocagent_address = 12; - - // (optional) The gRPC server hosted by the OpenCensus Agent. Only Google gRPC is supported. - // This is only used if the ocagent_address is left empty. - core.v3.GrpcService ocagent_grpc_service = 14; - - // List of incoming trace context headers we will accept. First one found - // wins. - repeated TraceContext incoming_trace_context = 8; - - // List of outgoing trace context headers we will produce. - repeated TraceContext outgoing_trace_context = 9; -} diff --git a/generated_api_shadow/envoy/config/trace/v3/service.proto b/generated_api_shadow/envoy/config/trace/v3/service.proto deleted file mode 100644 index 1e01ff61847f0..0000000000000 --- a/generated_api_shadow/envoy/config/trace/v3/service.proto +++ /dev/null @@ -1,25 +0,0 @@ -syntax = "proto3"; - -package envoy.config.trace.v3; - -import "envoy/config/core/v3/grpc_service.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.trace.v3"; -option java_outer_classname = "ServiceProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Trace Service] - -// Configuration structure. 
-message TraceServiceConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.trace.v2.TraceServiceConfig"; - - // The upstream gRPC cluster that hosts the metrics service. - core.v3.GrpcService grpc_service = 1 [(validate.rules).message = {required: true}]; -} diff --git a/generated_api_shadow/envoy/config/trace/v3/skywalking.proto b/generated_api_shadow/envoy/config/trace/v3/skywalking.proto deleted file mode 100644 index 3961a9e4db860..0000000000000 --- a/generated_api_shadow/envoy/config/trace/v3/skywalking.proto +++ /dev/null @@ -1,65 +0,0 @@ -syntax = "proto3"; - -package envoy.config.trace.v3; - -import "envoy/config/core/v3/grpc_service.proto"; - -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/sensitive.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.trace.v3"; -option java_outer_classname = "SkywalkingProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = - "envoy.extensions.tracers.skywalking.v4alpha"; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: SkyWalking tracer] - -// Configuration for the SkyWalking tracer. Please note that if SkyWalking tracer is used as the -// provider of http tracer, then -// :ref:`start_child_span ` -// in the router must be set to true to get the correct topology and tracing data. Moreover, SkyWalking -// Tracer does not support SkyWalking extension header (``sw8-x``) temporarily. -// [#extension: envoy.tracers.skywalking] -message SkyWalkingConfig { - // SkyWalking collector service. - core.v3.GrpcService grpc_service = 1 [(validate.rules).message = {required: true}]; - - ClientConfig client_config = 2; -} - -// Client config for SkyWalking tracer. -message ClientConfig { - // Service name for SkyWalking tracer. 
If this field is empty, then local service cluster name - // that configured by :ref:`Bootstrap node ` - // message's :ref:`cluster ` field or command line - // option :option:`--service-cluster` will be used. If both this field and local service cluster - // name are empty, ``EnvoyProxy`` is used as the service name by default. - string service_name = 1; - - // Service instance name for SkyWalking tracer. If this field is empty, then local service node - // that configured by :ref:`Bootstrap node ` - // message's :ref:`id ` field or command line option - // :option:`--service-node` will be used. If both this field and local service node are empty, - // ``EnvoyProxy`` is used as the instance name by default. - string instance_name = 2; - - // Authentication token config for SkyWalking. SkyWalking can use token authentication to secure - // that monitoring application data can be trusted. In current version, Token is considered as a - // simple string. - // [#comment:TODO(wbpcode): Get backend token through the SDS API.] - oneof backend_token_specifier { - // Inline authentication token string. - string backend_token = 3 [(udpa.annotations.sensitive) = true]; - } - - // Envoy caches the segment in memory when the SkyWalking backend service is temporarily unavailable. - // This field specifies the maximum number of segments that can be cached. If not specified, the - // default is 1024. 
- google.protobuf.UInt32Value max_cache_size = 4; -} diff --git a/generated_api_shadow/envoy/config/trace/v3/trace.proto b/generated_api_shadow/envoy/config/trace/v3/trace.proto deleted file mode 100644 index 472e38b5abb8d..0000000000000 --- a/generated_api_shadow/envoy/config/trace/v3/trace.proto +++ /dev/null @@ -1,15 +0,0 @@ -syntax = "proto3"; - -package envoy.config.trace.v3; - -import public "envoy/config/trace/v3/datadog.proto"; -import public "envoy/config/trace/v3/dynamic_ot.proto"; -import public "envoy/config/trace/v3/http_tracer.proto"; -import public "envoy/config/trace/v3/lightstep.proto"; -import public "envoy/config/trace/v3/opencensus.proto"; -import public "envoy/config/trace/v3/service.proto"; -import public "envoy/config/trace/v3/zipkin.proto"; - -option java_package = "io.envoyproxy.envoy.config.trace.v3"; -option java_outer_classname = "TraceProto"; -option java_multiple_files = true; diff --git a/generated_api_shadow/envoy/config/trace/v3/xray.proto b/generated_api_shadow/envoy/config/trace/v3/xray.proto deleted file mode 100644 index 208170b60c3f7..0000000000000 --- a/generated_api_shadow/envoy/config/trace/v3/xray.proto +++ /dev/null @@ -1,55 +0,0 @@ -syntax = "proto3"; - -package envoy.config.trace.v3; - -import "envoy/config/core/v3/address.proto"; -import "envoy/config/core/v3/base.proto"; - -import "google/protobuf/struct.proto"; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.trace.v3"; -option java_outer_classname = "XrayProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.tracers.xray.v4alpha"; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: AWS X-Ray Tracer Configuration] -// Configuration for AWS X-Ray tracer - -// [#extension: envoy.tracers.xray] 
-message XRayConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.trace.v2alpha.XRayConfig"; - - message SegmentFields { - // The type of AWS resource, e.g. "AWS::AppMesh::Proxy". - string origin = 1; - - // AWS resource metadata dictionary. - // See: `X-Ray Segment Document documentation `__ - google.protobuf.Struct aws = 2; - } - - // The UDP endpoint of the X-Ray Daemon where the spans will be sent. - // If this value is not set, the default value of 127.0.0.1:2000 will be used. - core.v3.SocketAddress daemon_endpoint = 1; - - // The name of the X-Ray segment. - string segment_name = 2 [(validate.rules).string = {min_len: 1}]; - - // The location of a local custom sampling rules JSON file. - // For an example of the sampling rules see: - // `X-Ray SDK documentation - // `_ - core.v3.DataSource sampling_rule_manifest = 3; - - // Optional custom fields to be added to each trace segment. - // see: `X-Ray Segment Document documentation - // `__ - SegmentFields segment_fields = 4; -} diff --git a/generated_api_shadow/envoy/config/trace/v3/zipkin.proto b/generated_api_shadow/envoy/config/trace/v3/zipkin.proto deleted file mode 100644 index 42e46ed69c649..0000000000000 --- a/generated_api_shadow/envoy/config/trace/v3/zipkin.proto +++ /dev/null @@ -1,73 +0,0 @@ -syntax = "proto3"; - -package envoy.config.trace.v3; - -import "google/protobuf/wrappers.proto"; - -import "envoy/annotations/deprecation.proto"; -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.trace.v3"; -option java_outer_classname = "ZipkinProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.tracers.zipkin.v4alpha"; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Zipkin tracer] - -// Configuration 
for the Zipkin tracer. -// [#extension: envoy.tracers.zipkin] -// [#next-free-field: 7] -message ZipkinConfig { - option (udpa.annotations.versioning).previous_message_type = "envoy.config.trace.v2.ZipkinConfig"; - - // Available Zipkin collector endpoint versions. - enum CollectorEndpointVersion { - // Zipkin API v1, JSON over HTTP. - // [#comment: The default implementation of Zipkin client before this field is added was only v1 - // and the way user configure this was by not explicitly specifying the version. Consequently, - // before this is added, the corresponding Zipkin collector expected to receive v1 payload. - // Hence the motivation of adding HTTP_JSON_V1 as the default is to avoid a breaking change when - // user upgrading Envoy with this change. Furthermore, we also immediately deprecate this field, - // since in Zipkin realm this v1 version is considered to be not preferable anymore.] - hidden_envoy_deprecated_HTTP_JSON_V1 = 0 [ - deprecated = true, - (envoy.annotations.disallowed_by_default_enum) = true, - (envoy.annotations.deprecated_at_minor_version_enum) = "3.0" - ]; - - // Zipkin API v2, JSON over HTTP. - HTTP_JSON = 1; - - // Zipkin API v2, protobuf over HTTP. - HTTP_PROTO = 2; - - // [#not-implemented-hide:] - GRPC = 3; - } - - // The cluster manager cluster that hosts the Zipkin collectors. - string collector_cluster = 1 [(validate.rules).string = {min_len: 1}]; - - // The API endpoint of the Zipkin service where the spans will be sent. When - // using a standard Zipkin installation. - string collector_endpoint = 2 [(validate.rules).string = {min_len: 1}]; - - // Determines whether a 128bit trace id will be used when creating a new - // trace instance. The default value is false, which will result in a 64 bit trace id being used. - bool trace_id_128bit = 3; - - // Determines whether client and server spans will share the same span context. - // The default value is true. 
- google.protobuf.BoolValue shared_span_context = 4; - - // Determines the selected collector endpoint version. - CollectorEndpointVersion collector_endpoint_version = 5; - - // Optional hostname to use when sending spans to the collector_cluster. Useful for collectors - // that require a specific hostname. Defaults to :ref:`collector_cluster ` above. - string collector_hostname = 6; -} diff --git a/generated_api_shadow/envoy/config/transport_socket/alts/v2alpha/BUILD b/generated_api_shadow/envoy/config/transport_socket/alts/v2alpha/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/config/transport_socket/alts/v2alpha/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/config/transport_socket/alts/v2alpha/alts.proto b/generated_api_shadow/envoy/config/transport_socket/alts/v2alpha/alts.proto deleted file mode 100644 index 92d5fb83a49cd..0000000000000 --- a/generated_api_shadow/envoy/config/transport_socket/alts/v2alpha/alts.proto +++ /dev/null @@ -1,29 +0,0 @@ -syntax = "proto3"; - -package envoy.config.transport_socket.alts.v2alpha; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.transport_socket.alts.v2alpha"; -option java_outer_classname = "AltsProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = - "envoy.extensions.transport_sockets.alts.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: ALTS] -// [#extension: envoy.transport_sockets.alts] - -// Configuration for ALTS transport socket. 
This provides Google's ALTS protocol to Envoy. -// https://cloud.google.com/security/encryption-in-transit/application-layer-transport-security/ -message Alts { - // The location of a handshaker service, this is usually 169.254.169.254:8080 - // on GCE. - string handshaker_service = 1 [(validate.rules).string = {min_bytes: 1}]; - - // The acceptable service accounts from peer, peers not in the list will be rejected in the - // handshake validation step. If empty, no validation will be performed. - repeated string peer_service_accounts = 2; -} diff --git a/generated_api_shadow/envoy/config/transport_socket/raw_buffer/v2/BUILD b/generated_api_shadow/envoy/config/transport_socket/raw_buffer/v2/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/config/transport_socket/raw_buffer/v2/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/config/transport_socket/raw_buffer/v2/raw_buffer.proto b/generated_api_shadow/envoy/config/transport_socket/raw_buffer/v2/raw_buffer.proto deleted file mode 100644 index 1b3fd395d5724..0000000000000 --- a/generated_api_shadow/envoy/config/transport_socket/raw_buffer/v2/raw_buffer.proto +++ /dev/null @@ -1,20 +0,0 @@ -syntax = "proto3"; - -package envoy.config.transport_socket.raw_buffer.v2; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.config.transport_socket.raw_buffer.v2"; -option java_outer_classname = "RawBufferProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = - "envoy.extensions.transport_sockets.raw_buffer.v3"; -option 
(udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Raw Buffer] -// [#extension: envoy.transport_sockets.raw_buffer] - -// Configuration for raw buffer transport socket. -message RawBuffer { -} diff --git a/generated_api_shadow/envoy/config/transport_socket/tap/v2alpha/BUILD b/generated_api_shadow/envoy/config/transport_socket/tap/v2alpha/BUILD deleted file mode 100644 index 52ca9859536e8..0000000000000 --- a/generated_api_shadow/envoy/config/transport_socket/tap/v2alpha/BUILD +++ /dev/null @@ -1,13 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/api/v2/core:pkg", - "//envoy/config/common/tap/v2alpha:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/config/transport_socket/tap/v2alpha/tap.proto b/generated_api_shadow/envoy/config/transport_socket/tap/v2alpha/tap.proto deleted file mode 100644 index 0802c7558ad35..0000000000000 --- a/generated_api_shadow/envoy/config/transport_socket/tap/v2alpha/tap.proto +++ /dev/null @@ -1,32 +0,0 @@ -syntax = "proto3"; - -package envoy.config.transport_socket.tap.v2alpha; - -import "envoy/api/v2/core/base.proto"; -import "envoy/config/common/tap/v2alpha/common.proto"; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.config.transport_socket.tap.v2alpha"; -option java_outer_classname = "TapProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = - "envoy.extensions.transport_sockets.tap.v3"; -option (udpa.annotations.file_status).work_in_progress = true; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Tap] -// [#extension: envoy.transport_sockets.tap] - 
-// Configuration for tap transport socket. This wraps another transport socket, providing the -// ability to interpose and record in plain text any traffic that is surfaced to Envoy. -message Tap { - // Common configuration for the tap transport socket. - common.tap.v2alpha.CommonExtensionConfig common_config = 1 - [(validate.rules).message = {required: true}]; - - // The underlying transport socket being wrapped. - api.v2.core.TransportSocket transport_socket = 2 [(validate.rules).message = {required: true}]; -} diff --git a/generated_api_shadow/envoy/data/accesslog/v2/BUILD b/generated_api_shadow/envoy/data/accesslog/v2/BUILD deleted file mode 100644 index 83bc0ab960e74..0000000000000 --- a/generated_api_shadow/envoy/data/accesslog/v2/BUILD +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/api/v2/core:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/data/accesslog/v2/accesslog.proto b/generated_api_shadow/envoy/data/accesslog/v2/accesslog.proto deleted file mode 100644 index af19197f62a6a..0000000000000 --- a/generated_api_shadow/envoy/data/accesslog/v2/accesslog.proto +++ /dev/null @@ -1,378 +0,0 @@ -syntax = "proto3"; - -package envoy.data.accesslog.v2; - -import "envoy/api/v2/core/address.proto"; -import "envoy/api/v2/core/base.proto"; - -import "google/protobuf/any.proto"; -import "google/protobuf/duration.proto"; -import "google/protobuf/timestamp.proto"; -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.data.accesslog.v2"; -option java_outer_classname = "AccesslogProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// 
[#protodoc-title: gRPC access logs] -// Envoy access logs describe incoming interaction with Envoy over a fixed -// period of time, and typically cover a single request/response exchange, -// (e.g. HTTP), stream (e.g. over HTTP/gRPC), or proxied connection (e.g. TCP). -// Access logs contain fields defined in protocol-specific protobuf messages. -// -// Except where explicitly declared otherwise, all fields describe -// *downstream* interaction between Envoy and a connected client. -// Fields describing *upstream* interaction will explicitly include ``upstream`` -// in their name. - -message TCPAccessLogEntry { - // Common properties shared by all Envoy access logs. - AccessLogCommon common_properties = 1; - - // Properties of the TCP connection. - ConnectionProperties connection_properties = 2; -} - -message HTTPAccessLogEntry { - // HTTP version - enum HTTPVersion { - PROTOCOL_UNSPECIFIED = 0; - HTTP10 = 1; - HTTP11 = 2; - HTTP2 = 3; - HTTP3 = 4; - } - - // Common properties shared by all Envoy access logs. - AccessLogCommon common_properties = 1; - - HTTPVersion protocol_version = 2; - - // Description of the incoming HTTP request. - HTTPRequestProperties request = 3; - - // Description of the outgoing HTTP response. - HTTPResponseProperties response = 4; -} - -// Defines fields for a connection -message ConnectionProperties { - // Number of bytes received from downstream. - uint64 received_bytes = 1; - - // Number of bytes sent to downstream. - uint64 sent_bytes = 2; -} - -// Defines fields that are shared by all Envoy access logs. -// [#next-free-field: 22] -message AccessLogCommon { - // [#not-implemented-hide:] - // This field indicates the rate at which this log entry was sampled. - // Valid range is (0.0, 1.0]. - double sample_rate = 1 [(validate.rules).double = {lte: 1.0 gt: 0.0}]; - - // This field is the remote/origin address on which the request from the user was received. - // Note: This may not be the physical peer. 
E.g, if the remote address is inferred from for - // example the x-forwarder-for header, proxy protocol, etc. - api.v2.core.Address downstream_remote_address = 2; - - // This field is the local/destination address on which the request from the user was received. - api.v2.core.Address downstream_local_address = 3; - - // If the connection is secure,S this field will contain TLS properties. - TLSProperties tls_properties = 4; - - // The time that Envoy started servicing this request. This is effectively the time that the first - // downstream byte is received. - google.protobuf.Timestamp start_time = 5; - - // Interval between the first downstream byte received and the last - // downstream byte received (i.e. time it takes to receive a request). - google.protobuf.Duration time_to_last_rx_byte = 6; - - // Interval between the first downstream byte received and the first upstream byte sent. There may - // by considerable delta between *time_to_last_rx_byte* and this value due to filters. - // Additionally, the same caveats apply as documented in *time_to_last_downstream_tx_byte* about - // not accounting for kernel socket buffer time, etc. - google.protobuf.Duration time_to_first_upstream_tx_byte = 7; - - // Interval between the first downstream byte received and the last upstream byte sent. There may - // by considerable delta between *time_to_last_rx_byte* and this value due to filters. - // Additionally, the same caveats apply as documented in *time_to_last_downstream_tx_byte* about - // not accounting for kernel socket buffer time, etc. - google.protobuf.Duration time_to_last_upstream_tx_byte = 8; - - // Interval between the first downstream byte received and the first upstream - // byte received (i.e. time it takes to start receiving a response). - google.protobuf.Duration time_to_first_upstream_rx_byte = 9; - - // Interval between the first downstream byte received and the last upstream - // byte received (i.e. time it takes to receive a complete response). 
- google.protobuf.Duration time_to_last_upstream_rx_byte = 10; - - // Interval between the first downstream byte received and the first downstream byte sent. - // There may be a considerable delta between the *time_to_first_upstream_rx_byte* and this field - // due to filters. Additionally, the same caveats apply as documented in - // *time_to_last_downstream_tx_byte* about not accounting for kernel socket buffer time, etc. - google.protobuf.Duration time_to_first_downstream_tx_byte = 11; - - // Interval between the first downstream byte received and the last downstream byte sent. - // Depending on protocol, buffering, windowing, filters, etc. there may be a considerable delta - // between *time_to_last_upstream_rx_byte* and this field. Note also that this is an approximate - // time. In the current implementation it does not include kernel socket buffer time. In the - // current implementation it also does not include send window buffering inside the HTTP/2 codec. - // In the future it is likely that work will be done to make this duration more accurate. - google.protobuf.Duration time_to_last_downstream_tx_byte = 12; - - // The upstream remote/destination address that handles this exchange. This does not include - // retries. - api.v2.core.Address upstream_remote_address = 13; - - // The upstream local/origin address that handles this exchange. This does not include retries. - api.v2.core.Address upstream_local_address = 14; - - // The upstream cluster that *upstream_remote_address* belongs to. - string upstream_cluster = 15; - - // Flags indicating occurrences during request/response processing. - ResponseFlags response_flags = 16; - - // All metadata encountered during request processing, including endpoint - // selection. - // - // This can be used to associate IDs attached to the various configurations - // used to process this request with the access log entry. 
For example, a - // route created from a higher level forwarding rule with some ID can place - // that ID in this field and cross reference later. It can also be used to - // determine if a canary endpoint was used or not. - api.v2.core.Metadata metadata = 17; - - // If upstream connection failed due to transport socket (e.g. TLS handshake), provides the - // failure reason from the transport socket. The format of this field depends on the configured - // upstream transport socket. Common TLS failures are in - // :ref:`TLS trouble shooting `. - string upstream_transport_failure_reason = 18; - - // The name of the route - string route_name = 19; - - // This field is the downstream direct remote address on which the request from the user was - // received. Note: This is always the physical peer, even if the remote address is inferred from - // for example the x-forwarder-for header, proxy protocol, etc. - api.v2.core.Address downstream_direct_remote_address = 20; - - // Map of filter state in stream info that have been configured to be logged. If the filter - // state serialized to any message other than `google.protobuf.Any` it will be packed into - // `google.protobuf.Any`. - map filter_state_objects = 21; -} - -// Flags indicating occurrences during request/response processing. -// [#next-free-field: 20] -message ResponseFlags { - message Unauthorized { - // Reasons why the request was unauthorized - enum Reason { - REASON_UNSPECIFIED = 0; - - // The request was denied by the external authorization service. - EXTERNAL_SERVICE = 1; - } - - Reason reason = 1; - } - - // Indicates local server healthcheck failed. - bool failed_local_healthcheck = 1; - - // Indicates there was no healthy upstream. - bool no_healthy_upstream = 2; - - // Indicates an there was an upstream request timeout. - bool upstream_request_timeout = 3; - - // Indicates local codec level reset was sent on the stream. 
- bool local_reset = 4; - - // Indicates remote codec level reset was received on the stream. - bool upstream_remote_reset = 5; - - // Indicates there was a local reset by a connection pool due to an initial connection failure. - bool upstream_connection_failure = 6; - - // Indicates the stream was reset due to an upstream connection termination. - bool upstream_connection_termination = 7; - - // Indicates the stream was reset because of a resource overflow. - bool upstream_overflow = 8; - - // Indicates no route was found for the request. - bool no_route_found = 9; - - // Indicates that the request was delayed before proxying. - bool delay_injected = 10; - - // Indicates that the request was aborted with an injected error code. - bool fault_injected = 11; - - // Indicates that the request was rate-limited locally. - bool rate_limited = 12; - - // Indicates if the request was deemed unauthorized and the reason for it. - Unauthorized unauthorized_details = 13; - - // Indicates that the request was rejected because there was an error in rate limit service. - bool rate_limit_service_error = 14; - - // Indicates the stream was reset due to a downstream connection termination. - bool downstream_connection_termination = 15; - - // Indicates that the upstream retry limit was exceeded, resulting in a downstream error. - bool upstream_retry_limit_exceeded = 16; - - // Indicates that the stream idle timeout was hit, resulting in a downstream 408. - bool stream_idle_timeout = 17; - - // Indicates that the request was rejected because an envoy request header failed strict - // validation. - bool invalid_envoy_request_headers = 18; - - // Indicates there was an HTTP protocol error on the downstream request. - bool downstream_protocol_error = 19; -} - -// Properties of a negotiated TLS connection. 
-// [#next-free-field: 7] -message TLSProperties { - enum TLSVersion { - VERSION_UNSPECIFIED = 0; - TLSv1 = 1; - TLSv1_1 = 2; - TLSv1_2 = 3; - TLSv1_3 = 4; - } - - message CertificateProperties { - message SubjectAltName { - oneof san { - string uri = 1; - - // [#not-implemented-hide:] - string dns = 2; - } - } - - // SANs present in the certificate. - repeated SubjectAltName subject_alt_name = 1; - - // The subject field of the certificate. - string subject = 2; - } - - // Version of TLS that was negotiated. - TLSVersion tls_version = 1; - - // TLS cipher suite negotiated during handshake. The value is a - // four-digit hex code defined by the IANA TLS Cipher Suite Registry - // (e.g. ``009C`` for ``TLS_RSA_WITH_AES_128_GCM_SHA256``). - // - // Here it is expressed as an integer. - google.protobuf.UInt32Value tls_cipher_suite = 2; - - // SNI hostname from handshake. - string tls_sni_hostname = 3; - - // Properties of the local certificate used to negotiate TLS. - CertificateProperties local_certificate_properties = 4; - - // Properties of the peer certificate used to negotiate TLS. - CertificateProperties peer_certificate_properties = 5; - - // The TLS session ID. - string tls_session_id = 6; -} - -// [#next-free-field: 14] -message HTTPRequestProperties { - // The request method (RFC 7231/2616). - api.v2.core.RequestMethod request_method = 1 [(validate.rules).enum = {defined_only: true}]; - - // The scheme portion of the incoming request URI. - string scheme = 2; - - // HTTP/2 ``:authority`` or HTTP/1.1 ``Host`` header value. - string authority = 3; - - // The port of the incoming request URI - // (unused currently, as port is composed onto authority). - google.protobuf.UInt32Value port = 4; - - // The path portion from the incoming request URI. - string path = 5; - - // Value of the ``User-Agent`` request header. - string user_agent = 6; - - // Value of the ``Referer`` request header. - string referer = 7; - - // Value of the ``X-Forwarded-For`` request header. 
- string forwarded_for = 8; - - // Value of the ``X-Request-Id`` request header - // - // This header is used by Envoy to uniquely identify a request. - // It will be generated for all external requests and internal requests that - // do not already have a request ID. - string request_id = 9; - - // Value of the ``X-Envoy-Original-Path`` request header. - string original_path = 10; - - // Size of the HTTP request headers in bytes. - // - // This value is captured from the OSI layer 7 perspective, i.e. it does not - // include overhead from framing or encoding at other networking layers. - uint64 request_headers_bytes = 11; - - // Size of the HTTP request body in bytes. - // - // This value is captured from the OSI layer 7 perspective, i.e. it does not - // include overhead from framing or encoding at other networking layers. - uint64 request_body_bytes = 12; - - // Map of additional headers that have been configured to be logged. - map request_headers = 13; -} - -// [#next-free-field: 7] -message HTTPResponseProperties { - // The HTTP response code returned by Envoy. - google.protobuf.UInt32Value response_code = 1; - - // Size of the HTTP response headers in bytes. - // - // This value is captured from the OSI layer 7 perspective, i.e. it does not - // include overhead from framing or encoding at other networking layers. - uint64 response_headers_bytes = 2; - - // Size of the HTTP response body in bytes. - // - // This value is captured from the OSI layer 7 perspective, i.e. it does not - // include overhead from framing or encoding at other networking layers. - uint64 response_body_bytes = 3; - - // Map of additional headers configured to be logged. - map response_headers = 4; - - // Map of trailers configured to be logged. - map response_trailers = 5; - - // The HTTP response code details. 
- string response_code_details = 6; -} diff --git a/generated_api_shadow/envoy/data/accesslog/v3/BUILD b/generated_api_shadow/envoy/data/accesslog/v3/BUILD deleted file mode 100644 index 9065b1b5c331e..0000000000000 --- a/generated_api_shadow/envoy/data/accesslog/v3/BUILD +++ /dev/null @@ -1,13 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/core/v3:pkg", - "//envoy/data/accesslog/v2:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/data/accesslog/v3/accesslog.proto b/generated_api_shadow/envoy/data/accesslog/v3/accesslog.proto deleted file mode 100644 index c53ae0d6ab852..0000000000000 --- a/generated_api_shadow/envoy/data/accesslog/v3/accesslog.proto +++ /dev/null @@ -1,433 +0,0 @@ -syntax = "proto3"; - -package envoy.data.accesslog.v3; - -import "envoy/config/core/v3/address.proto"; -import "envoy/config/core/v3/base.proto"; - -import "google/protobuf/any.proto"; -import "google/protobuf/duration.proto"; -import "google/protobuf/timestamp.proto"; -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.data.accesslog.v3"; -option java_outer_classname = "AccesslogProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: gRPC access logs] -// Envoy access logs describe incoming interaction with Envoy over a fixed -// period of time, and typically cover a single request/response exchange, -// (e.g. HTTP), stream (e.g. over HTTP/gRPC), or proxied connection (e.g. TCP). -// Access logs contain fields defined in protocol-specific protobuf messages. 
-// -// Except where explicitly declared otherwise, all fields describe -// *downstream* interaction between Envoy and a connected client. -// Fields describing *upstream* interaction will explicitly include ``upstream`` -// in their name. - -message TCPAccessLogEntry { - option (udpa.annotations.versioning).previous_message_type = - "envoy.data.accesslog.v2.TCPAccessLogEntry"; - - // Common properties shared by all Envoy access logs. - AccessLogCommon common_properties = 1; - - // Properties of the TCP connection. - ConnectionProperties connection_properties = 2; -} - -message HTTPAccessLogEntry { - option (udpa.annotations.versioning).previous_message_type = - "envoy.data.accesslog.v2.HTTPAccessLogEntry"; - - // HTTP version - enum HTTPVersion { - PROTOCOL_UNSPECIFIED = 0; - HTTP10 = 1; - HTTP11 = 2; - HTTP2 = 3; - HTTP3 = 4; - } - - // Common properties shared by all Envoy access logs. - AccessLogCommon common_properties = 1; - - HTTPVersion protocol_version = 2; - - // Description of the incoming HTTP request. - HTTPRequestProperties request = 3; - - // Description of the outgoing HTTP response. - HTTPResponseProperties response = 4; -} - -// Defines fields for a connection -message ConnectionProperties { - option (udpa.annotations.versioning).previous_message_type = - "envoy.data.accesslog.v2.ConnectionProperties"; - - // Number of bytes received from downstream. - uint64 received_bytes = 1; - - // Number of bytes sent to downstream. - uint64 sent_bytes = 2; -} - -// Defines fields that are shared by all Envoy access logs. -// [#next-free-field: 22] -message AccessLogCommon { - option (udpa.annotations.versioning).previous_message_type = - "envoy.data.accesslog.v2.AccessLogCommon"; - - // [#not-implemented-hide:] - // This field indicates the rate at which this log entry was sampled. - // Valid range is (0.0, 1.0]. 
- double sample_rate = 1 [(validate.rules).double = {lte: 1.0 gt: 0.0}]; - - // This field is the remote/origin address on which the request from the user was received. - // Note: This may not be the physical peer. E.g, if the remote address is inferred from for - // example the x-forwarder-for header, proxy protocol, etc. - config.core.v3.Address downstream_remote_address = 2; - - // This field is the local/destination address on which the request from the user was received. - config.core.v3.Address downstream_local_address = 3; - - // If the connection is secure,S this field will contain TLS properties. - TLSProperties tls_properties = 4; - - // The time that Envoy started servicing this request. This is effectively the time that the first - // downstream byte is received. - google.protobuf.Timestamp start_time = 5; - - // Interval between the first downstream byte received and the last - // downstream byte received (i.e. time it takes to receive a request). - google.protobuf.Duration time_to_last_rx_byte = 6; - - // Interval between the first downstream byte received and the first upstream byte sent. There may - // by considerable delta between *time_to_last_rx_byte* and this value due to filters. - // Additionally, the same caveats apply as documented in *time_to_last_downstream_tx_byte* about - // not accounting for kernel socket buffer time, etc. - google.protobuf.Duration time_to_first_upstream_tx_byte = 7; - - // Interval between the first downstream byte received and the last upstream byte sent. There may - // by considerable delta between *time_to_last_rx_byte* and this value due to filters. - // Additionally, the same caveats apply as documented in *time_to_last_downstream_tx_byte* about - // not accounting for kernel socket buffer time, etc. - google.protobuf.Duration time_to_last_upstream_tx_byte = 8; - - // Interval between the first downstream byte received and the first upstream - // byte received (i.e. time it takes to start receiving a response). 
- google.protobuf.Duration time_to_first_upstream_rx_byte = 9; - - // Interval between the first downstream byte received and the last upstream - // byte received (i.e. time it takes to receive a complete response). - google.protobuf.Duration time_to_last_upstream_rx_byte = 10; - - // Interval between the first downstream byte received and the first downstream byte sent. - // There may be a considerable delta between the *time_to_first_upstream_rx_byte* and this field - // due to filters. Additionally, the same caveats apply as documented in - // *time_to_last_downstream_tx_byte* about not accounting for kernel socket buffer time, etc. - google.protobuf.Duration time_to_first_downstream_tx_byte = 11; - - // Interval between the first downstream byte received and the last downstream byte sent. - // Depending on protocol, buffering, windowing, filters, etc. there may be a considerable delta - // between *time_to_last_upstream_rx_byte* and this field. Note also that this is an approximate - // time. In the current implementation it does not include kernel socket buffer time. In the - // current implementation it also does not include send window buffering inside the HTTP/2 codec. - // In the future it is likely that work will be done to make this duration more accurate. - google.protobuf.Duration time_to_last_downstream_tx_byte = 12; - - // The upstream remote/destination address that handles this exchange. This does not include - // retries. - config.core.v3.Address upstream_remote_address = 13; - - // The upstream local/origin address that handles this exchange. This does not include retries. - config.core.v3.Address upstream_local_address = 14; - - // The upstream cluster that *upstream_remote_address* belongs to. - string upstream_cluster = 15; - - // Flags indicating occurrences during request/response processing. - ResponseFlags response_flags = 16; - - // All metadata encountered during request processing, including endpoint - // selection. 
- // - // This can be used to associate IDs attached to the various configurations - // used to process this request with the access log entry. For example, a - // route created from a higher level forwarding rule with some ID can place - // that ID in this field and cross reference later. It can also be used to - // determine if a canary endpoint was used or not. - config.core.v3.Metadata metadata = 17; - - // If upstream connection failed due to transport socket (e.g. TLS handshake), provides the - // failure reason from the transport socket. The format of this field depends on the configured - // upstream transport socket. Common TLS failures are in - // :ref:`TLS trouble shooting `. - string upstream_transport_failure_reason = 18; - - // The name of the route - string route_name = 19; - - // This field is the downstream direct remote address on which the request from the user was - // received. Note: This is always the physical peer, even if the remote address is inferred from - // for example the x-forwarder-for header, proxy protocol, etc. - config.core.v3.Address downstream_direct_remote_address = 20; - - // Map of filter state in stream info that have been configured to be logged. If the filter - // state serialized to any message other than `google.protobuf.Any` it will be packed into - // `google.protobuf.Any`. - map filter_state_objects = 21; -} - -// Flags indicating occurrences during request/response processing. -// [#next-free-field: 27] -message ResponseFlags { - option (udpa.annotations.versioning).previous_message_type = - "envoy.data.accesslog.v2.ResponseFlags"; - - message Unauthorized { - option (udpa.annotations.versioning).previous_message_type = - "envoy.data.accesslog.v2.ResponseFlags.Unauthorized"; - - // Reasons why the request was unauthorized - enum Reason { - REASON_UNSPECIFIED = 0; - - // The request was denied by the external authorization service. 
- EXTERNAL_SERVICE = 1; - } - - Reason reason = 1; - } - - // Indicates local server healthcheck failed. - bool failed_local_healthcheck = 1; - - // Indicates there was no healthy upstream. - bool no_healthy_upstream = 2; - - // Indicates an there was an upstream request timeout. - bool upstream_request_timeout = 3; - - // Indicates local codec level reset was sent on the stream. - bool local_reset = 4; - - // Indicates remote codec level reset was received on the stream. - bool upstream_remote_reset = 5; - - // Indicates there was a local reset by a connection pool due to an initial connection failure. - bool upstream_connection_failure = 6; - - // Indicates the stream was reset due to an upstream connection termination. - bool upstream_connection_termination = 7; - - // Indicates the stream was reset because of a resource overflow. - bool upstream_overflow = 8; - - // Indicates no route was found for the request. - bool no_route_found = 9; - - // Indicates that the request was delayed before proxying. - bool delay_injected = 10; - - // Indicates that the request was aborted with an injected error code. - bool fault_injected = 11; - - // Indicates that the request was rate-limited locally. - bool rate_limited = 12; - - // Indicates if the request was deemed unauthorized and the reason for it. - Unauthorized unauthorized_details = 13; - - // Indicates that the request was rejected because there was an error in rate limit service. - bool rate_limit_service_error = 14; - - // Indicates the stream was reset due to a downstream connection termination. - bool downstream_connection_termination = 15; - - // Indicates that the upstream retry limit was exceeded, resulting in a downstream error. - bool upstream_retry_limit_exceeded = 16; - - // Indicates that the stream idle timeout was hit, resulting in a downstream 408. - bool stream_idle_timeout = 17; - - // Indicates that the request was rejected because an envoy request header failed strict - // validation. 
- bool invalid_envoy_request_headers = 18; - - // Indicates there was an HTTP protocol error on the downstream request. - bool downstream_protocol_error = 19; - - // Indicates there was a max stream duration reached on the upstream request. - bool upstream_max_stream_duration_reached = 20; - - // Indicates the response was served from a cache filter. - bool response_from_cache_filter = 21; - - // Indicates that a filter configuration is not available. - bool no_filter_config_found = 22; - - // Indicates that request or connection exceeded the downstream connection duration. - bool duration_timeout = 23; - - // Indicates there was an HTTP protocol error in the upstream response. - bool upstream_protocol_error = 24; - - // Indicates no cluster was found for the request. - bool no_cluster_found = 25; - - // Indicates overload manager terminated the request. - bool overload_manager = 26; -} - -// Properties of a negotiated TLS connection. -// [#next-free-field: 7] -message TLSProperties { - option (udpa.annotations.versioning).previous_message_type = - "envoy.data.accesslog.v2.TLSProperties"; - - enum TLSVersion { - VERSION_UNSPECIFIED = 0; - TLSv1 = 1; - TLSv1_1 = 2; - TLSv1_2 = 3; - TLSv1_3 = 4; - } - - message CertificateProperties { - option (udpa.annotations.versioning).previous_message_type = - "envoy.data.accesslog.v2.TLSProperties.CertificateProperties"; - - message SubjectAltName { - option (udpa.annotations.versioning).previous_message_type = - "envoy.data.accesslog.v2.TLSProperties.CertificateProperties.SubjectAltName"; - - oneof san { - string uri = 1; - - // [#not-implemented-hide:] - string dns = 2; - } - } - - // SANs present in the certificate. - repeated SubjectAltName subject_alt_name = 1; - - // The subject field of the certificate. - string subject = 2; - } - - // Version of TLS that was negotiated. - TLSVersion tls_version = 1; - - // TLS cipher suite negotiated during handshake. 
The value is a - // four-digit hex code defined by the IANA TLS Cipher Suite Registry - // (e.g. ``009C`` for ``TLS_RSA_WITH_AES_128_GCM_SHA256``). - // - // Here it is expressed as an integer. - google.protobuf.UInt32Value tls_cipher_suite = 2; - - // SNI hostname from handshake. - string tls_sni_hostname = 3; - - // Properties of the local certificate used to negotiate TLS. - CertificateProperties local_certificate_properties = 4; - - // Properties of the peer certificate used to negotiate TLS. - CertificateProperties peer_certificate_properties = 5; - - // The TLS session ID. - string tls_session_id = 6; -} - -// [#next-free-field: 14] -message HTTPRequestProperties { - option (udpa.annotations.versioning).previous_message_type = - "envoy.data.accesslog.v2.HTTPRequestProperties"; - - // The request method (RFC 7231/2616). - config.core.v3.RequestMethod request_method = 1 [(validate.rules).enum = {defined_only: true}]; - - // The scheme portion of the incoming request URI. - string scheme = 2; - - // HTTP/2 ``:authority`` or HTTP/1.1 ``Host`` header value. - string authority = 3; - - // The port of the incoming request URI - // (unused currently, as port is composed onto authority). - google.protobuf.UInt32Value port = 4; - - // The path portion from the incoming request URI. - string path = 5; - - // Value of the ``User-Agent`` request header. - string user_agent = 6; - - // Value of the ``Referer`` request header. - string referer = 7; - - // Value of the ``X-Forwarded-For`` request header. - string forwarded_for = 8; - - // Value of the ``X-Request-Id`` request header - // - // This header is used by Envoy to uniquely identify a request. - // It will be generated for all external requests and internal requests that - // do not already have a request ID. - string request_id = 9; - - // Value of the ``X-Envoy-Original-Path`` request header. - string original_path = 10; - - // Size of the HTTP request headers in bytes. 
- // - // This value is captured from the OSI layer 7 perspective, i.e. it does not - // include overhead from framing or encoding at other networking layers. - uint64 request_headers_bytes = 11; - - // Size of the HTTP request body in bytes. - // - // This value is captured from the OSI layer 7 perspective, i.e. it does not - // include overhead from framing or encoding at other networking layers. - uint64 request_body_bytes = 12; - - // Map of additional headers that have been configured to be logged. - map request_headers = 13; -} - -// [#next-free-field: 7] -message HTTPResponseProperties { - option (udpa.annotations.versioning).previous_message_type = - "envoy.data.accesslog.v2.HTTPResponseProperties"; - - // The HTTP response code returned by Envoy. - google.protobuf.UInt32Value response_code = 1; - - // Size of the HTTP response headers in bytes. - // - // This value is captured from the OSI layer 7 perspective, i.e. it does not - // include overhead from framing or encoding at other networking layers. - uint64 response_headers_bytes = 2; - - // Size of the HTTP response body in bytes. - // - // This value is captured from the OSI layer 7 perspective, i.e. it does not - // include overhead from framing or encoding at other networking layers. - uint64 response_body_bytes = 3; - - // Map of additional headers configured to be logged. - map response_headers = 4; - - // Map of trailers configured to be logged. - map response_trailers = 5; - - // The HTTP response code details. - string response_code_details = 6; -} diff --git a/generated_api_shadow/envoy/data/cluster/v2alpha/BUILD b/generated_api_shadow/envoy/data/cluster/v2alpha/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/data/cluster/v2alpha/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/data/cluster/v2alpha/outlier_detection_event.proto b/generated_api_shadow/envoy/data/cluster/v2alpha/outlier_detection_event.proto deleted file mode 100644 index 3ea8bc2597fd8..0000000000000 --- a/generated_api_shadow/envoy/data/cluster/v2alpha/outlier_detection_event.proto +++ /dev/null @@ -1,135 +0,0 @@ -syntax = "proto3"; - -package envoy.data.cluster.v2alpha; - -import "google/protobuf/timestamp.proto"; -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.data.cluster.v2alpha"; -option java_outer_classname = "OutlierDetectionEventProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = "envoy.data.cluster.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Outlier detection logging events] -// :ref:`Outlier detection logging `. - -// Type of ejection that took place -enum OutlierEjectionType { - // In case upstream host returns certain number of consecutive 5xx. - // If - // :ref:`outlier_detection.split_external_local_origin_errors` - // is *false*, all type of errors are treated as HTTP 5xx errors. - // See :ref:`Cluster outlier detection ` documentation for - // details. - CONSECUTIVE_5XX = 0; - - // In case upstream host returns certain number of consecutive gateway errors - CONSECUTIVE_GATEWAY_FAILURE = 1; - - // Runs over aggregated success rate statistics from every host in cluster - // and selects hosts for which ratio of successful replies deviates from other hosts - // in the cluster. 
- // If - // :ref:`outlier_detection.split_external_local_origin_errors` - // is *false*, all errors (externally and locally generated) are used to calculate success rate - // statistics. See :ref:`Cluster outlier detection ` - // documentation for details. - SUCCESS_RATE = 2; - - // Consecutive local origin failures: Connection failures, resets, timeouts, etc - // This type of ejection happens only when - // :ref:`outlier_detection.split_external_local_origin_errors` - // is set to *true*. - // See :ref:`Cluster outlier detection ` documentation for - CONSECUTIVE_LOCAL_ORIGIN_FAILURE = 3; - - // Runs over aggregated success rate statistics for local origin failures - // for all hosts in the cluster and selects hosts for which success rate deviates from other - // hosts in the cluster. This type of ejection happens only when - // :ref:`outlier_detection.split_external_local_origin_errors` - // is set to *true*. - // See :ref:`Cluster outlier detection ` documentation for - SUCCESS_RATE_LOCAL_ORIGIN = 4; - - // Runs over aggregated success rate statistics from every host in cluster and selects hosts for - // which ratio of failed replies is above configured value. - FAILURE_PERCENTAGE = 5; - - // Runs over aggregated success rate statistics for local origin failures from every host in - // cluster and selects hosts for which ratio of failed replies is above configured value. - FAILURE_PERCENTAGE_LOCAL_ORIGIN = 6; -} - -// Represents possible action applied to upstream host -enum Action { - // In case host was excluded from service - EJECT = 0; - - // In case host was brought back into service - UNEJECT = 1; -} - -// [#next-free-field: 12] -message OutlierDetectionEvent { - // In case of eject represents type of ejection that took place. - OutlierEjectionType type = 1 [(validate.rules).enum = {defined_only: true}]; - - // Timestamp for event. 
- google.protobuf.Timestamp timestamp = 2; - - // The time in seconds since the last action (either an ejection or unejection) took place. - google.protobuf.UInt64Value secs_since_last_action = 3; - - // The :ref:`cluster ` that owns the ejected host. - string cluster_name = 4 [(validate.rules).string = {min_bytes: 1}]; - - // The URL of the ejected host. E.g., ``tcp://1.2.3.4:80``. - string upstream_url = 5 [(validate.rules).string = {min_bytes: 1}]; - - // The action that took place. - Action action = 6 [(validate.rules).enum = {defined_only: true}]; - - // If ``action`` is ``eject``, specifies the number of times the host has been ejected (local to - // that Envoy and gets reset if the host gets removed from the upstream cluster for any reason and - // then re-added). - uint32 num_ejections = 7; - - // If ``action`` is ``eject``, specifies if the ejection was enforced. ``true`` means the host was - // ejected. ``false`` means the event was logged but the host was not actually ejected. - bool enforced = 8; - - oneof event { - option (validate.required) = true; - - OutlierEjectSuccessRate eject_success_rate_event = 9; - - OutlierEjectConsecutive eject_consecutive_event = 10; - - OutlierEjectFailurePercentage eject_failure_percentage_event = 11; - } -} - -message OutlierEjectSuccessRate { - // Host’s success rate at the time of the ejection event on a 0-100 range. - uint32 host_success_rate = 1 [(validate.rules).uint32 = {lte: 100}]; - - // Average success rate of the hosts in the cluster at the time of the ejection event on a 0-100 - // range. - uint32 cluster_average_success_rate = 2 [(validate.rules).uint32 = {lte: 100}]; - - // Success rate ejection threshold at the time of the ejection event. - uint32 cluster_success_rate_ejection_threshold = 3 [(validate.rules).uint32 = {lte: 100}]; -} - -message OutlierEjectConsecutive { -} - -message OutlierEjectFailurePercentage { - // Host's success rate at the time of the ejection event on a 0-100 range. 
- uint32 host_success_rate = 1 [(validate.rules).uint32 = {lte: 100}]; -} diff --git a/generated_api_shadow/envoy/data/cluster/v3/BUILD b/generated_api_shadow/envoy/data/cluster/v3/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/data/cluster/v3/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/data/cluster/v3/outlier_detection_event.proto b/generated_api_shadow/envoy/data/cluster/v3/outlier_detection_event.proto deleted file mode 100644 index 2ba29d89954bb..0000000000000 --- a/generated_api_shadow/envoy/data/cluster/v3/outlier_detection_event.proto +++ /dev/null @@ -1,145 +0,0 @@ -syntax = "proto3"; - -package envoy.data.cluster.v3; - -import "google/protobuf/timestamp.proto"; -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.data.cluster.v3"; -option java_outer_classname = "OutlierDetectionEventProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Outlier detection logging events] -// :ref:`Outlier detection logging `. - -// Type of ejection that took place -enum OutlierEjectionType { - // In case upstream host returns certain number of consecutive 5xx. - // If - // :ref:`outlier_detection.split_external_local_origin_errors` - // is *false*, all type of errors are treated as HTTP 5xx errors. - // See :ref:`Cluster outlier detection ` documentation for - // details. 
- CONSECUTIVE_5XX = 0; - - // In case upstream host returns certain number of consecutive gateway errors - CONSECUTIVE_GATEWAY_FAILURE = 1; - - // Runs over aggregated success rate statistics from every host in cluster - // and selects hosts for which ratio of successful replies deviates from other hosts - // in the cluster. - // If - // :ref:`outlier_detection.split_external_local_origin_errors` - // is *false*, all errors (externally and locally generated) are used to calculate success rate - // statistics. See :ref:`Cluster outlier detection ` - // documentation for details. - SUCCESS_RATE = 2; - - // Consecutive local origin failures: Connection failures, resets, timeouts, etc - // This type of ejection happens only when - // :ref:`outlier_detection.split_external_local_origin_errors` - // is set to *true*. - // See :ref:`Cluster outlier detection ` documentation for - CONSECUTIVE_LOCAL_ORIGIN_FAILURE = 3; - - // Runs over aggregated success rate statistics for local origin failures - // for all hosts in the cluster and selects hosts for which success rate deviates from other - // hosts in the cluster. This type of ejection happens only when - // :ref:`outlier_detection.split_external_local_origin_errors` - // is set to *true*. - // See :ref:`Cluster outlier detection ` documentation for - SUCCESS_RATE_LOCAL_ORIGIN = 4; - - // Runs over aggregated success rate statistics from every host in cluster and selects hosts for - // which ratio of failed replies is above configured value. - FAILURE_PERCENTAGE = 5; - - // Runs over aggregated success rate statistics for local origin failures from every host in - // cluster and selects hosts for which ratio of failed replies is above configured value. 
- FAILURE_PERCENTAGE_LOCAL_ORIGIN = 6; -} - -// Represents possible action applied to upstream host -enum Action { - // In case host was excluded from service - EJECT = 0; - - // In case host was brought back into service - UNEJECT = 1; -} - -// [#next-free-field: 12] -message OutlierDetectionEvent { - option (udpa.annotations.versioning).previous_message_type = - "envoy.data.cluster.v2alpha.OutlierDetectionEvent"; - - // In case of eject represents type of ejection that took place. - OutlierEjectionType type = 1 [(validate.rules).enum = {defined_only: true}]; - - // Timestamp for event. - google.protobuf.Timestamp timestamp = 2; - - // The time in seconds since the last action (either an ejection or unejection) took place. - google.protobuf.UInt64Value secs_since_last_action = 3; - - // The :ref:`cluster ` that owns the ejected host. - string cluster_name = 4 [(validate.rules).string = {min_len: 1}]; - - // The URL of the ejected host. E.g., ``tcp://1.2.3.4:80``. - string upstream_url = 5 [(validate.rules).string = {min_len: 1}]; - - // The action that took place. - Action action = 6 [(validate.rules).enum = {defined_only: true}]; - - // If ``action`` is ``eject``, specifies the number of times the host has been ejected (local to - // that Envoy and gets reset if the host gets removed from the upstream cluster for any reason and - // then re-added). - uint32 num_ejections = 7; - - // If ``action`` is ``eject``, specifies if the ejection was enforced. ``true`` means the host was - // ejected. ``false`` means the event was logged but the host was not actually ejected. 
- bool enforced = 8; - - oneof event { - option (validate.required) = true; - - OutlierEjectSuccessRate eject_success_rate_event = 9; - - OutlierEjectConsecutive eject_consecutive_event = 10; - - OutlierEjectFailurePercentage eject_failure_percentage_event = 11; - } -} - -message OutlierEjectSuccessRate { - option (udpa.annotations.versioning).previous_message_type = - "envoy.data.cluster.v2alpha.OutlierEjectSuccessRate"; - - // Host’s success rate at the time of the ejection event on a 0-100 range. - uint32 host_success_rate = 1 [(validate.rules).uint32 = {lte: 100}]; - - // Average success rate of the hosts in the cluster at the time of the ejection event on a 0-100 - // range. - uint32 cluster_average_success_rate = 2 [(validate.rules).uint32 = {lte: 100}]; - - // Success rate ejection threshold at the time of the ejection event. - uint32 cluster_success_rate_ejection_threshold = 3 [(validate.rules).uint32 = {lte: 100}]; -} - -message OutlierEjectConsecutive { - option (udpa.annotations.versioning).previous_message_type = - "envoy.data.cluster.v2alpha.OutlierEjectConsecutive"; -} - -message OutlierEjectFailurePercentage { - option (udpa.annotations.versioning).previous_message_type = - "envoy.data.cluster.v2alpha.OutlierEjectFailurePercentage"; - - // Host's success rate at the time of the ejection event on a 0-100 range. - uint32 host_success_rate = 1 [(validate.rules).uint32 = {lte: 100}]; -} diff --git a/generated_api_shadow/envoy/data/core/v2alpha/BUILD b/generated_api_shadow/envoy/data/core/v2alpha/BUILD deleted file mode 100644 index 83bc0ab960e74..0000000000000 --- a/generated_api_shadow/envoy/data/core/v2alpha/BUILD +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/api/v2/core:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/data/core/v2alpha/health_check_event.proto b/generated_api_shadow/envoy/data/core/v2alpha/health_check_event.proto deleted file mode 100644 index 00fd69fd42d3f..0000000000000 --- a/generated_api_shadow/envoy/data/core/v2alpha/health_check_event.proto +++ /dev/null @@ -1,88 +0,0 @@ -syntax = "proto3"; - -package envoy.data.core.v2alpha; - -import "envoy/api/v2/core/address.proto"; - -import "google/protobuf/timestamp.proto"; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.data.core.v2alpha"; -option java_outer_classname = "HealthCheckEventProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Health check logging events] -// :ref:`Health check logging `. - -enum HealthCheckFailureType { - ACTIVE = 0; - PASSIVE = 1; - NETWORK = 2; -} - -enum HealthCheckerType { - HTTP = 0; - TCP = 1; - GRPC = 2; - REDIS = 3; -} - -// [#next-free-field: 10] -message HealthCheckEvent { - HealthCheckerType health_checker_type = 1 [(validate.rules).enum = {defined_only: true}]; - - api.v2.core.Address host = 2; - - string cluster_name = 3 [(validate.rules).string = {min_bytes: 1}]; - - oneof event { - option (validate.required) = true; - - // Host ejection. - HealthCheckEjectUnhealthy eject_unhealthy_event = 4; - - // Host addition. - HealthCheckAddHealthy add_healthy_event = 5; - - // Host failure. - HealthCheckFailure health_check_failure_event = 7; - - // Healthy host became degraded. - DegradedHealthyHost degraded_healthy_host = 8; - - // A degraded host returned to being healthy. - NoLongerDegradedHost no_longer_degraded_host = 9; - } - - // Timestamp for event. 
- google.protobuf.Timestamp timestamp = 6; -} - -message HealthCheckEjectUnhealthy { - // The type of failure that caused this ejection. - HealthCheckFailureType failure_type = 1 [(validate.rules).enum = {defined_only: true}]; -} - -message HealthCheckAddHealthy { - // Whether this addition is the result of the first ever health check on a host, in which case - // the configured :ref:`healthy threshold ` - // is bypassed and the host is immediately added. - bool first_check = 1; -} - -message HealthCheckFailure { - // The type of failure that caused this event. - HealthCheckFailureType failure_type = 1 [(validate.rules).enum = {defined_only: true}]; - - // Whether this event is the result of the first ever health check on a host. - bool first_check = 2; -} - -message DegradedHealthyHost { -} - -message NoLongerDegradedHost { -} diff --git a/generated_api_shadow/envoy/data/core/v3/BUILD b/generated_api_shadow/envoy/data/core/v3/BUILD deleted file mode 100644 index 1c1a6f6b44235..0000000000000 --- a/generated_api_shadow/envoy/data/core/v3/BUILD +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/core/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/data/core/v3/health_check_event.proto b/generated_api_shadow/envoy/data/core/v3/health_check_event.proto deleted file mode 100644 index 92e2d68d255da..0000000000000 --- a/generated_api_shadow/envoy/data/core/v3/health_check_event.proto +++ /dev/null @@ -1,106 +0,0 @@ -syntax = "proto3"; - -package envoy.data.core.v3; - -import "envoy/config/core/v3/address.proto"; - -import "google/protobuf/timestamp.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.data.core.v3"; -option java_outer_classname = "HealthCheckEventProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Health check logging events] -// :ref:`Health check logging `. - -enum HealthCheckFailureType { - ACTIVE = 0; - PASSIVE = 1; - NETWORK = 2; - NETWORK_TIMEOUT = 3; -} - -enum HealthCheckerType { - HTTP = 0; - TCP = 1; - GRPC = 2; - REDIS = 3; -} - -// [#next-free-field: 10] -message HealthCheckEvent { - option (udpa.annotations.versioning).previous_message_type = - "envoy.data.core.v2alpha.HealthCheckEvent"; - - HealthCheckerType health_checker_type = 1 [(validate.rules).enum = {defined_only: true}]; - - config.core.v3.Address host = 2; - - string cluster_name = 3 [(validate.rules).string = {min_len: 1}]; - - oneof event { - option (validate.required) = true; - - // Host ejection. - HealthCheckEjectUnhealthy eject_unhealthy_event = 4; - - // Host addition. - HealthCheckAddHealthy add_healthy_event = 5; - - // Host failure. - HealthCheckFailure health_check_failure_event = 7; - - // Healthy host became degraded. 
- DegradedHealthyHost degraded_healthy_host = 8; - - // A degraded host returned to being healthy. - NoLongerDegradedHost no_longer_degraded_host = 9; - } - - // Timestamp for event. - google.protobuf.Timestamp timestamp = 6; -} - -message HealthCheckEjectUnhealthy { - option (udpa.annotations.versioning).previous_message_type = - "envoy.data.core.v2alpha.HealthCheckEjectUnhealthy"; - - // The type of failure that caused this ejection. - HealthCheckFailureType failure_type = 1 [(validate.rules).enum = {defined_only: true}]; -} - -message HealthCheckAddHealthy { - option (udpa.annotations.versioning).previous_message_type = - "envoy.data.core.v2alpha.HealthCheckAddHealthy"; - - // Whether this addition is the result of the first ever health check on a host, in which case - // the configured :ref:`healthy threshold ` - // is bypassed and the host is immediately added. - bool first_check = 1; -} - -message HealthCheckFailure { - option (udpa.annotations.versioning).previous_message_type = - "envoy.data.core.v2alpha.HealthCheckFailure"; - - // The type of failure that caused this event. - HealthCheckFailureType failure_type = 1 [(validate.rules).enum = {defined_only: true}]; - - // Whether this event is the result of the first ever health check on a host. - bool first_check = 2; -} - -message DegradedHealthyHost { - option (udpa.annotations.versioning).previous_message_type = - "envoy.data.core.v2alpha.DegradedHealthyHost"; -} - -message NoLongerDegradedHost { - option (udpa.annotations.versioning).previous_message_type = - "envoy.data.core.v2alpha.NoLongerDegradedHost"; -} diff --git a/generated_api_shadow/envoy/data/dns/v2alpha/BUILD b/generated_api_shadow/envoy/data/dns/v2alpha/BUILD deleted file mode 100644 index e305003238a56..0000000000000 --- a/generated_api_shadow/envoy/data/dns/v2alpha/BUILD +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/type/matcher:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/data/dns/v2alpha/dns_table.proto b/generated_api_shadow/envoy/data/dns/v2alpha/dns_table.proto deleted file mode 100644 index 7a9e535c4f3a2..0000000000000 --- a/generated_api_shadow/envoy/data/dns/v2alpha/dns_table.proto +++ /dev/null @@ -1,74 +0,0 @@ -syntax = "proto3"; - -package envoy.data.dns.v2alpha; - -import "envoy/type/matcher/string.proto"; - -import "google/protobuf/duration.proto"; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.data.dns.v2alpha"; -option java_outer_classname = "DnsTableProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).work_in_progress = true; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: DNS Filter Table Data] -// :ref:`DNS Filter config overview `. - -// This message contains the configuration for the DNS Filter if populated -// from the control plane -message DnsTable { - // This message contains a list of IP addresses returned for a query for a known name - message AddressList { - // This field contains a well formed IP address that is returned - // in the answer for a name query. The address field can be an - // IPv4 or IPv6 address. Address family detection is done automatically - // when Envoy parses the string. Since this field is repeated, - // Envoy will return one randomly chosen entry from this list in the - // DNS response. 
The random index will vary per query so that we prevent - // clients pinning on a single address for a configured domain - repeated string address = 1 [(validate.rules).repeated = { - min_items: 1 - items {string {min_len: 3}} - }]; - } - - // This message type is extensible and can contain a list of addresses - // or dictate some other method for resolving the addresses for an - // endpoint - message DnsEndpoint { - oneof endpoint_config { - option (validate.required) = true; - - AddressList address_list = 1; - } - } - - message DnsVirtualDomain { - // The domain name for which Envoy will respond to query requests - string name = 1 [(validate.rules).string = {min_len: 2 well_known_regex: HTTP_HEADER_NAME}]; - - // The configuration containing the method to determine the address - // of this endpoint - DnsEndpoint endpoint = 2; - - // Sets the TTL in dns answers from Envoy returned to the client - google.protobuf.Duration answer_ttl = 3 [(validate.rules).duration = {gt {}}]; - } - - // Control how many times envoy makes an attempt to forward a query to - // an external server - uint32 external_retry_count = 1; - - // Fully qualified domain names for which Envoy will respond to queries - repeated DnsVirtualDomain virtual_domains = 2 [(validate.rules).repeated = {min_items: 1}]; - - // This field serves to help Envoy determine whether it can authoritatively - // answer a query for a name matching a suffix in this list. If the query - // name does not match a suffix in this list, Envoy will forward - // the query to an upstream DNS server - repeated type.matcher.StringMatcher known_suffixes = 3; -} diff --git a/generated_api_shadow/envoy/data/dns/v3/BUILD b/generated_api_shadow/envoy/data/dns/v3/BUILD deleted file mode 100644 index 516369f09675b..0000000000000 --- a/generated_api_shadow/envoy/data/dns/v3/BUILD +++ /dev/null @@ -1,13 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/annotations:pkg", - "//envoy/type/matcher/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/data/dns/v3/dns_table.proto b/generated_api_shadow/envoy/data/dns/v3/dns_table.proto deleted file mode 100644 index 5cc04440f700f..0000000000000 --- a/generated_api_shadow/envoy/data/dns/v3/dns_table.proto +++ /dev/null @@ -1,156 +0,0 @@ -syntax = "proto3"; - -package envoy.data.dns.v3; - -import "envoy/type/matcher/v3/string.proto"; - -import "google/protobuf/duration.proto"; - -import "envoy/annotations/deprecation.proto"; -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.data.dns.v3"; -option java_outer_classname = "DnsTableProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: DNS Filter Table Data] -// :ref:`DNS Filter config overview `. - -// This message contains the configuration for the DNS Filter if populated -// from the control plane -message DnsTable { - option (udpa.annotations.versioning).previous_message_type = "envoy.data.dns.v2alpha.DnsTable"; - - // This message contains a list of IP addresses returned for a query for a known name - message AddressList { - option (udpa.annotations.versioning).previous_message_type = - "envoy.data.dns.v2alpha.DnsTable.AddressList"; - - // This field contains a well formed IP address that is returned in the answer for a - // name query. The address field can be an IPv4 or IPv6 address. Address family - // detection is done automatically when Envoy parses the string. 
Since this field is - // repeated, Envoy will return as many entries from this list in the DNS response while - // keeping the response under 512 bytes - repeated string address = 1 [(validate.rules).repeated = { - min_items: 1 - items {string {min_len: 3}} - }]; - } - - // Specify the service protocol using a numeric or string value - message DnsServiceProtocol { - oneof protocol_config { - option (validate.required) = true; - - // Specify the protocol number for the service. Envoy will try to resolve the number to - // the protocol name. For example, 6 will resolve to "tcp". Refer to: - // https://www.iana.org/assignments/protocol-numbers/protocol-numbers.xhtml - // for protocol names and numbers - uint32 number = 1 [(validate.rules).uint32 = {lt: 255}]; - - // Specify the protocol name for the service. - string name = 2 [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME}]; - } - } - - // Specify the target for a given DNS service - // [#next-free-field: 6] - message DnsServiceTarget { - // Specify the name of the endpoint for the Service. The name is a hostname or a cluster - oneof endpoint_type { - option (validate.required) = true; - - // Use a resolvable hostname as the endpoint for a service. - string host_name = 1 - [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME}]; - - // Use a cluster name as the endpoint for a service. - string cluster_name = 2 - [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME}]; - } - - // The priority of the service record target - uint32 priority = 3 [(validate.rules).uint32 = {lt: 65536}]; - - // The weight of the service record target - uint32 weight = 4 [(validate.rules).uint32 = {lt: 65536}]; - - // The port to which the service is bound. This value is optional if the target is a - // cluster. 
Setting port to zero in this case makes the filter use the port value - // from the cluster host - uint32 port = 5 [(validate.rules).uint32 = {lt: 65536}]; - } - - // This message defines a service selection record returned for a service query in a domain - message DnsService { - // The name of the service without the protocol or domain name - string service_name = 1 - [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME}]; - - // The service protocol. This can be specified as a string or the numeric value of the protocol - DnsServiceProtocol protocol = 2; - - // The service entry time to live. This is independent from the DNS Answer record TTL - google.protobuf.Duration ttl = 3 [(validate.rules).duration = {gte {seconds: 1}}]; - - // The list of targets hosting the service - repeated DnsServiceTarget targets = 4 [(validate.rules).repeated = {min_items: 1}]; - } - - // Define a list of service records for a given service - message DnsServiceList { - repeated DnsService services = 1 [(validate.rules).repeated = {min_items: 1}]; - } - - message DnsEndpoint { - option (udpa.annotations.versioning).previous_message_type = - "envoy.data.dns.v2alpha.DnsTable.DnsEndpoint"; - - oneof endpoint_config { - option (validate.required) = true; - - // Define a list of addresses to return for the specified endpoint - AddressList address_list = 1; - - // Define a cluster whose addresses are returned for the specified endpoint - string cluster_name = 2; - - // Define a DNS Service List for the specified endpoint - DnsServiceList service_list = 3; - } - } - - message DnsVirtualDomain { - option (udpa.annotations.versioning).previous_message_type = - "envoy.data.dns.v2alpha.DnsTable.DnsVirtualDomain"; - - // A domain name for which Envoy will respond to query requests - string name = 1 [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME}]; - - // The configuration containing the method to determine the address of this endpoint - DnsEndpoint 
endpoint = 2; - - // Sets the TTL in DNS answers from Envoy returned to the client. The default TTL is 300s - google.protobuf.Duration answer_ttl = 3 [(validate.rules).duration = {gte {seconds: 30}}]; - } - - // Control how many times Envoy makes an attempt to forward a query to an external DNS server - uint32 external_retry_count = 1 [(validate.rules).uint32 = {lte: 3}]; - - // Fully qualified domain names for which Envoy will respond to DNS queries. By leaving this - // list empty, Envoy will forward all queries to external resolvers - repeated DnsVirtualDomain virtual_domains = 2; - - // This field is deprecated and no longer used in Envoy. The filter's behavior has changed - // internally to use a different data structure allowing the filter to determine whether a - // query is for known domain without the use of this field. - // - // This field serves to help Envoy determine whether it can authoritatively answer a query - // for a name matching a suffix in this list. If the query name does not match a suffix in - // this list, Envoy will forward the query to an upstream DNS server - repeated type.matcher.v3.StringMatcher known_suffixes = 3 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; -} diff --git a/generated_api_shadow/envoy/data/tap/v2alpha/BUILD b/generated_api_shadow/envoy/data/tap/v2alpha/BUILD deleted file mode 100644 index 83bc0ab960e74..0000000000000 --- a/generated_api_shadow/envoy/data/tap/v2alpha/BUILD +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/api/v2/core:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/data/tap/v2alpha/common.proto b/generated_api_shadow/envoy/data/tap/v2alpha/common.proto deleted file mode 100644 index 7c02aa7719542..0000000000000 --- a/generated_api_shadow/envoy/data/tap/v2alpha/common.proto +++ /dev/null @@ -1,34 +0,0 @@ -syntax = "proto3"; - -package envoy.data.tap.v2alpha; - -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.data.tap.v2alpha"; -option java_outer_classname = "CommonProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Tap common data] - -// Wrapper for tapped body data. This includes HTTP request/response body, transport socket received -// and transmitted data, etc. -message Body { - oneof body_type { - // Body data as bytes. By default, tap body data will be present in this field, as the proto - // `bytes` type can contain any valid byte. - bytes as_bytes = 1; - - // Body data as string. This field is only used when the :ref:`JSON_BODY_AS_STRING - // ` sink - // format type is selected. See the documentation for that option for why this is useful. - string as_string = 2; - } - - // Specifies whether body data has been truncated to fit within the specified - // :ref:`max_buffered_rx_bytes - // ` and - // :ref:`max_buffered_tx_bytes - // ` settings. 
- bool truncated = 3; -} diff --git a/generated_api_shadow/envoy/data/tap/v2alpha/http.proto b/generated_api_shadow/envoy/data/tap/v2alpha/http.proto deleted file mode 100644 index 60ea68b66d4ad..0000000000000 --- a/generated_api_shadow/envoy/data/tap/v2alpha/http.proto +++ /dev/null @@ -1,64 +0,0 @@ -syntax = "proto3"; - -package envoy.data.tap.v2alpha; - -import "envoy/api/v2/core/base.proto"; -import "envoy/data/tap/v2alpha/common.proto"; - -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.data.tap.v2alpha"; -option java_outer_classname = "HttpProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: HTTP tap data] - -// A fully buffered HTTP trace message. -message HttpBufferedTrace { - // HTTP message wrapper. - message Message { - // Message headers. - repeated api.v2.core.HeaderValue headers = 1; - - // Message body. - Body body = 2; - - // Message trailers. - repeated api.v2.core.HeaderValue trailers = 3; - } - - // Request message. - Message request = 1; - - // Response message. - Message response = 2; -} - -// A streamed HTTP trace segment. Multiple segments make up a full trace. -// [#next-free-field: 8] -message HttpStreamedTraceSegment { - // Trace ID unique to the originating Envoy only. Trace IDs can repeat and should not be used - // for long term stable uniqueness. - uint64 trace_id = 1; - - oneof message_piece { - // Request headers. - api.v2.core.HeaderMap request_headers = 2; - - // Request body chunk. - Body request_body_chunk = 3; - - // Request trailers. - api.v2.core.HeaderMap request_trailers = 4; - - // Response headers. - api.v2.core.HeaderMap response_headers = 5; - - // Response body chunk. - Body response_body_chunk = 6; - - // Response trailers. 
- api.v2.core.HeaderMap response_trailers = 7; - } -} diff --git a/generated_api_shadow/envoy/data/tap/v2alpha/transport.proto b/generated_api_shadow/envoy/data/tap/v2alpha/transport.proto deleted file mode 100644 index 82c2845ee338f..0000000000000 --- a/generated_api_shadow/envoy/data/tap/v2alpha/transport.proto +++ /dev/null @@ -1,102 +0,0 @@ -syntax = "proto3"; - -package envoy.data.tap.v2alpha; - -import "envoy/api/v2/core/address.proto"; -import "envoy/data/tap/v2alpha/common.proto"; - -import "google/protobuf/timestamp.proto"; - -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.data.tap.v2alpha"; -option java_outer_classname = "TransportProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Transport tap data] -// Trace format for the tap transport socket extension. This dumps plain text read/write -// sequences on a socket. - -// Connection properties. -message Connection { - // Local address. - api.v2.core.Address local_address = 2; - - // Remote address. - api.v2.core.Address remote_address = 3; -} - -// Event in a socket trace. -message SocketEvent { - // Data read by Envoy from the transport socket. - message Read { - // TODO(htuch): Half-close for reads. - - // Binary data read. - Body data = 1; - } - - // Data written by Envoy to the transport socket. - message Write { - // Binary data written. - Body data = 1; - - // Stream was half closed after this write. - bool end_stream = 2; - } - - // The connection was closed. - message Closed { - // TODO(mattklein123): Close event type. - } - - // Timestamp for event. - google.protobuf.Timestamp timestamp = 1; - - // Read or write with content as bytes string. - oneof event_selector { - Read read = 2; - - Write write = 3; - - Closed closed = 4; - } -} - -// Sequence of read/write events that constitute a buffered trace on a socket. 
-// [#next-free-field: 6] -message SocketBufferedTrace { - // Trace ID unique to the originating Envoy only. Trace IDs can repeat and should not be used - // for long term stable uniqueness. Matches connection IDs used in Envoy logs. - uint64 trace_id = 1; - - // Connection properties. - Connection connection = 2; - - // Sequence of observed events. - repeated SocketEvent events = 3; - - // Set to true if read events were truncated due to the :ref:`max_buffered_rx_bytes - // ` setting. - bool read_truncated = 4; - - // Set to true if write events were truncated due to the :ref:`max_buffered_tx_bytes - // ` setting. - bool write_truncated = 5; -} - -// A streamed socket trace segment. Multiple segments make up a full trace. -message SocketStreamedTraceSegment { - // Trace ID unique to the originating Envoy only. Trace IDs can repeat and should not be used - // for long term stable uniqueness. Matches connection IDs used in Envoy logs. - uint64 trace_id = 1; - - oneof message_piece { - // Connection properties. - Connection connection = 2; - - // Socket event. - SocketEvent event = 3; - } -} diff --git a/generated_api_shadow/envoy/data/tap/v2alpha/wrapper.proto b/generated_api_shadow/envoy/data/tap/v2alpha/wrapper.proto deleted file mode 100644 index 769b95c6160a3..0000000000000 --- a/generated_api_shadow/envoy/data/tap/v2alpha/wrapper.proto +++ /dev/null @@ -1,36 +0,0 @@ -syntax = "proto3"; - -package envoy.data.tap.v2alpha; - -import "envoy/data/tap/v2alpha/http.proto"; -import "envoy/data/tap/v2alpha/transport.proto"; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.data.tap.v2alpha"; -option java_outer_classname = "WrapperProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Tap data wrappers] - -// Wrapper for all fully buffered and streamed tap traces that Envoy emits. 
This is required for -// sending traces over gRPC APIs or more easily persisting binary messages to files. -message TraceWrapper { - oneof trace { - option (validate.required) = true; - - // An HTTP buffered tap trace. - HttpBufferedTrace http_buffered_trace = 1; - - // An HTTP streamed tap trace segment. - HttpStreamedTraceSegment http_streamed_trace_segment = 2; - - // A socket buffered tap trace. - SocketBufferedTrace socket_buffered_trace = 3; - - // A socket streamed tap trace segment. - SocketStreamedTraceSegment socket_streamed_trace_segment = 4; - } -} diff --git a/generated_api_shadow/envoy/data/tap/v3/BUILD b/generated_api_shadow/envoy/data/tap/v3/BUILD deleted file mode 100644 index 7cdbc28e7cd45..0000000000000 --- a/generated_api_shadow/envoy/data/tap/v3/BUILD +++ /dev/null @@ -1,13 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/core/v3:pkg", - "//envoy/data/tap/v2alpha:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/data/tap/v3/common.proto b/generated_api_shadow/envoy/data/tap/v3/common.proto deleted file mode 100644 index 2c4fb9c61a555..0000000000000 --- a/generated_api_shadow/envoy/data/tap/v3/common.proto +++ /dev/null @@ -1,37 +0,0 @@ -syntax = "proto3"; - -package envoy.data.tap.v3; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; - -option java_package = "io.envoyproxy.envoy.data.tap.v3"; -option java_outer_classname = "CommonProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Tap common data] - -// Wrapper for tapped body data. This includes HTTP request/response body, transport socket received -// and transmitted data, etc. 
-message Body { - option (udpa.annotations.versioning).previous_message_type = "envoy.data.tap.v2alpha.Body"; - - oneof body_type { - // Body data as bytes. By default, tap body data will be present in this field, as the proto - // `bytes` type can contain any valid byte. - bytes as_bytes = 1; - - // Body data as string. This field is only used when the :ref:`JSON_BODY_AS_STRING - // ` sink - // format type is selected. See the documentation for that option for why this is useful. - string as_string = 2; - } - - // Specifies whether body data has been truncated to fit within the specified - // :ref:`max_buffered_rx_bytes - // ` and - // :ref:`max_buffered_tx_bytes - // ` settings. - bool truncated = 3; -} diff --git a/generated_api_shadow/envoy/data/tap/v3/http.proto b/generated_api_shadow/envoy/data/tap/v3/http.proto deleted file mode 100644 index d4f05fa09522e..0000000000000 --- a/generated_api_shadow/envoy/data/tap/v3/http.proto +++ /dev/null @@ -1,74 +0,0 @@ -syntax = "proto3"; - -package envoy.data.tap.v3; - -import "envoy/config/core/v3/base.proto"; -import "envoy/data/tap/v3/common.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; - -option java_package = "io.envoyproxy.envoy.data.tap.v3"; -option java_outer_classname = "HttpProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: HTTP tap data] - -// A fully buffered HTTP trace message. -message HttpBufferedTrace { - option (udpa.annotations.versioning).previous_message_type = - "envoy.data.tap.v2alpha.HttpBufferedTrace"; - - // HTTP message wrapper. - message Message { - option (udpa.annotations.versioning).previous_message_type = - "envoy.data.tap.v2alpha.HttpBufferedTrace.Message"; - - // Message headers. - repeated config.core.v3.HeaderValue headers = 1; - - // Message body. - Body body = 2; - - // Message trailers. 
- repeated config.core.v3.HeaderValue trailers = 3; - } - - // Request message. - Message request = 1; - - // Response message. - Message response = 2; -} - -// A streamed HTTP trace segment. Multiple segments make up a full trace. -// [#next-free-field: 8] -message HttpStreamedTraceSegment { - option (udpa.annotations.versioning).previous_message_type = - "envoy.data.tap.v2alpha.HttpStreamedTraceSegment"; - - // Trace ID unique to the originating Envoy only. Trace IDs can repeat and should not be used - // for long term stable uniqueness. - uint64 trace_id = 1; - - oneof message_piece { - // Request headers. - config.core.v3.HeaderMap request_headers = 2; - - // Request body chunk. - Body request_body_chunk = 3; - - // Request trailers. - config.core.v3.HeaderMap request_trailers = 4; - - // Response headers. - config.core.v3.HeaderMap response_headers = 5; - - // Response body chunk. - Body response_body_chunk = 6; - - // Response trailers. - config.core.v3.HeaderMap response_trailers = 7; - } -} diff --git a/generated_api_shadow/envoy/data/tap/v3/transport.proto b/generated_api_shadow/envoy/data/tap/v3/transport.proto deleted file mode 100644 index 0ff4b7da06043..0000000000000 --- a/generated_api_shadow/envoy/data/tap/v3/transport.proto +++ /dev/null @@ -1,122 +0,0 @@ -syntax = "proto3"; - -package envoy.data.tap.v3; - -import "envoy/config/core/v3/address.proto"; -import "envoy/data/tap/v3/common.proto"; - -import "google/protobuf/timestamp.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; - -option java_package = "io.envoyproxy.envoy.data.tap.v3"; -option java_outer_classname = "TransportProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Transport tap data] -// Trace format for the tap transport socket extension. This dumps plain text read/write -// sequences on a socket. - -// Connection properties. 
-message Connection { - option (udpa.annotations.versioning).previous_message_type = "envoy.data.tap.v2alpha.Connection"; - - // Local address. - config.core.v3.Address local_address = 2; - - // Remote address. - config.core.v3.Address remote_address = 3; -} - -// Event in a socket trace. -message SocketEvent { - option (udpa.annotations.versioning).previous_message_type = "envoy.data.tap.v2alpha.SocketEvent"; - - // Data read by Envoy from the transport socket. - message Read { - // TODO(htuch): Half-close for reads. - - option (udpa.annotations.versioning).previous_message_type = - "envoy.data.tap.v2alpha.SocketEvent.Read"; - - // Binary data read. - Body data = 1; - } - - // Data written by Envoy to the transport socket. - message Write { - option (udpa.annotations.versioning).previous_message_type = - "envoy.data.tap.v2alpha.SocketEvent.Write"; - - // Binary data written. - Body data = 1; - - // Stream was half closed after this write. - bool end_stream = 2; - } - - // The connection was closed. - message Closed { - // TODO(mattklein123): Close event type. - - option (udpa.annotations.versioning).previous_message_type = - "envoy.data.tap.v2alpha.SocketEvent.Closed"; - } - - // Timestamp for event. - google.protobuf.Timestamp timestamp = 1; - - // Read or write with content as bytes string. - oneof event_selector { - Read read = 2; - - Write write = 3; - - Closed closed = 4; - } -} - -// Sequence of read/write events that constitute a buffered trace on a socket. -// [#next-free-field: 6] -message SocketBufferedTrace { - option (udpa.annotations.versioning).previous_message_type = - "envoy.data.tap.v2alpha.SocketBufferedTrace"; - - // Trace ID unique to the originating Envoy only. Trace IDs can repeat and should not be used - // for long term stable uniqueness. Matches connection IDs used in Envoy logs. - uint64 trace_id = 1; - - // Connection properties. - Connection connection = 2; - - // Sequence of observed events. 
- repeated SocketEvent events = 3; - - // Set to true if read events were truncated due to the :ref:`max_buffered_rx_bytes - // ` setting. - bool read_truncated = 4; - - // Set to true if write events were truncated due to the :ref:`max_buffered_tx_bytes - // ` setting. - bool write_truncated = 5; -} - -// A streamed socket trace segment. Multiple segments make up a full trace. -message SocketStreamedTraceSegment { - option (udpa.annotations.versioning).previous_message_type = - "envoy.data.tap.v2alpha.SocketStreamedTraceSegment"; - - // Trace ID unique to the originating Envoy only. Trace IDs can repeat and should not be used - // for long term stable uniqueness. Matches connection IDs used in Envoy logs. - uint64 trace_id = 1; - - oneof message_piece { - // Connection properties. - Connection connection = 2; - - // Socket event. - SocketEvent event = 3; - } -} diff --git a/generated_api_shadow/envoy/data/tap/v3/wrapper.proto b/generated_api_shadow/envoy/data/tap/v3/wrapper.proto deleted file mode 100644 index 636547614c268..0000000000000 --- a/generated_api_shadow/envoy/data/tap/v3/wrapper.proto +++ /dev/null @@ -1,40 +0,0 @@ -syntax = "proto3"; - -package envoy.data.tap.v3; - -import "envoy/data/tap/v3/http.proto"; -import "envoy/data/tap/v3/transport.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.data.tap.v3"; -option java_outer_classname = "WrapperProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Tap data wrappers] - -// Wrapper for all fully buffered and streamed tap traces that Envoy emits. This is required for -// sending traces over gRPC APIs or more easily persisting binary messages to files. 
-message TraceWrapper { - option (udpa.annotations.versioning).previous_message_type = - "envoy.data.tap.v2alpha.TraceWrapper"; - - oneof trace { - option (validate.required) = true; - - // An HTTP buffered tap trace. - HttpBufferedTrace http_buffered_trace = 1; - - // An HTTP streamed tap trace segment. - HttpStreamedTraceSegment http_streamed_trace_segment = 2; - - // A socket buffered tap trace. - SocketBufferedTrace socket_buffered_trace = 3; - - // A socket streamed tap trace segment. - SocketStreamedTraceSegment socket_streamed_trace_segment = 4; - } -} diff --git a/generated_api_shadow/envoy/extensions/access_loggers/file/v3/BUILD b/generated_api_shadow/envoy/extensions/access_loggers/file/v3/BUILD deleted file mode 100644 index a1775bbe6f513..0000000000000 --- a/generated_api_shadow/envoy/extensions/access_loggers/file/v3/BUILD +++ /dev/null @@ -1,13 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/annotations:pkg", - "//envoy/config/core/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/access_loggers/file/v3/file.proto b/generated_api_shadow/envoy/extensions/access_loggers/file/v3/file.proto deleted file mode 100644 index bca7c913a65b5..0000000000000 --- a/generated_api_shadow/envoy/extensions/access_loggers/file/v3/file.proto +++ /dev/null @@ -1,62 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.access_loggers.file.v3; - -import "envoy/config/core/v3/substitution_format_string.proto"; - -import "google/protobuf/struct.proto"; - -import "envoy/annotations/deprecation.proto"; -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.access_loggers.file.v3"; -option 
java_outer_classname = "FileProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: File access log] -// [#extension: envoy.access_loggers.file] - -// Custom configuration for an :ref:`AccessLog ` -// that writes log entries directly to a file. Configures the built-in *envoy.access_loggers.file* -// AccessLog. -// [#next-free-field: 6] -message FileAccessLog { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.accesslog.v2.FileAccessLog"; - - // A path to a local file to which to write the access log entries. - string path = 1 [(validate.rules).string = {min_len: 1}]; - - oneof access_log_format { - // Access log :ref:`format string`. - // Envoy supports :ref:`custom access log formats ` as well as a - // :ref:`default format `. - // This field is deprecated. - // Please use :ref:`log_format `. - string format = 2 [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - - // Access log :ref:`format dictionary`. All values - // are rendered as strings. - // This field is deprecated. - // Please use :ref:`log_format `. - google.protobuf.Struct json_format = 3 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - - // Access log :ref:`format dictionary`. Values are - // rendered as strings, numbers, or boolean values as appropriate. Nested JSON objects may - // be produced by some command operators (e.g.FILTER_STATE or DYNAMIC_METADATA). See the - // documentation for a specific command operator for details. - // This field is deprecated. - // Please use :ref:`log_format `. - google.protobuf.Struct typed_json_format = 4 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - - // Configuration to form access log data and format. - // If not specified, use :ref:`default format `. 
- config.core.v3.SubstitutionFormatString log_format = 5 - [(validate.rules).message = {required: true}]; - } -} diff --git a/generated_api_shadow/envoy/extensions/access_loggers/grpc/v3/BUILD b/generated_api_shadow/envoy/extensions/access_loggers/grpc/v3/BUILD deleted file mode 100644 index 1c1a6f6b44235..0000000000000 --- a/generated_api_shadow/envoy/extensions/access_loggers/grpc/v3/BUILD +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/core/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/access_loggers/grpc/v3/als.proto b/generated_api_shadow/envoy/extensions/access_loggers/grpc/v3/als.proto deleted file mode 100644 index fa0a9f0f820d5..0000000000000 --- a/generated_api_shadow/envoy/extensions/access_loggers/grpc/v3/als.proto +++ /dev/null @@ -1,89 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.access_loggers.grpc.v3; - -import "envoy/config/core/v3/config_source.proto"; -import "envoy/config/core/v3/grpc_service.proto"; - -import "google/protobuf/duration.proto"; -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.access_loggers.grpc.v3"; -option java_outer_classname = "AlsProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: gRPC Access Log Service (ALS)] - -// Configuration for the built-in *envoy.access_loggers.http_grpc* -// :ref:`AccessLog `. This configuration will -// populate :ref:`StreamAccessLogsMessage.http_logs -// `. 
-// [#extension: envoy.access_loggers.http_grpc] -message HttpGrpcAccessLogConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.accesslog.v2.HttpGrpcAccessLogConfig"; - - CommonGrpcAccessLogConfig common_config = 1 [(validate.rules).message = {required: true}]; - - // Additional request headers to log in :ref:`HTTPRequestProperties.request_headers - // `. - repeated string additional_request_headers_to_log = 2; - - // Additional response headers to log in :ref:`HTTPResponseProperties.response_headers - // `. - repeated string additional_response_headers_to_log = 3; - - // Additional response trailers to log in :ref:`HTTPResponseProperties.response_trailers - // `. - repeated string additional_response_trailers_to_log = 4; -} - -// Configuration for the built-in *envoy.access_loggers.tcp_grpc* type. This configuration will -// populate *StreamAccessLogsMessage.tcp_logs*. -// [#extension: envoy.access_loggers.tcp_grpc] -message TcpGrpcAccessLogConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.accesslog.v2.TcpGrpcAccessLogConfig"; - - CommonGrpcAccessLogConfig common_config = 1 [(validate.rules).message = {required: true}]; -} - -// Common configuration for gRPC access logs. -// [#next-free-field: 7] -message CommonGrpcAccessLogConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.accesslog.v2.CommonGrpcAccessLogConfig"; - - // The friendly name of the access log to be returned in :ref:`StreamAccessLogsMessage.Identifier - // `. This allows the - // access log server to differentiate between different access logs coming from the same Envoy. - string log_name = 1 [(validate.rules).string = {min_len: 1}]; - - // The gRPC service for the access log service. - config.core.v3.GrpcService grpc_service = 2 [(validate.rules).message = {required: true}]; - - // API version for access logs service transport protocol. 
This describes the access logs service - // gRPC endpoint and version of messages used on the wire. - config.core.v3.ApiVersion transport_api_version = 6 - [(validate.rules).enum = {defined_only: true}]; - - // Interval for flushing access logs to the gRPC stream. Logger will flush requests every time - // this interval is elapsed, or when batch size limit is hit, whichever comes first. Defaults to - // 1 second. - google.protobuf.Duration buffer_flush_interval = 3 [(validate.rules).duration = {gt {}}]; - - // Soft size limit in bytes for access log entries buffer. Logger will buffer requests until - // this limit it hit, or every time flush interval is elapsed, whichever comes first. Setting it - // to zero effectively disables the batching. Defaults to 16384. - google.protobuf.UInt32Value buffer_size_bytes = 4; - - // Additional filter state objects to log in :ref:`filter_state_objects - // `. - // Logger will call `FilterState::Object::serializeAsProto` to serialize the filter state object. - repeated string filter_state_objects_to_log = 5; -} diff --git a/generated_api_shadow/envoy/extensions/access_loggers/open_telemetry/v3alpha/BUILD b/generated_api_shadow/envoy/extensions/access_loggers/open_telemetry/v3alpha/BUILD deleted file mode 100644 index 37737510d8ea6..0000000000000 --- a/generated_api_shadow/envoy/extensions/access_loggers/open_telemetry/v3alpha/BUILD +++ /dev/null @@ -1,13 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/extensions/access_loggers/grpc/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - "@opentelemetry_proto//:common", - ], -) diff --git a/generated_api_shadow/envoy/extensions/access_loggers/open_telemetry/v3alpha/logs_service.proto b/generated_api_shadow/envoy/extensions/access_loggers/open_telemetry/v3alpha/logs_service.proto deleted file mode 100644 index 1b7027133e153..0000000000000 --- a/generated_api_shadow/envoy/extensions/access_loggers/open_telemetry/v3alpha/logs_service.proto +++ /dev/null @@ -1,42 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.access_loggers.open_telemetry.v3alpha; - -import "envoy/extensions/access_loggers/grpc/v3/als.proto"; - -import "opentelemetry/proto/common/v1/common.proto"; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.access_loggers.open_telemetry.v3alpha"; -option java_outer_classname = "LogsServiceProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).work_in_progress = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: OpenTelemetry (gRPC) Access Log] - -// Configuration for the built-in *envoy.access_loggers.open_telemetry* -// :ref:`AccessLog `. This configuration will -// populate `opentelemetry.proto.collector.v1.logs.ExportLogsServiceRequest.resource_logs `_. -// OpenTelemetry `Resource `_ -// attributes are filled with Envoy node info. In addition, the request start time is set in the -// dedicated field. -// [#extension: envoy.access_loggers.open_telemetry] -// [#comment:TODO(itamarkam): allow configuration for resource attributes.] -message OpenTelemetryAccessLogConfig { - // [#comment:TODO(itamarkam): add 'filter_state_objects_to_log' to logs.] 
- grpc.v3.CommonGrpcAccessLogConfig common_config = 1 [(validate.rules).message = {required: true}]; - - // OpenTelemetry `LogResource `_ - // fields, following `Envoy access logging formatting `_. - // - // See 'body' in the LogResource proto for more details. - // Example: ``body { string_value: "%PROTOCOL%" }``. - opentelemetry.proto.common.v1.AnyValue body = 2; - - // See 'attributes' in the LogResource proto for more details. - // Example: ``attributes { values { key: "user_agent" value { string_value: "%REQ(USER-AGENT)%" } } }``. - opentelemetry.proto.common.v1.KeyValueList attributes = 3; -} diff --git a/generated_api_shadow/envoy/extensions/access_loggers/stream/v3/BUILD b/generated_api_shadow/envoy/extensions/access_loggers/stream/v3/BUILD deleted file mode 100644 index 1c1a6f6b44235..0000000000000 --- a/generated_api_shadow/envoy/extensions/access_loggers/stream/v3/BUILD +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/core/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/access_loggers/stream/v3/stream.proto b/generated_api_shadow/envoy/extensions/access_loggers/stream/v3/stream.proto deleted file mode 100644 index bd704ccdb6768..0000000000000 --- a/generated_api_shadow/envoy/extensions/access_loggers/stream/v3/stream.proto +++ /dev/null @@ -1,38 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.access_loggers.stream.v3; - -import "envoy/config/core/v3/substitution_format_string.proto"; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.access_loggers.stream.v3"; -option java_outer_classname = "StreamProto"; -option java_multiple_files = true; -option 
(udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Standard Streams Access loggers] -// [#extension: envoy.access_loggers.stream] - -// Custom configuration for an :ref:`AccessLog ` -// that writes log entries directly to the operating system's standard output. -message StdoutAccessLog { - oneof access_log_format { - // Configuration to form access log data and format. - // If not specified, use :ref:`default format `. - config.core.v3.SubstitutionFormatString log_format = 1 - [(validate.rules).message = {required: true}]; - } -} - -// Custom configuration for an :ref:`AccessLog ` -// that writes log entries directly to the operating system's standard error. -message StderrAccessLog { - oneof access_log_format { - // Configuration to form access log data and format. - // If not specified, use :ref:`default format `. - config.core.v3.SubstitutionFormatString log_format = 1 - [(validate.rules).message = {required: true}]; - } -} diff --git a/generated_api_shadow/envoy/extensions/access_loggers/wasm/v3/BUILD b/generated_api_shadow/envoy/extensions/access_loggers/wasm/v3/BUILD deleted file mode 100644 index c37174bdefc46..0000000000000 --- a/generated_api_shadow/envoy/extensions/access_loggers/wasm/v3/BUILD +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/extensions/wasm/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/access_loggers/wasm/v3/wasm.proto b/generated_api_shadow/envoy/extensions/access_loggers/wasm/v3/wasm.proto deleted file mode 100644 index 44e96345dfee5..0000000000000 --- a/generated_api_shadow/envoy/extensions/access_loggers/wasm/v3/wasm.proto +++ /dev/null @@ -1,22 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.access_loggers.wasm.v3; - -import "envoy/extensions/wasm/v3/wasm.proto"; - -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.access_loggers.wasm.v3"; -option java_outer_classname = "WasmProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Wasm access log] -// [#extension: envoy.access_loggers.wasm] - -// Custom configuration for an :ref:`AccessLog ` -// that calls into a WASM VM. Configures the built-in *envoy.access_loggers.wasm* -// AccessLog. -message WasmAccessLog { - envoy.extensions.wasm.v3.PluginConfig config = 1; -} diff --git a/generated_api_shadow/envoy/extensions/cache/simple_http_cache/v3alpha/BUILD b/generated_api_shadow/envoy/extensions/cache/simple_http_cache/v3alpha/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/extensions/cache/simple_http_cache/v3alpha/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/extensions/cache/simple_http_cache/v3alpha/config.proto b/generated_api_shadow/envoy/extensions/cache/simple_http_cache/v3alpha/config.proto deleted file mode 100644 index 1b42e9b3f93d4..0000000000000 --- a/generated_api_shadow/envoy/extensions/cache/simple_http_cache/v3alpha/config.proto +++ /dev/null @@ -1,16 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.cache.simple_http_cache.v3alpha; - -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.cache.simple_http_cache.v3alpha"; -option java_outer_classname = "ConfigProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: SimpleHttpCache CacheFilter storage plugin] - -// [#extension: envoy.cache.simple_http_cache] -message SimpleHttpCacheConfig { -} diff --git a/generated_api_shadow/envoy/extensions/clusters/aggregate/v3/BUILD b/generated_api_shadow/envoy/extensions/clusters/aggregate/v3/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/extensions/clusters/aggregate/v3/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/extensions/clusters/aggregate/v3/cluster.proto b/generated_api_shadow/envoy/extensions/clusters/aggregate/v3/cluster.proto deleted file mode 100644 index aead1c4517391..0000000000000 --- a/generated_api_shadow/envoy/extensions/clusters/aggregate/v3/cluster.proto +++ /dev/null @@ -1,26 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.clusters.aggregate.v3; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.clusters.aggregate.v3"; -option java_outer_classname = "ClusterProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Aggregate cluster configuration] - -// Configuration for the aggregate cluster. See the :ref:`architecture overview -// ` for more information. -// [#extension: envoy.clusters.aggregate] -message ClusterConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.cluster.aggregate.v2alpha.ClusterConfig"; - - // Load balancing clusters in aggregate cluster. Clusters are prioritized based on the order they - // appear in this list. - repeated string clusters = 1 [(validate.rules).repeated = {min_items: 1}]; -} diff --git a/generated_api_shadow/envoy/extensions/clusters/dynamic_forward_proxy/v3/BUILD b/generated_api_shadow/envoy/extensions/clusters/dynamic_forward_proxy/v3/BUILD deleted file mode 100644 index 05f25a2fe5d91..0000000000000 --- a/generated_api_shadow/envoy/extensions/clusters/dynamic_forward_proxy/v3/BUILD +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/extensions/common/dynamic_forward_proxy/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/clusters/dynamic_forward_proxy/v3/cluster.proto b/generated_api_shadow/envoy/extensions/clusters/dynamic_forward_proxy/v3/cluster.proto deleted file mode 100644 index c4fc8285ee597..0000000000000 --- a/generated_api_shadow/envoy/extensions/clusters/dynamic_forward_proxy/v3/cluster.proto +++ /dev/null @@ -1,35 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.clusters.dynamic_forward_proxy.v3; - -import "envoy/extensions/common/dynamic_forward_proxy/v3/dns_cache.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.clusters.dynamic_forward_proxy.v3"; -option java_outer_classname = "ClusterProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Dynamic forward proxy cluster configuration] - -// Configuration for the dynamic forward proxy cluster. See the :ref:`architecture overview -// ` for more information. -// [#extension: envoy.clusters.dynamic_forward_proxy] -message ClusterConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.cluster.dynamic_forward_proxy.v2alpha.ClusterConfig"; - - // The DNS cache configuration that the cluster will attach to. Note this configuration must - // match that of associated :ref:`dynamic forward proxy HTTP filter configuration - // `. 
- common.dynamic_forward_proxy.v3.DnsCacheConfig dns_cache_config = 1 - [(validate.rules).message = {required: true}]; - - // If true allow the cluster configuration to disable the auto_sni and auto_san_validation options - // in the :ref:`cluster's upstream_http_protocol_options - // ` - bool allow_insecure_cluster_options = 2; -} diff --git a/generated_api_shadow/envoy/extensions/clusters/redis/v3/BUILD b/generated_api_shadow/envoy/extensions/clusters/redis/v3/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/extensions/clusters/redis/v3/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/extensions/clusters/redis/v3/redis_cluster.proto b/generated_api_shadow/envoy/extensions/clusters/redis/v3/redis_cluster.proto deleted file mode 100644 index 73598eafbe9d2..0000000000000 --- a/generated_api_shadow/envoy/extensions/clusters/redis/v3/redis_cluster.proto +++ /dev/null @@ -1,85 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.clusters.redis.v3; - -import "google/protobuf/duration.proto"; -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.clusters.redis.v3"; -option java_outer_classname = "RedisClusterProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Redis Cluster Configuration] -// This cluster adds support for `Redis Cluster `_, as part -// of :ref:`Envoy's support for Redis Cluster `. 
-// -// Redis Cluster is an extension of Redis which supports sharding and high availability (where a -// shard that loses its primary fails over to a replica, and designates it as the new primary). -// However, as there is no unified frontend or proxy service in front of Redis Cluster, the client -// (in this case Envoy) must locally maintain the state of the Redis Cluster, specifically the -// topology. A random node in the cluster is queried for the topology using the `CLUSTER SLOTS -// command `_. This result is then stored locally, and -// updated at user-configured intervals. -// -// Additionally, if -// :ref:`enable_redirection` -// is true, then moved and ask redirection errors from upstream servers will trigger a topology -// refresh when they exceed a user-configured error threshold. -// -// Example: -// -// .. code-block:: yaml -// -// name: name -// connect_timeout: 0.25s -// dns_lookup_family: V4_ONLY -// hosts: -// - socket_address: -// address: foo.bar.com -// port_value: 22120 -// cluster_type: -// name: envoy.clusters.redis -// typed_config: -// "@type": type.googleapis.com/google.protobuf.Struct -// value: -// cluster_refresh_rate: 30s -// cluster_refresh_timeout: 0.5s -// redirect_refresh_interval: 10s -// redirect_refresh_threshold: 10 -// [#extension: envoy.clusters.redis] - -// [#next-free-field: 7] -message RedisClusterConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.cluster.redis.RedisClusterConfig"; - - // Interval between successive topology refresh requests. If not set, this defaults to 5s. - google.protobuf.Duration cluster_refresh_rate = 1 [(validate.rules).duration = {gt {}}]; - - // Timeout for topology refresh request. If not set, this defaults to 3s. - google.protobuf.Duration cluster_refresh_timeout = 2 [(validate.rules).duration = {gt {}}]; - - // The minimum interval that must pass after triggering a topology refresh request before a new - // request can possibly be triggered again. 
Any errors received during one of these - // time intervals are ignored. If not set, this defaults to 5s. - google.protobuf.Duration redirect_refresh_interval = 3; - - // The number of redirection errors that must be received before - // triggering a topology refresh request. If not set, this defaults to 5. - // If this is set to 0, topology refresh after redirect is disabled. - google.protobuf.UInt32Value redirect_refresh_threshold = 4; - - // The number of failures that must be received before triggering a topology refresh request. - // If not set, this defaults to 0, which disables the topology refresh due to failure. - uint32 failure_refresh_threshold = 5; - - // The number of hosts became degraded or unhealthy before triggering a topology refresh request. - // If not set, this defaults to 0, which disables the topology refresh due to degraded or - // unhealthy host. - uint32 host_degraded_refresh_threshold = 6; -} diff --git a/generated_api_shadow/envoy/extensions/common/dynamic_forward_proxy/v3/BUILD b/generated_api_shadow/envoy/extensions/common/dynamic_forward_proxy/v3/BUILD deleted file mode 100644 index 6e07b4a9226bb..0000000000000 --- a/generated_api_shadow/envoy/extensions/common/dynamic_forward_proxy/v3/BUILD +++ /dev/null @@ -1,15 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/annotations:pkg", - "//envoy/config/cluster/v3:pkg", - "//envoy/config/core/v3:pkg", - "//envoy/extensions/common/key_value/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/common/dynamic_forward_proxy/v3/dns_cache.proto b/generated_api_shadow/envoy/extensions/common/dynamic_forward_proxy/v3/dns_cache.proto deleted file mode 100644 index 4a0d87ff6c3b8..0000000000000 --- a/generated_api_shadow/envoy/extensions/common/dynamic_forward_proxy/v3/dns_cache.proto +++ /dev/null @@ -1,146 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.common.dynamic_forward_proxy.v3; - -import "envoy/config/cluster/v3/cluster.proto"; -import "envoy/config/core/v3/address.proto"; -import "envoy/config/core/v3/extension.proto"; -import "envoy/config/core/v3/resolver.proto"; -import "envoy/extensions/common/key_value/v3/config.proto"; - -import "google/protobuf/duration.proto"; -import "google/protobuf/wrappers.proto"; - -import "envoy/annotations/deprecation.proto"; -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.common.dynamic_forward_proxy.v3"; -option java_outer_classname = "DnsCacheProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Dynamic forward proxy common configuration] - -// Configuration of circuit breakers for resolver. -message DnsCacheCircuitBreakers { - // The maximum number of pending requests that Envoy will allow to the - // resolver. If not specified, the default is 1024. - google.protobuf.UInt32Value max_pending_requests = 1; -} - -// Configuration for the dynamic forward proxy DNS cache. 
See the :ref:`architecture overview -// ` for more information. -// [#next-free-field: 14] -message DnsCacheConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.common.dynamic_forward_proxy.v2alpha.DnsCacheConfig"; - - // The name of the cache. Multiple named caches allow independent dynamic forward proxy - // configurations to operate within a single Envoy process using different configurations. All - // configurations with the same name *must* otherwise have the same settings when referenced - // from different configuration components. Configuration will fail to load if this is not - // the case. - string name = 1 [(validate.rules).string = {min_len: 1}]; - - // The DNS lookup family to use during resolution. - // - // [#comment:TODO(mattklein123): Figure out how to support IPv4/IPv6 "happy eyeballs" mode. The - // way this might work is a new lookup family which returns both IPv4 and IPv6 addresses, and - // then configures a host to have a primary and fall back address. With this, we could very - // likely build a "happy eyeballs" connection pool which would race the primary / fall back - // address and return the one that wins. This same method could potentially also be used for - // QUIC to TCP fall back.] - config.cluster.v3.Cluster.DnsLookupFamily dns_lookup_family = 2 - [(validate.rules).enum = {defined_only: true}]; - - // The DNS refresh rate for currently cached DNS hosts. If not specified defaults to 60s. - // - // .. note: - // - // The returned DNS TTL is not currently used to alter the refresh rate. This feature will be - // added in a future change. - // - // .. note: - // - // The refresh rate is rounded to the closest millisecond, and must be at least 1ms. - google.protobuf.Duration dns_refresh_rate = 3 - [(validate.rules).duration = {gte {nanos: 1000000}}]; - - // The TTL for hosts that are unused. Hosts that have not been used in the configured time - // interval will be purged. 
If not specified defaults to 5m. - // - // .. note: - // - // The TTL is only checked at the time of DNS refresh, as specified by *dns_refresh_rate*. This - // means that if the configured TTL is shorter than the refresh rate the host may not be removed - // immediately. - // - // .. note: - // - // The TTL has no relation to DNS TTL and is only used to control Envoy's resource usage. - google.protobuf.Duration host_ttl = 4 [(validate.rules).duration = {gt {}}]; - - // The maximum number of hosts that the cache will hold. If not specified defaults to 1024. - // - // .. note: - // - // The implementation is approximate and enforced independently on each worker thread, thus - // it is possible for the maximum hosts in the cache to go slightly above the configured - // value depending on timing. This is similar to how other circuit breakers work. - google.protobuf.UInt32Value max_hosts = 5 [(validate.rules).uint32 = {gt: 0}]; - - // If the DNS failure refresh rate is specified, - // this is used as the cache's DNS refresh rate when DNS requests are failing. If this setting is - // not specified, the failure refresh rate defaults to the dns_refresh_rate. - config.cluster.v3.Cluster.RefreshRate dns_failure_refresh_rate = 6; - - // The config of circuit breakers for resolver. It provides a configurable threshold. - // Envoy will use dns cache circuit breakers with default settings even if this value is not set. - DnsCacheCircuitBreakers dns_cache_circuit_breaker = 7; - - // Always use TCP queries instead of UDP queries for DNS lookups. - // Setting this value causes failure if the - // ``envoy.restart_features.use_apple_api_for_dns_lookups`` runtime value is true during - // server startup. Apple' API only uses UDP for DNS resolution. - // This field is deprecated in favor of *dns_resolution_config* - // which aggregates all of the DNS resolver configuration in a single message. 
- bool use_tcp_for_dns_lookups = 8 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - - // DNS resolution configuration which includes the underlying dns resolver addresses and options. - // *dns_resolution_config* will be deprecated once - // :ref:'typed_dns_resolver_config ' - // is fully supported. - config.core.v3.DnsResolutionConfig dns_resolution_config = 9; - - // DNS resolver type configuration extension. This extension can be used to configure c-ares, apple, - // or any other DNS resolver types and the related parameters. - // For example, an object of :ref:`DnsResolutionConfig ` - // can be packed into this *typed_dns_resolver_config*. This configuration will replace the - // :ref:'dns_resolution_config ' - // configuration eventually. - // TODO(yanjunxiang): Investigate the deprecation plan for *dns_resolution_config*. - // During the transition period when both *dns_resolution_config* and *typed_dns_resolver_config* exists, - // this configuration is optional. - // When *typed_dns_resolver_config* is in place, Envoy will use it and ignore *dns_resolution_config*. - // When *typed_dns_resolver_config* is missing, the default behavior is in place. - // [#not-implemented-hide:] - config.core.v3.TypedExtensionConfig typed_dns_resolver_config = 12; - - // Hostnames that should be preresolved into the cache upon creation. This might provide a - // performance improvement, in the form of cache hits, for hostnames that are going to be - // resolved during steady state and are known at config load time. - repeated config.core.v3.SocketAddress preresolve_hostnames = 10; - - // The timeout used for DNS queries. This timeout is independent of any timeout and retry policy - // used by the underlying DNS implementation (e.g., c-areas and Apple DNS) which are opaque. - // Setting this timeout will ensure that queries succeed or fail within the specified time frame - // and are then retried using the standard refresh rates. 
Defaults to 5s if not set. - google.protobuf.Duration dns_query_timeout = 11 [(validate.rules).duration = {gt {}}]; - - // [#not-implemented-hide:] - // Configuration to flush the DNS cache to long term storage. - key_value.v3.KeyValueStoreConfig key_value_config = 13; -} diff --git a/generated_api_shadow/envoy/extensions/common/key_value/v3/BUILD b/generated_api_shadow/envoy/extensions/common/key_value/v3/BUILD deleted file mode 100644 index 1c1a6f6b44235..0000000000000 --- a/generated_api_shadow/envoy/extensions/common/key_value/v3/BUILD +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/core/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/common/key_value/v3/config.proto b/generated_api_shadow/envoy/extensions/common/key_value/v3/config.proto deleted file mode 100644 index 66a55435437b3..0000000000000 --- a/generated_api_shadow/envoy/extensions/common/key_value/v3/config.proto +++ /dev/null @@ -1,22 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.common.key_value.v3; - -import "envoy/config/core/v3/extension.proto"; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.common.key_value.v3"; -option java_outer_classname = "ConfigProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Key Value Store storage plugin] - -// [#alpha:] -// This shared configuration for Envoy key value stores. 
-message KeyValueStoreConfig { - // [#extension-category: envoy.common.key_value] - config.core.v3.TypedExtensionConfig config = 1 [(validate.rules).message = {required: true}]; -} diff --git a/generated_api_shadow/envoy/extensions/common/matching/v3/BUILD b/generated_api_shadow/envoy/extensions/common/matching/v3/BUILD deleted file mode 100644 index 1afd4545d9608..0000000000000 --- a/generated_api_shadow/envoy/extensions/common/matching/v3/BUILD +++ /dev/null @@ -1,15 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/annotations:pkg", - "//envoy/config/common/matcher/v3:pkg", - "//envoy/config/core/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - "@com_github_cncf_udpa//xds/type/matcher/v3:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/common/matching/v3/extension_matcher.proto b/generated_api_shadow/envoy/extensions/common/matching/v3/extension_matcher.proto deleted file mode 100644 index eee82a381633b..0000000000000 --- a/generated_api_shadow/envoy/extensions/common/matching/v3/extension_matcher.proto +++ /dev/null @@ -1,37 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.common.matching.v3; - -import "envoy/config/common/matcher/v3/matcher.proto"; -import "envoy/config/core/v3/extension.proto"; - -import "xds/type/matcher/v3/matcher.proto"; - -import "envoy/annotations/deprecation.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.common.matching.v3"; -option java_outer_classname = "ExtensionMatcherProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Extension Matcher] - -// Wrapper around an existing extension that provides an associated matcher. 
This allows -// decorating an existing extension with a matcher, which can be used to match against -// relevant protocol data. -// -// [#alpha:] -message ExtensionWithMatcher { - // The associated matcher. This is deprecated in favor of xds_matcher. - config.common.matcher.v3.Matcher matcher = 1 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - - // The associated matcher. - xds.type.matcher.v3.Matcher xds_matcher = 3; - - // The underlying extension config. - config.core.v3.TypedExtensionConfig extension_config = 2 - [(validate.rules).message = {required: true}]; -} diff --git a/generated_api_shadow/envoy/extensions/common/ratelimit/v3/BUILD b/generated_api_shadow/envoy/extensions/common/ratelimit/v3/BUILD deleted file mode 100644 index 660d629ab7b00..0000000000000 --- a/generated_api_shadow/envoy/extensions/common/ratelimit/v3/BUILD +++ /dev/null @@ -1,13 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/api/v2/ratelimit:pkg", - "//envoy/type/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/common/ratelimit/v3/ratelimit.proto b/generated_api_shadow/envoy/extensions/common/ratelimit/v3/ratelimit.proto deleted file mode 100644 index 6bb771d25af94..0000000000000 --- a/generated_api_shadow/envoy/extensions/common/ratelimit/v3/ratelimit.proto +++ /dev/null @@ -1,103 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.common.ratelimit.v3; - -import "envoy/type/v3/ratelimit_unit.proto"; -import "envoy/type/v3/token_bucket.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.common.ratelimit.v3"; -option java_outer_classname = "RatelimitProto"; -option 
java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Common rate limit components] - -// A RateLimitDescriptor is a list of hierarchical entries that are used by the service to -// determine the final rate limit key and overall allowed limit. Here are some examples of how -// they might be used for the domain "envoy". -// -// .. code-block:: cpp -// -// ["authenticated": "false"], ["remote_address": "10.0.0.1"] -// -// What it does: Limits all unauthenticated traffic for the IP address 10.0.0.1. The -// configuration supplies a default limit for the *remote_address* key. If there is a desire to -// raise the limit for 10.0.0.1 or block it entirely it can be specified directly in the -// configuration. -// -// .. code-block:: cpp -// -// ["authenticated": "false"], ["path": "/foo/bar"] -// -// What it does: Limits all unauthenticated traffic globally for a specific path (or prefix if -// configured that way in the service). -// -// .. code-block:: cpp -// -// ["authenticated": "false"], ["path": "/foo/bar"], ["remote_address": "10.0.0.1"] -// -// What it does: Limits unauthenticated traffic to a specific path for a specific IP address. -// Like (1) we can raise/block specific IP addresses if we want with an override configuration. -// -// .. code-block:: cpp -// -// ["authenticated": "true"], ["client_id": "foo"] -// -// What it does: Limits all traffic for an authenticated client "foo" -// -// .. code-block:: cpp -// -// ["authenticated": "true"], ["client_id": "foo"], ["path": "/foo/bar"] -// -// What it does: Limits traffic to a specific path for an authenticated client "foo" -// -// The idea behind the API is that (1)/(2)/(3) and (4)/(5) can be sent in 1 request if desired. -// This enables building complex application scenarios with a generic backend. 
-// -// Optionally the descriptor can contain a limit override under a "limit" key, that specifies -// the number of requests per unit to use instead of the number configured in the -// rate limiting service. -message RateLimitDescriptor { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.ratelimit.RateLimitDescriptor"; - - message Entry { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.ratelimit.RateLimitDescriptor.Entry"; - - // Descriptor key. - string key = 1 [(validate.rules).string = {min_len: 1}]; - - // Descriptor value. - string value = 2 [(validate.rules).string = {min_len: 1}]; - } - - // Override rate limit to apply to this descriptor instead of the limit - // configured in the rate limit service. See :ref:`rate limit override - // ` for more information. - message RateLimitOverride { - // The number of requests per unit of time. - uint32 requests_per_unit = 1; - - // The unit of time. - type.v3.RateLimitUnit unit = 2 [(validate.rules).enum = {defined_only: true}]; - } - - // Descriptor entries. - repeated Entry entries = 1 [(validate.rules).repeated = {min_items: 1}]; - - // Optional rate limit override to supply to the ratelimit service. - RateLimitOverride limit = 2; -} - -message LocalRateLimitDescriptor { - // Descriptor entries. - repeated v3.RateLimitDescriptor.Entry entries = 1 [(validate.rules).repeated = {min_items: 1}]; - - // Token Bucket algorithm for local ratelimiting. - type.v3.TokenBucket token_bucket = 2 [(validate.rules).message = {required: true}]; -} diff --git a/generated_api_shadow/envoy/extensions/common/tap/v3/BUILD b/generated_api_shadow/envoy/extensions/common/tap/v3/BUILD deleted file mode 100644 index a99fa811f859a..0000000000000 --- a/generated_api_shadow/envoy/extensions/common/tap/v3/BUILD +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/tap/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/common/tap/v3/common.proto b/generated_api_shadow/envoy/extensions/common/tap/v3/common.proto deleted file mode 100644 index 4c67af7d30081..0000000000000 --- a/generated_api_shadow/envoy/extensions/common/tap/v3/common.proto +++ /dev/null @@ -1,44 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.common.tap.v3; - -import "envoy/config/tap/v3/common.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.common.tap.v3"; -option java_outer_classname = "CommonProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Common tap extension configuration] - -// Common configuration for all tap extensions. -message CommonExtensionConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.common.tap.v2alpha.CommonExtensionConfig"; - - oneof config_type { - option (validate.required) = true; - - // If specified, the tap filter will be configured via an admin handler. - AdminConfig admin_config = 1; - - // If specified, the tap filter will be configured via a static configuration that cannot be - // changed. - config.tap.v3.TapConfig static_config = 2; - } -} - -// Configuration for the admin handler. See :ref:`here ` for -// more information. -message AdminConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.common.tap.v2alpha.AdminConfig"; - - // Opaque configuration ID. 
When requests are made to the admin handler, the passed opaque ID is - // matched to the configured filter opaque ID to determine which filter to configure. - string config_id = 1 [(validate.rules).string = {min_len: 1}]; -} diff --git a/generated_api_shadow/envoy/extensions/compression/brotli/compressor/v3/BUILD b/generated_api_shadow/envoy/extensions/compression/brotli/compressor/v3/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/extensions/compression/brotli/compressor/v3/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/extensions/compression/brotli/compressor/v3/brotli.proto b/generated_api_shadow/envoy/extensions/compression/brotli/compressor/v3/brotli.proto deleted file mode 100644 index cb2933dd5d385..0000000000000 --- a/generated_api_shadow/envoy/extensions/compression/brotli/compressor/v3/brotli.proto +++ /dev/null @@ -1,54 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.compression.brotli.compressor.v3; - -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.compression.brotli.compressor.v3"; -option java_outer_classname = "BrotliProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Brotli Compressor] -// [#extension: envoy.compression.brotli.compressor] - -// [#next-free-field: 7] -message Brotli { - enum EncoderMode { - DEFAULT = 0; - GENERIC = 1; - TEXT = 2; - FONT = 3; - } - - // Value from 0 to 11 that controls the main compression speed-density lever. - // The higher quality, the slower compression. 
The default value is 3. - google.protobuf.UInt32Value quality = 1 [(validate.rules).uint32 = {lte: 11}]; - - // A value used to tune encoder for specific input. For more information about modes, - // please refer to brotli manual: https://brotli.org/encode.html#aa6f - // This field will be set to "DEFAULT" if not specified. - EncoderMode encoder_mode = 2 [(validate.rules).enum = {defined_only: true}]; - - // Value from 10 to 24 that represents the base two logarithmic of the compressor's window size. - // Larger window results in better compression at the expense of memory usage. The default is 18. - // For more details about this parameter, please refer to brotli manual: - // https://brotli.org/encode.html#a9a8 - google.protobuf.UInt32Value window_bits = 3 [(validate.rules).uint32 = {lte: 24 gte: 10}]; - - // Value from 16 to 24 that represents the base two logarithmic of the compressor's input block - // size. Larger input block results in better compression at the expense of memory usage. The - // default is 24. For more details about this parameter, please refer to brotli manual: - // https://brotli.org/encode.html#a9a8 - google.protobuf.UInt32Value input_block_bits = 4 [(validate.rules).uint32 = {lte: 24 gte: 16}]; - - // Value for compressor's next output buffer. If not set, defaults to 4096. - google.protobuf.UInt32Value chunk_size = 5 [(validate.rules).uint32 = {lte: 65536 gte: 4096}]; - - // If true, disables "literal context modeling" format feature. - // This flag is a "decoding-speed vs compression ratio" trade-off. - bool disable_literal_context_modeling = 6; -} diff --git a/generated_api_shadow/envoy/extensions/compression/brotli/decompressor/v3/BUILD b/generated_api_shadow/envoy/extensions/compression/brotli/decompressor/v3/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/extensions/compression/brotli/decompressor/v3/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. 
This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/extensions/compression/brotli/decompressor/v3/brotli.proto b/generated_api_shadow/envoy/extensions/compression/brotli/decompressor/v3/brotli.proto deleted file mode 100644 index 24511861cf930..0000000000000 --- a/generated_api_shadow/envoy/extensions/compression/brotli/decompressor/v3/brotli.proto +++ /dev/null @@ -1,25 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.compression.brotli.decompressor.v3; - -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.compression.brotli.decompressor.v3"; -option java_outer_classname = "BrotliProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Brotli Decompressor] -// [#extension: envoy.compression.brotli.decompressor] - -message Brotli { - // If true, disables "canny" ring buffer allocation strategy. - // Ring buffer is allocated according to window size, despite the real size of the content. - bool disable_ring_buffer_reallocation = 1; - - // Value for decompressor's next output buffer. If not set, defaults to 4096. - google.protobuf.UInt32Value chunk_size = 2 [(validate.rules).uint32 = {lte: 65536 gte: 4096}]; -} diff --git a/generated_api_shadow/envoy/extensions/compression/gzip/compressor/v3/BUILD b/generated_api_shadow/envoy/extensions/compression/gzip/compressor/v3/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/extensions/compression/gzip/compressor/v3/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/extensions/compression/gzip/compressor/v3/gzip.proto b/generated_api_shadow/envoy/extensions/compression/gzip/compressor/v3/gzip.proto deleted file mode 100644 index 2f37315be355c..0000000000000 --- a/generated_api_shadow/envoy/extensions/compression/gzip/compressor/v3/gzip.proto +++ /dev/null @@ -1,78 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.compression.gzip.compressor.v3; - -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.compression.gzip.compressor.v3"; -option java_outer_classname = "GzipProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Gzip Compressor] -// [#extension: envoy.compression.gzip.compressor] - -// [#next-free-field: 6] -message Gzip { - // All the values of this enumeration translate directly to zlib's compression strategies. - // For more information about each strategy, please refer to zlib manual. - enum CompressionStrategy { - DEFAULT_STRATEGY = 0; - FILTERED = 1; - HUFFMAN_ONLY = 2; - RLE = 3; - FIXED = 4; - } - - enum CompressionLevel { - option allow_alias = true; - - DEFAULT_COMPRESSION = 0; - BEST_SPEED = 1; - COMPRESSION_LEVEL_1 = 1; - COMPRESSION_LEVEL_2 = 2; - COMPRESSION_LEVEL_3 = 3; - COMPRESSION_LEVEL_4 = 4; - COMPRESSION_LEVEL_5 = 5; - COMPRESSION_LEVEL_6 = 6; - COMPRESSION_LEVEL_7 = 7; - COMPRESSION_LEVEL_8 = 8; - COMPRESSION_LEVEL_9 = 9; - BEST_COMPRESSION = 9; - } - - // Value from 1 to 9 that controls the amount of internal memory used by zlib. Higher values - // use more memory, but are faster and produce better compression results. The default value is 5. 
- google.protobuf.UInt32Value memory_level = 1 [(validate.rules).uint32 = {lte: 9 gte: 1}]; - - // A value used for selecting the zlib compression level. This setting will affect speed and - // amount of compression applied to the content. "BEST_COMPRESSION" provides higher compression - // at the cost of higher latency and is equal to "COMPRESSION_LEVEL_9". "BEST_SPEED" provides - // lower compression with minimum impact on response time, the same as "COMPRESSION_LEVEL_1". - // "DEFAULT_COMPRESSION" provides an optimal result between speed and compression. According - // to zlib's manual this level gives the same result as "COMPRESSION_LEVEL_6". - // This field will be set to "DEFAULT_COMPRESSION" if not specified. - CompressionLevel compression_level = 2 [(validate.rules).enum = {defined_only: true}]; - - // A value used for selecting the zlib compression strategy which is directly related to the - // characteristics of the content. Most of the time "DEFAULT_STRATEGY" will be the best choice, - // which is also the default value for the parameter, though there are situations when - // changing this parameter might produce better results. For example, run-length encoding (RLE) - // is typically used when the content is known for having sequences which same data occurs many - // consecutive times. For more information about each strategy, please refer to zlib manual. - CompressionStrategy compression_strategy = 3 [(validate.rules).enum = {defined_only: true}]; - - // Value from 9 to 15 that represents the base two logarithmic of the compressor's window size. - // Larger window results in better compression at the expense of memory usage. The default is 12 - // which will produce a 4096 bytes window. For more details about this parameter, please refer to - // zlib manual > deflateInit2. - google.protobuf.UInt32Value window_bits = 4 [(validate.rules).uint32 = {lte: 15 gte: 9}]; - - // Value for Zlib's next output buffer. If not set, defaults to 4096. 
- // See https://www.zlib.net/manual.html for more details. Also see - // https://github.com/envoyproxy/envoy/issues/8448 for context on this filter's performance. - google.protobuf.UInt32Value chunk_size = 5 [(validate.rules).uint32 = {lte: 65536 gte: 4096}]; -} diff --git a/generated_api_shadow/envoy/extensions/compression/gzip/decompressor/v3/BUILD b/generated_api_shadow/envoy/extensions/compression/gzip/decompressor/v3/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/extensions/compression/gzip/decompressor/v3/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/extensions/compression/gzip/decompressor/v3/gzip.proto b/generated_api_shadow/envoy/extensions/compression/gzip/decompressor/v3/gzip.proto deleted file mode 100644 index 8fb694e883618..0000000000000 --- a/generated_api_shadow/envoy/extensions/compression/gzip/decompressor/v3/gzip.proto +++ /dev/null @@ -1,29 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.compression.gzip.decompressor.v3; - -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.compression.gzip.decompressor.v3"; -option java_outer_classname = "GzipProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Gzip Decompressor] -// [#extension: envoy.compression.gzip.decompressor] - -message Gzip { - // Value from 9 to 15 that represents the base two logarithmic of the decompressor's window size. - // The decompression window size needs to be equal or larger than the compression window size. 
- // The default window size is 15. - // This is so that the decompressor can decompress a response compressed by a compressor with any compression window size. - // For more details about this parameter, please refer to `zlib manual `_ > inflateInit2. - google.protobuf.UInt32Value window_bits = 1 [(validate.rules).uint32 = {lte: 15 gte: 9}]; - - // Value for zlib's decompressor output buffer. If not set, defaults to 4096. - // See https://www.zlib.net/manual.html for more details. - google.protobuf.UInt32Value chunk_size = 2 [(validate.rules).uint32 = {lte: 65536 gte: 4096}]; -} diff --git a/generated_api_shadow/envoy/extensions/filters/common/dependency/v3/BUILD b/generated_api_shadow/envoy/extensions/filters/common/dependency/v3/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/common/dependency/v3/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/extensions/filters/common/dependency/v3/dependency.proto b/generated_api_shadow/envoy/extensions/filters/common/dependency/v3/dependency.proto deleted file mode 100644 index 9dce610afeefb..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/common/dependency/v3/dependency.proto +++ /dev/null @@ -1,59 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.common.dependency.v3; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.common.dependency.v3"; -option java_outer_classname = "DependencyProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Filter dependency 
specification] - -// Dependency specification and string identifier. -message Dependency { - enum DependencyType { - HEADER = 0; - FILTER_STATE_KEY = 1; - DYNAMIC_METADATA = 2; - } - - // The kind of dependency. - DependencyType type = 1; - - // The string identifier for the dependency. - string name = 2 [(validate.rules).string = {min_len: 1}]; -} - -// Dependency specification for a filter. For a filter chain to be valid, any -// dependency that is required must be provided by an earlier filter. -message FilterDependencies { - // A list of dependencies required on the decode path. - repeated Dependency decode_required = 1; - - // A list of dependencies provided on the encode path. - repeated Dependency decode_provided = 2; - - // A list of dependencies required on the decode path. - repeated Dependency encode_required = 3; - - // A list of dependencies provided on the encode path. - repeated Dependency encode_provided = 4; -} - -// Matching requirements for a filter. For a match tree to be used with a filter, the match -// requirements must be satisfied. -// -// This protobuf is provided by the filter implementation as a way to communicate the matching -// requirements to the filter factories, allowing for config rejection if the requirements are -// not satisfied. -message MatchingRequirements { - message DataInputAllowList { - // An explicit list of data inputs that are allowed to be used with this filter. - repeated string type_url = 1; - } - - DataInputAllowList data_input_allow_list = 1; -} diff --git a/generated_api_shadow/envoy/extensions/filters/common/fault/v3/BUILD b/generated_api_shadow/envoy/extensions/filters/common/fault/v3/BUILD deleted file mode 100644 index b5020d19d58b5..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/common/fault/v3/BUILD +++ /dev/null @@ -1,14 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/annotations:pkg", - "//envoy/config/filter/fault/v2:pkg", - "//envoy/type/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/filters/common/fault/v3/fault.proto b/generated_api_shadow/envoy/extensions/filters/common/fault/v3/fault.proto deleted file mode 100644 index bcb5bdf9bbf55..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/common/fault/v3/fault.proto +++ /dev/null @@ -1,101 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.common.fault.v3; - -import "envoy/type/v3/percent.proto"; - -import "google/protobuf/duration.proto"; - -import "envoy/annotations/deprecation.proto"; -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.common.fault.v3"; -option java_outer_classname = "FaultProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Common fault injection types] - -// Delay specification is used to inject latency into the -// HTTP/Mongo operation. -// [#next-free-field: 6] -message FaultDelay { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.fault.v2.FaultDelay"; - - enum FaultDelayType { - // Unused and deprecated. - FIXED = 0; - } - - // Fault delays are controlled via an HTTP header (if applicable). See the - // :ref:`HTTP fault filter ` documentation for - // more information. 
- message HeaderDelay { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.fault.v2.FaultDelay.HeaderDelay"; - } - - reserved 2; - - oneof fault_delay_secifier { - option (validate.required) = true; - - // Add a fixed delay before forwarding the operation upstream. See - // https://developers.google.com/protocol-buffers/docs/proto3#json for - // the JSON/YAML Duration mapping. For HTTP/Mongo, the specified - // delay will be injected before a new request/operation. - // This is required if type is FIXED. - google.protobuf.Duration fixed_delay = 3 [(validate.rules).duration = {gt {}}]; - - // Fault delays are controlled via an HTTP header (if applicable). - HeaderDelay header_delay = 5; - } - - // The percentage of operations/connections/requests on which the delay will be injected. - type.v3.FractionalPercent percentage = 4; - - FaultDelayType hidden_envoy_deprecated_type = 1 [ - deprecated = true, - (envoy.annotations.deprecated_at_minor_version) = "3.0", - (envoy.annotations.disallowed_by_default) = true - ]; -} - -// Describes a rate limit to be applied. -message FaultRateLimit { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.fault.v2.FaultRateLimit"; - - // Describes a fixed/constant rate limit. - message FixedLimit { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.fault.v2.FaultRateLimit.FixedLimit"; - - // The limit supplied in KiB/s. - uint64 limit_kbps = 1 [(validate.rules).uint64 = {gte: 1}]; - } - - // Rate limits are controlled via an HTTP header (if applicable). See the - // :ref:`HTTP fault filter ` documentation for - // more information. - message HeaderLimit { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.fault.v2.FaultRateLimit.HeaderLimit"; - } - - oneof limit_type { - option (validate.required) = true; - - // A fixed rate limit. 
- FixedLimit fixed_limit = 1; - - // Rate limits are controlled via an HTTP header (if applicable). - HeaderLimit header_limit = 3; - } - - // The percentage of operations/connections/requests on which the rate limit will be injected. - type.v3.FractionalPercent percentage = 2; -} diff --git a/generated_api_shadow/envoy/extensions/filters/common/matcher/action/v3/BUILD b/generated_api_shadow/envoy/extensions/filters/common/matcher/action/v3/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/common/matcher/action/v3/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/extensions/filters/common/matcher/action/v3/skip_action.proto b/generated_api_shadow/envoy/extensions/filters/common/matcher/action/v3/skip_action.proto deleted file mode 100644 index 2835c9f6d75a6..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/common/matcher/action/v3/skip_action.proto +++ /dev/null @@ -1,24 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.common.matcher.action.v3; - -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.common.matcher.action.v3"; -option java_outer_classname = "SkipActionProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Common Match Actions] - -// Configuration for the SkipFilter match action. When matching results in this action, the -// associated filter will be ignored for all filter callbacks (e.g. `encodeHeaders`, `encodeData`, -// etc. 
for HTTP filters) after the matcher arrives at the match, including the callback that -// caused the match result. For example, when used with a HTTP filter and the match result was -// resolved after receiving the HTTP response headers, the HTTP filter will *not* receive the -// response header callback. -// -// As a result, if this match action is resolved before the first filter callback (e.g. HTTP request -// headers), the filter will be completely skipped. -message SkipFilter { -} diff --git a/generated_api_shadow/envoy/extensions/filters/http/adaptive_concurrency/v3/BUILD b/generated_api_shadow/envoy/extensions/filters/http/adaptive_concurrency/v3/BUILD deleted file mode 100644 index ad2fc9a9a84fd..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/adaptive_concurrency/v3/BUILD +++ /dev/null @@ -1,13 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/core/v3:pkg", - "//envoy/type/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/filters/http/adaptive_concurrency/v3/adaptive_concurrency.proto b/generated_api_shadow/envoy/extensions/filters/http/adaptive_concurrency/v3/adaptive_concurrency.proto deleted file mode 100644 index c524e022e8594..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/adaptive_concurrency/v3/adaptive_concurrency.proto +++ /dev/null @@ -1,107 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.http.adaptive_concurrency.v3; - -import "envoy/config/core/v3/base.proto"; -import "envoy/type/v3/percent.proto"; - -import "google/protobuf/duration.proto"; -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = 
"io.envoyproxy.envoy.extensions.filters.http.adaptive_concurrency.v3"; -option java_outer_classname = "AdaptiveConcurrencyProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Adaptive Concurrency] -// Adaptive Concurrency Control :ref:`configuration overview -// `. -// [#extension: envoy.filters.http.adaptive_concurrency] - -// Configuration parameters for the gradient controller. -message GradientControllerConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.http.adaptive_concurrency.v2alpha.GradientControllerConfig"; - - // Parameters controlling the periodic recalculation of the concurrency limit from sampled request - // latencies. - message ConcurrencyLimitCalculationParams { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.http.adaptive_concurrency.v2alpha.GradientControllerConfig." - "ConcurrencyLimitCalculationParams"; - - // The allowed upper-bound on the calculated concurrency limit. Defaults to 1000. - google.protobuf.UInt32Value max_concurrency_limit = 2 [(validate.rules).uint32 = {gt: 0}]; - - // The period of time samples are taken to recalculate the concurrency limit. - google.protobuf.Duration concurrency_update_interval = 3 [(validate.rules).duration = { - required: true - gt {} - }]; - } - - // Parameters controlling the periodic minRTT recalculation. - // [#next-free-field: 6] - message MinimumRTTCalculationParams { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.http.adaptive_concurrency.v2alpha.GradientControllerConfig." - "MinimumRTTCalculationParams"; - - // The time interval between recalculating the minimum request round-trip time. Has to be - // positive. 
- google.protobuf.Duration interval = 1 [(validate.rules).duration = { - required: true - gte {nanos: 1000000} - }]; - - // The number of requests to aggregate/sample during the minRTT recalculation window before - // updating. Defaults to 50. - google.protobuf.UInt32Value request_count = 2 [(validate.rules).uint32 = {gt: 0}]; - - // Randomized time delta that will be introduced to the start of the minRTT calculation window. - // This is represented as a percentage of the interval duration. Defaults to 15%. - // - // Example: If the interval is 10s and the jitter is 15%, the next window will begin - // somewhere in the range (10s - 11.5s). - type.v3.Percent jitter = 3; - - // The concurrency limit set while measuring the minRTT. Defaults to 3. - google.protobuf.UInt32Value min_concurrency = 4 [(validate.rules).uint32 = {gt: 0}]; - - // Amount added to the measured minRTT to add stability to the concurrency limit during natural - // variability in latency. This is expressed as a percentage of the measured value and can be - // adjusted to allow more or less tolerance to the sampled latency values. - // - // Defaults to 25%. - type.v3.Percent buffer = 5; - } - - // The percentile to use when summarizing aggregated samples. Defaults to p50. - type.v3.Percent sample_aggregate_percentile = 1; - - ConcurrencyLimitCalculationParams concurrency_limit_params = 2 - [(validate.rules).message = {required: true}]; - - MinimumRTTCalculationParams min_rtt_calc_params = 3 [(validate.rules).message = {required: true}]; -} - -message AdaptiveConcurrency { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.http.adaptive_concurrency.v2alpha.AdaptiveConcurrency"; - - oneof concurrency_controller_config { - option (validate.required) = true; - - // Gradient concurrency control will be used. 
- GradientControllerConfig gradient_controller_config = 1 - [(validate.rules).message = {required: true}]; - } - - // If set to false, the adaptive concurrency filter will operate as a pass-through filter. If the - // message is unspecified, the filter will be enabled. - config.core.v3.RuntimeFeatureFlag enabled = 2; -} diff --git a/generated_api_shadow/envoy/extensions/filters/http/admission_control/v3alpha/BUILD b/generated_api_shadow/envoy/extensions/filters/http/admission_control/v3alpha/BUILD deleted file mode 100644 index ad2fc9a9a84fd..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/admission_control/v3alpha/BUILD +++ /dev/null @@ -1,13 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/core/v3:pkg", - "//envoy/type/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/filters/http/admission_control/v3alpha/admission_control.proto b/generated_api_shadow/envoy/extensions/filters/http/admission_control/v3alpha/admission_control.proto deleted file mode 100644 index 9bb3603f9ebd6..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/admission_control/v3alpha/admission_control.proto +++ /dev/null @@ -1,103 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.http.admission_control.v3alpha; - -import "envoy/config/core/v3/base.proto"; -import "envoy/type/v3/range.proto"; - -import "google/protobuf/duration.proto"; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.http.admission_control.v3alpha"; -option java_outer_classname = "AdmissionControlProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).work_in_progress = true; -option 
(udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Admission Control] -// [#extension: envoy.filters.http.admission_control] - -// [#next-free-field: 8] -message AdmissionControl { - // Default method of specifying what constitutes a successful request. All status codes that - // indicate a successful request must be explicitly specified if not relying on the default - // values. - message SuccessCriteria { - message HttpCriteria { - // Status code ranges that constitute a successful request. Configurable codes are in the - // range [100, 600). - repeated type.v3.Int32Range http_success_status = 1 - [(validate.rules).repeated = {min_items: 1}]; - } - - message GrpcCriteria { - // Status codes that constitute a successful request. - // Mappings can be found at: https://github.com/grpc/grpc/blob/master/doc/statuscodes.md. - repeated uint32 grpc_success_status = 1 [(validate.rules).repeated = {min_items: 1}]; - } - - // If HTTP criteria are unspecified, all HTTP status codes below 500 are treated as successful - // responses. - // - // .. note:: - // - // The default HTTP codes considered successful by the admission controller are done so due - // to the unlikelihood that sending fewer requests would change their behavior (for example: - // redirects, unauthorized access, or bad requests won't be alleviated by sending less - // traffic). - HttpCriteria http_criteria = 1; - - // GRPC status codes to consider as request successes. If unspecified, defaults to: Ok, - // Cancelled, Unknown, InvalidArgument, NotFound, AlreadyExists, Unauthenticated, - // FailedPrecondition, OutOfRange, PermissionDenied, and Unimplemented. - // - // .. note:: - // - // The default gRPC codes that are considered successful by the admission controller are - // chosen because of the unlikelihood that sending fewer requests will change the behavior. 
- GrpcCriteria grpc_criteria = 2; - } - - // If set to false, the admission control filter will operate as a pass-through filter. If the - // message is unspecified, the filter will be enabled. - config.core.v3.RuntimeFeatureFlag enabled = 1; - - // Defines how a request is considered a success/failure. - oneof evaluation_criteria { - option (validate.required) = true; - - SuccessCriteria success_criteria = 2; - } - - // The sliding time window over which the success rate is calculated. The window is rounded to the - // nearest second. Defaults to 30s. - google.protobuf.Duration sampling_window = 3; - - // Rejection probability is defined by the formula:: - // - // max(0, (rq_count - rq_success_count / sr_threshold) / (rq_count + 1)) ^ (1 / aggression) - // - // The aggression dictates how heavily the admission controller will throttle requests upon SR - // dropping at or below the threshold. A value of 1 will result in a linear increase in - // rejection probability as SR drops. Any values less than 1.0, will be set to 1.0. If the - // message is unspecified, the aggression is 1.0. See `the admission control documentation - // `_ - // for a diagram illustrating this. - config.core.v3.RuntimeDouble aggression = 4; - - // Dictates the success rate at which the rejection probability is non-zero. As success rate drops - // below this threshold, rejection probability will increase. Any success rate above the threshold - // results in a rejection probability of 0. Defaults to 95%. - config.core.v3.RuntimePercent sr_threshold = 5; - - // If the average RPS of the sampling window is below this threshold, the request - // will not be rejected, even if the success rate is lower than sr_threshold. - // Defaults to 0. - config.core.v3.RuntimeUInt32 rps_threshold = 6; - - // The probability of rejection will never exceed this value, even if the failure rate is rising. - // Defaults to 80%. 
- config.core.v3.RuntimePercent max_rejection_probability = 7; -} diff --git a/generated_api_shadow/envoy/extensions/filters/http/alternate_protocols_cache/v3/BUILD b/generated_api_shadow/envoy/extensions/filters/http/alternate_protocols_cache/v3/BUILD deleted file mode 100644 index 1c1a6f6b44235..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/alternate_protocols_cache/v3/BUILD +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/core/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/filters/http/alternate_protocols_cache/v3/alternate_protocols_cache.proto b/generated_api_shadow/envoy/extensions/filters/http/alternate_protocols_cache/v3/alternate_protocols_cache.proto deleted file mode 100644 index e628a6ca73fbb..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/alternate_protocols_cache/v3/alternate_protocols_cache.proto +++ /dev/null @@ -1,27 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.http.alternate_protocols_cache.v3; - -import "envoy/config/core/v3/protocol.proto"; - -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.http.alternate_protocols_cache.v3"; -option java_outer_classname = "AlternateProtocolsCacheProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Alternate Protocols Cache] - -// Configuration for the alternate protocols cache HTTP filter. -// [#extension: envoy.filters.http.alternate_protocols_cache] -// TODO(RyanTheOptimist): Move content from source/docs/http3_upstream.md to -// docs/root/intro/arch_overview/upstream/connection_pooling.rst when unhiding the proto. 
-message FilterConfig { - // [#not-implemented-hide:] - // If set, causes the use of the alternate protocols cache, which is responsible for - // parsing and caching HTTP Alt-Svc headers. This enables the use of HTTP/3 for upstream - // servers that advertise supporting it. - // TODO(RyanTheOptimist): Make this field required when HTTP/3 is enabled via auto_http. - config.core.v3.AlternateProtocolsCacheOptions alternate_protocols_cache_options = 1; -} diff --git a/generated_api_shadow/envoy/extensions/filters/http/aws_lambda/v3/BUILD b/generated_api_shadow/envoy/extensions/filters/http/aws_lambda/v3/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/aws_lambda/v3/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/extensions/filters/http/aws_lambda/v3/aws_lambda.proto b/generated_api_shadow/envoy/extensions/filters/http/aws_lambda/v3/aws_lambda.proto deleted file mode 100644 index b4b9cc398f2e4..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/aws_lambda/v3/aws_lambda.proto +++ /dev/null @@ -1,53 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.http.aws_lambda.v3; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.http.aws_lambda.v3"; -option java_outer_classname = "AwsLambdaProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: AWS Lambda] -// AWS Lambda :ref:`configuration overview `. 
-// [#extension: envoy.filters.http.aws_lambda] - -// AWS Lambda filter config -message Config { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.http.aws_lambda.v2alpha.Config"; - - enum InvocationMode { - // This is the more common mode of invocation, in which Lambda responds after it has completed the function. In - // this mode the output of the Lambda function becomes the response of the HTTP request. - SYNCHRONOUS = 0; - - // In this mode Lambda responds immediately but continues to process the function asynchronously. This mode can be - // used to signal events for example. In this mode, Lambda responds with an acknowledgment that it received the - // call which is translated to an HTTP 200 OK by the filter. - ASYNCHRONOUS = 1; - } - - // The ARN of the AWS Lambda to invoke when the filter is engaged - // Must be in the following format: - // arn::lambda:::function: - string arn = 1 [(validate.rules).string = {min_len: 1}]; - - // Whether to transform the request (headers and body) to a JSON payload or pass it as is. - bool payload_passthrough = 2; - - // Determines the way to invoke the Lambda function. - InvocationMode invocation_mode = 3 [(validate.rules).enum = {defined_only: true}]; -} - -// Per-route configuration for AWS Lambda. This can be useful when invoking a different Lambda function or a different -// version of the same Lambda depending on the route. -message PerRouteConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.http.aws_lambda.v2alpha.PerRouteConfig"; - - Config invoke_config = 1; -} diff --git a/generated_api_shadow/envoy/extensions/filters/http/aws_request_signing/v3/BUILD b/generated_api_shadow/envoy/extensions/filters/http/aws_request_signing/v3/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/aws_request_signing/v3/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. 
This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/extensions/filters/http/aws_request_signing/v3/aws_request_signing.proto b/generated_api_shadow/envoy/extensions/filters/http/aws_request_signing/v3/aws_request_signing.proto deleted file mode 100644 index ae46400130d52..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/aws_request_signing/v3/aws_request_signing.proto +++ /dev/null @@ -1,51 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.http.aws_request_signing.v3; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.http.aws_request_signing.v3"; -option java_outer_classname = "AwsRequestSigningProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: AwsRequestSigning] -// AwsRequestSigning :ref:`configuration overview `. -// [#extension: envoy.filters.http.aws_request_signing] - -// Top level configuration for the AWS request signing filter. -message AwsRequestSigning { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.http.aws_request_signing.v2alpha.AwsRequestSigning"; - - // The `service namespace - // `_ - // of the HTTP endpoint. - // - // Example: s3 - string service_name = 1 [(validate.rules).string = {min_len: 1}]; - - // The `region `_ hosting the HTTP - // endpoint. - // - // Example: us-west-2 - string region = 2 [(validate.rules).string = {min_len: 1}]; - - // Indicates that before signing headers, the host header will be swapped with - // this value. 
If not set or empty, the original host header value - // will be used and no rewrite will happen. - // - // Note: this rewrite affects both signing and host header forwarding. However, this - // option shouldn't be used with - // :ref:`HCM host rewrite ` given that the - // value set here would be used for signing whereas the value set in the HCM would be used - // for host header forwarding which is not the desired outcome. - string host_rewrite = 3; - - // Instead of buffering the request to calculate the payload hash, use the literal string ``UNSIGNED-PAYLOAD`` - // to calculate the payload hash. Not all services support this option. See the `S3 - // `_ policy for details. - bool use_unsigned_payload = 4; -} diff --git a/generated_api_shadow/envoy/extensions/filters/http/bandwidth_limit/v3alpha/BUILD b/generated_api_shadow/envoy/extensions/filters/http/bandwidth_limit/v3alpha/BUILD deleted file mode 100644 index 1c1a6f6b44235..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/bandwidth_limit/v3alpha/BUILD +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/core/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/filters/http/bandwidth_limit/v3alpha/bandwidth_limit.proto b/generated_api_shadow/envoy/extensions/filters/http/bandwidth_limit/v3alpha/bandwidth_limit.proto deleted file mode 100644 index 4cd5f8268b704..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/bandwidth_limit/v3alpha/bandwidth_limit.proto +++ /dev/null @@ -1,70 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.http.bandwidth_limit.v3alpha; - -import "envoy/config/core/v3/base.proto"; - -import "google/protobuf/duration.proto"; -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.http.bandwidth_limit.v3alpha"; -option java_outer_classname = "BandwidthLimitProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).work_in_progress = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Bandwidth limit] -// Bandwidth limit :ref:`configuration overview `. -// [#extension: envoy.filters.http.bandwidth_limit] - -// [#next-free-field: 6] -message BandwidthLimit { - // Defines the mode for the bandwidth limit filter. - // Values represent bitmask. - enum EnableMode { - // Filter is disabled. - DISABLED = 0; - - // Filter enabled only for incoming traffic. - REQUEST = 1; - - // Filter enabled only for outgoing traffic. - RESPONSE = 2; - - // Filter enabled for both incoming and outgoing traffic. - REQUEST_AND_RESPONSE = 3; - } - - // The human readable prefix to use when emitting stats. - string stat_prefix = 1 [(validate.rules).string = {min_len: 1}]; - - // The enable mode for the bandwidth limit filter. 
- // Default is Disabled. - EnableMode enable_mode = 2 [(validate.rules).enum = {defined_only: true}]; - - // The limit supplied in KiB/s. - // - // .. note:: - // It's fine for the limit to be unset for the global configuration since the bandwidth limit - // can be applied at a the virtual host or route level. Thus, the limit must be set for the - // per route configuration otherwise the config will be rejected. - // - // .. note:: - // When using per route configuration, the limit becomes unique to that route. - // - google.protobuf.UInt64Value limit_kbps = 3 [(validate.rules).uint64 = {gte: 1}]; - - // Optional Fill interval in milliseconds for the token refills. Defaults to 50ms. - // It must be at least 20ms to avoid too aggressive refills. - google.protobuf.Duration fill_interval = 4 [(validate.rules).duration = { - lte {seconds: 1} - gte {nanos: 20000000} - }]; - - // Runtime flag that controls whether the filter is enabled or not. If not specified, defaults - // to enabled. - config.core.v3.RuntimeFeatureFlag runtime_enabled = 5; -} diff --git a/generated_api_shadow/envoy/extensions/filters/http/buffer/v3/BUILD b/generated_api_shadow/envoy/extensions/filters/http/buffer/v3/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/buffer/v3/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/extensions/filters/http/buffer/v3/buffer.proto b/generated_api_shadow/envoy/extensions/filters/http/buffer/v3/buffer.proto deleted file mode 100644 index 6f73244032c4e..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/buffer/v3/buffer.proto +++ /dev/null @@ -1,45 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.http.buffer.v3; - -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.http.buffer.v3"; -option java_outer_classname = "BufferProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Buffer] -// Buffer :ref:`configuration overview `. -// [#extension: envoy.filters.http.buffer] - -message Buffer { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.http.buffer.v2.Buffer"; - - reserved 2; - - // The maximum request size that the filter will buffer before the connection - // manager will stop buffering and return a 413 response. - google.protobuf.UInt32Value max_request_bytes = 1 - [(validate.rules).uint32 = {gt: 0}, (validate.rules).message = {required: true}]; -} - -message BufferPerRoute { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.http.buffer.v2.BufferPerRoute"; - - oneof override { - option (validate.required) = true; - - // Disable the buffer filter for this particular vhost or route. - bool disabled = 1 [(validate.rules).bool = {const: true}]; - - // Override the global configuration of the filter with this new config. 
- Buffer buffer = 2 [(validate.rules).message = {required: true}]; - } -} diff --git a/generated_api_shadow/envoy/extensions/filters/http/cache/v3alpha/BUILD b/generated_api_shadow/envoy/extensions/filters/http/cache/v3alpha/BUILD deleted file mode 100644 index c0ffdf28daaf9..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/cache/v3alpha/BUILD +++ /dev/null @@ -1,13 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/route/v3:pkg", - "//envoy/type/matcher/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/filters/http/cache/v3alpha/cache.proto b/generated_api_shadow/envoy/extensions/filters/http/cache/v3alpha/cache.proto deleted file mode 100644 index 5f0a5befa4bb3..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/cache/v3alpha/cache.proto +++ /dev/null @@ -1,82 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.http.cache.v3alpha; - -import "envoy/config/route/v3/route_components.proto"; -import "envoy/type/matcher/v3/string.proto"; - -import "google/protobuf/any.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.http.cache.v3alpha"; -option java_outer_classname = "CacheProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).work_in_progress = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: HTTP Cache Filter] - -// [#extension: envoy.filters.http.cache] -message CacheConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.http.cache.v2alpha.CacheConfig"; - - // [#not-implemented-hide:] - // 
Modifies cache key creation by restricting which parts of the URL are included. - message KeyCreatorParams { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.http.cache.v2alpha.CacheConfig.KeyCreatorParams"; - - // If true, exclude the URL scheme from the cache key. Set to true if your origins always - // produce the same response for http and https requests. - bool exclude_scheme = 1; - - // If true, exclude the host from the cache key. Set to true if your origins' responses don't - // ever depend on host. - bool exclude_host = 2; - - // If *query_parameters_included* is nonempty, only query parameters matched - // by one or more of its matchers are included in the cache key. Any other - // query params will not affect cache lookup. - repeated config.route.v3.QueryParameterMatcher query_parameters_included = 3; - - // If *query_parameters_excluded* is nonempty, query parameters matched by one - // or more of its matchers are excluded from the cache key (even if also - // matched by *query_parameters_included*), and will not affect cache lookup. - repeated config.route.v3.QueryParameterMatcher query_parameters_excluded = 4; - } - - // Config specific to the cache storage implementation. - // [#extension-category: envoy.filters.http.cache] - google.protobuf.Any typed_config = 1 [(validate.rules).any = {required: true}]; - - // List of matching rules that defines allowed *Vary* headers. - // - // The *vary* response header holds a list of header names that affect the - // contents of a response, as described by - // https://httpwg.org/specs/rfc7234.html#caching.negotiated.responses. - // - // During insertion, *allowed_vary_headers* acts as a allowlist: if a - // response's *vary* header mentions any header names that aren't matched by any rules in - // *allowed_vary_headers*, that response will not be cached. 
- // - // During lookup, *allowed_vary_headers* controls what request headers will be - // sent to the cache storage implementation. - repeated type.matcher.v3.StringMatcher allowed_vary_headers = 2; - - // [#not-implemented-hide:] - // - // - // Modifies cache key creation by restricting which parts of the URL are included. - KeyCreatorParams key_creator_params = 3; - - // [#not-implemented-hide:] - // - // - // Max body size the cache filter will insert into a cache. 0 means unlimited (though the cache - // storage implementation may have its own limit beyond which it will reject insertions). - uint32 max_body_bytes = 4; -} diff --git a/generated_api_shadow/envoy/extensions/filters/http/cdn_loop/v3alpha/BUILD b/generated_api_shadow/envoy/extensions/filters/http/cdn_loop/v3alpha/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/cdn_loop/v3alpha/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/extensions/filters/http/cdn_loop/v3alpha/cdn_loop.proto b/generated_api_shadow/envoy/extensions/filters/http/cdn_loop/v3alpha/cdn_loop.proto deleted file mode 100644 index 5f201026c66b3..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/cdn_loop/v3alpha/cdn_loop.proto +++ /dev/null @@ -1,36 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.http.cdn_loop.v3alpha; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.http.cdn_loop.v3alpha"; -option java_outer_classname = "CdnLoopProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).work_in_progress = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: HTTP CDN-Loop Filter] -// [#extension: envoy.filters.http.cdn_loop] - -// CDN-Loop Header filter config. See the :ref:`configuration overview -// ` for more information. -message CdnLoopConfig { - // The CDN identifier to use for loop checks and to append to the - // CDN-Loop header. - // - // RFC 8586 calls this the cdn-id. The cdn-id can either be a - // pseudonym or hostname the CDN is in control of. - // - // cdn_id must not be empty. - string cdn_id = 1 [(validate.rules).string = {min_len: 1}]; - - // The maximum allowed count of cdn_id in the downstream CDN-Loop - // request header. - // - // The default of 0 means a request can transit the CdnLoopFilter - // once. A value of 1 means that a request can transit the - // CdnLoopFilter twice and so on. 
- uint32 max_allowed_occurrences = 2; -} diff --git a/generated_api_shadow/envoy/extensions/filters/http/composite/v3/BUILD b/generated_api_shadow/envoy/extensions/filters/http/composite/v3/BUILD deleted file mode 100644 index 1c1a6f6b44235..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/composite/v3/BUILD +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/core/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/filters/http/composite/v3/composite.proto b/generated_api_shadow/envoy/extensions/filters/http/composite/v3/composite.proto deleted file mode 100644 index f8a3bd83af567..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/composite/v3/composite.proto +++ /dev/null @@ -1,37 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.http.composite.v3; - -import "envoy/config/core/v3/extension.proto"; - -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.http.composite.v3"; -option java_outer_classname = "CompositeProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Composite] -// Composite Filter :ref:`configuration overview `. -// [#extension: envoy.filters.http.composite] - -// :ref:`Composite filter ` config. The composite filter config -// allows delegating filter handling to another filter as determined by matching on the request -// headers. This makes it possible to use different filters or filter configurations based on the -// incoming request. 
-// -// This is intended to be used with -// :ref:`ExtensionWithMatcher ` -// where a match tree is specified that indicates (via -// :ref:`ExecuteFilterAction `) -// which filter configuration to create and delegate to. -// -// [#alpha:] -message Composite { -} - -// Composite match action (see :ref:`matching docs ` for more info on match actions). -// This specifies the filter configuration of the filter that the composite filter should delegate filter interactions to. -message ExecuteFilterAction { - config.core.v3.TypedExtensionConfig typed_config = 1; -} diff --git a/generated_api_shadow/envoy/extensions/filters/http/compressor/v3/BUILD b/generated_api_shadow/envoy/extensions/filters/http/compressor/v3/BUILD deleted file mode 100644 index a1775bbe6f513..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/compressor/v3/BUILD +++ /dev/null @@ -1,13 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/annotations:pkg", - "//envoy/config/core/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/filters/http/compressor/v3/compressor.proto b/generated_api_shadow/envoy/extensions/filters/http/compressor/v3/compressor.proto deleted file mode 100644 index 72b435c93ddaa..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/compressor/v3/compressor.proto +++ /dev/null @@ -1,125 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.http.compressor.v3; - -import "envoy/config/core/v3/base.proto"; -import "envoy/config/core/v3/extension.proto"; - -import "google/protobuf/wrappers.proto"; - -import "envoy/annotations/deprecation.proto"; -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = 
"io.envoyproxy.envoy.extensions.filters.http.compressor.v3"; -option java_outer_classname = "CompressorProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Compressor] -// Compressor :ref:`configuration overview `. -// [#extension: envoy.filters.http.compressor] - -// [#next-free-field: 9] -message Compressor { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.http.compressor.v2.Compressor"; - - message CommonDirectionConfig { - // Runtime flag that controls whether compression is enabled or not for the direction this - // common config is put in. If set to false, the filter will operate as a pass-through filter - // in the chosen direction. If the field is omitted, the filter will be enabled. - config.core.v3.RuntimeFeatureFlag enabled = 1; - - // Minimum value of Content-Length header of request or response messages (depending on the direction - // this common config is put in), in bytes, which will trigger compression. The default value is 30. - google.protobuf.UInt32Value min_content_length = 2; - - // Set of strings that allows specifying which mime-types yield compression; e.g., - // application/json, text/html, etc. When this field is not defined, compression will be applied - // to the following mime-types: "application/javascript", "application/json", - // "application/xhtml+xml", "image/svg+xml", "text/css", "text/html", "text/plain", "text/xml" - // and their synonyms. - repeated string content_type = 3; - } - - // Configuration for filter behavior on the request direction. - message RequestDirectionConfig { - CommonDirectionConfig common_config = 1; - } - - // Configuration for filter behavior on the response direction. - message ResponseDirectionConfig { - CommonDirectionConfig common_config = 1; - - // If true, disables compression when the response contains an etag header. 
When it is false, the - // filter will preserve weak etags and remove the ones that require strong validation. - bool disable_on_etag_header = 2; - - // If true, removes accept-encoding from the request headers before dispatching it to the upstream - // so that responses do not get compressed before reaching the filter. - // - // .. attention:: - // - // To avoid interfering with other compression filters in the same chain use this option in - // the filter closest to the upstream. - bool remove_accept_encoding_header = 3; - } - - // Minimum response length, in bytes, which will trigger compression. The default value is 30. - google.protobuf.UInt32Value content_length = 1 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - - // Set of strings that allows specifying which mime-types yield compression; e.g., - // application/json, text/html, etc. When this field is not defined, compression will be applied - // to the following mime-types: "application/javascript", "application/json", - // "application/xhtml+xml", "image/svg+xml", "text/css", "text/html", "text/plain", "text/xml" - // and their synonyms. - repeated string content_type = 2 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - - // If true, disables compression when the response contains an etag header. When it is false, the - // filter will preserve weak etags and remove the ones that require strong validation. - bool disable_on_etag_header = 3 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - - // If true, removes accept-encoding from the request headers before dispatching it to the upstream - // so that responses do not get compressed before reaching the filter. - // - // .. attention:: - // - // To avoid interfering with other compression filters in the same chain use this option in - // the filter closest to the upstream. 
- bool remove_accept_encoding_header = 4 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - - // Runtime flag that controls whether the filter is enabled or not. If set to false, the - // filter will operate as a pass-through filter. If not specified, defaults to enabled. - config.core.v3.RuntimeFeatureFlag runtime_enabled = 5 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - - // A compressor library to use for compression. Currently only - // :ref:`envoy.compression.gzip.compressor` - // is included in Envoy. - // [#extension-category: envoy.compression.compressor] - config.core.v3.TypedExtensionConfig compressor_library = 6 - [(validate.rules).message = {required: true}]; - - // Configuration for request compression. Compression is disabled by default if left empty. - RequestDirectionConfig request_direction_config = 7; - - // Configuration for response compression. Compression is enabled by default if left empty. - // - // .. attention:: - // - // If the field is not empty then the duplicate deprecated fields of the `Compressor` message, - // such as `content_length`, `content_type`, `disable_on_etag_header`, - // `remove_accept_encoding_header` and `runtime_enabled`, are ignored. - // - // Also all the statistics related to response compression will be rooted in - // `.compressor...response.*` - // instead of - // `.compressor...*`. - ResponseDirectionConfig response_direction_config = 8; -} diff --git a/generated_api_shadow/envoy/extensions/filters/http/cors/v3/BUILD b/generated_api_shadow/envoy/extensions/filters/http/cors/v3/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/cors/v3/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/extensions/filters/http/cors/v3/cors.proto b/generated_api_shadow/envoy/extensions/filters/http/cors/v3/cors.proto deleted file mode 100644 index 0269e1bdfd8c7..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/cors/v3/cors.proto +++ /dev/null @@ -1,21 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.http.cors.v3; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.http.cors.v3"; -option java_outer_classname = "CorsProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Cors] -// CORS Filter :ref:`configuration overview `. -// [#extension: envoy.filters.http.cors] - -// Cors filter config. -message Cors { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.http.cors.v2.Cors"; -} diff --git a/generated_api_shadow/envoy/extensions/filters/http/csrf/v3/BUILD b/generated_api_shadow/envoy/extensions/filters/http/csrf/v3/BUILD deleted file mode 100644 index 3f3a5395d2aa7..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/csrf/v3/BUILD +++ /dev/null @@ -1,13 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/core/v3:pkg", - "//envoy/type/matcher/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/filters/http/csrf/v3/csrf.proto b/generated_api_shadow/envoy/extensions/filters/http/csrf/v3/csrf.proto deleted file mode 100644 index 39b0455bd7981..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/csrf/v3/csrf.proto +++ /dev/null @@ -1,54 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.http.csrf.v3; - -import "envoy/config/core/v3/base.proto"; -import "envoy/type/matcher/v3/string.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.http.csrf.v3"; -option java_outer_classname = "CsrfProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: CSRF] -// Cross-Site Request Forgery :ref:`configuration overview `. -// [#extension: envoy.filters.http.csrf] - -// CSRF filter config. -message CsrfPolicy { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.http.csrf.v2.CsrfPolicy"; - - // Specifies the % of requests for which the CSRF filter is enabled. - // - // If :ref:`runtime_key ` is specified, - // Envoy will lookup the runtime key to get the percentage of requests to filter. - // - // .. note:: - // - // This field defaults to 100/:ref:`HUNDRED - // `. - config.core.v3.RuntimeFractionalPercent filter_enabled = 1 - [(validate.rules).message = {required: true}]; - - // Specifies that CSRF policies will be evaluated and tracked, but not enforced. - // - // This is intended to be used when ``filter_enabled`` is off and will be ignored otherwise. 
- // - // If :ref:`runtime_key ` is specified, - // Envoy will lookup the runtime key to get the percentage of requests for which it will evaluate - // and track the request's *Origin* and *Destination* to determine if it's valid, but will not - // enforce any policies. - config.core.v3.RuntimeFractionalPercent shadow_enabled = 2; - - // Specifies additional source origins that will be allowed in addition to - // the destination origin. - // - // More information on how this can be configured via runtime can be found - // :ref:`here `. - repeated type.matcher.v3.StringMatcher additional_origins = 3; -} diff --git a/generated_api_shadow/envoy/extensions/filters/http/decompressor/v3/BUILD b/generated_api_shadow/envoy/extensions/filters/http/decompressor/v3/BUILD deleted file mode 100644 index 1c1a6f6b44235..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/decompressor/v3/BUILD +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/core/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/filters/http/decompressor/v3/decompressor.proto b/generated_api_shadow/envoy/extensions/filters/http/decompressor/v3/decompressor.proto deleted file mode 100644 index c4cca44020f6d..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/decompressor/v3/decompressor.proto +++ /dev/null @@ -1,55 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.http.decompressor.v3; - -import "envoy/config/core/v3/base.proto"; -import "envoy/config/core/v3/extension.proto"; - -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.http.decompressor.v3"; -option java_outer_classname = "DecompressorProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Decompressor] -// [#extension: envoy.filters.http.decompressor] - -message Decompressor { - // Common configuration for filter behavior on both the request and response direction. - message CommonDirectionConfig { - // Runtime flag that controls whether the filter is enabled for decompression or not. If set to false, the - // filter will operate as a pass-through filter. If the message is unspecified, the filter will be enabled. - config.core.v3.RuntimeFeatureFlag enabled = 1; - } - - // Configuration for filter behavior on the request direction. - message RequestDirectionConfig { - CommonDirectionConfig common_config = 1; - - // If set to true, and response decompression is enabled, the filter modifies the Accept-Encoding - // request header by appending the decompressor_library's encoding. Defaults to true. 
- google.protobuf.BoolValue advertise_accept_encoding = 2; - } - - // Configuration for filter behavior on the response direction. - message ResponseDirectionConfig { - CommonDirectionConfig common_config = 1; - } - - // A decompressor library to use for both request and response decompression. Currently only - // :ref:`envoy.compression.gzip.compressor` - // is included in Envoy. - // [#extension-category: envoy.compression.decompressor] - config.core.v3.TypedExtensionConfig decompressor_library = 1 - [(validate.rules).message = {required: true}]; - - // Configuration for request decompression. Decompression is enabled by default if left empty. - RequestDirectionConfig request_direction_config = 2; - - // Configuration for response decompression. Decompression is enabled by default if left empty. - ResponseDirectionConfig response_direction_config = 3; -} diff --git a/generated_api_shadow/envoy/extensions/filters/http/dynamic_forward_proxy/v3/BUILD b/generated_api_shadow/envoy/extensions/filters/http/dynamic_forward_proxy/v3/BUILD deleted file mode 100644 index 05f25a2fe5d91..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/dynamic_forward_proxy/v3/BUILD +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/extensions/common/dynamic_forward_proxy/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/filters/http/dynamic_forward_proxy/v3/dynamic_forward_proxy.proto b/generated_api_shadow/envoy/extensions/filters/http/dynamic_forward_proxy/v3/dynamic_forward_proxy.proto deleted file mode 100644 index a5d7223b98d28..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/dynamic_forward_proxy/v3/dynamic_forward_proxy.proto +++ /dev/null @@ -1,64 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.http.dynamic_forward_proxy.v3; - -import "envoy/extensions/common/dynamic_forward_proxy/v3/dns_cache.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.http.dynamic_forward_proxy.v3"; -option java_outer_classname = "DynamicForwardProxyProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Dynamic forward proxy] - -// Configuration for the dynamic forward proxy HTTP filter. See the :ref:`architecture overview -// ` for more information. -// [#extension: envoy.filters.http.dynamic_forward_proxy] -message FilterConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.http.dynamic_forward_proxy.v2alpha.FilterConfig"; - - // The DNS cache configuration that the filter will attach to. Note this configuration must - // match that of associated :ref:`dynamic forward proxy cluster configuration - // `. 
- common.dynamic_forward_proxy.v3.DnsCacheConfig dns_cache_config = 1 - [(validate.rules).message = {required: true}]; -} - -// Per route Configuration for the dynamic forward proxy HTTP filter. -message PerRouteConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.http.dynamic_forward_proxy.v2alpha.PerRouteConfig"; - - oneof host_rewrite_specifier { - // Indicates that before DNS lookup, the host header will be swapped with - // this value. If not set or empty, the original host header value - // will be used and no rewrite will happen. - // - // Note: this rewrite affects both DNS lookup and host header forwarding. However, this - // option shouldn't be used with - // :ref:`HCM host rewrite ` given that the - // value set here would be used for DNS lookups whereas the value set in the HCM would be used - // for host header forwarding which is not the desired outcome. - string host_rewrite_literal = 1; - - // Indicates that before DNS lookup, the host header will be swapped with - // the value of this header. If not set or empty, the original host header - // value will be used and no rewrite will happen. - // - // Note: this rewrite affects both DNS lookup and host header forwarding. However, this - // option shouldn't be used with - // :ref:`HCM host rewrite header ` - // given that the value set here would be used for DNS lookups whereas the value set in the HCM - // would be used for host header forwarding which is not the desired outcome. - // - // .. note:: - // - // If the header appears multiple times only the first value is used. - string host_rewrite_header = 2; - } -} diff --git a/generated_api_shadow/envoy/extensions/filters/http/dynamo/v3/BUILD b/generated_api_shadow/envoy/extensions/filters/http/dynamo/v3/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/dynamo/v3/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. 
This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/extensions/filters/http/dynamo/v3/dynamo.proto b/generated_api_shadow/envoy/extensions/filters/http/dynamo/v3/dynamo.proto deleted file mode 100644 index 13a4f1c6ceee0..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/dynamo/v3/dynamo.proto +++ /dev/null @@ -1,21 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.http.dynamo.v3; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.http.dynamo.v3"; -option java_outer_classname = "DynamoProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Dynamo] -// Dynamo :ref:`configuration overview `. -// [#extension: envoy.filters.http.dynamo] - -// Dynamo filter config. -message Dynamo { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.http.dynamo.v2.Dynamo"; -} diff --git a/generated_api_shadow/envoy/extensions/filters/http/ext_authz/v3/BUILD b/generated_api_shadow/envoy/extensions/filters/http/ext_authz/v3/BUILD deleted file mode 100644 index bc2a58d2a7f1c..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/ext_authz/v3/BUILD +++ /dev/null @@ -1,14 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/core/v3:pkg", - "//envoy/type/matcher/v3:pkg", - "//envoy/type/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/filters/http/ext_authz/v3/ext_authz.proto b/generated_api_shadow/envoy/extensions/filters/http/ext_authz/v3/ext_authz.proto deleted file mode 100644 index 62feb51b191d5..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/ext_authz/v3/ext_authz.proto +++ /dev/null @@ -1,317 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.http.ext_authz.v3; - -import "envoy/config/core/v3/base.proto"; -import "envoy/config/core/v3/config_source.proto"; -import "envoy/config/core/v3/grpc_service.proto"; -import "envoy/config/core/v3/http_uri.proto"; -import "envoy/type/matcher/v3/metadata.proto"; -import "envoy/type/matcher/v3/string.proto"; -import "envoy/type/v3/http_status.proto"; - -import "udpa/annotations/sensitive.proto"; -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.http.ext_authz.v3"; -option java_outer_classname = "ExtAuthzProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: External Authorization] -// External Authorization :ref:`configuration overview `. -// [#extension: envoy.filters.http.ext_authz] - -// [#next-free-field: 16] -message ExtAuthz { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.http.ext_authz.v2.ExtAuthz"; - - reserved 4; - - reserved "use_alpha"; - - // External authorization service configuration. - oneof services { - // gRPC service configuration (default timeout: 200ms). 
- config.core.v3.GrpcService grpc_service = 1; - - // HTTP service configuration (default timeout: 200ms). - HttpService http_service = 3; - } - - // API version for ext_authz transport protocol. This describes the ext_authz gRPC endpoint and - // version of messages used on the wire. - config.core.v3.ApiVersion transport_api_version = 12 - [(validate.rules).enum = {defined_only: true}]; - - // Changes filter's behaviour on errors: - // - // 1. When set to true, the filter will *accept* client request even if the communication with - // the authorization service has failed, or if the authorization service has returned a HTTP 5xx - // error. - // - // 2. When set to false, ext-authz will *reject* client requests and return a *Forbidden* - // response if the communication with the authorization service has failed, or if the - // authorization service has returned a HTTP 5xx error. - // - // Note that errors can be *always* tracked in the :ref:`stats - // `. - bool failure_mode_allow = 2; - - // Enables filter to buffer the client request body and send it within the authorization request. - // A ``x-envoy-auth-partial-body: false|true`` metadata header will be added to the authorization - // request message indicating if the body data is partial. - BufferSettings with_request_body = 5; - - // Clears route cache in order to allow the external authorization service to correctly affect - // routing decisions. Filter clears all cached routes when: - // - // 1. The field is set to *true*. - // - // 2. The status returned from the authorization service is a HTTP 200 or gRPC 0. - // - // 3. At least one *authorization response header* is added to the client request, or is used for - // altering another client request header. - // - bool clear_route_cache = 6; - - // Sets the HTTP status that is returned to the client when there is a network error between the - // filter and the authorization server. The default status is HTTP 403 Forbidden. 
- type.v3.HttpStatus status_on_error = 7; - - // Specifies a list of metadata namespaces whose values, if present, will be passed to the - // ext_authz service as an opaque *protobuf::Struct*. - // - // For example, if the *jwt_authn* filter is used and :ref:`payload_in_metadata - // ` is set, - // then the following will pass the jwt payload to the authorization server. - // - // .. code-block:: yaml - // - // metadata_context_namespaces: - // - envoy.filters.http.jwt_authn - // - repeated string metadata_context_namespaces = 8; - - // Specifies if the filter is enabled. - // - // If :ref:`runtime_key ` is specified, - // Envoy will lookup the runtime key to get the percentage of requests to filter. - // - // If this field is not specified, the filter will be enabled for all requests. - config.core.v3.RuntimeFractionalPercent filter_enabled = 9; - - // Specifies if the filter is enabled with metadata matcher. - // If this field is not specified, the filter will be enabled for all requests. - type.matcher.v3.MetadataMatcher filter_enabled_metadata = 14; - - // Specifies whether to deny the requests, when the filter is disabled. - // If :ref:`runtime_key ` is specified, - // Envoy will lookup the runtime key to determine whether to deny request for - // filter protected path at filter disabling. If filter is disabled in - // typed_per_filter_config for the path, requests will not be denied. - // - // If this field is not specified, all requests will be allowed when disabled. - config.core.v3.RuntimeFeatureFlag deny_at_disable = 11; - - // Specifies if the peer certificate is sent to the external service. - // - // When this field is true, Envoy will include the peer X.509 certificate, if available, in the - // :ref:`certificate`. - bool include_peer_certificate = 10; - - // Optional additional prefix to use when emitting statistics. This allows to distinguish - // emitted statistics between configured *ext_authz* filters in an HTTP filter chain. 
For example: - // - // .. code-block:: yaml - // - // http_filters: - // - name: envoy.filters.http.ext_authz - // typed_config: - // "@type": type.googleapis.com/envoy.extensions.filters.http.ext_authz.v3.ExtAuthz - // stat_prefix: waf # This emits ext_authz.waf.ok, ext_authz.waf.denied, etc. - // - name: envoy.filters.http.ext_authz - // typed_config: - // "@type": type.googleapis.com/envoy.extensions.filters.http.ext_authz.v3.ExtAuthz - // stat_prefix: blocker # This emits ext_authz.blocker.ok, ext_authz.blocker.denied, etc. - // - string stat_prefix = 13; - - // Optional labels that will be passed to :ref:`labels` in - // :ref:`destination`. - // The labels will be read from :ref:`metadata` with the specified key. - string bootstrap_metadata_labels_key = 15; -} - -// Configuration for buffering the request data. -message BufferSettings { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.http.ext_authz.v2.BufferSettings"; - - // Sets the maximum size of a message body that the filter will hold in memory. Envoy will return - // *HTTP 413* and will *not* initiate the authorization process when buffer reaches the number - // set in this field. Note that this setting will have precedence over :ref:`failure_mode_allow - // `. - uint32 max_request_bytes = 1 [(validate.rules).uint32 = {gt: 0}]; - - // When this field is true, Envoy will buffer the message until *max_request_bytes* is reached. - // The authorization request will be dispatched and no 413 HTTP error will be returned by the - // filter. - bool allow_partial_message = 2; - - // If true, the body sent to the external authorization service is set with raw bytes, it sets - // the :ref:`raw_body` - // field of HTTP request attribute context. Otherwise, :ref:` - // body` will be filled - // with UTF-8 string request body. - bool pack_as_bytes = 3; -} - -// HttpService is used for raw HTTP communication between the filter and the authorization service. 
-// When configured, the filter will parse the client request and use these attributes to call the -// authorization server. Depending on the response, the filter may reject or accept the client -// request. Note that in any of these events, metadata can be added, removed or overridden by the -// filter: -// -// *On authorization request*, a list of allowed request headers may be supplied. See -// :ref:`allowed_headers -// ` -// for details. Additional headers metadata may be added to the authorization request. See -// :ref:`headers_to_add -// ` for -// details. -// -// On authorization response status HTTP 200 OK, the filter will allow traffic to the upstream and -// additional headers metadata may be added to the original client request. See -// :ref:`allowed_upstream_headers -// ` -// for details. Additionally, the filter may add additional headers to the client's response. See -// :ref:`allowed_client_headers_on_success -// ` -// for details. -// -// On other authorization response statuses, the filter will not allow traffic. Additional headers -// metadata as well as body may be added to the client's response. See :ref:`allowed_client_headers -// ` -// for details. -// [#next-free-field: 9] -message HttpService { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.http.ext_authz.v2.HttpService"; - - reserved 3, 4, 5, 6; - - // Sets the HTTP server URI which the authorization requests must be sent to. - config.core.v3.HttpUri server_uri = 1; - - // Sets a prefix to the value of authorization request header *Path*. - string path_prefix = 2; - - // Settings used for controlling authorization request metadata. - AuthorizationRequest authorization_request = 7; - - // Settings used for controlling authorization response metadata. 
- AuthorizationResponse authorization_response = 8; -} - -message AuthorizationRequest { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.http.ext_authz.v2.AuthorizationRequest"; - - // Authorization request includes the client request headers that have a correspondent match - // in the :ref:`list `. - // - // .. note:: - // - // In addition to the the user's supplied matchers, ``Host``, ``Method``, ``Path``, - // ``Content-Length``, and ``Authorization`` are **automatically included** to the list. - // - // .. note:: - // - // By default, ``Content-Length`` header is set to ``0`` and the request to the authorization - // service has no message body. However, the authorization request *may* include the buffered - // client request body (controlled by :ref:`with_request_body - // ` - // setting) hence the value of its ``Content-Length`` reflects the size of its payload size. - // - type.matcher.v3.ListStringMatcher allowed_headers = 1; - - // Sets a list of headers that will be included to the request to authorization service. Note that - // client request of the same key will be overridden. - repeated config.core.v3.HeaderValue headers_to_add = 2; -} - -message AuthorizationResponse { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.http.ext_authz.v2.AuthorizationResponse"; - - // When this :ref:`list ` is set, authorization - // response headers that have a correspondent match will be added to the original client request. - // Note that coexistent headers will be overridden. - type.matcher.v3.ListStringMatcher allowed_upstream_headers = 1; - - // When this :ref:`list ` is set, authorization - // response headers that have a correspondent match will be added to the client's response. Note - // that coexistent headers will be appended. - type.matcher.v3.ListStringMatcher allowed_upstream_headers_to_append = 3; - - // When this :ref:`list `. 
is set, authorization - // response headers that have a correspondent match will be added to the client's response. Note - // that when this list is *not* set, all the authorization response headers, except *Authority - // (Host)* will be in the response to the client. When a header is included in this list, *Path*, - // *Status*, *Content-Length*, *WWWAuthenticate* and *Location* are automatically added. - type.matcher.v3.ListStringMatcher allowed_client_headers = 2; - - // When this :ref:`list `. is set, authorization - // response headers that have a correspondent match will be added to the client's response when - // the authorization response itself is successful, i.e. not failed or denied. When this list is - // *not* set, no additional headers will be added to the client's response on success. - type.matcher.v3.ListStringMatcher allowed_client_headers_on_success = 4; -} - -// Extra settings on a per virtualhost/route/weighted-cluster level. -message ExtAuthzPerRoute { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.http.ext_authz.v2.ExtAuthzPerRoute"; - - oneof override { - option (validate.required) = true; - - // Disable the ext auth filter for this particular vhost or route. - // If disabled is specified in multiple per-filter-configs, the most specific one will be used. - bool disabled = 1 [(validate.rules).bool = {const: true}]; - - // Check request settings for this route. - CheckSettings check_settings = 2 [(validate.rules).message = {required: true}]; - } -} - -// Extra settings for the check request. -message CheckSettings { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.http.ext_authz.v2.CheckSettings"; - - // Context extensions to set on the CheckRequest's - // :ref:`AttributeContext.context_extensions` - // - // You can use this to provide extra context for the external authorization server on specific - // virtual hosts/routes. 
For example, adding a context extension on the virtual host level can - // give the ext-authz server information on what virtual host is used without needing to parse the - // host header. If CheckSettings is specified in multiple per-filter-configs, they will be merged - // in order, and the result will be used. - // - // Merge semantics for this field are such that keys from more specific configs override. - // - // .. note:: - // - // These settings are only applied to a filter configured with a - // :ref:`grpc_service`. - map context_extensions = 1 [(udpa.annotations.sensitive) = true]; - - // When set to true, disable the configured :ref:`with_request_body - // ` for a route. - bool disable_request_body_buffering = 2; -} diff --git a/generated_api_shadow/envoy/extensions/filters/http/ext_proc/v3alpha/BUILD b/generated_api_shadow/envoy/extensions/filters/http/ext_proc/v3alpha/BUILD deleted file mode 100644 index 1c1a6f6b44235..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/ext_proc/v3alpha/BUILD +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/core/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/filters/http/ext_proc/v3alpha/ext_proc.proto b/generated_api_shadow/envoy/extensions/filters/http/ext_proc/v3alpha/ext_proc.proto deleted file mode 100644 index 37560feba3c27..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/ext_proc/v3alpha/ext_proc.proto +++ /dev/null @@ -1,186 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.http.ext_proc.v3alpha; - -import "envoy/config/core/v3/grpc_service.proto"; -import "envoy/extensions/filters/http/ext_proc/v3alpha/processing_mode.proto"; - -import "google/protobuf/duration.proto"; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.http.ext_proc.v3alpha"; -option java_outer_classname = "ExtProcProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).work_in_progress = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: External Processing Filter] -// External Processing Filter -// [#extension: envoy.filters.http.ext_proc] - -// The External Processing filter allows an external service to act on HTTP traffic in a flexible way. - -// **Current Implementation Status:** -// All options and processing modes are implemented except for the following: -// -// * Request and response attributes are not sent and not processed. -// * Dynamic metadata in responses from the external processor is ignored. 
-// * "async mode" is not implemented -// * Per-route configuration is not implemented - -// The filter communicates with an external gRPC service called an "external processor" -// that can do a variety of things with the request and response: -// -// * Access and modify the HTTP headers on the request, response, or both -// * Access and modify the HTTP request and response bodies -// * Access and modify the dynamic stream metadata -// * Immediately send an HTTP response downstream and terminate other processing -// -// The filter communicates with the server using a gRPC bidirectional stream. After the initial -// request, the external server is in control over what additional data is sent to it -// and how it should be processed. -// -// By implementing the protocol specified by the stream, the external server can choose: -// -// * Whether it receives the response message at all -// * Whether it receives the message body at all, in separate chunks, or as a single buffer -// * Whether subsequent HTTP requests are transmitted synchronously or whether they are -// sent asynchronously. -// * To modify request or response trailers if they already exist -// * To add request or response trailers where they are not present -// -// The filter supports up to six different processing steps. Each is represented by -// a gRPC stream message that is sent to the external processor. For each message, the -// processor must send a matching response. -// -// * Request headers: Contains the headers from the original HTTP request. -// * Request body: Sent in a single message if the BUFFERED or BUFFERED_PARTIAL -// mode is chosen, in multiple messages if the STREAMED mode is chosen, and not -// at all otherwise. -// * Request trailers: Delivered if they are present and if the trailer mode is set -// to SEND. -// * Response headers: Contains the headers from the HTTP response. 
Keep in mind -// that if the upstream system sends them before processing the request body that -// this message may arrive before the complete body. -// * Response body: Sent according to the processing mode like the request body. -// * Response trailers: Delivered according to the processing mode like the -// request trailers. -// -// By default, the processor sends only the request and response headers messages. -// This may be changed to include any of the six steps by changing the processing_mode -// setting of the filter configuration, or by setting the mode_override of any response -// from the external processor. This way, a processor may, for example, use information -// in the request header to determine whether the message body must be examined, or whether -// the proxy should simply stream it straight through. -// -// All of this together allows a server to process the filter traffic in fairly -// sophisticated ways. For example: -// -// * A server may choose to examine all or part of the HTTP message bodies depending -// on the content of the headers. -// * A server may choose to immediately reject some messages based on their HTTP -// headers (or other dynamic metadata) and more carefully examine others -// * A server may asynchronously monitor traffic coming through the filter by inspecting -// headers, bodies, or both, and then decide to switch to a synchronous processing -// mode, either permanently or temporarily. -// -// The protocol itself is based on a bidirectional gRPC stream. Envoy will send the -// server -// :ref:`ProcessingRequest ` -// messages, and the server must reply with -// :ref:`ProcessingResponse `. - -// [#next-free-field: 9] -message ExternalProcessor { - // Configuration for the gRPC service that the filter will communicate with. - // The filter supports both the "Envoy" and "Google" gRPC clients. 
- config.core.v3.GrpcService grpc_service = 1; - - // By default, if the gRPC stream cannot be established, or if it is closed - // prematurely with an error, the filter will fail. Specifically, if the - // response headers have not yet been delivered, then it will return a 500 - // error downstream. If they have been delivered, then instead the HTTP stream to the - // downstream client will be reset. - // With this parameter set to true, however, then if the gRPC stream is prematurely closed - // or could not be opened, processing continues without error. - bool failure_mode_allow = 2; - - // Specifies default options for how HTTP headers, trailers, and bodies are - // sent. See ProcessingMode for details. - ProcessingMode processing_mode = 3; - - // [#not-implemented-hide:] - // If true, send each part of the HTTP request or response specified by ProcessingMode - // asynchronously -- in other words, send the message on the gRPC stream and then continue - // filter processing. If false, which is the default, suspend filter execution after - // each message is sent to the remote service and wait up to "message_timeout" - // for a reply. - bool async_mode = 4; - - // [#not-implemented-hide:] - // Envoy provides a number of :ref:`attributes ` - // for expressive policies. Each attribute name provided in this field will be - // matched against that list and populated in the request_headers message. - // See the :ref:`attribute documentation ` - // for the list of supported attributes and their types. - repeated string request_attributes = 5; - - // [#not-implemented-hide:] - // Envoy provides a number of :ref:`attributes ` - // for expressive policies. Each attribute name provided in this field will be - // matched against that list and populated in the response_headers message. - // See the :ref:`attribute documentation ` - // for the list of supported attributes and their types. 
- repeated string response_attributes = 6; - - // Specifies the timeout for each individual message sent on the stream and - // when the filter is running in synchronous mode. Whenever - // the proxy sends a message on the stream that requires a response, it will - // reset this timer, and will stop processing and return an error (subject - // to the processing mode) if the timer expires before a matching response. - // is received. There is no timeout when the filter is running in asynchronous - // mode. Default is 200 milliseconds. - google.protobuf.Duration message_timeout = 7; - - // [#not-implemented-hide:] - // Optional additional prefix to use when emitting statistics. This allows to distinguish - // emitted statistics between configured *ext_proc* filters in an HTTP filter chain. - string stat_prefix = 8; -} - -// Extra settings that may be added to per-route configuration for a -// virtual host or cluster. -message ExtProcPerRoute { - oneof override { - option (validate.required) = true; - - // Disable the filter for this particular vhost or route. - // If disabled is specified in multiple per-filter-configs, the most specific one will be used. - bool disabled = 1 [(validate.rules).bool = {const: true}]; - - // Override aspects of the configuration for this route. A set of - // overrides in a more specific configuration will override a "disabled" - // flag set in a less-specific one. - ExtProcOverrides overrides = 2; - } -} - -// Overrides that may be set on a per-route basis -message ExtProcOverrides { - // Set a different processing mode for this route than the default. - ProcessingMode processing_mode = 1; - - // [#not-implemented-hide:] - // Set a different asynchronous processing option than the default. - bool async_mode = 2; - - // [#not-implemented-hide:] - // Set different optional properties than the default. - repeated string request_properties = 3; - - // [#not-implemented-hide:] - // Set different optional properties than the default. 
- repeated string response_properties = 4; -} diff --git a/generated_api_shadow/envoy/extensions/filters/http/ext_proc/v3alpha/processing_mode.proto b/generated_api_shadow/envoy/extensions/filters/http/ext_proc/v3alpha/processing_mode.proto deleted file mode 100644 index d085790d34ab1..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/ext_proc/v3alpha/processing_mode.proto +++ /dev/null @@ -1,74 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.http.ext_proc.v3alpha; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.http.ext_proc.v3alpha"; -option java_outer_classname = "ProcessingModeProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).work_in_progress = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: External Processing Filter] -// External Processing Filter Processing Mode -// [#extension: envoy.filters.http.ext_proc] - -// This configuration describes which parts of an HTTP request and -// response are sent to a remote server and how they are delivered. - -// [#next-free-field: 7] -message ProcessingMode { - // Control how headers and trailers are handled - enum HeaderSendMode { - // The default HeaderSendMode depends on which part of the message is being - // processed. By default, request and response headers are sent, - // while trailers are skipped. - DEFAULT = 0; - - // Send the header or trailer. - SEND = 1; - - // Do not send the header or trailer. - SKIP = 2; - } - - // Control how the request and response bodies are handled - enum BodySendMode { - // Do not send the body at all. This is the default. - NONE = 0; - - // Stream the body to the server in pieces as they arrive at the - // proxy. - STREAMED = 1; - - // Buffer the message body in memory and send the entire body at once. 
- // If the body exceeds the configured buffer limit, then the - // downstream system will receive an error. - BUFFERED = 2; - - // Buffer the message body in memory and send the entire body in one - // chunk. If the body exceeds the configured buffer limit, then the body contents - // up to the buffer limit will be sent. - BUFFERED_PARTIAL = 3; - } - - // How to handle the request header. Default is "SEND". - HeaderSendMode request_header_mode = 1 [(validate.rules).enum = {defined_only: true}]; - - // How to handle the response header. Default is "SEND". - HeaderSendMode response_header_mode = 2 [(validate.rules).enum = {defined_only: true}]; - - // How to handle the request body. Default is "NONE". - BodySendMode request_body_mode = 3 [(validate.rules).enum = {defined_only: true}]; - - // How do handle the response body. Default is "NONE". - BodySendMode response_body_mode = 4 [(validate.rules).enum = {defined_only: true}]; - - // How to handle the request trailers. Default is "SKIP". - HeaderSendMode request_trailer_mode = 5 [(validate.rules).enum = {defined_only: true}]; - - // How to handle the response trailers. Default is "SKIP". - HeaderSendMode response_trailer_mode = 6 [(validate.rules).enum = {defined_only: true}]; -} diff --git a/generated_api_shadow/envoy/extensions/filters/http/fault/v3/BUILD b/generated_api_shadow/envoy/extensions/filters/http/fault/v3/BUILD deleted file mode 100644 index 53db91cad82c3..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/fault/v3/BUILD +++ /dev/null @@ -1,14 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/route/v3:pkg", - "//envoy/extensions/filters/common/fault/v3:pkg", - "//envoy/type/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/filters/http/fault/v3/fault.proto b/generated_api_shadow/envoy/extensions/filters/http/fault/v3/fault.proto deleted file mode 100644 index 0c7fbb4480cfe..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/fault/v3/fault.proto +++ /dev/null @@ -1,150 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.http.fault.v3; - -import "envoy/config/route/v3/route_components.proto"; -import "envoy/extensions/filters/common/fault/v3/fault.proto"; -import "envoy/type/v3/percent.proto"; - -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.http.fault.v3"; -option java_outer_classname = "FaultProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Fault Injection] -// Fault Injection :ref:`configuration overview `. -// [#extension: envoy.filters.http.fault] - -// [#next-free-field: 6] -message FaultAbort { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.http.fault.v2.FaultAbort"; - - // Fault aborts are controlled via an HTTP header (if applicable). See the - // :ref:`HTTP fault filter ` documentation for - // more information. 
- message HeaderAbort { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.http.fault.v2.FaultAbort.HeaderAbort"; - } - - reserved 1; - - oneof error_type { - option (validate.required) = true; - - // HTTP status code to use to abort the HTTP request. - uint32 http_status = 2 [(validate.rules).uint32 = {lt: 600 gte: 200}]; - - // gRPC status code to use to abort the gRPC request. - uint32 grpc_status = 5; - - // Fault aborts are controlled via an HTTP header (if applicable). - HeaderAbort header_abort = 4; - } - - // The percentage of requests/operations/connections that will be aborted with the error code - // provided. - type.v3.FractionalPercent percentage = 3; -} - -// [#next-free-field: 16] -message HTTPFault { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.http.fault.v2.HTTPFault"; - - // If specified, the filter will inject delays based on the values in the - // object. - common.fault.v3.FaultDelay delay = 1; - - // If specified, the filter will abort requests based on the values in - // the object. At least *abort* or *delay* must be specified. - FaultAbort abort = 2; - - // Specifies the name of the (destination) upstream cluster that the - // filter should match on. Fault injection will be restricted to requests - // bound to the specific upstream cluster. - string upstream_cluster = 3; - - // Specifies a set of headers that the filter should match on. The fault - // injection filter can be applied selectively to requests that match a set of - // headers specified in the fault filter config. The chances of actual fault - // injection further depend on the value of the :ref:`percentage - // ` field. - // The filter will check the request's headers against all the specified - // headers in the filter config. A match will happen if all the headers in the - // config are present in the request with the same values (or based on - // presence if the *value* field is not in the config). 
- repeated config.route.v3.HeaderMatcher headers = 4; - - // Faults are injected for the specified list of downstream hosts. If this - // setting is not set, faults are injected for all downstream nodes. - // Downstream node name is taken from :ref:`the HTTP - // x-envoy-downstream-service-node - // ` header and compared - // against downstream_nodes list. - repeated string downstream_nodes = 5; - - // The maximum number of faults that can be active at a single time via the configured fault - // filter. Note that because this setting can be overridden at the route level, it's possible - // for the number of active faults to be greater than this value (if injected via a different - // route). If not specified, defaults to unlimited. This setting can be overridden via - // `runtime ` and any faults that are not injected - // due to overflow will be indicated via the `faults_overflow - // ` stat. - // - // .. attention:: - // Like other :ref:`circuit breakers ` in Envoy, this is a fuzzy - // limit. It's possible for the number of active faults to rise slightly above the configured - // amount due to the implementation details. - google.protobuf.UInt32Value max_active_faults = 6; - - // The response rate limit to be applied to the response body of the stream. When configured, - // the percentage can be overridden by the :ref:`fault.http.rate_limit.response_percent - // ` runtime key. - // - // .. attention:: - // This is a per-stream limit versus a connection level limit. This means that concurrent streams - // will each get an independent limit. - common.fault.v3.FaultRateLimit response_rate_limit = 7; - - // The runtime key to override the :ref:`default ` - // runtime. The default is: fault.http.delay.fixed_delay_percent - string delay_percent_runtime = 8; - - // The runtime key to override the :ref:`default ` - // runtime. 
The default is: fault.http.abort.abort_percent - string abort_percent_runtime = 9; - - // The runtime key to override the :ref:`default ` - // runtime. The default is: fault.http.delay.fixed_duration_ms - string delay_duration_runtime = 10; - - // The runtime key to override the :ref:`default ` - // runtime. The default is: fault.http.abort.http_status - string abort_http_status_runtime = 11; - - // The runtime key to override the :ref:`default ` - // runtime. The default is: fault.http.max_active_faults - string max_active_faults_runtime = 12; - - // The runtime key to override the :ref:`default ` - // runtime. The default is: fault.http.rate_limit.response_percent - string response_rate_limit_percent_runtime = 13; - - // The runtime key to override the :ref:`default ` - // runtime. The default is: fault.http.abort.grpc_status - string abort_grpc_status_runtime = 14; - - // To control whether stats storage is allocated dynamically for each downstream server. - // If set to true, "x-envoy-downstream-service-cluster" field of header will be ignored by this filter. - // If set to false, dynamic stats storage will be allocated for the downstream cluster name. - // Default value is false. - bool disable_downstream_cluster_stats = 15; -} diff --git a/generated_api_shadow/envoy/extensions/filters/http/grpc_http1_bridge/v3/BUILD b/generated_api_shadow/envoy/extensions/filters/http/grpc_http1_bridge/v3/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/grpc_http1_bridge/v3/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/extensions/filters/http/grpc_http1_bridge/v3/config.proto b/generated_api_shadow/envoy/extensions/filters/http/grpc_http1_bridge/v3/config.proto deleted file mode 100644 index 7e31da49e92ba..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/grpc_http1_bridge/v3/config.proto +++ /dev/null @@ -1,21 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.http.grpc_http1_bridge.v3; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.http.grpc_http1_bridge.v3"; -option java_outer_classname = "ConfigProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: gRPC HTTP/1.1 Bridge] -// gRPC HTTP/1.1 Bridge Filter :ref:`configuration overview `. -// [#extension: envoy.filters.http.grpc_http1_bridge] - -// gRPC HTTP/1.1 Bridge filter config. -message Config { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.http.grpc_http1_bridge.v2.Config"; -} diff --git a/generated_api_shadow/envoy/extensions/filters/http/grpc_http1_reverse_bridge/v3/BUILD b/generated_api_shadow/envoy/extensions/filters/http/grpc_http1_reverse_bridge/v3/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/grpc_http1_reverse_bridge/v3/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/extensions/filters/http/grpc_http1_reverse_bridge/v3/config.proto b/generated_api_shadow/envoy/extensions/filters/http/grpc_http1_reverse_bridge/v3/config.proto deleted file mode 100644 index 615fea923a8e1..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/grpc_http1_reverse_bridge/v3/config.proto +++ /dev/null @@ -1,60 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.http.grpc_http1_reverse_bridge.v3; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.http.grpc_http1_reverse_bridge.v3"; -option java_outer_classname = "ConfigProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: gRPC HTTP/1.1 Reverse Bridge] -// gRPC HTTP/1.1 Reverse Bridge :ref:`configuration overview -// `. -// [#extension: envoy.filters.http.grpc_http1_reverse_bridge] - -// gRPC reverse bridge filter configuration -message FilterConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.http.grpc_http1_reverse_bridge.v2alpha1.FilterConfig"; - - // The content-type to pass to the upstream when the gRPC bridge filter is applied. - // The filter will also validate that the upstream responds with the same content type. - string content_type = 1 [(validate.rules).string = {min_len: 1}]; - - // If true, Envoy will assume that the upstream doesn't understand gRPC frames and - // strip the gRPC frame from the request, and add it back in to the response. This will - // hide the gRPC semantics from the upstream, allowing it to receive and respond with a - // simple binary encoded protobuf. 
In order to calculate the `Content-Length` header value, Envoy - // will buffer the upstream response unless :ref:`response_size_header - // ` - // is set, in which case Envoy will use the value of an upstream header to calculate the content - // length. - bool withhold_grpc_frames = 2; - - // When :ref:`withhold_grpc_frames - // ` - // is true, this option controls how Envoy calculates the `Content-Length`. When - // *response_size_header* is empty, Envoy will buffer the upstream response to calculate its - // size. When *response_size_header* is set to a non-empty string, Envoy will stream the response - // to the downstream and it will use the value of the response header with this name to set the - // `Content-Length` header and gRPC frame size. If the header with this name is repeated, only - // the first value will be used. - // - // Envoy will treat the upstream response as an error if this option is specified and the header - // is missing or if the value does not match the actual response body size. - string response_size_header = 3; -} - -// gRPC reverse bridge filter configuration per virtualhost/route/weighted-cluster level. -message FilterConfigPerRoute { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.http.grpc_http1_reverse_bridge.v2alpha1.FilterConfigPerRoute"; - - // If true, disables gRPC reverse bridge filter for this particular vhost or route. - // If disabled is specified in multiple per-filter-configs, the most specific one will be used. - bool disabled = 1; -} diff --git a/generated_api_shadow/envoy/extensions/filters/http/grpc_json_transcoder/v3/BUILD b/generated_api_shadow/envoy/extensions/filters/http/grpc_json_transcoder/v3/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/grpc_json_transcoder/v3/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/extensions/filters/http/grpc_json_transcoder/v3/transcoder.proto b/generated_api_shadow/envoy/extensions/filters/http/grpc_json_transcoder/v3/transcoder.proto deleted file mode 100644 index a4feeff31f158..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/grpc_json_transcoder/v3/transcoder.proto +++ /dev/null @@ -1,235 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.http.grpc_json_transcoder.v3; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.http.grpc_json_transcoder.v3"; -option java_outer_classname = "TranscoderProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: gRPC-JSON transcoder] -// gRPC-JSON transcoder :ref:`configuration overview `. -// [#extension: envoy.filters.http.grpc_json_transcoder] - -// [#next-free-field: 12] -// GrpcJsonTranscoder filter configuration. -// The filter itself can be used per route / per virtual host or on the general level. The most -// specific one is being used for a given route. If the list of services is empty - filter -// is considered to be disabled. -// Note that if specifying the filter per route, first the route is matched, and then transcoding -// filter is applied. It matters when specifying the route configuration and paths to match the -// request - for per-route grpc transcoder configs, the original path should be matched, while -// in other cases, the grpc-like path is expected (the one AFTER the filter is applied). 
-message GrpcJsonTranscoder { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.http.transcoder.v2.GrpcJsonTranscoder"; - - enum UrlUnescapeSpec { - // URL path parameters will not decode RFC 6570 reserved characters. - // For example, segment `%2f%23/%20%2523` is unescaped to `%2f%23/ %23`. - ALL_CHARACTERS_EXCEPT_RESERVED = 0; - - // URL path parameters will be fully URI-decoded except in - // cases of single segment matches in reserved expansion, where "%2F" will be - // left encoded. - // For example, segment `%2f%23/%20%2523` is unescaped to `%2f#/ %23`. - ALL_CHARACTERS_EXCEPT_SLASH = 1; - - // URL path parameters will be fully URI-decoded. - // For example, segment `%2f%23/%20%2523` is unescaped to `/#/ %23`. - ALL_CHARACTERS = 2; - } - - message PrintOptions { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.http.transcoder.v2.GrpcJsonTranscoder.PrintOptions"; - - // Whether to add spaces, line breaks and indentation to make the JSON - // output easy to read. Defaults to false. - bool add_whitespace = 1; - - // Whether to always print primitive fields. By default primitive - // fields with default values will be omitted in JSON output. For - // example, an int32 field set to 0 will be omitted. Setting this flag to - // true will override the default behavior and print primitive fields - // regardless of their values. Defaults to false. - bool always_print_primitive_fields = 2; - - // Whether to always print enums as ints. By default they are rendered - // as strings. Defaults to false. - bool always_print_enums_as_ints = 3; - - // Whether to preserve proto field names. By default protobuf will - // generate JSON field names using the ``json_name`` option, or lower camel case, - // in that order. Setting this flag will preserve the original field names. Defaults to false. 
- bool preserve_proto_field_names = 4; - } - - message RequestValidationOptions { - // By default, a request that cannot be mapped to any specified gRPC - // :ref:`services ` - // will pass-through this filter. - // When set to true, the request will be rejected with a ``HTTP 404 Not Found``. - bool reject_unknown_method = 1; - - // By default, a request with query parameters that cannot be mapped to the gRPC request message - // will pass-through this filter. - // When set to true, the request will be rejected with a ``HTTP 400 Bad Request``. - // - // The fields - // :ref:`ignore_unknown_query_parameters ` - // and - // :ref:`ignored_query_parameters ` - // have priority over this strict validation behavior. - bool reject_unknown_query_parameters = 2; - } - - oneof descriptor_set { - option (validate.required) = true; - - // Supplies the filename of - // :ref:`the proto descriptor set ` for the gRPC - // services. - string proto_descriptor = 1; - - // Supplies the binary content of - // :ref:`the proto descriptor set ` for the gRPC - // services. - bytes proto_descriptor_bin = 4; - } - - // A list of strings that - // supplies the fully qualified service names (i.e. "package_name.service_name") that - // the transcoder will translate. If the service name doesn't exist in ``proto_descriptor``, - // Envoy will fail at startup. The ``proto_descriptor`` may contain more services than - // the service names specified here, but they won't be translated. - // - // By default, the filter will pass through requests that do not map to any specified services. - // If the list of services is empty, filter is considered disabled. - // However, this behavior changes if - // :ref:`reject_unknown_method ` - // is enabled. - repeated string services = 2; - - // Control options for response JSON. These options are passed directly to - // `JsonPrintOptions `_. 
- PrintOptions print_options = 3; - - // Whether to keep the incoming request route after the outgoing headers have been transformed to - // the match the upstream gRPC service. Note: This means that routes for gRPC services that are - // not transcoded cannot be used in combination with *match_incoming_request_route*. - bool match_incoming_request_route = 5; - - // A list of query parameters to be ignored for transcoding method mapping. - // By default, the transcoder filter will not transcode a request if there are any - // unknown/invalid query parameters. - // - // Example : - // - // .. code-block:: proto - // - // service Bookstore { - // rpc GetShelf(GetShelfRequest) returns (Shelf) { - // option (google.api.http) = { - // get: "/shelves/{shelf}" - // }; - // } - // } - // - // message GetShelfRequest { - // int64 shelf = 1; - // } - // - // message Shelf {} - // - // The request ``/shelves/100?foo=bar`` will not be mapped to ``GetShelf``` because variable - // binding for ``foo`` is not defined. Adding ``foo`` to ``ignored_query_parameters`` will allow - // the same request to be mapped to ``GetShelf``. - repeated string ignored_query_parameters = 6; - - // Whether to route methods without the ``google.api.http`` option. - // - // Example : - // - // .. code-block:: proto - // - // package bookstore; - // - // service Bookstore { - // rpc GetShelf(GetShelfRequest) returns (Shelf) {} - // } - // - // message GetShelfRequest { - // int64 shelf = 1; - // } - // - // message Shelf {} - // - // The client could ``post`` a json body ``{"shelf": 1234}`` with the path of - // ``/bookstore.Bookstore/GetShelfRequest`` to call ``GetShelfRequest``. - bool auto_mapping = 7; - - // Whether to ignore query parameters that cannot be mapped to a corresponding - // protobuf field. Use this if you cannot control the query parameters and do - // not know them beforehand. Otherwise use ``ignored_query_parameters``. - // Defaults to false. 
- bool ignore_unknown_query_parameters = 8; - - // Whether to convert gRPC status headers to JSON. - // When trailer indicates a gRPC error and there was no HTTP body, take ``google.rpc.Status`` - // from the ``grpc-status-details-bin`` header and use it as JSON body. - // If there was no such header, make ``google.rpc.Status`` out of the ``grpc-status`` and - // ``grpc-message`` headers. - // The error details types must be present in the ``proto_descriptor``. - // - // For example, if an upstream server replies with headers: - // - // .. code-block:: none - // - // grpc-status: 5 - // grpc-status-details-bin: - // CAUaMwoqdHlwZS5nb29nbGVhcGlzLmNvbS9nb29nbGUucnBjLlJlcXVlc3RJbmZvEgUKA3ItMQ - // - // The ``grpc-status-details-bin`` header contains a base64-encoded protobuf message - // ``google.rpc.Status``. It will be transcoded into: - // - // .. code-block:: none - // - // HTTP/1.1 404 Not Found - // content-type: application/json - // - // {"code":5,"details":[{"@type":"type.googleapis.com/google.rpc.RequestInfo","requestId":"r-1"}]} - // - // In order to transcode the message, the ``google.rpc.RequestInfo`` type from - // the ``google/rpc/error_details.proto`` should be included in the configured - // :ref:`proto descriptor set `. - bool convert_grpc_status = 9; - - // URL unescaping policy. - // This spec is only applied when extracting variable with multiple segments. - // For example, in case of `/foo/{x=*}/bar/{y=prefix/*}/{z=**}` `x` variable is single segment and `y` and `z` are multiple segments. - // For a path with `/foo/first/bar/prefix/second/third/fourth`, `x=first`, `y=prefix/second`, `z=third/fourth`. - // If this setting is not specified, the value defaults to :ref:`ALL_CHARACTERS_EXCEPT_RESERVED`. - UrlUnescapeSpec url_unescape_spec = 10 [(validate.rules).enum = {defined_only: true}]; - - // Configure the behavior when handling requests that cannot be transcoded. 
- // - // By default, the transcoder will silently pass through HTTP requests that are malformed. - // This includes requests with unknown query parameters, unregister paths, etc. - // - // Set these options to enable strict HTTP request validation, resulting in the transcoder rejecting - // such requests with a ``HTTP 4xx``. See each individual option for more details on the validation. - // gRPC requests will still silently pass through without transcoding. - // - // The benefit is a proper error message to the downstream. - // If the upstream is a gRPC server, it cannot handle the passed-through HTTP requests and will reset - // the TCP connection. The downstream will then - // receive a ``HTTP 503 Service Unavailable`` due to the upstream connection reset. - // This incorrect error message may conflict with other Envoy components, such as retry policies. - RequestValidationOptions request_validation_options = 11; -} diff --git a/generated_api_shadow/envoy/extensions/filters/http/grpc_stats/v3/BUILD b/generated_api_shadow/envoy/extensions/filters/http/grpc_stats/v3/BUILD deleted file mode 100644 index 1c1a6f6b44235..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/grpc_stats/v3/BUILD +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/core/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/filters/http/grpc_stats/v3/config.proto b/generated_api_shadow/envoy/extensions/filters/http/grpc_stats/v3/config.proto deleted file mode 100644 index 79ecb7a92b706..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/grpc_stats/v3/config.proto +++ /dev/null @@ -1,74 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.http.grpc_stats.v3; - -import "envoy/config/core/v3/grpc_method_list.proto"; - -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.http.grpc_stats.v3"; -option java_outer_classname = "ConfigProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: gRPC statistics] gRPC statistics filter -// :ref:`configuration overview `. -// [#extension: envoy.filters.http.grpc_stats] - -// gRPC statistics filter configuration -message FilterConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.http.grpc_stats.v2alpha.FilterConfig"; - - // If true, the filter maintains a filter state object with the request and response message - // counts. - bool emit_filter_state = 1; - - oneof per_method_stat_specifier { - // If set, specifies an allowlist of service/methods that will have individual stats - // emitted for them. Any call that does not match the allowlist will be counted - // in a stat with no method specifier: `cluster..grpc.*`. - config.core.v3.GrpcMethodList individual_method_stats_allowlist = 2; - - // If set to true, emit stats for all service/method names. 
- // - // If set to false, emit stats for all service/message types to the same stats without including - // the service/method in the name, with prefix `cluster..grpc`. This can be useful if - // service/method granularity is not needed, or if each cluster only receives a single method. - // - // .. attention:: - // This option is only safe if all clients are trusted. If this option is enabled - // with untrusted clients, the clients could cause unbounded growth in the number of stats in - // Envoy, using unbounded memory and potentially slowing down stats pipelines. - // - // .. attention:: - // If neither `individual_method_stats_allowlist` nor `stats_for_all_methods` is set, the - // behavior will default to `stats_for_all_methods=false`. This default value is changed due - // to the previous value being deprecated. This behavior can be changed with runtime override - // `envoy.deprecated_features.grpc_stats_filter_enable_stats_for_all_methods_by_default`. - google.protobuf.BoolValue stats_for_all_methods = 3; - } - - // If true, the filter will gather a histogram for the request time of the upstream. - // It works with :ref:`stats_for_all_methods - // ` - // and :ref:`individual_method_stats_allowlist - // ` the same way - // request_message_count and response_message_count works. - bool enable_upstream_stats = 4; -} - -// gRPC statistics filter state object in protobuf form. -message FilterObject { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.http.grpc_stats.v2alpha.FilterObject"; - - // Count of request messages in the request stream. - uint64 request_message_count = 1; - - // Count of response messages in the response stream. 
- uint64 response_message_count = 2; -} diff --git a/generated_api_shadow/envoy/extensions/filters/http/grpc_web/v3/BUILD b/generated_api_shadow/envoy/extensions/filters/http/grpc_web/v3/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/grpc_web/v3/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/extensions/filters/http/grpc_web/v3/grpc_web.proto b/generated_api_shadow/envoy/extensions/filters/http/grpc_web/v3/grpc_web.proto deleted file mode 100644 index 8161139f547b5..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/grpc_web/v3/grpc_web.proto +++ /dev/null @@ -1,21 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.http.grpc_web.v3; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.http.grpc_web.v3"; -option java_outer_classname = "GrpcWebProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: gRPC Web] -// gRPC Web :ref:`configuration overview `. -// [#extension: envoy.filters.http.grpc_web] - -// gRPC Web filter config. -message GrpcWeb { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.http.grpc_web.v2.GrpcWeb"; -} diff --git a/generated_api_shadow/envoy/extensions/filters/http/gzip/v3/BUILD b/generated_api_shadow/envoy/extensions/filters/http/gzip/v3/BUILD deleted file mode 100644 index bfe5d198e6129..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/gzip/v3/BUILD +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT EDIT. 
This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/extensions/filters/http/compressor/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/filters/http/gzip/v3/gzip.proto b/generated_api_shadow/envoy/extensions/filters/http/gzip/v3/gzip.proto deleted file mode 100644 index a931ab78689ff..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/gzip/v3/gzip.proto +++ /dev/null @@ -1,81 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.http.gzip.v3; - -import "envoy/extensions/filters/http/compressor/v3/compressor.proto"; - -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.http.gzip.v3"; -option java_outer_classname = "GzipProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Gzip] - -// [#next-free-field: 12] -message Gzip { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.http.gzip.v2.Gzip"; - - enum CompressionStrategy { - DEFAULT = 0; - FILTERED = 1; - HUFFMAN = 2; - RLE = 3; - } - - message CompressionLevel { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.http.gzip.v2.Gzip.CompressionLevel"; - - enum Enum { - DEFAULT = 0; - BEST = 1; - SPEED = 2; - } - } - - reserved 2, 6, 7, 8; - - reserved "content_length", "content_type", "disable_on_etag_header", - "remove_accept_encoding_header"; - - // Value from 1 to 9 that controls the amount of internal memory used by zlib. Higher values - // use more memory, but are faster and produce better compression results. The default value is 5. 
- google.protobuf.UInt32Value memory_level = 1 [(validate.rules).uint32 = {lte: 9 gte: 1}]; - - // A value used for selecting the zlib compression level. This setting will affect speed and - // amount of compression applied to the content. "BEST" provides higher compression at the cost of - // higher latency, "SPEED" provides lower compression with minimum impact on response time. - // "DEFAULT" provides an optimal result between speed and compression. This field will be set to - // "DEFAULT" if not specified. - CompressionLevel.Enum compression_level = 3 [(validate.rules).enum = {defined_only: true}]; - - // A value used for selecting the zlib compression strategy which is directly related to the - // characteristics of the content. Most of the time "DEFAULT" will be the best choice, though - // there are situations which changing this parameter might produce better results. For example, - // run-length encoding (RLE) is typically used when the content is known for having sequences - // which same data occurs many consecutive times. For more information about each strategy, please - // refer to zlib manual. - CompressionStrategy compression_strategy = 4 [(validate.rules).enum = {defined_only: true}]; - - // Value from 9 to 15 that represents the base two logarithmic of the compressor's window size. - // Larger window results in better compression at the expense of memory usage. The default is 12 - // which will produce a 4096 bytes window. For more details about this parameter, please refer to - // zlib manual > deflateInit2. - google.protobuf.UInt32Value window_bits = 9 [(validate.rules).uint32 = {lte: 15 gte: 9}]; - - // Set of configuration parameters common for all compression filters. You can define - // `content_length`, `content_type` and other parameters in this field. - compressor.v3.Compressor compressor = 10; - - // Value for Zlib's next output buffer. If not set, defaults to 4096. - // See https://www.zlib.net/manual.html for more details. 
Also see - // https://github.com/envoyproxy/envoy/issues/8448 for context on this filter's performance. - google.protobuf.UInt32Value chunk_size = 11 [(validate.rules).uint32 = {lte: 65536 gte: 4096}]; -} diff --git a/generated_api_shadow/envoy/extensions/filters/http/header_to_metadata/v3/BUILD b/generated_api_shadow/envoy/extensions/filters/http/header_to_metadata/v3/BUILD deleted file mode 100644 index 693f0b92ff34d..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/header_to_metadata/v3/BUILD +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/type/matcher/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/filters/http/header_to_metadata/v3/header_to_metadata.proto b/generated_api_shadow/envoy/extensions/filters/http/header_to_metadata/v3/header_to_metadata.proto deleted file mode 100644 index 5e399790a7eca..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/header_to_metadata/v3/header_to_metadata.proto +++ /dev/null @@ -1,132 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.http.header_to_metadata.v3; - -import "envoy/type/matcher/v3/regex.proto"; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.http.header_to_metadata.v3"; -option java_outer_classname = "HeaderToMetadataProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Header-To-Metadata Filter] -// -// The configuration for transforming headers into metadata. 
This is useful -// for matching load balancer subsets, logging, etc. -// -// Header to Metadata :ref:`configuration overview `. -// [#extension: envoy.filters.http.header_to_metadata] - -message Config { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.http.header_to_metadata.v2.Config"; - - enum ValueType { - STRING = 0; - - NUMBER = 1; - - // The value is a serialized `protobuf.Value - // `_. - PROTOBUF_VALUE = 2; - } - - // ValueEncode defines the encoding algorithm. - enum ValueEncode { - // The value is not encoded. - NONE = 0; - - // The value is encoded in `Base64 `_. - // Note: this is mostly used for STRING and PROTOBUF_VALUE to escape the - // non-ASCII characters in the header. - BASE64 = 1; - } - - // [#next-free-field: 7] - message KeyValuePair { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.http.header_to_metadata.v2.Config.KeyValuePair"; - - // The namespace — if this is empty, the filter's namespace will be used. - string metadata_namespace = 1; - - // The key to use within the namespace. - string key = 2 [(validate.rules).string = {min_len: 1}]; - - // The value to pair with the given key. - // - // When used for a - // :ref:`on_header_present ` - // case, if value is non-empty it'll be used instead of the header value. If both are empty, no metadata is added. - // - // When used for a :ref:`on_header_missing ` - // case, a non-empty value must be provided otherwise no metadata is added. - string value = 3 [(udpa.annotations.field_migrate).oneof_promotion = "value_type"]; - - // If present, the header's value will be matched and substituted with this. If there is no match or substitution, the header value - // is used as-is. - // - // This is only used for :ref:`on_header_present `. - // - // Note: if the `value` field is non-empty this field should be empty. 
- type.matcher.v3.RegexMatchAndSubstitute regex_value_rewrite = 6 - [(udpa.annotations.field_migrate).oneof_promotion = "value_type"]; - - // The value's type — defaults to string. - ValueType type = 4 [(validate.rules).enum = {defined_only: true}]; - - // How is the value encoded, default is NONE (not encoded). - // The value will be decoded accordingly before storing to metadata. - ValueEncode encode = 5; - } - - // A Rule defines what metadata to apply when a header is present or missing. - // [#next-free-field: 6] - message Rule { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.http.header_to_metadata.v2.Config.Rule"; - - // Specifies that a match will be performed on the value of a header or a cookie. - // - // The header to be extracted. - string header = 1 [ - (validate.rules).string = {well_known_regex: HTTP_HEADER_NAME strict: false}, - (udpa.annotations.field_migrate).oneof_promotion = "header_cookie_specifier" - ]; - - // The cookie to be extracted. - string cookie = 5 [ - (validate.rules).string = {well_known_regex: HTTP_HEADER_NAME strict: false}, - (udpa.annotations.field_migrate).oneof_promotion = "header_cookie_specifier" - ]; - - // If the header or cookie is present, apply this metadata KeyValuePair. - // - // If the value in the KeyValuePair is non-empty, it'll be used instead - // of the header or cookie value. - KeyValuePair on_header_present = 2 [(udpa.annotations.field_migrate).rename = "on_present"]; - - // If the header or cookie is not present, apply this metadata KeyValuePair. - // - // The value in the KeyValuePair must be set, since it'll be used in lieu - // of the missing header or cookie value. - KeyValuePair on_header_missing = 3 [(udpa.annotations.field_migrate).rename = "on_missing"]; - - // Whether or not to remove the header after a rule is applied. - // - // This prevents headers from leaking. - // This field is not supported in case of a cookie. 
- bool remove = 4; - } - - // The list of rules to apply to requests. - repeated Rule request_rules = 1; - - // The list of rules to apply to responses. - repeated Rule response_rules = 2; -} diff --git a/generated_api_shadow/envoy/extensions/filters/http/health_check/v3/BUILD b/generated_api_shadow/envoy/extensions/filters/http/health_check/v3/BUILD deleted file mode 100644 index c6ef74063aabe..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/health_check/v3/BUILD +++ /dev/null @@ -1,13 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/route/v3:pkg", - "//envoy/type/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/filters/http/health_check/v3/health_check.proto b/generated_api_shadow/envoy/extensions/filters/http/health_check/v3/health_check.proto deleted file mode 100644 index f3a0c42c388c6..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/health_check/v3/health_check.proto +++ /dev/null @@ -1,52 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.http.health_check.v3; - -import "envoy/config/route/v3/route_components.proto"; -import "envoy/type/v3/percent.proto"; - -import "google/protobuf/duration.proto"; -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.http.health_check.v3"; -option java_outer_classname = "HealthCheckProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Health check] -// Health check :ref:`configuration overview `. 
-// [#extension: envoy.filters.http.health_check] - -// [#next-free-field: 6] -message HealthCheck { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.http.health_check.v2.HealthCheck"; - - reserved 2; - - // Specifies whether the filter operates in pass through mode or not. - google.protobuf.BoolValue pass_through_mode = 1 [(validate.rules).message = {required: true}]; - - // If operating in pass through mode, the amount of time in milliseconds - // that the filter should cache the upstream response. - google.protobuf.Duration cache_time = 3; - - // If operating in non-pass-through mode, specifies a set of upstream cluster - // names and the minimum percentage of servers in each of those clusters that - // must be healthy or degraded in order for the filter to return a 200. - // - // .. note:: - // - // This value is interpreted as an integer by truncating, so 12.50% will be calculated - // as if it were 12%. - map cluster_min_healthy_percentages = 4; - - // Specifies a set of health check request headers to match on. The health check filter will - // check a request’s headers against all the specified headers. To specify the health check - // endpoint, set the ``:path`` header to match on. - repeated config.route.v3.HeaderMatcher headers = 5; -} diff --git a/generated_api_shadow/envoy/extensions/filters/http/ip_tagging/v3/BUILD b/generated_api_shadow/envoy/extensions/filters/http/ip_tagging/v3/BUILD deleted file mode 100644 index 1c1a6f6b44235..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/ip_tagging/v3/BUILD +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/core/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/filters/http/ip_tagging/v3/ip_tagging.proto b/generated_api_shadow/envoy/extensions/filters/http/ip_tagging/v3/ip_tagging.proto deleted file mode 100644 index a23ad9dea0a90..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/ip_tagging/v3/ip_tagging.proto +++ /dev/null @@ -1,61 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.http.ip_tagging.v3; - -import "envoy/config/core/v3/address.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.http.ip_tagging.v3"; -option java_outer_classname = "IpTaggingProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: IP tagging] -// IP tagging :ref:`configuration overview `. -// [#extension: envoy.filters.http.ip_tagging] - -message IPTagging { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.http.ip_tagging.v2.IPTagging"; - - // The type of requests the filter should apply to. The supported types - // are internal, external or both. The - // :ref:`x-forwarded-for` header is - // used to determine if a request is internal and will result in - // :ref:`x-envoy-internal` - // being set. The filter defaults to both, and it will apply to all request types. - enum RequestType { - // Both external and internal requests will be tagged. This is the default value. - BOTH = 0; - - // Only internal requests will be tagged. - INTERNAL = 1; - - // Only external requests will be tagged. - EXTERNAL = 2; - } - - // Supplies the IP tag name and the IP address subnets. 
- message IPTag { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.http.ip_tagging.v2.IPTagging.IPTag"; - - // Specifies the IP tag name to apply. - string ip_tag_name = 1; - - // A list of IP address subnets that will be tagged with - // ip_tag_name. Both IPv4 and IPv6 are supported. - repeated config.core.v3.CidrRange ip_list = 2; - } - - // The type of request the filter should apply to. - RequestType request_type = 1 [(validate.rules).enum = {defined_only: true}]; - - // [#comment:TODO(ccaraman): Extend functionality to load IP tags from file system. - // Tracked by issue https://github.com/envoyproxy/envoy/issues/2695] - // The set of IP tags for the filter. - repeated IPTag ip_tags = 4 [(validate.rules).repeated = {min_items: 1}]; -} diff --git a/generated_api_shadow/envoy/extensions/filters/http/jwt_authn/v3/BUILD b/generated_api_shadow/envoy/extensions/filters/http/jwt_authn/v3/BUILD deleted file mode 100644 index 6eb33fe8151ad..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/jwt_authn/v3/BUILD +++ /dev/null @@ -1,13 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/core/v3:pkg", - "//envoy/config/route/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/filters/http/jwt_authn/v3/config.proto b/generated_api_shadow/envoy/extensions/filters/http/jwt_authn/v3/config.proto deleted file mode 100644 index 9718dbe0550ab..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/jwt_authn/v3/config.proto +++ /dev/null @@ -1,678 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.http.jwt_authn.v3; - -import "envoy/config/core/v3/base.proto"; -import "envoy/config/core/v3/http_uri.proto"; -import "envoy/config/route/v3/route_components.proto"; - -import "google/protobuf/duration.proto"; -import "google/protobuf/empty.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.http.jwt_authn.v3"; -option java_outer_classname = "ConfigProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: JWT Authentication] -// JWT Authentication :ref:`configuration overview `. -// [#extension: envoy.filters.http.jwt_authn] - -// Please see following for JWT authentication flow: -// -// * `JSON Web Token (JWT) `_ -// * `The OAuth 2.0 Authorization Framework `_ -// * `OpenID Connect `_ -// -// A JwtProvider message specifies how a JSON Web Token (JWT) can be verified. It specifies: -// -// * issuer: the principal that issues the JWT. If specified, it has to match the *iss* field in JWT. -// * allowed audiences: the ones in the token have to be listed here. -// * how to fetch public key JWKS to verify the token signature. -// * how to extract JWT token in the request. 
-// * how to pass successfully verified token payload. -// -// Example: -// -// .. code-block:: yaml -// -// issuer: https://example.com -// audiences: -// - bookstore_android.apps.googleusercontent.com -// - bookstore_web.apps.googleusercontent.com -// remote_jwks: -// http_uri: -// uri: https://example.com/.well-known/jwks.json -// cluster: example_jwks_cluster -// timeout: 1s -// cache_duration: -// seconds: 300 -// -// [#next-free-field: 14] -message JwtProvider { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.http.jwt_authn.v2alpha.JwtProvider"; - - // Specify the `principal `_ that issued - // the JWT, usually a URL or an email address. - // - // It is optional. If specified, it has to match the *iss* field in JWT. - // - // If a JWT has *iss* field and this field is specified, they have to match, otherwise the - // JWT *iss* field is not checked. - // - // Note: *JwtRequirement* :ref:`allow_missing ` - // and :ref:`allow_missing_or_failed ` - // are implemented differently than other *JwtRequirements*. Hence the usage of this field - // is different as follows if *allow_missing* or *allow_missing_or_failed* is used: - // - // * If a JWT has *iss* field, it needs to be specified by this field in one of *JwtProviders*. - // * If a JWT doesn't have *iss* field, one of *JwtProviders* should fill this field empty. - // * Multiple *JwtProviders* should not have same value in this field. - // - // Example: https://securetoken.google.com - // Example: 1234567-compute@developer.gserviceaccount.com - // - string issuer = 1; - - // The list of JWT `audiences `_ are - // allowed to access. A JWT containing any of these audiences will be accepted. If not specified, - // will not check audiences in the token. - // - // Example: - // - // .. 
code-block:: yaml - // - // audiences: - // - bookstore_android.apps.googleusercontent.com - // - bookstore_web.apps.googleusercontent.com - // - repeated string audiences = 2; - - // `JSON Web Key Set (JWKS) `_ is needed to - // validate signature of a JWT. This field specifies where to fetch JWKS. - oneof jwks_source_specifier { - option (validate.required) = true; - - // JWKS can be fetched from remote server via HTTP/HTTPS. This field specifies the remote HTTP - // URI and how the fetched JWKS should be cached. - // - // Example: - // - // .. code-block:: yaml - // - // remote_jwks: - // http_uri: - // uri: https://www.googleapis.com/oauth2/v1/certs - // cluster: jwt.www.googleapis.com|443 - // timeout: 1s - // cache_duration: - // seconds: 300 - // - RemoteJwks remote_jwks = 3; - - // JWKS is in local data source. It could be either in a local file or embedded in the - // inline_string. - // - // Example: local file - // - // .. code-block:: yaml - // - // local_jwks: - // filename: /etc/envoy/jwks/jwks1.txt - // - // Example: inline_string - // - // .. code-block:: yaml - // - // local_jwks: - // inline_string: ACADADADADA - // - config.core.v3.DataSource local_jwks = 4; - } - - // If false, the JWT is removed in the request after a success verification. If true, the JWT is - // not removed in the request. Default value is false. - bool forward = 5; - - // Two fields below define where to extract the JWT from an HTTP request. - // - // If no explicit location is specified, the following default locations are tried in order: - // - // 1. The Authorization header using the `Bearer schema - // `_. Example:: - // - // Authorization: Bearer . - // - // 2. `access_token `_ query parameter. - // - // Multiple JWTs can be verified for a request. Each JWT has to be extracted from the locations - // its provider specified or from the default locations. - // - // Specify the HTTP headers to extract JWT token. For examples, following config: - // - // .. 
code-block:: yaml - // - // from_headers: - // - name: x-goog-iap-jwt-assertion - // - // can be used to extract token from header:: - // - // ``x-goog-iap-jwt-assertion: ``. - // - repeated JwtHeader from_headers = 6; - - // JWT is sent in a query parameter. `jwt_params` represents the query parameter names. - // - // For example, if config is: - // - // .. code-block:: yaml - // - // from_params: - // - jwt_token - // - // The JWT format in query parameter is:: - // - // /path?jwt_token= - // - repeated string from_params = 7; - - // JWT is sent in a cookie. `from_cookies` represents the cookie names to extract from. - // - // For example, if config is: - // - // .. code-block:: yaml - // - // from_cookies: - // - auth-token - // - // Then JWT will be extracted from `auth-token` cookie in the request. - // - repeated string from_cookies = 13; - - // This field specifies the header name to forward a successfully verified JWT payload to the - // backend. The forwarded data is:: - // - // base64url_encoded(jwt_payload_in_JSON) - // - // If it is not specified, the payload will not be forwarded. - string forward_payload_header = 8 - [(validate.rules).string = {well_known_regex: HTTP_HEADER_NAME strict: false}]; - - // When :ref:`forward_payload_header ` - // is specified, the base64 encoded payload will be added to the headers. - // Normally JWT based64 encode doesn't add padding. If this field is true, - // the header will be padded. - // - // This field is only relevant if :ref:`forward_payload_header ` - // is specified. - bool pad_forward_payload_header = 11; - - // If non empty, successfully verified JWT payloads will be written to StreamInfo DynamicMetadata - // in the format as: *namespace* is the jwt_authn filter name as **envoy.filters.http.jwt_authn** - // The value is the *protobuf::Struct*. The value of this field will be the key for its *fields* - // and the value is the *protobuf::Struct* converted from JWT JSON payload. 
- // - // For example, if payload_in_metadata is *my_payload*: - // - // .. code-block:: yaml - // - // envoy.filters.http.jwt_authn: - // my_payload: - // iss: https://example.com - // sub: test@example.com - // aud: https://example.com - // exp: 1501281058 - // - string payload_in_metadata = 9; - - // Specify the clock skew in seconds when verifying JWT time constraint, - // such as `exp`, and `nbf`. If not specified, default is 60 seconds. - uint32 clock_skew_seconds = 10; - - // Enables JWT cache, its size is specified by *jwt_cache_size*. - // Only valid JWT tokens are cached. - JwtCacheConfig jwt_cache_config = 12; -} - -// This message specifies JWT Cache configuration. -message JwtCacheConfig { - // The unit is number of JWT tokens, default to 100. - uint32 jwt_cache_size = 1; -} - -// This message specifies how to fetch JWKS from remote and how to cache it. -message RemoteJwks { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.http.jwt_authn.v2alpha.RemoteJwks"; - - // The HTTP URI to fetch the JWKS. For example: - // - // .. code-block:: yaml - // - // http_uri: - // uri: https://www.googleapis.com/oauth2/v1/certs - // cluster: jwt.www.googleapis.com|443 - // timeout: 1s - // - config.core.v3.HttpUri http_uri = 1; - - // Duration after which the cached JWKS should be expired. If not specified, default cache - // duration is 5 minutes. - google.protobuf.Duration cache_duration = 2; - - // Fetch Jwks asynchronously in the main thread before the listener is activated. - // Fetched Jwks can be used by all worker threads. - // - // If this feature is not enabled: - // - // * The Jwks is fetched on-demand when the requests come. During the fetching, first - // few requests are paused until the Jwks is fetched. - // * Each worker thread fetches its own Jwks since Jwks cache is per worker thread. - // - // If this feature is enabled: - // - // * Fetched Jwks is done in the main thread before the listener is activated. 
Its fetched - // Jwks can be used by all worker threads. Each worker thread doesn't need to fetch its own. - // * Jwks is ready when the requests come, not need to wait for the Jwks fetching. - // - JwksAsyncFetch async_fetch = 3; - - // Retry policy for fetching Jwks. optional. turned off by default. - // - // For example: - // - // .. code-block:: yaml - // - // retry_policy: - // retry_back_off: - // base_interval: 0.01s - // max_interval: 20s - // num_retries: 10 - // - // will yield a randomized truncated exponential backoff policy with an initial delay of 10ms - // 10 maximum attempts spaced at most 20s seconds. - // - // .. code-block:: yaml - // - // retry_policy: - // num_retries:1 - // - // uses the default :ref:`retry backoff strategy `. - // with the default base interval is 1000 milliseconds. and the default maximum interval of 10 times the base interval. - // - // if num_retries is omitted, the default is to allow only one retry. - // - // - // If enabled, the retry policy will apply to all Jwks fetching approaches, e.g. on demand or asynchronously in background. - // - // - config.core.v3.RetryPolicy retry_policy = 4; -} - -// Fetch Jwks asynchronously in the main thread when the filter config is parsed. -// The listener is activated only after the Jwks is fetched. -// When the Jwks is expired in the cache, it is fetched again in the main thread. -// The fetched Jwks from the main thread can be used by all worker threads. -message JwksAsyncFetch { - // If false, the listener is activated after the initial fetch is completed. - // The initial fetch result can be either successful or failed. - // If true, it is activated without waiting for the initial fetch to complete. - // Default is false. - bool fast_listener = 1; -} - -// This message specifies a header location to extract JWT token. 
-message JwtHeader { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.http.jwt_authn.v2alpha.JwtHeader"; - - // The HTTP header name. - string name = 1 - [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}]; - - // The value prefix. The value format is "value_prefix" - // For example, for "Authorization: Bearer ", value_prefix="Bearer " with a space at the - // end. - string value_prefix = 2 - [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}]; -} - -// Specify a required provider with audiences. -message ProviderWithAudiences { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.http.jwt_authn.v2alpha.ProviderWithAudiences"; - - // Specify a required provider name. - string provider_name = 1; - - // This field overrides the one specified in the JwtProvider. - repeated string audiences = 2; -} - -// This message specifies a Jwt requirement. An empty message means JWT verification is not -// required. Here are some config examples: -// -// .. 
code-block:: yaml -// -// # Example 1: not required with an empty message -// -// # Example 2: require A -// provider_name: provider-A -// -// # Example 3: require A or B -// requires_any: -// requirements: -// - provider_name: provider-A -// - provider_name: provider-B -// -// # Example 4: require A and B -// requires_all: -// requirements: -// - provider_name: provider-A -// - provider_name: provider-B -// -// # Example 5: require A and (B or C) -// requires_all: -// requirements: -// - provider_name: provider-A -// - requires_any: -// requirements: -// - provider_name: provider-B -// - provider_name: provider-C -// -// # Example 6: require A or (B and C) -// requires_any: -// requirements: -// - provider_name: provider-A -// - requires_all: -// requirements: -// - provider_name: provider-B -// - provider_name: provider-C -// -// # Example 7: A is optional (if token from A is provided, it must be valid, but also allows -// missing token.) -// requires_any: -// requirements: -// - provider_name: provider-A -// - allow_missing: {} -// -// # Example 8: A is optional and B is required. -// requires_all: -// requirements: -// - requires_any: -// requirements: -// - provider_name: provider-A -// - allow_missing: {} -// - provider_name: provider-B -// -// [#next-free-field: 7] -message JwtRequirement { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.http.jwt_authn.v2alpha.JwtRequirement"; - - oneof requires_type { - // Specify a required provider name. - string provider_name = 1; - - // Specify a required provider with audiences. - ProviderWithAudiences provider_and_audiences = 2; - - // Specify list of JwtRequirement. Their results are OR-ed. - // If any one of them passes, the result is passed. - JwtRequirementOrList requires_any = 3; - - // Specify list of JwtRequirement. Their results are AND-ed. - // All of them must pass, if one of them fails or missing, it fails. 
- JwtRequirementAndList requires_all = 4; - - // The requirement is always satisfied even if JWT is missing or the JWT - // verification fails. A typical usage is: this filter is used to only verify - // JWTs and pass the verified JWT payloads to another filter, the other filter - // will make decision. In this mode, all JWT tokens will be verified. - google.protobuf.Empty allow_missing_or_failed = 5; - - // The requirement is satisfied if JWT is missing, but failed if JWT is - // presented but invalid. Similar to allow_missing_or_failed, this is used - // to only verify JWTs and pass the verified payload to another filter. The - // different is this mode will reject requests with invalid tokens. - google.protobuf.Empty allow_missing = 6; - } -} - -// This message specifies a list of RequiredProvider. -// Their results are OR-ed; if any one of them passes, the result is passed -message JwtRequirementOrList { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.http.jwt_authn.v2alpha.JwtRequirementOrList"; - - // Specify a list of JwtRequirement. - repeated JwtRequirement requirements = 1 [(validate.rules).repeated = {min_items: 2}]; -} - -// This message specifies a list of RequiredProvider. -// Their results are AND-ed; all of them must pass, if one of them fails or missing, it fails. -message JwtRequirementAndList { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.http.jwt_authn.v2alpha.JwtRequirementAndList"; - - // Specify a list of JwtRequirement. - repeated JwtRequirement requirements = 1 [(validate.rules).repeated = {min_items: 2}]; -} - -// This message specifies a Jwt requirement for a specific Route condition. -// Example 1: -// -// .. code-block:: yaml -// -// - match: -// prefix: /healthz -// -// In above example, "requires" field is empty for /healthz prefix match, -// it means that requests matching the path prefix don't require JWT authentication. -// -// Example 2: -// -// .. 
code-block:: yaml -// -// - match: -// prefix: / -// requires: { provider_name: provider-A } -// -// In above example, all requests matched the path prefix require jwt authentication -// from "provider-A". -message RequirementRule { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.http.jwt_authn.v2alpha.RequirementRule"; - - // The route matching parameter. Only when the match is satisfied, the "requires" field will - // apply. - // - // For example: following match will match all requests. - // - // .. code-block:: yaml - // - // match: - // prefix: / - // - config.route.v3.RouteMatch match = 1 [(validate.rules).message = {required: true}]; - - // Specify a Jwt requirement. - // If not specified, Jwt verification is disabled. - oneof requirement_type { - // Specify a Jwt requirement. Please see detail comment in message JwtRequirement. - JwtRequirement requires = 2; - - // Use requirement_name to specify a Jwt requirement. - // This requirement_name MUST be specified at the - // :ref:`requirement_map ` - // in `JwtAuthentication`. - string requirement_name = 3 [(validate.rules).string = {min_len: 1}]; - } -} - -// This message specifies Jwt requirements based on stream_info.filterState. -// This FilterState should use `Router::StringAccessor` object to set a string value. -// Other HTTP filters can use it to specify Jwt requirements dynamically. -// -// Example: -// -// .. code-block:: yaml -// -// name: jwt_selector -// requires: -// issuer_1: -// provider_name: issuer1 -// issuer_2: -// provider_name: issuer2 -// -// If a filter set "jwt_selector" with "issuer_1" to FilterState for a request, -// jwt_authn filter will use JwtRequirement{"provider_name": "issuer1"} to verify. -message FilterStateRule { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.http.jwt_authn.v2alpha.FilterStateRule"; - - // The filter state name to retrieve the `Router::StringAccessor` object. 
- string name = 1 [(validate.rules).string = {min_len: 1}]; - - // A map of string keys to requirements. The string key is the string value - // in the FilterState with the name specified in the *name* field above. - map requires = 3; -} - -// This is the Envoy HTTP filter config for JWT authentication. -// -// For example: -// -// .. code-block:: yaml -// -// providers: -// provider1: -// issuer: issuer1 -// audiences: -// - audience1 -// - audience2 -// remote_jwks: -// http_uri: -// uri: https://example.com/.well-known/jwks.json -// cluster: example_jwks_cluster -// timeout: 1s -// provider2: -// issuer: issuer2 -// local_jwks: -// inline_string: jwks_string -// -// rules: -// # Not jwt verification is required for /health path -// - match: -// prefix: /health -// -// # Jwt verification for provider1 is required for path prefixed with "prefix" -// - match: -// prefix: /prefix -// requires: -// provider_name: provider1 -// -// # Jwt verification for either provider1 or provider2 is required for all other requests. -// - match: -// prefix: / -// requires: -// requires_any: -// requirements: -// - provider_name: provider1 -// - provider_name: provider2 -// -// [#next-free-field: 6] -message JwtAuthentication { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.http.jwt_authn.v2alpha.JwtAuthentication"; - - // Map of provider names to JwtProviders. - // - // .. code-block:: yaml - // - // providers: - // provider1: - // issuer: issuer1 - // audiences: - // - audience1 - // - audience2 - // remote_jwks: - // http_uri: - // uri: https://example.com/.well-known/jwks.json - // cluster: example_jwks_cluster - // timeout: 1s - // provider2: - // issuer: provider2 - // local_jwks: - // inline_string: jwks_string - // - map providers = 1; - - // Specifies requirements based on the route matches. The first matched requirement will be - // applied. If there are overlapped match conditions, please put the most specific match first. 
- // - // Examples - // - // .. code-block:: yaml - // - // rules: - // - match: - // prefix: /healthz - // - match: - // prefix: /baz - // requires: - // provider_name: provider1 - // - match: - // prefix: /foo - // requires: - // requires_any: - // requirements: - // - provider_name: provider1 - // - provider_name: provider2 - // - match: - // prefix: /bar - // requires: - // requires_all: - // requirements: - // - provider_name: provider1 - // - provider_name: provider2 - // - repeated RequirementRule rules = 2; - - // This message specifies Jwt requirements based on stream_info.filterState. - // Other HTTP filters can use it to specify Jwt requirements dynamically. - // The *rules* field above is checked first, if it could not find any matches, - // check this one. - FilterStateRule filter_state_rules = 3; - - // When set to true, bypass the `CORS preflight request - // `_ regardless of JWT - // requirements specified in the rules. - bool bypass_cors_preflight = 4; - - // A map of unique requirement_names to JwtRequirements. - // :ref:`requirement_name ` - // in `PerRouteConfig` uses this map to specify a JwtRequirement. - map requirement_map = 5; -} - -// Specify per-route config. -message PerRouteConfig { - oneof requirement_specifier { - option (validate.required) = true; - - // Disable Jwt Authentication for this route. - bool disabled = 1 [(validate.rules).bool = {const: true}]; - - // Use requirement_name to specify a JwtRequirement. - // This requirement_name MUST be specified at the - // :ref:`requirement_map ` - // in `JwtAuthentication`. If no, the requests using this route will be rejected with 403. 
- string requirement_name = 2 [(validate.rules).string = {min_len: 1}]; - } -} diff --git a/generated_api_shadow/envoy/extensions/filters/http/kill_request/v3/BUILD b/generated_api_shadow/envoy/extensions/filters/http/kill_request/v3/BUILD deleted file mode 100644 index 9a76b7e148e03..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/kill_request/v3/BUILD +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/type/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/filters/http/kill_request/v3/kill_request.proto b/generated_api_shadow/envoy/extensions/filters/http/kill_request/v3/kill_request.proto deleted file mode 100644 index a0a23b0de3a34..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/kill_request/v3/kill_request.proto +++ /dev/null @@ -1,36 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.http.kill_request.v3; - -import "envoy/type/v3/percent.proto"; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.http.kill_request.v3"; -option java_outer_classname = "KillRequestProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Kill Request] -// Kill Request :ref:`configuration overview `. -// [#extension: envoy.filters.http.kill_request] - -// Configuration for KillRequest filter. -message KillRequest { - // On which direction should the filter check for the `kill_request_header`. - // Default to `REQUEST` if unspecified. - enum Direction { - REQUEST = 0; - RESPONSE = 1; - } - - // The probability that a Kill request will be triggered. 
- type.v3.FractionalPercent probability = 1; - - // The name of the kill request header. If this field is not empty, it will override the :ref:`default header ` name. Otherwise the default header name will be used. - string kill_request_header = 2 - [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}]; - - Direction direction = 3 [(validate.rules).enum = {defined_only: true}]; -} diff --git a/generated_api_shadow/envoy/extensions/filters/http/local_ratelimit/v3/BUILD b/generated_api_shadow/envoy/extensions/filters/http/local_ratelimit/v3/BUILD deleted file mode 100644 index 6c58a43e4ff6b..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/local_ratelimit/v3/BUILD +++ /dev/null @@ -1,14 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/core/v3:pkg", - "//envoy/extensions/common/ratelimit/v3:pkg", - "//envoy/type/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/filters/http/local_ratelimit/v3/local_rate_limit.proto b/generated_api_shadow/envoy/extensions/filters/http/local_ratelimit/v3/local_rate_limit.proto deleted file mode 100644 index 1cf6c5f2fa52c..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/local_ratelimit/v3/local_rate_limit.proto +++ /dev/null @@ -1,109 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.http.local_ratelimit.v3; - -import "envoy/config/core/v3/base.proto"; -import "envoy/extensions/common/ratelimit/v3/ratelimit.proto"; -import "envoy/type/v3/http_status.proto"; -import "envoy/type/v3/token_bucket.proto"; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.http.local_ratelimit.v3"; -option java_outer_classname = 
"LocalRateLimitProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Local Rate limit] -// Local Rate limit :ref:`configuration overview `. -// [#extension: envoy.filters.http.local_ratelimit] - -// [#next-free-field: 12] -message LocalRateLimit { - // The human readable prefix to use when emitting stats. - string stat_prefix = 1 [(validate.rules).string = {min_len: 1}]; - - // This field allows for a custom HTTP response status code to the downstream client when - // the request has been rate limited. - // Defaults to 429 (TooManyRequests). - // - // .. note:: - // If this is set to < 400, 429 will be used instead. - type.v3.HttpStatus status = 2; - - // The token bucket configuration to use for rate limiting requests that are processed by this - // filter. Each request processed by the filter consumes a single token. If the token is available, - // the request will be allowed. If no tokens are available, the request will receive the configured - // rate limit status. - // - // .. note:: - // It's fine for the token bucket to be unset for the global configuration since the rate limit - // can be applied at a the virtual host or route level. Thus, the token bucket must be set - // for the per route configuration otherwise the config will be rejected. - // - // .. note:: - // When using per route configuration, the bucket becomes unique to that route. - // - // .. note:: - // In the current implementation the token bucket's :ref:`fill_interval - // ` must be >= 50ms to avoid too aggressive - // refills. - type.v3.TokenBucket token_bucket = 3; - - // If set, this will enable -- but not necessarily enforce -- the rate limit for the given - // fraction of requests. - // Defaults to 0% of requests for safety. - config.core.v3.RuntimeFractionalPercent filter_enabled = 4; - - // If set, this will enforce the rate limit decisions for the given fraction of requests. 
- // - // Note: this only applies to the fraction of enabled requests. - // - // Defaults to 0% of requests for safety. - config.core.v3.RuntimeFractionalPercent filter_enforced = 5; - - // Specifies a list of HTTP headers that should be added to each request that - // has been rate limited and is also forwarded upstream. This can only occur when the - // filter is enabled but not enforced. - repeated config.core.v3.HeaderValueOption request_headers_to_add_when_not_enforced = 10 - [(validate.rules).repeated = {max_items: 10}]; - - // Specifies a list of HTTP headers that should be added to each response for requests that - // have been rate limited. This occurs when the filter is either enabled or fully enforced. - repeated config.core.v3.HeaderValueOption response_headers_to_add = 6 - [(validate.rules).repeated = {max_items: 10}]; - - // The rate limit descriptor list to use in the local rate limit to override - // on. The rate limit descriptor is selected by the first full match from the - // request descriptors. - // - // Example on how to use ::ref:`this ` - // - // .. note:: - // - // In the current implementation the descriptor's token bucket :ref:`fill_interval - // ` must be a multiple - // global :ref:`token bucket's` fill interval. - // - // The descriptors must match verbatim for rate limiting to apply. There is no partial - // match by a subset of descriptor entries in the current implementation. - repeated common.ratelimit.v3.LocalRateLimitDescriptor descriptors = 8; - - // Specifies the rate limit configurations to be applied with the same - // stage number. If not set, the default stage number is 0. - // - // .. note:: - // - // The filter supports a range of 0 - 10 inclusively for stage numbers. - uint32 stage = 9 [(validate.rules).uint32 = {lte: 10}]; - - // Specifies the scope of the rate limiter's token bucket. - // If set to false, the token bucket is shared across all worker threads, - // thus the rate limits are applied per Envoy process. 
- // If set to true, a token bucket is allocated for each connection. - // Thus the rate limits are applied per connection thereby allowing - // one to rate limit requests on a per connection basis. - // If unspecified, the default value is false. - bool local_rate_limit_per_downstream_connection = 11; -} diff --git a/generated_api_shadow/envoy/extensions/filters/http/lua/v3/BUILD b/generated_api_shadow/envoy/extensions/filters/http/lua/v3/BUILD deleted file mode 100644 index 1c1a6f6b44235..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/lua/v3/BUILD +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/core/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/filters/http/lua/v3/lua.proto b/generated_api_shadow/envoy/extensions/filters/http/lua/v3/lua.proto deleted file mode 100644 index 1636c01ab1c75..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/lua/v3/lua.proto +++ /dev/null @@ -1,65 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.http.lua.v3; - -import "envoy/config/core/v3/base.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.http.lua.v3"; -option java_outer_classname = "LuaProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Lua] -// Lua :ref:`configuration overview `. -// [#extension: envoy.filters.http.lua] - -message Lua { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.http.lua.v2.Lua"; - - // The Lua code that Envoy will execute. 
This can be a very small script that - // further loads code from disk if desired. Note that if JSON configuration is used, the code must - // be properly escaped. YAML configuration may be easier to read since YAML supports multi-line - // strings so complex scripts can be easily expressed inline in the configuration. - string inline_code = 1 [(validate.rules).string = {min_len: 1}]; - - // Map of named Lua source codes that can be referenced in :ref:`LuaPerRoute - // `. The Lua source codes can be - // loaded from inline string or local files. - // - // Example: - // - // .. code-block:: yaml - // - // source_codes: - // hello.lua: - // inline_string: | - // function envoy_on_response(response_handle) - // -- Do something. - // end - // world.lua: - // filename: /etc/lua/world.lua - // - map source_codes = 2; -} - -message LuaPerRoute { - oneof override { - option (validate.required) = true; - - // Disable the Lua filter for this particular vhost or route. If disabled is specified in - // multiple per-filter-configs, the most specific one will be used. - bool disabled = 1 [(validate.rules).bool = {const: true}]; - - // A name of a Lua source code stored in - // :ref:`Lua.source_codes `. - string name = 2 [(validate.rules).string = {min_len: 1}]; - - // A configured per-route Lua source code that can be served by RDS or provided inline. - config.core.v3.DataSource source_code = 3; - } -} diff --git a/generated_api_shadow/envoy/extensions/filters/http/oauth2/v3alpha/BUILD b/generated_api_shadow/envoy/extensions/filters/http/oauth2/v3alpha/BUILD deleted file mode 100644 index 75d36b709935c..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/oauth2/v3alpha/BUILD +++ /dev/null @@ -1,15 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/core/v3:pkg", - "//envoy/config/route/v3:pkg", - "//envoy/extensions/transport_sockets/tls/v3:pkg", - "//envoy/type/matcher/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/filters/http/oauth2/v3alpha/oauth.proto b/generated_api_shadow/envoy/extensions/filters/http/oauth2/v3alpha/oauth.proto deleted file mode 100644 index e5f990512ca87..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/oauth2/v3alpha/oauth.proto +++ /dev/null @@ -1,89 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.http.oauth2.v3alpha; - -import "envoy/config/core/v3/http_uri.proto"; -import "envoy/config/route/v3/route_components.proto"; -import "envoy/extensions/transport_sockets/tls/v3/secret.proto"; -import "envoy/type/matcher/v3/path.proto"; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.http.oauth2.v3alpha"; -option java_outer_classname = "OauthProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).work_in_progress = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: OAuth] -// OAuth :ref:`configuration overview `. -// [#extension: envoy.filters.http.oauth2] -// - -message OAuth2Credentials { - // The client_id to be used in the authorize calls. This value will be URL encoded when sent to the OAuth server. - string client_id = 1 [(validate.rules).string = {min_len: 1}]; - - // The secret used to retrieve the access token. This value will be URL encoded when sent to the OAuth server. - transport_sockets.tls.v3.SdsSecretConfig token_secret = 2 - [(validate.rules).message = {required: true}]; - - // Configures how the secret token should be created. 
- oneof token_formation { - option (validate.required) = true; - - // If present, the secret token will be a HMAC using the provided secret. - transport_sockets.tls.v3.SdsSecretConfig hmac_secret = 3 - [(validate.rules).message = {required: true}]; - } -} - -// OAuth config -// -// [#next-free-field: 11] -message OAuth2Config { - // Endpoint on the authorization server to retrieve the access token from. - config.core.v3.HttpUri token_endpoint = 1; - - // The endpoint redirect to for authorization in response to unauthorized requests. - string authorization_endpoint = 2 [(validate.rules).string = {min_len: 1}]; - - // Credentials used for OAuth. - OAuth2Credentials credentials = 3 [(validate.rules).message = {required: true}]; - - // The redirect URI passed to the authorization endpoint. Supports header formatting - // tokens. For more information, including details on header value syntax, see the - // documentation on :ref:`custom request headers `. - // - // This URI should not contain any query parameters. - string redirect_uri = 4 [(validate.rules).string = {min_len: 1}]; - - // Matching criteria used to determine whether a path appears to be the result of a redirect from the authorization server. - type.matcher.v3.PathMatcher redirect_path_matcher = 5 - [(validate.rules).message = {required: true}]; - - // The path to sign a user out, clearing their credential cookies. - type.matcher.v3.PathMatcher signout_path = 6 [(validate.rules).message = {required: true}]; - - // Forward the OAuth token as a Bearer to upstream web service. - bool forward_bearer_token = 7; - - // Any request that matches any of the provided matchers will be passed through without OAuth validation. - repeated config.route.v3.HeaderMatcher pass_through_matcher = 8; - - // Optional list of OAuth scopes to be claimed in the authorization request. If not specified, - // defaults to "user" scope. 
- // OAuth RFC https://tools.ietf.org/html/rfc6749#section-3.3 - repeated string auth_scopes = 9; - - // Optional resource parameter for authorization request - // RFC: https://tools.ietf.org/html/rfc8707 - repeated string resources = 10; -} - -// Filter config. -message OAuth2 { - // Leave this empty to disable OAuth2 for a specific route, using per filter config. - OAuth2Config config = 1; -} diff --git a/generated_api_shadow/envoy/extensions/filters/http/on_demand/v3/BUILD b/generated_api_shadow/envoy/extensions/filters/http/on_demand/v3/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/on_demand/v3/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/extensions/filters/http/on_demand/v3/on_demand.proto b/generated_api_shadow/envoy/extensions/filters/http/on_demand/v3/on_demand.proto deleted file mode 100644 index 27e709f7a8d6c..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/on_demand/v3/on_demand.proto +++ /dev/null @@ -1,20 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.http.on_demand.v3; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.http.on_demand.v3"; -option java_outer_classname = "OnDemandProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: OnDemand] -// IP tagging :ref:`configuration overview `. 
-// [#extension: envoy.filters.http.on_demand] - -message OnDemand { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.http.on_demand.v2.OnDemand"; -} diff --git a/generated_api_shadow/envoy/extensions/filters/http/original_src/v3/BUILD b/generated_api_shadow/envoy/extensions/filters/http/original_src/v3/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/original_src/v3/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/extensions/filters/http/original_src/v3/original_src.proto b/generated_api_shadow/envoy/extensions/filters/http/original_src/v3/original_src.proto deleted file mode 100644 index ca752b4c75ce6..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/original_src/v3/original_src.proto +++ /dev/null @@ -1,28 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.http.original_src.v3; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.http.original_src.v3"; -option java_outer_classname = "OriginalSrcProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Original Src Filter] -// Use the Original source address on upstream connections. - -// The Original Src filter binds upstream connections to the original source address determined -// for the request. This address could come from something like the Proxy Protocol filter, or it -// could come from trusted http headers. 
-// [#extension: envoy.filters.http.original_src] -message OriginalSrc { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.http.original_src.v2alpha1.OriginalSrc"; - - // Sets the SO_MARK option on the upstream connection's socket to the provided value. Used to - // ensure that non-local addresses may be routed back through envoy when binding to the original - // source address. The option will not be applied if the mark is 0. - uint32 mark = 1; -} diff --git a/generated_api_shadow/envoy/extensions/filters/http/ratelimit/v3/BUILD b/generated_api_shadow/envoy/extensions/filters/http/ratelimit/v3/BUILD deleted file mode 100644 index 0bad14913d217..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/ratelimit/v3/BUILD +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/ratelimit/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/filters/http/ratelimit/v3/rate_limit.proto b/generated_api_shadow/envoy/extensions/filters/http/ratelimit/v3/rate_limit.proto deleted file mode 100644 index bc58e7f9b2e1a..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/ratelimit/v3/rate_limit.proto +++ /dev/null @@ -1,122 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.http.ratelimit.v3; - -import "envoy/config/ratelimit/v3/rls.proto"; - -import "google/protobuf/duration.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.http.ratelimit.v3"; -option java_outer_classname = "RateLimitProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status 
= ACTIVE; - -// [#protodoc-title: Rate limit] -// Rate limit :ref:`configuration overview `. -// [#extension: envoy.filters.http.ratelimit] - -// [#next-free-field: 10] -message RateLimit { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.http.rate_limit.v2.RateLimit"; - - // Defines the version of the standard to use for X-RateLimit headers. - enum XRateLimitHeadersRFCVersion { - // X-RateLimit headers disabled. - OFF = 0; - - // Use `draft RFC Version 03 `_. - DRAFT_VERSION_03 = 1; - } - - // The rate limit domain to use when calling the rate limit service. - string domain = 1 [(validate.rules).string = {min_len: 1}]; - - // Specifies the rate limit configurations to be applied with the same - // stage number. If not set, the default stage number is 0. - // - // .. note:: - // - // The filter supports a range of 0 - 10 inclusively for stage numbers. - uint32 stage = 2 [(validate.rules).uint32 = {lte: 10}]; - - // The type of requests the filter should apply to. The supported - // types are *internal*, *external* or *both*. A request is considered internal if - // :ref:`x-envoy-internal` is set to true. If - // :ref:`x-envoy-internal` is not set or false, a - // request is considered external. The filter defaults to *both*, and it will apply to all request - // types. - string request_type = 3 - [(validate.rules).string = {in: "internal" in: "external" in: "both" in: ""}]; - - // The timeout in milliseconds for the rate limit service RPC. If not - // set, this defaults to 20ms. - google.protobuf.Duration timeout = 4; - - // The filter's behaviour in case the rate limiting service does - // not respond back. When it is set to true, Envoy will not allow traffic in case of - // communication failure between rate limiting service and the proxy. - bool failure_mode_deny = 5; - - // Specifies whether a `RESOURCE_EXHAUSTED` gRPC code must be returned instead - // of the default `UNAVAILABLE` gRPC code for a rate limited gRPC call. 
The - // HTTP code will be 200 for a gRPC response. - bool rate_limited_as_resource_exhausted = 6; - - // Configuration for an external rate limit service provider. If not - // specified, any calls to the rate limit service will immediately return - // success. - config.ratelimit.v3.RateLimitServiceConfig rate_limit_service = 7 - [(validate.rules).message = {required: true}]; - - // Defines the standard version to use for X-RateLimit headers emitted by the filter: - // - // * ``X-RateLimit-Limit`` - indicates the request-quota associated to the - // client in the current time-window followed by the description of the - // quota policy. The values are returned by the rate limiting service in - // :ref:`current_limit` - // field. Example: `10, 10;w=1;name="per-ip", 1000;w=3600`. - // * ``X-RateLimit-Remaining`` - indicates the remaining requests in the - // current time-window. The values are returned by the rate limiting service - // in :ref:`limit_remaining` - // field. - // * ``X-RateLimit-Reset`` - indicates the number of seconds until reset of - // the current time-window. The values are returned by the rate limiting service - // in :ref:`duration_until_reset` - // field. - // - // In case rate limiting policy specifies more then one time window, the values - // above represent the window that is closest to reaching its limit. - // - // For more information about the headers specification see selected version of - // the `draft RFC `_. - // - // Disabled by default. - XRateLimitHeadersRFCVersion enable_x_ratelimit_headers = 8 - [(validate.rules).enum = {defined_only: true}]; - - // Disables emitting the :ref:`x-envoy-ratelimited` header - // in case of rate limiting (i.e. 429 responses). - // Having this header not present potentially makes the request retriable. - bool disable_x_envoy_ratelimited_header = 9; -} - -message RateLimitPerRoute { - enum VhRateLimitsOptions { - // Use the virtual host rate limits unless the route has a rate limit policy. 
- OVERRIDE = 0; - - // Use the virtual host rate limits even if the route has a rate limit policy. - INCLUDE = 1; - - // Ignore the virtual host rate limits even if the route does not have a rate limit policy. - IGNORE = 2; - } - - // Specifies if the rate limit filter should include the virtual host rate limits. - VhRateLimitsOptions vh_rate_limits = 1 [(validate.rules).enum = {defined_only: true}]; -} diff --git a/generated_api_shadow/envoy/extensions/filters/http/rbac/v3/BUILD b/generated_api_shadow/envoy/extensions/filters/http/rbac/v3/BUILD deleted file mode 100644 index fd183569e5a1e..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/rbac/v3/BUILD +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/rbac/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/filters/http/rbac/v3/rbac.proto b/generated_api_shadow/envoy/extensions/filters/http/rbac/v3/rbac.proto deleted file mode 100644 index 7ad7ac5e6aa25..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/rbac/v3/rbac.proto +++ /dev/null @@ -1,49 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.http.rbac.v3; - -import "envoy/config/rbac/v3/rbac.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.http.rbac.v3"; -option java_outer_classname = "RbacProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: RBAC] -// Role-Based Access Control :ref:`configuration overview `. -// [#extension: envoy.filters.http.rbac] - -// RBAC filter config. 
-message RBAC { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.http.rbac.v2.RBAC"; - - // Specify the RBAC rules to be applied globally. - // If absent, no enforcing RBAC policy will be applied. - // If present and empty, DENY. - config.rbac.v3.RBAC rules = 1; - - // Shadow rules are not enforced by the filter (i.e., returning a 403) - // but will emit stats and logs and can be used for rule testing. - // If absent, no shadow RBAC policy will be applied. - config.rbac.v3.RBAC shadow_rules = 2; - - // If specified, shadow rules will emit stats with the given prefix. - // This is useful to distinguish the stat when there are more than 1 RBAC filter configured with - // shadow rules. - string shadow_rules_stat_prefix = 3; -} - -message RBACPerRoute { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.http.rbac.v2.RBACPerRoute"; - - reserved 1; - - // Override the global configuration of the filter with this new config. - // If absent, the global RBAC policy will be disabled for this route. - RBAC rbac = 2; -} diff --git a/generated_api_shadow/envoy/extensions/filters/http/router/v3/BUILD b/generated_api_shadow/envoy/extensions/filters/http/router/v3/BUILD deleted file mode 100644 index 0b02b988e42ff..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/router/v3/BUILD +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/accesslog/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/filters/http/router/v3/router.proto b/generated_api_shadow/envoy/extensions/filters/http/router/v3/router.proto deleted file mode 100644 index ce595c057c01f..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/router/v3/router.proto +++ /dev/null @@ -1,91 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.http.router.v3; - -import "envoy/config/accesslog/v3/accesslog.proto"; - -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.http.router.v3"; -option java_outer_classname = "RouterProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Router] -// Router :ref:`configuration overview `. -// [#extension: envoy.filters.http.router] - -// [#next-free-field: 8] -message Router { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.http.router.v2.Router"; - - // Whether the router generates dynamic cluster statistics. Defaults to - // true. Can be disabled in high performance scenarios. - google.protobuf.BoolValue dynamic_stats = 1; - - // Whether to start a child span for egress routed calls. This can be - // useful in scenarios where other filters (auth, ratelimit, etc.) make - // outbound calls and have child spans rooted at the same ingress - // parent. Defaults to false. - bool start_child_span = 2; - - // Configuration for HTTP upstream logs emitted by the router. 
Upstream logs - // are configured in the same way as access logs, but each log entry represents - // an upstream request. Presuming retries are configured, multiple upstream - // requests may be made for each downstream (inbound) request. - repeated config.accesslog.v3.AccessLog upstream_log = 3; - - // Do not add any additional *x-envoy-* headers to requests or responses. This - // only affects the :ref:`router filter generated *x-envoy-* headers - // `, other Envoy filters and the HTTP - // connection manager may continue to set *x-envoy-* headers. - bool suppress_envoy_headers = 4; - - // Specifies a list of HTTP headers to strictly validate. Envoy will reject a - // request and respond with HTTP status 400 if the request contains an invalid - // value for any of the headers listed in this field. Strict header checking - // is only supported for the following headers: - // - // Value must be a ','-delimited list (i.e. no spaces) of supported retry - // policy values: - // - // * :ref:`config_http_filters_router_x-envoy-retry-grpc-on` - // * :ref:`config_http_filters_router_x-envoy-retry-on` - // - // Value must be an integer: - // - // * :ref:`config_http_filters_router_x-envoy-max-retries` - // * :ref:`config_http_filters_router_x-envoy-upstream-rq-timeout-ms` - // * :ref:`config_http_filters_router_x-envoy-upstream-rq-per-try-timeout-ms` - repeated string strict_check_headers = 5 [(validate.rules).repeated = { - items { - string { - in: "x-envoy-upstream-rq-timeout-ms" - in: "x-envoy-upstream-rq-per-try-timeout-ms" - in: "x-envoy-max-retries" - in: "x-envoy-retry-grpc-on" - in: "x-envoy-retry-on" - } - } - }]; - - // If not set, ingress Envoy will ignore - // :ref:`config_http_filters_router_x-envoy-expected-rq-timeout-ms` header, populated by egress - // Envoy, when deriving timeout for upstream cluster. - bool respect_expected_rq_timeout = 6; - - // If set, Envoy will avoid incrementing HTTP failure code stats - // on gRPC requests. 
This includes the individual status code value - // (e.g. upstream_rq_504) and group stats (e.g. upstream_rq_5xx). - // This field is useful if interested in relying only on the gRPC - // stats filter to define success and failure metrics for gRPC requests - // as not all failed gRPC requests charge HTTP status code metrics. See - // :ref:`gRPC stats filter` documentation - // for more details. - bool suppress_grpc_request_failure_code_stats = 7; -} diff --git a/generated_api_shadow/envoy/extensions/filters/http/set_metadata/v3/BUILD b/generated_api_shadow/envoy/extensions/filters/http/set_metadata/v3/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/set_metadata/v3/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/extensions/filters/http/set_metadata/v3/set_metadata.proto b/generated_api_shadow/envoy/extensions/filters/http/set_metadata/v3/set_metadata.proto deleted file mode 100644 index f7ff348e20255..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/set_metadata/v3/set_metadata.proto +++ /dev/null @@ -1,30 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.http.set_metadata.v3; - -import "google/protobuf/struct.proto"; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.http.set_metadata.v3"; -option java_outer_classname = "SetMetadataProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Set-Metadata Filter] -// -// This filters adds or updates dynamic metadata with static data. 
-// -// [#extension: envoy.filters.http.set_metadata] - -message Config { - // The metadata namespace. - string metadata_namespace = 1 [(validate.rules).string = {min_len: 1}]; - - // The value to update the namespace with. See - // :ref:`the filter documentation ` for - // more information on how this value is merged with potentially existing - // ones. - google.protobuf.Struct value = 2; -} diff --git a/generated_api_shadow/envoy/extensions/filters/http/tap/v3/BUILD b/generated_api_shadow/envoy/extensions/filters/http/tap/v3/BUILD deleted file mode 100644 index 6b2b1215048c6..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/tap/v3/BUILD +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/extensions/common/tap/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/filters/http/tap/v3/tap.proto b/generated_api_shadow/envoy/extensions/filters/http/tap/v3/tap.proto deleted file mode 100644 index 81779443e4a54..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/tap/v3/tap.proto +++ /dev/null @@ -1,28 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.http.tap.v3; - -import "envoy/extensions/common/tap/v3/common.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.http.tap.v3"; -option java_outer_classname = "TapProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Tap] -// Tap :ref:`configuration overview `. -// [#extension: envoy.filters.http.tap] - -// Top level configuration for the tap filter. 
-message Tap { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.http.tap.v2alpha.Tap"; - - // Common configuration for the HTTP tap filter. - common.tap.v3.CommonExtensionConfig common_config = 1 - [(validate.rules).message = {required: true}]; -} diff --git a/generated_api_shadow/envoy/extensions/filters/http/wasm/v3/BUILD b/generated_api_shadow/envoy/extensions/filters/http/wasm/v3/BUILD deleted file mode 100644 index c37174bdefc46..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/wasm/v3/BUILD +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/extensions/wasm/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/filters/http/wasm/v3/wasm.proto b/generated_api_shadow/envoy/extensions/filters/http/wasm/v3/wasm.proto deleted file mode 100644 index a0cfcae1afb57..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/http/wasm/v3/wasm.proto +++ /dev/null @@ -1,21 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.http.wasm.v3; - -import "envoy/extensions/wasm/v3/wasm.proto"; - -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.http.wasm.v3"; -option java_outer_classname = "WasmProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Wasm] -// [#extension: envoy.filters.http.wasm] -// Wasm :ref:`configuration overview `. - -message Wasm { - // General Plugin configuration. 
- envoy.extensions.wasm.v3.PluginConfig config = 1; -} diff --git a/generated_api_shadow/envoy/extensions/filters/listener/http_inspector/v3/BUILD b/generated_api_shadow/envoy/extensions/filters/listener/http_inspector/v3/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/listener/http_inspector/v3/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/extensions/filters/listener/http_inspector/v3/http_inspector.proto b/generated_api_shadow/envoy/extensions/filters/listener/http_inspector/v3/http_inspector.proto deleted file mode 100644 index cb439b0973ba9..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/listener/http_inspector/v3/http_inspector.proto +++ /dev/null @@ -1,20 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.listener.http_inspector.v3; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.listener.http_inspector.v3"; -option java_outer_classname = "HttpInspectorProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: HTTP Inspector Filter] -// Detect whether the application protocol is HTTP. 
-// [#extension: envoy.filters.listener.http_inspector] - -message HttpInspector { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.listener.http_inspector.v2.HttpInspector"; -} diff --git a/generated_api_shadow/envoy/extensions/filters/listener/original_dst/v3/BUILD b/generated_api_shadow/envoy/extensions/filters/listener/original_dst/v3/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/listener/original_dst/v3/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/extensions/filters/listener/original_dst/v3/original_dst.proto b/generated_api_shadow/envoy/extensions/filters/listener/original_dst/v3/original_dst.proto deleted file mode 100644 index 8239c5c42c528..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/listener/original_dst/v3/original_dst.proto +++ /dev/null @@ -1,20 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.listener.original_dst.v3; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.listener.original_dst.v3"; -option java_outer_classname = "OriginalDstProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Original Dst Filter] -// Use the Original destination address on downstream connections. 
-// [#extension: envoy.filters.listener.original_dst] - -message OriginalDst { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.listener.original_dst.v2.OriginalDst"; -} diff --git a/generated_api_shadow/envoy/extensions/filters/listener/original_src/v3/BUILD b/generated_api_shadow/envoy/extensions/filters/listener/original_src/v3/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/listener/original_src/v3/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/extensions/filters/listener/original_src/v3/original_src.proto b/generated_api_shadow/envoy/extensions/filters/listener/original_src/v3/original_src.proto deleted file mode 100644 index aa0603cdff47d..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/listener/original_src/v3/original_src.proto +++ /dev/null @@ -1,32 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.listener.original_src.v3; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.listener.original_src.v3"; -option java_outer_classname = "OriginalSrcProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Original Src Filter] -// Use the Original source address on upstream connections. -// [#extension: envoy.filters.listener.original_src] - -// The Original Src filter binds upstream connections to the original source address determined -// for the connection. 
This address could come from something like the Proxy Protocol filter, or it -// could come from trusted http headers. -message OriginalSrc { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.listener.original_src.v2alpha1.OriginalSrc"; - - // Whether to bind the port to the one used in the original downstream connection. - // [#not-implemented-hide:] - bool bind_port = 1; - - // Sets the SO_MARK option on the upstream connection's socket to the provided value. Used to - // ensure that non-local addresses may be routed back through envoy when binding to the original - // source address. The option will not be applied if the mark is 0. - uint32 mark = 2; -} diff --git a/generated_api_shadow/envoy/extensions/filters/listener/proxy_protocol/v3/BUILD b/generated_api_shadow/envoy/extensions/filters/listener/proxy_protocol/v3/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/listener/proxy_protocol/v3/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/extensions/filters/listener/proxy_protocol/v3/proxy_protocol.proto b/generated_api_shadow/envoy/extensions/filters/listener/proxy_protocol/v3/proxy_protocol.proto deleted file mode 100644 index fb8047d391e95..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/listener/proxy_protocol/v3/proxy_protocol.proto +++ /dev/null @@ -1,43 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.listener.proxy_protocol.v3; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.listener.proxy_protocol.v3"; -option java_outer_classname = "ProxyProtocolProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Proxy Protocol Filter] -// PROXY protocol listener filter. -// [#extension: envoy.filters.listener.proxy_protocol] - -message ProxyProtocol { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.listener.proxy_protocol.v2.ProxyProtocol"; - - message KeyValuePair { - // The namespace — if this is empty, the filter's namespace will be used. - string metadata_namespace = 1; - - // The key to use within the namespace. - string key = 2 [(validate.rules).string = {min_len: 1}]; - } - - // A Rule defines what metadata to apply when a header is present or missing. - message Rule { - // The type that triggers the rule - required - // TLV type is defined as uint8_t in proxy protocol. See `the spec - // `_ for details. - uint32 tlv_type = 1 [(validate.rules).uint32 = {lt: 256}]; - - // If the TLV type is present, apply this metadata KeyValuePair. 
- KeyValuePair on_tlv_present = 2; - } - - // The list of rules to apply to requests. - repeated Rule rules = 1; -} diff --git a/generated_api_shadow/envoy/extensions/filters/listener/tls_inspector/v3/BUILD b/generated_api_shadow/envoy/extensions/filters/listener/tls_inspector/v3/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/listener/tls_inspector/v3/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/extensions/filters/listener/tls_inspector/v3/tls_inspector.proto b/generated_api_shadow/envoy/extensions/filters/listener/tls_inspector/v3/tls_inspector.proto deleted file mode 100644 index eff9774844f4b..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/listener/tls_inspector/v3/tls_inspector.proto +++ /dev/null @@ -1,20 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.listener.tls_inspector.v3; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.listener.tls_inspector.v3"; -option java_outer_classname = "TlsInspectorProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: TLS Inspector Filter] -// Allows detecting whether the transport appears to be TLS or plaintext. 
-// [#extension: envoy.filters.listener.tls_inspector] - -message TlsInspector { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.listener.tls_inspector.v2.TlsInspector"; -} diff --git a/generated_api_shadow/envoy/extensions/filters/network/client_ssl_auth/v3/BUILD b/generated_api_shadow/envoy/extensions/filters/network/client_ssl_auth/v3/BUILD deleted file mode 100644 index 1c1a6f6b44235..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/network/client_ssl_auth/v3/BUILD +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/core/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/filters/network/client_ssl_auth/v3/client_ssl_auth.proto b/generated_api_shadow/envoy/extensions/filters/network/client_ssl_auth/v3/client_ssl_auth.proto deleted file mode 100644 index 2ed14c7f0e237..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/network/client_ssl_auth/v3/client_ssl_auth.proto +++ /dev/null @@ -1,50 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.network.client_ssl_auth.v3; - -import "envoy/config/core/v3/address.proto"; - -import "google/protobuf/duration.proto"; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.network.client_ssl_auth.v3"; -option java_outer_classname = "ClientSslAuthProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Client TLS authentication] -// Client TLS authentication -// :ref:`configuration overview `. 
-// [#extension: envoy.filters.network.client_ssl_auth] - -message ClientSSLAuth { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.network.client_ssl_auth.v2.ClientSSLAuth"; - - // The :ref:`cluster manager ` cluster that runs - // the authentication service. The filter will connect to the service every 60s to fetch the list - // of principals. The service must support the expected :ref:`REST API - // `. - string auth_api_cluster = 1 - [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_VALUE strict: false}]; - - // The prefix to use when emitting :ref:`statistics - // `. - string stat_prefix = 2 [(validate.rules).string = {min_len: 1}]; - - // Time in milliseconds between principal refreshes from the - // authentication service. Default is 60000 (60s). The actual fetch time - // will be this value plus a random jittered value between - // 0-refresh_delay_ms milliseconds. - google.protobuf.Duration refresh_delay = 3; - - // An optional list of IP address and subnet masks that should be white - // listed for access by the filter. If no list is provided, there is no - // IP allowlist. - repeated config.core.v3.CidrRange ip_white_list = 4 - [(udpa.annotations.field_migrate).rename = "ip_allowlist"]; -} diff --git a/generated_api_shadow/envoy/extensions/filters/network/connection_limit/v3/BUILD b/generated_api_shadow/envoy/extensions/filters/network/connection_limit/v3/BUILD deleted file mode 100644 index 1c1a6f6b44235..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/network/connection_limit/v3/BUILD +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/core/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/filters/network/connection_limit/v3/connection_limit.proto b/generated_api_shadow/envoy/extensions/filters/network/connection_limit/v3/connection_limit.proto deleted file mode 100644 index ccd30aaba6922..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/network/connection_limit/v3/connection_limit.proto +++ /dev/null @@ -1,41 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.network.connection_limit.v3; - -import "envoy/config/core/v3/base.proto"; - -import "google/protobuf/duration.proto"; -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.network.connection_limit.v3"; -option java_outer_classname = "ConnectionLimitProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Connection limit] -// Connection limit :ref:`configuration overview `. -// [#extension: envoy.filters.network.connection_limit] - -message ConnectionLimit { - // The prefix to use when emitting :ref:`statistics - // `. - string stat_prefix = 1 [(validate.rules).string = {min_len: 1}]; - - // The max connections configuration to use for new incoming connections that are processed - // by the filter's filter chain. When max_connection is reached, the incoming connection - // will be closed after delay duration. - google.protobuf.UInt64Value max_connections = 2 [(validate.rules).uint64 = {gte: 1}]; - - // The delay configuration to use for rejecting the connection after some specified time duration - // instead of immediately rejecting the connection. 
That way, a malicious user is not able to - // retry as fast as possible which provides a better DoS protection for Envoy. If this is not present, - // the connection will be closed immediately. - google.protobuf.Duration delay = 3; - - // Runtime flag that controls whether the filter is enabled or not. If not specified, defaults - // to enabled. - config.core.v3.RuntimeFeatureFlag runtime_enabled = 4; -} diff --git a/generated_api_shadow/envoy/extensions/filters/network/direct_response/v3/BUILD b/generated_api_shadow/envoy/extensions/filters/network/direct_response/v3/BUILD deleted file mode 100644 index 1c1a6f6b44235..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/network/direct_response/v3/BUILD +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/core/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/filters/network/direct_response/v3/config.proto b/generated_api_shadow/envoy/extensions/filters/network/direct_response/v3/config.proto deleted file mode 100644 index 2742372b2f91d..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/network/direct_response/v3/config.proto +++ /dev/null @@ -1,25 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.network.direct_response.v3; - -import "envoy/config/core/v3/base.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.network.direct_response.v3"; -option java_outer_classname = "ConfigProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Direct response] -// Direct response :ref:`configuration overview `. 
-// [#extension: envoy.filters.network.direct_response] - -message Config { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.network.direct_response.v2.Config"; - - // Response data as a data source. - config.core.v3.DataSource response = 1; -} diff --git a/generated_api_shadow/envoy/extensions/filters/network/dubbo_proxy/router/v3/BUILD b/generated_api_shadow/envoy/extensions/filters/network/dubbo_proxy/router/v3/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/network/dubbo_proxy/router/v3/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/extensions/filters/network/dubbo_proxy/router/v3/router.proto b/generated_api_shadow/envoy/extensions/filters/network/dubbo_proxy/router/v3/router.proto deleted file mode 100644 index fa1959a425c8e..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/network/dubbo_proxy/router/v3/router.proto +++ /dev/null @@ -1,19 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.network.dubbo_proxy.router.v3; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.network.dubbo_proxy.router.v3"; -option java_outer_classname = "RouterProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Router] -// Dubbo router :ref:`configuration overview `. 
- -message Router { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.dubbo.router.v2alpha1.Router"; -} diff --git a/generated_api_shadow/envoy/extensions/filters/network/dubbo_proxy/v3/BUILD b/generated_api_shadow/envoy/extensions/filters/network/dubbo_proxy/v3/BUILD deleted file mode 100644 index b6e6273d28f50..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/network/dubbo_proxy/v3/BUILD +++ /dev/null @@ -1,14 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/route/v3:pkg", - "//envoy/type/matcher/v3:pkg", - "//envoy/type/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/filters/network/dubbo_proxy/v3/dubbo_proxy.proto b/generated_api_shadow/envoy/extensions/filters/network/dubbo_proxy/v3/dubbo_proxy.proto deleted file mode 100644 index 646f053ca9b6c..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/network/dubbo_proxy/v3/dubbo_proxy.proto +++ /dev/null @@ -1,70 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.network.dubbo_proxy.v3; - -import "envoy/extensions/filters/network/dubbo_proxy/v3/route.proto"; - -import "google/protobuf/any.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.network.dubbo_proxy.v3"; -option java_outer_classname = "DubboProxyProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Dubbo Proxy] -// Dubbo Proxy :ref:`configuration overview `. -// [#extension: envoy.filters.network.dubbo_proxy] - -// Dubbo Protocol types supported by Envoy. 
-enum ProtocolType { - // the default protocol. - Dubbo = 0; -} - -// Dubbo Serialization types supported by Envoy. -enum SerializationType { - // the default serialization protocol. - Hessian2 = 0; -} - -// [#next-free-field: 6] -message DubboProxy { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.network.dubbo_proxy.v2alpha1.DubboProxy"; - - // The human readable prefix to use when emitting statistics. - string stat_prefix = 1 [(validate.rules).string = {min_len: 1}]; - - // Configure the protocol used. - ProtocolType protocol_type = 2 [(validate.rules).enum = {defined_only: true}]; - - // Configure the serialization protocol used. - SerializationType serialization_type = 3 [(validate.rules).enum = {defined_only: true}]; - - // The route table for the connection manager is static and is specified in this property. - repeated RouteConfiguration route_config = 4; - - // A list of individual Dubbo filters that make up the filter chain for requests made to the - // Dubbo proxy. Order matters as the filters are processed sequentially. For backwards - // compatibility, if no dubbo_filters are specified, a default Dubbo router filter - // (`envoy.filters.dubbo.router`) is used. - repeated DubboFilter dubbo_filters = 5; -} - -// DubboFilter configures a Dubbo filter. -message DubboFilter { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.network.dubbo_proxy.v2alpha1.DubboFilter"; - - // The name of the filter to instantiate. The name must match a supported - // filter. - string name = 1 [(validate.rules).string = {min_len: 1}]; - - // Filter specific configuration which depends on the filter being - // instantiated. See the supported filters for further documentation. 
- google.protobuf.Any config = 2; -} diff --git a/generated_api_shadow/envoy/extensions/filters/network/dubbo_proxy/v3/route.proto b/generated_api_shadow/envoy/extensions/filters/network/dubbo_proxy/v3/route.proto deleted file mode 100644 index e255985ed8e46..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/network/dubbo_proxy/v3/route.proto +++ /dev/null @@ -1,129 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.network.dubbo_proxy.v3; - -import "envoy/config/route/v3/route_components.proto"; -import "envoy/type/matcher/v3/string.proto"; -import "envoy/type/v3/range.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.network.dubbo_proxy.v3"; -option java_outer_classname = "RouteProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Dubbo Proxy Route Configuration] -// Dubbo Proxy :ref:`configuration overview `. - -// [#next-free-field: 6] -message RouteConfiguration { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.network.dubbo_proxy.v2alpha1.RouteConfiguration"; - - // The name of the route configuration. Reserved for future use in asynchronous route discovery. - string name = 1; - - // The interface name of the service. Wildcard interface are supported in the suffix or prefix form. - // e.g. ``*.methods.add`` will match ``com.dev.methods.add``, ``com.prod.methods.add``, etc. - // ``com.dev.methods.*`` will match ``com.dev.methods.add``, ``com.dev.methods.update``, etc. - // Special wildcard ``*`` matching any interface. - // - // .. note:: - // - // The wildcard will not match the empty string. - // e.g. ``*.methods.add`` will match ``com.dev.methods.add`` but not ``.methods.add``. - string interface = 2; - - // Which group does the interface belong to. 
- string group = 3; - - // The version number of the interface. - string version = 4; - - // The list of routes that will be matched, in order, against incoming requests. The first route - // that matches will be used. - repeated Route routes = 5; -} - -message Route { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.network.dubbo_proxy.v2alpha1.Route"; - - // Route matching parameters. - RouteMatch match = 1 [(validate.rules).message = {required: true}]; - - // Route request to some upstream cluster. - RouteAction route = 2 [(validate.rules).message = {required: true}]; -} - -message RouteMatch { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.network.dubbo_proxy.v2alpha1.RouteMatch"; - - // Method level routing matching. - MethodMatch method = 1; - - // Specifies a set of headers that the route should match on. The router will check the request’s - // headers against all the specified headers in the route config. A match will happen if all the - // headers in the route are present in the request with the same values (or based on presence if - // the value field is not in the config). - repeated config.route.v3.HeaderMatcher headers = 2; -} - -message RouteAction { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.network.dubbo_proxy.v2alpha1.RouteAction"; - - oneof cluster_specifier { - option (validate.required) = true; - - // Indicates the upstream cluster to which the request should be routed. - string cluster = 1; - - // Multiple upstream clusters can be specified for a given route. The - // request is routed to one of the upstream clusters based on weights - // assigned to each cluster. - // Currently ClusterWeight only supports the name and weight fields. 
- config.route.v3.WeightedCluster weighted_clusters = 2; - } -} - -message MethodMatch { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.network.dubbo_proxy.v2alpha1.MethodMatch"; - - // The parameter matching type. - message ParameterMatchSpecifier { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.network.dubbo_proxy.v2alpha1.MethodMatch.ParameterMatchSpecifier"; - - oneof parameter_match_specifier { - // If specified, header match will be performed based on the value of the header. - string exact_match = 3; - - // If specified, header match will be performed based on range. - // The rule will match if the request header value is within this range. - // The entire request header value must represent an integer in base 10 notation: consisting - // of an optional plus or minus sign followed by a sequence of digits. The rule will not match - // if the header value does not represent an integer. Match will fail for empty values, - // floating point numbers or if only a subsequence of the header value is an integer. - // - // Examples: - // - // * For range [-10,0), route will match for header value -1, but not for 0, - // "somestring", 10.9, "-1somestring" - type.v3.Int64Range range_match = 4; - } - } - - // The name of the method. - type.matcher.v3.StringMatcher name = 1; - - // Method parameter definition. - // The key is the parameter index, starting from 0. - // The value is the parameter matching type. - map params_match = 2; -} diff --git a/generated_api_shadow/envoy/extensions/filters/network/echo/v3/BUILD b/generated_api_shadow/envoy/extensions/filters/network/echo/v3/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/network/echo/v3/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/extensions/filters/network/echo/v3/echo.proto b/generated_api_shadow/envoy/extensions/filters/network/echo/v3/echo.proto deleted file mode 100644 index 077d87259b6b2..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/network/echo/v3/echo.proto +++ /dev/null @@ -1,20 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.network.echo.v3; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.network.echo.v3"; -option java_outer_classname = "EchoProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Echo] -// Echo :ref:`configuration overview `. -// [#extension: envoy.filters.network.echo] - -message Echo { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.network.echo.v2.Echo"; -} diff --git a/generated_api_shadow/envoy/extensions/filters/network/ext_authz/v3/BUILD b/generated_api_shadow/envoy/extensions/filters/network/ext_authz/v3/BUILD deleted file mode 100644 index 3f3a5395d2aa7..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/network/ext_authz/v3/BUILD +++ /dev/null @@ -1,13 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/core/v3:pkg", - "//envoy/type/matcher/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/filters/network/ext_authz/v3/ext_authz.proto b/generated_api_shadow/envoy/extensions/filters/network/ext_authz/v3/ext_authz.proto deleted file mode 100644 index c40adb5f26bd8..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/network/ext_authz/v3/ext_authz.proto +++ /dev/null @@ -1,64 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.network.ext_authz.v3; - -import "envoy/config/core/v3/config_source.proto"; -import "envoy/config/core/v3/grpc_service.proto"; -import "envoy/type/matcher/v3/metadata.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.network.ext_authz.v3"; -option java_outer_classname = "ExtAuthzProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Network External Authorization ] -// The network layer external authorization service configuration -// :ref:`configuration overview `. -// [#extension: envoy.filters.network.ext_authz] - -// External Authorization filter calls out to an external service over the -// gRPC Authorization API defined by -// :ref:`CheckRequest `. -// A failed check will cause this filter to close the TCP connection. -// [#next-free-field: 8] -message ExtAuthz { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.network.ext_authz.v2.ExtAuthz"; - - // The prefix to use when emitting statistics. - string stat_prefix = 1 [(validate.rules).string = {min_len: 1}]; - - // The external authorization gRPC service configuration. 
- // The default timeout is set to 200ms by this filter. - config.core.v3.GrpcService grpc_service = 2; - - // The filter's behaviour in case the external authorization service does - // not respond back. When it is set to true, Envoy will also allow traffic in case of - // communication failure between authorization service and the proxy. - // Defaults to false. - bool failure_mode_allow = 3; - - // Specifies if the peer certificate is sent to the external service. - // - // When this field is true, Envoy will include the peer X.509 certificate, if available, in the - // :ref:`certificate`. - bool include_peer_certificate = 4; - - // API version for ext_authz transport protocol. This describes the ext_authz gRPC endpoint and - // version of Check{Request,Response} used on the wire. - config.core.v3.ApiVersion transport_api_version = 5 - [(validate.rules).enum = {defined_only: true}]; - - // Specifies if the filter is enabled with metadata matcher. - // If this field is not specified, the filter will be enabled for all requests. - type.matcher.v3.MetadataMatcher filter_enabled_metadata = 6; - - // Optional labels that will be passed to :ref:`labels` in - // :ref:`destination`. - // The labels will be read from :ref:`metadata` with the specified key. - string bootstrap_metadata_labels_key = 7; -} diff --git a/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v3/BUILD b/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v3/BUILD deleted file mode 100644 index 456f4e9e61702..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v3/BUILD +++ /dev/null @@ -1,20 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/annotations:pkg", - "//envoy/config/accesslog/v3:pkg", - "//envoy/config/core/v3:pkg", - "//envoy/config/filter/network/http_connection_manager/v2:pkg", - "//envoy/config/route/v3:pkg", - "//envoy/config/trace/v3:pkg", - "//envoy/type/http/v3:pkg", - "//envoy/type/tracing/v3:pkg", - "//envoy/type/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto b/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto deleted file mode 100644 index b5544eaa93b7c..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto +++ /dev/null @@ -1,1018 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.network.http_connection_manager.v3; - -import "envoy/config/accesslog/v3/accesslog.proto"; -import "envoy/config/core/v3/base.proto"; -import "envoy/config/core/v3/config_source.proto"; -import "envoy/config/core/v3/extension.proto"; -import "envoy/config/core/v3/protocol.proto"; -import "envoy/config/core/v3/substitution_format_string.proto"; -import "envoy/config/route/v3/route.proto"; -import "envoy/config/route/v3/scoped_route.proto"; -import "envoy/config/trace/v3/http_tracer.proto"; -import "envoy/type/http/v3/path_transformation.proto"; -import "envoy/type/tracing/v3/custom_tag.proto"; -import "envoy/type/v3/percent.proto"; - -import "google/protobuf/any.proto"; -import "google/protobuf/duration.proto"; -import "google/protobuf/struct.proto"; -import "google/protobuf/wrappers.proto"; - -import "envoy/annotations/deprecation.proto"; -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/security.proto"; -import "udpa/annotations/status.proto"; 
-import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.network.http_connection_manager.v3"; -option java_outer_classname = "HttpConnectionManagerProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: HTTP connection manager] -// HTTP connection manager :ref:`configuration overview `. -// [#extension: envoy.filters.network.http_connection_manager] - -// [#next-free-field: 49] -message HttpConnectionManager { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager"; - - enum CodecType { - // For every new connection, the connection manager will determine which - // codec to use. This mode supports both ALPN for TLS listeners as well as - // protocol inference for plaintext listeners. If ALPN data is available, it - // is preferred, otherwise protocol inference is used. In almost all cases, - // this is the right option to choose for this setting. - AUTO = 0; - - // The connection manager will assume that the client is speaking HTTP/1.1. - HTTP1 = 1; - - // The connection manager will assume that the client is speaking HTTP/2 - // (Envoy does not require HTTP/2 to take place over TLS or to use ALPN. - // Prior knowledge is allowed). - HTTP2 = 2; - - // [#not-implemented-hide:] QUIC implementation is not production ready yet. Use this enum with - // caution to prevent accidental execution of QUIC code. I.e. `!= HTTP2` is no longer sufficient - // to distinguish HTTP1 and HTTP2 traffic. - HTTP3 = 3; - } - - enum ServerHeaderTransformation { - // Overwrite any Server header with the contents of server_name. - OVERWRITE = 0; - - // If no Server header is present, append Server server_name - // If a Server header is present, pass it through. 
- APPEND_IF_ABSENT = 1; - - // Pass through the value of the server header, and do not append a header - // if none is present. - PASS_THROUGH = 2; - } - - // How to handle the :ref:`config_http_conn_man_headers_x-forwarded-client-cert` (XFCC) HTTP - // header. - enum ForwardClientCertDetails { - // Do not send the XFCC header to the next hop. This is the default value. - SANITIZE = 0; - - // When the client connection is mTLS (Mutual TLS), forward the XFCC header - // in the request. - FORWARD_ONLY = 1; - - // When the client connection is mTLS, append the client certificate - // information to the request’s XFCC header and forward it. - APPEND_FORWARD = 2; - - // When the client connection is mTLS, reset the XFCC header with the client - // certificate information and send it to the next hop. - SANITIZE_SET = 3; - - // Always forward the XFCC header in the request, regardless of whether the - // client connection is mTLS. - ALWAYS_FORWARD_ONLY = 4; - } - - // Determines the action for request that contain %2F, %2f, %5C or %5c sequences in the URI path. - // This operation occurs before URL normalization and the merge slashes transformations if they were enabled. - enum PathWithEscapedSlashesAction { - // Default behavior specific to implementation (i.e. Envoy) of this configuration option. - // Envoy, by default, takes the KEEP_UNCHANGED action. - // NOTE: the implementation may change the default behavior at-will. - IMPLEMENTATION_SPECIFIC_DEFAULT = 0; - - // Keep escaped slashes. - KEEP_UNCHANGED = 1; - - // Reject client request with the 400 status. gRPC requests will be rejected with the INTERNAL (13) error code. - // The "httpN.downstream_rq_failed_path_normalization" counter is incremented for each rejected request. - REJECT_REQUEST = 2; - - // Unescape %2F and %5C sequences and redirect request to the new path if these sequences were present. - // Redirect occurs after path normalization and merge slashes transformations if they were configured. 
- // NOTE: gRPC requests will be rejected with the INTERNAL (13) error code. - // This option minimizes possibility of path confusion exploits by forcing request with unescaped slashes to - // traverse all parties: downstream client, intermediate proxies, Envoy and upstream server. - // The "httpN.downstream_rq_redirected_with_normalized_path" counter is incremented for each - // redirected request. - UNESCAPE_AND_REDIRECT = 3; - - // Unescape %2F and %5C sequences. - // Note: this option should not be enabled if intermediaries perform path based access control as - // it may lead to path confusion vulnerabilities. - UNESCAPE_AND_FORWARD = 4; - } - - // [#next-free-field: 10] - message Tracing { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager.Tracing"; - - enum OperationName { - // The HTTP listener is used for ingress/incoming requests. - INGRESS = 0; - - // The HTTP listener is used for egress/outgoing requests. - EGRESS = 1; - } - - // Target percentage of requests managed by this HTTP connection manager that will be force - // traced if the :ref:`x-client-trace-id ` - // header is set. This field is a direct analog for the runtime variable - // 'tracing.client_sampling' in the :ref:`HTTP Connection Manager - // `. - // Default: 100% - type.v3.Percent client_sampling = 3; - - // Target percentage of requests managed by this HTTP connection manager that will be randomly - // selected for trace generation, if not requested by the client or not forced. This field is - // a direct analog for the runtime variable 'tracing.random_sampling' in the - // :ref:`HTTP Connection Manager `. - // Default: 100% - type.v3.Percent random_sampling = 4; - - // Target percentage of requests managed by this HTTP connection manager that will be traced - // after all other sampling checks have been applied (client-directed, force tracing, random - // sampling). 
This field functions as an upper limit on the total configured sampling rate. For - // instance, setting client_sampling to 100% but overall_sampling to 1% will result in only 1% - // of client requests with the appropriate headers to be force traced. This field is a direct - // analog for the runtime variable 'tracing.global_enabled' in the - // :ref:`HTTP Connection Manager `. - // Default: 100% - type.v3.Percent overall_sampling = 5; - - // Whether to annotate spans with additional data. If true, spans will include logs for stream - // events. - bool verbose = 6; - - // Maximum length of the request path to extract and include in the HttpUrl tag. Used to - // truncate lengthy request paths to meet the needs of a tracing backend. - // Default: 256 - google.protobuf.UInt32Value max_path_tag_length = 7; - - // A list of custom tags with unique tag name to create tags for the active span. - repeated type.tracing.v3.CustomTag custom_tags = 8; - - // Configuration for an external tracing provider. - // If not specified, no tracing will be performed. - // - // .. attention:: - // Please be aware that *envoy.tracers.opencensus* provider can only be configured once - // in Envoy lifetime. - // Any attempts to reconfigure it or to use different configurations for different HCM filters - // will be rejected. - // Such a constraint is inherent to OpenCensus itself. It cannot be overcome without changes - // on OpenCensus side. 
- config.trace.v3.Tracing.Http provider = 9; - - OperationName hidden_envoy_deprecated_operation_name = 1 [ - deprecated = true, - (validate.rules).enum = {defined_only: true}, - (envoy.annotations.deprecated_at_minor_version) = "3.0", - (envoy.annotations.disallowed_by_default) = true - ]; - - repeated string hidden_envoy_deprecated_request_headers_for_tags = 2 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - } - - message InternalAddressConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager." - "InternalAddressConfig"; - - // Whether unix socket addresses should be considered internal. - bool unix_sockets = 1; - } - - // [#next-free-field: 7] - message SetCurrentClientCertDetails { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager." - "SetCurrentClientCertDetails"; - - reserved 2; - - // Whether to forward the subject of the client cert. Defaults to false. - google.protobuf.BoolValue subject = 1; - - // Whether to forward the entire client cert in URL encoded PEM format. This will appear in the - // XFCC header comma separated from other values with the value Cert="PEM". - // Defaults to false. - bool cert = 3; - - // Whether to forward the entire client cert chain (including the leaf cert) in URL encoded PEM - // format. This will appear in the XFCC header comma separated from other values with the value - // Chain="PEM". - // Defaults to false. - bool chain = 6; - - // Whether to forward the DNS type Subject Alternative Names of the client cert. - // Defaults to false. - bool dns = 4; - - // Whether to forward the URI type Subject Alternative Name of the client cert. Defaults to - // false. - bool uri = 5; - } - - // The configuration for HTTP upgrades. - // For each upgrade type desired, an UpgradeConfig must be added. - // - // .. 
warning:: - // - // The current implementation of upgrade headers does not handle - // multi-valued upgrade headers. Support for multi-valued headers may be - // added in the future if needed. - // - // .. warning:: - // The current implementation of upgrade headers does not work with HTTP/2 - // upstreams. - message UpgradeConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager." - "UpgradeConfig"; - - // The case-insensitive name of this upgrade, e.g. "websocket". - // For each upgrade type present in upgrade_configs, requests with - // Upgrade: [upgrade_type] - // will be proxied upstream. - string upgrade_type = 1; - - // If present, this represents the filter chain which will be created for - // this type of upgrade. If no filters are present, the filter chain for - // HTTP connections will be used for this upgrade type. - repeated HttpFilter filters = 2; - - // Determines if upgrades are enabled or disabled by default. Defaults to true. - // This can be overridden on a per-route basis with :ref:`cluster - // ` as documented in the - // :ref:`upgrade documentation `. - google.protobuf.BoolValue enabled = 3; - } - - // [#not-implemented-hide:] Transformations that apply to path headers. Transformations are applied - // before any processing of requests by HTTP filters, routing, and matching. Only the normalized - // path will be visible internally if a transformation is enabled. Any path rewrites that the - // router performs (e.g. :ref:`regex_rewrite - // ` or :ref:`prefix_rewrite - // `) will apply to the *:path* header - // destined for the upstream. - // - // Note: access logging and tracing will show the original *:path* header. - message PathNormalizationOptions { - // [#not-implemented-hide:] Normalization applies internally before any processing of requests by - // HTTP filters, routing, and matching *and* will affect the forwarded *:path* header. 
Defaults - // to :ref:`NormalizePathRFC3986 - // `. When not - // specified, this value may be overridden by the runtime variable - // :ref:`http_connection_manager.normalize_path`. - // Envoy will respond with 400 to paths that are malformed (e.g. for paths that fail RFC 3986 - // normalization due to disallowed characters.) - type.http.v3.PathTransformation forwarding_transformation = 1; - - // [#not-implemented-hide:] Normalization only applies internally before any processing of - // requests by HTTP filters, routing, and matching. These will be applied after full - // transformation is applied. The *:path* header before this transformation will be restored in - // the router filter and sent upstream unless it was mutated by a filter. Defaults to no - // transformations. - // Multiple actions can be applied in the same Transformation, forming a sequential - // pipeline. The transformations will be performed in the order that they appear. Envoy will - // respond with 400 to paths that are malformed (e.g. for paths that fail RFC 3986 - // normalization due to disallowed characters.) - type.http.v3.PathTransformation http_filter_transformation = 2; - } - - reserved 27; - - // Supplies the type of codec that the connection manager should use. - CodecType codec_type = 1 [(validate.rules).enum = {defined_only: true}]; - - // The human readable prefix to use when emitting statistics for the - // connection manager. See the :ref:`statistics documentation ` for - // more information. - string stat_prefix = 2 [(validate.rules).string = {min_len: 1}]; - - oneof route_specifier { - option (validate.required) = true; - - // The connection manager’s route table will be dynamically loaded via the RDS API. - Rds rds = 3; - - // The route table for the connection manager is static and is specified in this property. 
- config.route.v3.RouteConfiguration route_config = 4; - - // A route table will be dynamically assigned to each request based on request attributes - // (e.g., the value of a header). The "routing scopes" (i.e., route tables) and "scope keys" are - // specified in this message. - ScopedRoutes scoped_routes = 31; - } - - // A list of individual HTTP filters that make up the filter chain for - // requests made to the connection manager. :ref:`Order matters ` - // as the filters are processed sequentially as request events happen. - repeated HttpFilter http_filters = 5; - - // Whether the connection manager manipulates the :ref:`config_http_conn_man_headers_user-agent` - // and :ref:`config_http_conn_man_headers_downstream-service-cluster` headers. See the linked - // documentation for more information. Defaults to false. - google.protobuf.BoolValue add_user_agent = 6; - - // Presence of the object defines whether the connection manager - // emits :ref:`tracing ` data to the :ref:`configured tracing provider - // `. - Tracing tracing = 7; - - // Additional settings for HTTP requests handled by the connection manager. These will be - // applicable to both HTTP1 and HTTP2 requests. - config.core.v3.HttpProtocolOptions common_http_protocol_options = 35 - [(udpa.annotations.security).configure_for_untrusted_downstream = true]; - - // Additional HTTP/1 settings that are passed to the HTTP/1 codec. - config.core.v3.Http1ProtocolOptions http_protocol_options = 8; - - // Additional HTTP/2 settings that are passed directly to the HTTP/2 codec. - config.core.v3.Http2ProtocolOptions http2_protocol_options = 9 - [(udpa.annotations.security).configure_for_untrusted_downstream = true]; - - // Additional HTTP/3 settings that are passed directly to the HTTP/3 codec. - // [#not-implemented-hide:] - config.core.v3.Http3ProtocolOptions http3_protocol_options = 44; - - // An optional override that the connection manager will write to the server - // header in responses. 
If not set, the default is *envoy*. - string server_name = 10 - [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}]; - - // Defines the action to be applied to the Server header on the response path. - // By default, Envoy will overwrite the header with the value specified in - // server_name. - ServerHeaderTransformation server_header_transformation = 34 - [(validate.rules).enum = {defined_only: true}]; - - // Allows for explicit transformation of the :scheme header on the request path. - // If not set, Envoy's default :ref:`scheme ` - // handling applies. - config.core.v3.SchemeHeaderTransformation scheme_header_transformation = 48; - - // The maximum request headers size for incoming connections. - // If unconfigured, the default max request headers allowed is 60 KiB. - // Requests that exceed this limit will receive a 431 response. - google.protobuf.UInt32Value max_request_headers_kb = 29 - [(validate.rules).uint32 = {lte: 8192 gt: 0}]; - - // The stream idle timeout for connections managed by the connection manager. - // If not specified, this defaults to 5 minutes. The default value was selected - // so as not to interfere with any smaller configured timeouts that may have - // existed in configurations prior to the introduction of this feature, while - // introducing robustness to TCP connections that terminate without a FIN. - // - // This idle timeout applies to new streams and is overridable by the - // :ref:`route-level idle_timeout - // `. Even on a stream in - // which the override applies, prior to receipt of the initial request - // headers, the :ref:`stream_idle_timeout - // ` - // applies. Each time an encode/decode event for headers or data is processed - // for the stream, the timer will be reset. If the timeout fires, the stream - // is terminated with a 408 Request Timeout error code if no upstream response - // header has been received, otherwise a stream reset occurs. 
- // - // This timeout also specifies the amount of time that Envoy will wait for the peer to open enough - // window to write any remaining stream data once the entirety of stream data (local end stream is - // true) has been buffered pending available window. In other words, this timeout defends against - // a peer that does not release enough window to completely write the stream, even though all - // data has been proxied within available flow control windows. If the timeout is hit in this - // case, the :ref:`tx_flush_timeout ` counter will be - // incremented. Note that :ref:`max_stream_duration - // ` does not apply to - // this corner case. - // - // If the :ref:`overload action ` "envoy.overload_actions.reduce_timeouts" - // is configured, this timeout is scaled according to the value for - // :ref:`HTTP_DOWNSTREAM_STREAM_IDLE `. - // - // Note that it is possible to idle timeout even if the wire traffic for a stream is non-idle, due - // to the granularity of events presented to the connection manager. For example, while receiving - // very large request headers, it may be the case that there is traffic regularly arriving on the - // wire while the connection manage is only able to observe the end-of-headers event, hence the - // stream may still idle timeout. - // - // A value of 0 will completely disable the connection manager stream idle - // timeout, although per-route idle timeout overrides will continue to apply. - google.protobuf.Duration stream_idle_timeout = 24 - [(udpa.annotations.security).configure_for_untrusted_downstream = true]; - - // The amount of time that Envoy will wait for the entire request to be received. - // The timer is activated when the request is initiated, and is disarmed when the last byte of the - // request is sent upstream (i.e. all decoding filters have processed the request), OR when the - // response is initiated. If not specified or set to 0, this timeout is disabled. 
- google.protobuf.Duration request_timeout = 28 - [(udpa.annotations.security).configure_for_untrusted_downstream = true]; - - // The amount of time that Envoy will wait for the request headers to be received. The timer is - // activated when the first byte of the headers is received, and is disarmed when the last byte of - // the headers has been received. If not specified or set to 0, this timeout is disabled. - google.protobuf.Duration request_headers_timeout = 41 [ - (validate.rules).duration = {gte {}}, - (udpa.annotations.security).configure_for_untrusted_downstream = true - ]; - - // The time that Envoy will wait between sending an HTTP/2 “shutdown - // notification” (GOAWAY frame with max stream ID) and a final GOAWAY frame. - // This is used so that Envoy provides a grace period for new streams that - // race with the final GOAWAY frame. During this grace period, Envoy will - // continue to accept new streams. After the grace period, a final GOAWAY - // frame is sent and Envoy will start refusing new streams. Draining occurs - // both when a connection hits the idle timeout or during general server - // draining. The default grace period is 5000 milliseconds (5 seconds) if this - // option is not specified. - google.protobuf.Duration drain_timeout = 12; - - // The delayed close timeout is for downstream connections managed by the HTTP connection manager. - // It is defined as a grace period after connection close processing has been locally initiated - // during which Envoy will wait for the peer to close (i.e., a TCP FIN/RST is received by Envoy - // from the downstream connection) prior to Envoy closing the socket associated with that - // connection. - // NOTE: This timeout is enforced even when the socket associated with the downstream connection - // is pending a flush of the write buffer. However, any progress made writing data to the socket - // will restart the timer associated with this timeout. 
This means that the total grace period for - // a socket in this state will be - // +. - // - // Delaying Envoy's connection close and giving the peer the opportunity to initiate the close - // sequence mitigates a race condition that exists when downstream clients do not drain/process - // data in a connection's receive buffer after a remote close has been detected via a socket - // write(). This race leads to such clients failing to process the response code sent by Envoy, - // which could result in erroneous downstream processing. - // - // If the timeout triggers, Envoy will close the connection's socket. - // - // The default timeout is 1000 ms if this option is not specified. - // - // .. NOTE:: - // To be useful in avoiding the race condition described above, this timeout must be set - // to *at least* +<100ms to account for - // a reasonable "worst" case processing time for a full iteration of Envoy's event loop>. - // - // .. WARNING:: - // A value of 0 will completely disable delayed close processing. When disabled, the downstream - // connection's socket will be closed immediately after the write flush is completed or will - // never close if the write flush does not complete. - google.protobuf.Duration delayed_close_timeout = 26; - - // Configuration for :ref:`HTTP access logs ` - // emitted by the connection manager. - repeated config.accesslog.v3.AccessLog access_log = 13; - - // If set to true, the connection manager will use the real remote address - // of the client connection when determining internal versus external origin and manipulating - // various headers. If set to false or absent, the connection manager will use the - // :ref:`config_http_conn_man_headers_x-forwarded-for` HTTP header. See the documentation for - // :ref:`config_http_conn_man_headers_x-forwarded-for`, - // :ref:`config_http_conn_man_headers_x-envoy-internal`, and - // :ref:`config_http_conn_man_headers_x-envoy-external-address` for more information. 
- google.protobuf.BoolValue use_remote_address = 14 - [(udpa.annotations.security).configure_for_untrusted_downstream = true]; - - // The number of additional ingress proxy hops from the right side of the - // :ref:`config_http_conn_man_headers_x-forwarded-for` HTTP header to trust when - // determining the origin client's IP address. The default is zero if this option - // is not specified. See the documentation for - // :ref:`config_http_conn_man_headers_x-forwarded-for` for more information. - uint32 xff_num_trusted_hops = 19; - - // The configuration for the original IP detection extensions. - // - // When configured the extensions will be called along with the request headers - // and information about the downstream connection, such as the directly connected address. - // Each extension will then use these parameters to decide the request's effective remote address. - // If an extension fails to detect the original IP address and isn't configured to reject - // the request, the HCM will try the remaining extensions until one succeeds or rejects - // the request. If the request isn't rejected nor any extension succeeds, the HCM will - // fallback to using the remote address. - // - // .. WARNING:: - // Extensions cannot be used in conjunction with :ref:`use_remote_address - // ` - // nor :ref:`xff_num_trusted_hops - // `. - // - // [#extension-category: envoy.http.original_ip_detection] - repeated config.core.v3.TypedExtensionConfig original_ip_detection_extensions = 46; - - // Configures what network addresses are considered internal for stats and header sanitation - // purposes. If unspecified, only RFC1918 IP addresses will be considered internal. - // See the documentation for :ref:`config_http_conn_man_headers_x-envoy-internal` for more - // information about internal/external addresses. 
- InternalAddressConfig internal_address_config = 25; - - // If set, Envoy will not append the remote address to the - // :ref:`config_http_conn_man_headers_x-forwarded-for` HTTP header. This may be used in - // conjunction with HTTP filters that explicitly manipulate XFF after the HTTP connection manager - // has mutated the request headers. While :ref:`use_remote_address - // ` - // will also suppress XFF addition, it has consequences for logging and other - // Envoy uses of the remote address, so *skip_xff_append* should be used - // when only an elision of XFF addition is intended. - bool skip_xff_append = 21; - - // Via header value to append to request and response headers. If this is - // empty, no via header will be appended. - string via = 22 [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}]; - - // Whether the connection manager will generate the :ref:`x-request-id - // ` header if it does not exist. This defaults to - // true. Generating a random UUID4 is expensive so in high throughput scenarios where this feature - // is not desired it can be disabled. - google.protobuf.BoolValue generate_request_id = 15; - - // Whether the connection manager will keep the :ref:`x-request-id - // ` header if passed for a request that is edge - // (Edge request is the request from external clients to front Envoy) and not reset it, which - // is the current Envoy behaviour. This defaults to false. - bool preserve_external_request_id = 32; - - // If set, Envoy will always set :ref:`x-request-id ` header in response. - // If this is false or not set, the request ID is returned in responses only if tracing is forced using - // :ref:`x-envoy-force-trace ` header. - bool always_set_request_id_in_response = 37; - - // How to handle the :ref:`config_http_conn_man_headers_x-forwarded-client-cert` (XFCC) HTTP - // header. 
- ForwardClientCertDetails forward_client_cert_details = 16 - [(validate.rules).enum = {defined_only: true}]; - - // This field is valid only when :ref:`forward_client_cert_details - // ` - // is APPEND_FORWARD or SANITIZE_SET and the client connection is mTLS. It specifies the fields in - // the client certificate to be forwarded. Note that in the - // :ref:`config_http_conn_man_headers_x-forwarded-client-cert` header, *Hash* is always set, and - // *By* is always set when the client certificate presents the URI type Subject Alternative Name - // value. - SetCurrentClientCertDetails set_current_client_cert_details = 17; - - // If proxy_100_continue is true, Envoy will proxy incoming "Expect: - // 100-continue" headers upstream, and forward "100 Continue" responses - // downstream. If this is false or not set, Envoy will instead strip the - // "Expect: 100-continue" header, and send a "100 Continue" response itself. - bool proxy_100_continue = 18; - - // If - // :ref:`use_remote_address - // ` - // is true and represent_ipv4_remote_address_as_ipv4_mapped_ipv6 is true and the remote address is - // an IPv4 address, the address will be mapped to IPv6 before it is appended to *x-forwarded-for*. - // This is useful for testing compatibility of upstream services that parse the header value. For - // example, 50.0.0.1 is represented as ::FFFF:50.0.0.1. See `IPv4-Mapped IPv6 Addresses - // `_ for details. This will also affect the - // :ref:`config_http_conn_man_headers_x-envoy-external-address` header. See - // :ref:`http_connection_manager.represent_ipv4_remote_address_as_ipv4_mapped_ipv6 - // ` for runtime - // control. - // [#not-implemented-hide:] - bool represent_ipv4_remote_address_as_ipv4_mapped_ipv6 = 20; - - repeated UpgradeConfig upgrade_configs = 23; - - // Should paths be normalized according to RFC 3986 before any processing of - // requests by HTTP filters or routing? This affects the upstream *:path* header - // as well. 
For paths that fail this check, Envoy will respond with 400 to - // paths that are malformed. This defaults to false currently but will default - // true in the future. When not specified, this value may be overridden by the - // runtime variable - // :ref:`http_connection_manager.normalize_path`. - // See `Normalization and Comparison `_ - // for details of normalization. - // Note that Envoy does not perform - // `case normalization `_ - google.protobuf.BoolValue normalize_path = 30; - - // Determines if adjacent slashes in the path are merged into one before any processing of - // requests by HTTP filters or routing. This affects the upstream *:path* header as well. Without - // setting this option, incoming requests with path `//dir///file` will not match against route - // with `prefix` match set to `/dir`. Defaults to `false`. Note that slash merging is not part of - // `HTTP spec `_ and is provided for convenience. - bool merge_slashes = 33; - - // Action to take when request URL path contains escaped slash sequences (%2F, %2f, %5C and %5c). - // The default value can be overridden by the :ref:`http_connection_manager.path_with_escaped_slashes_action` - // runtime variable. - // The :ref:`http_connection_manager.path_with_escaped_slashes_action_sampling` runtime - // variable can be used to apply the action to a portion of all requests. - PathWithEscapedSlashesAction path_with_escaped_slashes_action = 45; - - // The configuration of the request ID extension. This includes operations such as - // generation, validation, and associated tracing operations. If empty, the - // :ref:`UuidRequestIdConfig ` - // default extension is used with default parameters. See the documentation for that extension - // for details on what it does. Customizing the configuration for the default extension can be - // achieved by configuring it explicitly here. For example, to disable trace reason packing, - // the following configuration can be used: - // - // .. 
validated-code-block:: yaml - // :type-name: envoy.extensions.filters.network.http_connection_manager.v3.RequestIDExtension - // - // typed_config: - // "@type": type.googleapis.com/envoy.extensions.request_id.uuid.v3.UuidRequestIdConfig - // pack_trace_reason: false - // - // [#extension-category: envoy.request_id] - RequestIDExtension request_id_extension = 36; - - // The configuration to customize local reply returned by Envoy. It can customize status code, - // body text and response content type. If not specified, status code and text body are hard - // coded in Envoy, the response content type is plain text. - LocalReplyConfig local_reply_config = 38; - - // Determines if the port part should be removed from host/authority header before any processing - // of request by HTTP filters or routing. The port would be removed only if it is equal to the :ref:`listener's` - // local port. This affects the upstream host header unless the method is - // CONNECT in which case if no filter adds a port the original port will be restored before headers are - // sent upstream. - // Without setting this option, incoming requests with host `example:443` will not match against - // route with :ref:`domains` match set to `example`. Defaults to `false`. Note that port removal is not part - // of `HTTP spec `_ and is provided for convenience. - // Only one of `strip_matching_host_port` or `strip_any_host_port` can be set. - bool strip_matching_host_port = 39 - [(udpa.annotations.field_migrate).oneof_promotion = "strip_port_mode"]; - - oneof strip_port_mode { - // Determines if the port part should be removed from host/authority header before any processing - // of request by HTTP filters or routing. - // This affects the upstream host header unless the method is CONNECT in - // which case if no filter adds a port the original port will be restored before headers are sent upstream. 
- // Without setting this option, incoming requests with host `example:443` will not match against - // route with :ref:`domains` match set to `example`. Defaults to `false`. Note that port removal is not part - // of `HTTP spec `_ and is provided for convenience. - // Only one of `strip_matching_host_port` or `strip_any_host_port` can be set. - bool strip_any_host_port = 42; - } - - // Governs Envoy's behavior when receiving invalid HTTP from downstream. - // If this option is false (default), Envoy will err on the conservative side handling HTTP - // errors, terminating both HTTP/1.1 and HTTP/2 connections when receiving an invalid request. - // If this option is set to true, Envoy will be more permissive, only resetting the invalid - // stream in the case of HTTP/2 and leaving the connection open where possible (if the entire - // request is read for HTTP/1.1) - // In general this should be true for deployments receiving trusted traffic (L2 Envoys, - // company-internal mesh) and false when receiving untrusted traffic (edge deployments). - // - // If different behaviors for invalid_http_message for HTTP/1 and HTTP/2 are - // desired, one should use the new HTTP/1 option :ref:`override_stream_error_on_invalid_http_message - // ` or the new HTTP/2 option - // :ref:`override_stream_error_on_invalid_http_message - // ` - // *not* the deprecated but similarly named :ref:`stream_error_on_invalid_http_messaging - // ` - google.protobuf.BoolValue stream_error_on_invalid_http_message = 40; - - // [#not-implemented-hide:] Path normalization configuration. This includes - // configurations for transformations (e.g. RFC 3986 normalization or merge - // adjacent slashes) and the policy to apply them. The policy determines - // whether transformations affect the forwarded *:path* header. RFC 3986 path - // normalization is enabled by default and the default policy is that the - // normalized header will be forwarded. See :ref:`PathNormalizationOptions - // ` - // for details. 
- PathNormalizationOptions path_normalization_options = 43; - - // Determines if trailing dot of the host should be removed from host/authority header before any - // processing of request by HTTP filters or routing. - // This affects the upstream host header. - // Without setting this option, incoming requests with host `example.com.` will not match against - // route with :ref:`domains` match set to `example.com`. Defaults to `false`. - // When the incoming request contains a host/authority header that includes a port number, - // setting this option will strip a trailing dot, if present, from the host section, - // leaving the port as is (e.g. host value `example.com.:443` will be updated to `example.com:443`). - bool strip_trailing_host_dot = 47; - - google.protobuf.Duration hidden_envoy_deprecated_idle_timeout = 11 [ - deprecated = true, - (envoy.annotations.deprecated_at_minor_version) = "3.0", - (envoy.annotations.disallowed_by_default) = true - ]; -} - -// The configuration to customize local reply returned by Envoy. -message LocalReplyConfig { - // Configuration of list of mappers which allows to filter and change local response. - // The mappers will be checked by the specified order until one is matched. - repeated ResponseMapper mappers = 1; - - // The configuration to form response body from the :ref:`command operators ` - // and to specify response content type as one of: plain/text or application/json. - // - // Example one: "plain/text" ``body_format``. - // - // .. validated-code-block:: yaml - // :type-name: envoy.config.core.v3.SubstitutionFormatString - // - // text_format: "%LOCAL_REPLY_BODY%:%RESPONSE_CODE%:path=%REQ(:path)%\n" - // - // The following response body in "plain/text" format will be generated for a request with - // local reply body of "upstream connection error", response_code=503 and path=/foo. - // - // .. code-block:: text - // - // upstream connect error:503:path=/foo - // - // Example two: "application/json" ``body_format``. 
- // - // .. validated-code-block:: yaml - // :type-name: envoy.config.core.v3.SubstitutionFormatString - // - // json_format: - // status: "%RESPONSE_CODE%" - // message: "%LOCAL_REPLY_BODY%" - // path: "%REQ(:path)%" - // - // The following response body in "application/json" format would be generated for a request with - // local reply body of "upstream connection error", response_code=503 and path=/foo. - // - // .. code-block:: json - // - // { - // "status": 503, - // "message": "upstream connection error", - // "path": "/foo" - // } - // - config.core.v3.SubstitutionFormatString body_format = 2; -} - -// The configuration to filter and change local response. -// [#next-free-field: 6] -message ResponseMapper { - // Filter to determine if this mapper should apply. - config.accesslog.v3.AccessLogFilter filter = 1 [(validate.rules).message = {required: true}]; - - // The new response status code if specified. - google.protobuf.UInt32Value status_code = 2 [(validate.rules).uint32 = {lt: 600 gte: 200}]; - - // The new local reply body text if specified. It will be used in the `%LOCAL_REPLY_BODY%` - // command operator in the `body_format`. - config.core.v3.DataSource body = 3; - - // A per mapper `body_format` to override the :ref:`body_format `. - // It will be used when this mapper is matched. - config.core.v3.SubstitutionFormatString body_format_override = 4; - - // HTTP headers to add to a local reply. This allows the response mapper to append, to add - // or to override headers of any local reply before it is sent to a downstream client. - repeated config.core.v3.HeaderValueOption headers_to_add = 5 - [(validate.rules).repeated = {max_items: 1000}]; -} - -message Rds { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.network.http_connection_manager.v2.Rds"; - - // Configuration source specifier for RDS. 
- config.core.v3.ConfigSource config_source = 1 [(validate.rules).message = {required: true}]; - - // The name of the route configuration. This name will be passed to the RDS - // API. This allows an Envoy configuration with multiple HTTP listeners (and - // associated HTTP connection manager filters) to use different route - // configurations. - string route_config_name = 2; -} - -// This message is used to work around the limitations with 'oneof' and repeated fields. -message ScopedRouteConfigurationsList { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.network.http_connection_manager.v2.ScopedRouteConfigurationsList"; - - repeated config.route.v3.ScopedRouteConfiguration scoped_route_configurations = 1 - [(validate.rules).repeated = {min_items: 1}]; -} - -// [#next-free-field: 6] -message ScopedRoutes { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.network.http_connection_manager.v2.ScopedRoutes"; - - // Specifies the mechanism for constructing "scope keys" based on HTTP request attributes. These - // keys are matched against a set of :ref:`Key` - // objects assembled from :ref:`ScopedRouteConfiguration` - // messages distributed via SRDS (the Scoped Route Discovery Service) or assigned statically via - // :ref:`scoped_route_configurations_list`. - // - // Upon receiving a request's headers, the Router will build a key using the algorithm specified - // by this message. This key will be used to look up the routing table (i.e., the - // :ref:`RouteConfiguration`) to use for the request. - message ScopeKeyBuilder { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.network.http_connection_manager.v2.ScopedRoutes.ScopeKeyBuilder"; - - // Specifies the mechanism for constructing key fragments which are composed into scope keys. 
- message FragmentBuilder { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.network.http_connection_manager.v2.ScopedRoutes.ScopeKeyBuilder." - "FragmentBuilder"; - - // Specifies how the value of a header should be extracted. - // The following example maps the structure of a header to the fields in this message. - // - // .. code:: - // - // <0> <1> <-- index - // X-Header: a=b;c=d - // | || | - // | || \----> - // | || - // | |\----> - // | | - // | \----> - // | - // \----> - // - // Each 'a=b' key-value pair constitutes an 'element' of the header field. - message HeaderValueExtractor { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.network.http_connection_manager.v2.ScopedRoutes.ScopeKeyBuilder." - "FragmentBuilder.HeaderValueExtractor"; - - // Specifies a header field's key value pair to match on. - message KvElement { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.network.http_connection_manager.v2.ScopedRoutes.ScopeKeyBuilder." - "FragmentBuilder.HeaderValueExtractor.KvElement"; - - // The separator between key and value (e.g., '=' separates 'k=v;...'). - // If an element is an empty string, the element is ignored. - // If an element contains no separator, the whole element is parsed as key and the - // fragment value is an empty string. - // If there are multiple values for a matched key, the first value is returned. - string separator = 1 [(validate.rules).string = {min_len: 1}]; - - // The key to match on. - string key = 2 [(validate.rules).string = {min_len: 1}]; - } - - // The name of the header field to extract the value from. - // - // .. note:: - // - // If the header appears multiple times only the first value is used. - string name = 1 [(validate.rules).string = {min_len: 1}]; - - // The element separator (e.g., ';' separates 'a;b;c;d'). - // Default: empty string. This causes the entirety of the header field to be extracted. 
- // If this field is set to an empty string and 'index' is used in the oneof below, 'index' - // must be set to 0. - string element_separator = 2; - - oneof extract_type { - // Specifies the zero based index of the element to extract. - // Note Envoy concatenates multiple values of the same header key into a comma separated - // string, the splitting always happens after the concatenation. - uint32 index = 3; - - // Specifies the key value pair to extract the value from. - KvElement element = 4; - } - } - - oneof type { - option (validate.required) = true; - - // Specifies how a header field's value should be extracted. - HeaderValueExtractor header_value_extractor = 1; - } - } - - // The final(built) scope key consists of the ordered union of these fragments, which are compared in order with the - // fragments of a :ref:`ScopedRouteConfiguration`. - // A missing fragment during comparison will make the key invalid, i.e., the computed key doesn't match any key. - repeated FragmentBuilder fragments = 1 [(validate.rules).repeated = {min_items: 1}]; - } - - // The name assigned to the scoped routing configuration. - string name = 1 [(validate.rules).string = {min_len: 1}]; - - // The algorithm to use for constructing a scope key for each request. - ScopeKeyBuilder scope_key_builder = 2 [(validate.rules).message = {required: true}]; - - // Configuration source specifier for RDS. - // This config source is used to subscribe to RouteConfiguration resources specified in - // ScopedRouteConfiguration messages. - config.core.v3.ConfigSource rds_config_source = 3 [(validate.rules).message = {required: true}]; - - oneof config_specifier { - option (validate.required) = true; - - // The set of routing scopes corresponding to the HCM. A scope is assigned to a request by - // matching a key constructed from the request's attributes according to the algorithm specified - // by the - // :ref:`ScopeKeyBuilder` - // in this message. 
- ScopedRouteConfigurationsList scoped_route_configurations_list = 4; - - // The set of routing scopes associated with the HCM will be dynamically loaded via the SRDS - // API. A scope is assigned to a request by matching a key constructed from the request's - // attributes according to the algorithm specified by the - // :ref:`ScopeKeyBuilder` - // in this message. - ScopedRds scoped_rds = 5; - } -} - -message ScopedRds { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.network.http_connection_manager.v2.ScopedRds"; - - // Configuration source specifier for scoped RDS. - config.core.v3.ConfigSource scoped_rds_config_source = 1 - [(validate.rules).message = {required: true}]; - - // xdstp:// resource locator for scoped RDS collection. - // [#not-implemented-hide:] - string srds_resources_locator = 2; -} - -// [#next-free-field: 7] -message HttpFilter { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.network.http_connection_manager.v2.HttpFilter"; - - reserved 3; - - // The name of the filter configuration. The name is used as a fallback to - // select an extension if the type of the configuration proto is not - // sufficient. It also serves as a resource name in ExtensionConfigDS. - string name = 1 [(validate.rules).string = {min_len: 1}]; - - oneof config_type { - // Filter specific configuration which depends on the filter being instantiated. See the supported - // filters for further documentation. - // - // To support configuring a :ref:`match tree `, use an - // :ref:`ExtensionWithMatcher ` - // with the desired HTTP filter. - // [#extension-category: envoy.filters.http] - google.protobuf.Any typed_config = 4; - - // Configuration source specifier for an extension configuration discovery service. - // In case of a failure and without the default configuration, the HTTP listener responds with code 500. 
- // Extension configs delivered through this mechanism are not expected to require warming (see https://github.com/envoyproxy/envoy/issues/12061). - // - // To support configuring a :ref:`match tree `, use an - // :ref:`ExtensionWithMatcher ` - // with the desired HTTP filter. This works for both the default filter configuration as well - // as for filters provided via the API. - config.core.v3.ExtensionConfigSource config_discovery = 5; - - google.protobuf.Struct hidden_envoy_deprecated_config = 2 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - } - - // If true, clients that do not support this filter may ignore the - // filter but otherwise accept the config. - // Otherwise, clients that do not support this filter must reject the config. - // This is also same with typed per filter config. - bool is_optional = 6; -} - -message RequestIDExtension { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.network.http_connection_manager.v2.RequestIDExtension"; - - // Request ID extension specific configuration. - google.protobuf.Any typed_config = 1; -} - -// [#protodoc-title: Envoy Mobile HTTP connection manager] -// HTTP connection manager for use in Envoy mobile. -// [#extension: envoy.filters.network.envoy_mobile_http_connection_manager] -message EnvoyMobileHttpConnectionManager { - // The configuration for the underlying HttpConnectionManager which will be - // instantiated for Envoy mobile. - HttpConnectionManager config = 1; -} diff --git a/generated_api_shadow/envoy/extensions/filters/network/local_ratelimit/v3/BUILD b/generated_api_shadow/envoy/extensions/filters/network/local_ratelimit/v3/BUILD deleted file mode 100644 index ad2fc9a9a84fd..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/network/local_ratelimit/v3/BUILD +++ /dev/null @@ -1,13 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/core/v3:pkg", - "//envoy/type/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/filters/network/local_ratelimit/v3/local_rate_limit.proto b/generated_api_shadow/envoy/extensions/filters/network/local_ratelimit/v3/local_rate_limit.proto deleted file mode 100644 index 3ee3655b7c3c9..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/network/local_ratelimit/v3/local_rate_limit.proto +++ /dev/null @@ -1,46 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.network.local_ratelimit.v3; - -import "envoy/config/core/v3/base.proto"; -import "envoy/type/v3/token_bucket.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.network.local_ratelimit.v3"; -option java_outer_classname = "LocalRateLimitProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Local rate limit] -// Local rate limit :ref:`configuration overview `. -// [#extension: envoy.filters.network.local_ratelimit] - -message LocalRateLimit { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.network.local_rate_limit.v2alpha.LocalRateLimit"; - - // The prefix to use when emitting :ref:`statistics - // `. - string stat_prefix = 1 [(validate.rules).string = {min_len: 1}]; - - // The token bucket configuration to use for rate limiting connections that are processed by the - // filter's filter chain. Each incoming connection processed by the filter consumes a single - // token. If the token is available, the connection will be allowed. If no tokens are available, - // the connection will be immediately closed. 
- // - // .. note:: - // In the current implementation each filter and filter chain has an independent rate limit. - // - // .. note:: - // In the current implementation the token bucket's :ref:`fill_interval - // ` must be >= 50ms to avoid too aggressive - // refills. - type.v3.TokenBucket token_bucket = 2 [(validate.rules).message = {required: true}]; - - // Runtime flag that controls whether the filter is enabled or not. If not specified, defaults - // to enabled. - config.core.v3.RuntimeFeatureFlag runtime_enabled = 3; -} diff --git a/generated_api_shadow/envoy/extensions/filters/network/mongo_proxy/v3/BUILD b/generated_api_shadow/envoy/extensions/filters/network/mongo_proxy/v3/BUILD deleted file mode 100644 index d399b876a7f43..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/network/mongo_proxy/v3/BUILD +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/extensions/filters/common/fault/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/filters/network/mongo_proxy/v3/mongo_proxy.proto b/generated_api_shadow/envoy/extensions/filters/network/mongo_proxy/v3/mongo_proxy.proto deleted file mode 100644 index ebdfb6f2fcc0c..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/network/mongo_proxy/v3/mongo_proxy.proto +++ /dev/null @@ -1,48 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.network.mongo_proxy.v3; - -import "envoy/extensions/filters/common/fault/v3/fault.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.network.mongo_proxy.v3"; -option java_outer_classname = "MongoProxyProto"; -option 
java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Mongo proxy] -// MongoDB :ref:`configuration overview `. -// [#extension: envoy.filters.network.mongo_proxy] - -// [#next-free-field: 6] -message MongoProxy { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.network.mongo_proxy.v2.MongoProxy"; - - // The human readable prefix to use when emitting :ref:`statistics - // `. - string stat_prefix = 1 [(validate.rules).string = {min_len: 1}]; - - // The optional path to use for writing Mongo access logs. If not access log - // path is specified no access logs will be written. Note that access log is - // also gated :ref:`runtime `. - string access_log = 2; - - // Inject a fixed delay before proxying a Mongo operation. Delays are - // applied to the following MongoDB operations: Query, Insert, GetMore, - // and KillCursors. Once an active delay is in progress, all incoming - // data up until the timer event fires will be a part of the delay. - common.fault.v3.FaultDelay delay = 3; - - // Flag to specify whether :ref:`dynamic metadata - // ` should be emitted. Defaults to false. - bool emit_dynamic_metadata = 4; - - // List of commands to emit metrics for. Defaults to "delete", "insert", and "update". - // Note that metrics will not be emitted for "find" commands, since those are considered - // queries, and metrics for those are emitted under a dedicated "query" namespace. - repeated string commands = 5; -} diff --git a/generated_api_shadow/envoy/extensions/filters/network/ratelimit/v3/BUILD b/generated_api_shadow/envoy/extensions/filters/network/ratelimit/v3/BUILD deleted file mode 100644 index 9276f5ab3d2dd..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/network/ratelimit/v3/BUILD +++ /dev/null @@ -1,13 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/ratelimit/v3:pkg", - "//envoy/extensions/common/ratelimit/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/filters/network/ratelimit/v3/rate_limit.proto b/generated_api_shadow/envoy/extensions/filters/network/ratelimit/v3/rate_limit.proto deleted file mode 100644 index 2fcdda846b6af..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/network/ratelimit/v3/rate_limit.proto +++ /dev/null @@ -1,53 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.network.ratelimit.v3; - -import "envoy/config/ratelimit/v3/rls.proto"; -import "envoy/extensions/common/ratelimit/v3/ratelimit.proto"; - -import "google/protobuf/duration.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.network.ratelimit.v3"; -option java_outer_classname = "RateLimitProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Rate limit] -// Rate limit :ref:`configuration overview `. -// [#extension: envoy.filters.network.ratelimit] - -// [#next-free-field: 7] -message RateLimit { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.network.rate_limit.v2.RateLimit"; - - // The prefix to use when emitting :ref:`statistics `. - string stat_prefix = 1 [(validate.rules).string = {min_len: 1}]; - - // The rate limit domain to use in the rate limit service request. - string domain = 2 [(validate.rules).string = {min_len: 1}]; - - // The rate limit descriptor list to use in the rate limit service request. 
- repeated common.ratelimit.v3.RateLimitDescriptor descriptors = 3 - [(validate.rules).repeated = {min_items: 1}]; - - // The timeout in milliseconds for the rate limit service RPC. If not - // set, this defaults to 20ms. - google.protobuf.Duration timeout = 4; - - // The filter's behaviour in case the rate limiting service does - // not respond back. When it is set to true, Envoy will not allow traffic in case of - // communication failure between rate limiting service and the proxy. - // Defaults to false. - bool failure_mode_deny = 5; - - // Configuration for an external rate limit service provider. If not - // specified, any calls to the rate limit service will immediately return - // success. - config.ratelimit.v3.RateLimitServiceConfig rate_limit_service = 6 - [(validate.rules).message = {required: true}]; -} diff --git a/generated_api_shadow/envoy/extensions/filters/network/rbac/v3/BUILD b/generated_api_shadow/envoy/extensions/filters/network/rbac/v3/BUILD deleted file mode 100644 index fd183569e5a1e..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/network/rbac/v3/BUILD +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/rbac/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/filters/network/rbac/v3/rbac.proto b/generated_api_shadow/envoy/extensions/filters/network/rbac/v3/rbac.proto deleted file mode 100644 index 4d1ff296fa4aa..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/network/rbac/v3/rbac.proto +++ /dev/null @@ -1,64 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.network.rbac.v3; - -import "envoy/config/rbac/v3/rbac.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.network.rbac.v3"; -option java_outer_classname = "RbacProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: RBAC] -// Role-Based Access Control :ref:`configuration overview `. -// [#extension: envoy.filters.network.rbac] - -// RBAC network filter config. -// -// Header should not be used in rules/shadow_rules in RBAC network filter as -// this information is only available in :ref:`RBAC http filter `. -// [#next-free-field: 6] -message RBAC { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.network.rbac.v2.RBAC"; - - enum EnforcementType { - // Apply RBAC policies when the first byte of data arrives on the connection. - ONE_TIME_ON_FIRST_BYTE = 0; - - // Continuously apply RBAC policies as data arrives. Use this mode when - // using RBAC with message oriented protocols such as Mongo, MySQL, Kafka, - // etc. when the protocol decoders emit dynamic metadata such as the - // resources being accessed and the operations on the resources. 
- CONTINUOUS = 1; - } - - // Specify the RBAC rules to be applied globally. - // If absent, no enforcing RBAC policy will be applied. - // If present and empty, DENY. - config.rbac.v3.RBAC rules = 1; - - // Shadow rules are not enforced by the filter but will emit stats and logs - // and can be used for rule testing. - // If absent, no shadow RBAC policy will be applied. - config.rbac.v3.RBAC shadow_rules = 2; - - // If specified, shadow rules will emit stats with the given prefix. - // This is useful to distinguish the stat when there are more than 1 RBAC filter configured with - // shadow rules. - string shadow_rules_stat_prefix = 5; - - // The prefix to use when emitting statistics. - string stat_prefix = 3 [(validate.rules).string = {min_len: 1}]; - - // RBAC enforcement strategy. By default RBAC will be enforced only once - // when the first byte of data arrives from the downstream. When used in - // conjunction with filters that emit dynamic metadata after decoding - // every payload (e.g., Mongo, MySQL, Kafka) set the enforcement type to - // CONTINUOUS to enforce RBAC policies on every message boundary. - EnforcementType enforcement_type = 4; -} diff --git a/generated_api_shadow/envoy/extensions/filters/network/redis_proxy/v3/BUILD b/generated_api_shadow/envoy/extensions/filters/network/redis_proxy/v3/BUILD deleted file mode 100644 index cc70c42fc4eb0..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/network/redis_proxy/v3/BUILD +++ /dev/null @@ -1,14 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/annotations:pkg", - "//envoy/config/core/v3:pkg", - "//envoy/config/filter/network/redis_proxy/v2:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/filters/network/redis_proxy/v3/redis_proxy.proto b/generated_api_shadow/envoy/extensions/filters/network/redis_proxy/v3/redis_proxy.proto deleted file mode 100644 index 2df7c3e3f6104..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/network/redis_proxy/v3/redis_proxy.proto +++ /dev/null @@ -1,324 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.network.redis_proxy.v3; - -import "envoy/config/core/v3/base.proto"; - -import "google/protobuf/duration.proto"; -import "google/protobuf/wrappers.proto"; - -import "envoy/annotations/deprecation.proto"; -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/sensitive.proto"; -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.network.redis_proxy.v3"; -option java_outer_classname = "RedisProxyProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Redis Proxy] -// Redis Proxy :ref:`configuration overview `. -// [#extension: envoy.filters.network.redis_proxy] - -// [#next-free-field: 9] -message RedisProxy { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.network.redis_proxy.v2.RedisProxy"; - - // Redis connection pool settings. 
- // [#next-free-field: 9] - message ConnPoolSettings { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.network.redis_proxy.v2.RedisProxy.ConnPoolSettings"; - - // ReadPolicy controls how Envoy routes read commands to Redis nodes. This is currently - // supported for Redis Cluster. All ReadPolicy settings except MASTER may return stale data - // because replication is asynchronous and requires some delay. You need to ensure that your - // application can tolerate stale data. - enum ReadPolicy { - // Default mode. Read from the current primary node. - MASTER = 0 [(udpa.annotations.enum_value_migrate).rename = "PRIMARY"]; - - // Read from the primary, but if it is unavailable, read from replica nodes. - PREFER_MASTER = 1 [(udpa.annotations.enum_value_migrate).rename = "PREFER_PRIMARY"]; - - // Read from replica nodes. If multiple replica nodes are present within a shard, a random - // node is selected. Healthy nodes have precedent over unhealthy nodes. - REPLICA = 2; - - // Read from the replica nodes (similar to REPLICA), but if all replicas are unavailable (not - // present or unhealthy), read from the primary. - PREFER_REPLICA = 3; - - // Read from any node of the cluster. A random node is selected among the primary and - // replicas, healthy nodes have precedent over unhealthy nodes. - ANY = 4; - } - - // Per-operation timeout in milliseconds. The timer starts when the first - // command of a pipeline is written to the backend connection. Each response received from Redis - // resets the timer since it signifies that the next command is being processed by the backend. - // The only exception to this behavior is when a connection to a backend is not yet established. - // In that case, the connect timeout on the cluster will govern the timeout until the connection - // is ready. 
- google.protobuf.Duration op_timeout = 1 [(validate.rules).duration = {required: true}]; - - // Use hash tagging on every redis key to guarantee that keys with the same hash tag will be - // forwarded to the same upstream. The hash key used for determining the upstream in a - // consistent hash ring configuration will be computed from the hash tagged key instead of the - // whole key. The algorithm used to compute the hash tag is identical to the `redis-cluster - // implementation `_. - // - // Examples: - // - // * '{user1000}.following' and '{user1000}.followers' **will** be sent to the same upstream - // * '{user1000}.following' and '{user1001}.following' **might** be sent to the same upstream - bool enable_hashtagging = 2; - - // Accept `moved and ask redirection - // `_ errors from upstream - // redis servers, and retry commands to the specified target server. The target server does not - // need to be known to the cluster manager. If the command cannot be redirected, then the - // original error is passed downstream unchanged. By default, this support is not enabled. - bool enable_redirection = 3; - - // Maximum size of encoded request buffer before flush is triggered and encoded requests - // are sent upstream. If this is unset, the buffer flushes whenever it receives data - // and performs no batching. - // This feature makes it possible for multiple clients to send requests to Envoy and have - // them batched- for example if one is running several worker processes, each with its own - // Redis connection. There is no benefit to using this with a single downstream process. - // Recommended size (if enabled) is 1024 bytes. - uint32 max_buffer_size_before_flush = 4; - - // The encoded request buffer is flushed N milliseconds after the first request has been - // encoded, unless the buffer size has already exceeded `max_buffer_size_before_flush`. - // If `max_buffer_size_before_flush` is not set, this flush timer is not used. 
Otherwise, - // the timer should be set according to the number of clients, overall request rate and - // desired maximum latency for a single command. For example, if there are many requests - // being batched together at a high rate, the buffer will likely be filled before the timer - // fires. Alternatively, if the request rate is lower the buffer will not be filled as often - // before the timer fires. - // If `max_buffer_size_before_flush` is set, but `buffer_flush_timeout` is not, the latter - // defaults to 3ms. - google.protobuf.Duration buffer_flush_timeout = 5; - - // `max_upstream_unknown_connections` controls how many upstream connections to unknown hosts - // can be created at any given time by any given worker thread (see `enable_redirection` for - // more details). If the host is unknown and a connection cannot be created due to enforcing - // this limit, then redirection will fail and the original redirection error will be passed - // downstream unchanged. This limit defaults to 100. - google.protobuf.UInt32Value max_upstream_unknown_connections = 6; - - // Enable per-command statistics per upstream cluster, in addition to the filter level aggregate - // count. These commands are measured in microseconds. - bool enable_command_stats = 8; - - // Read policy. The default is to read from the primary. - ReadPolicy read_policy = 7 [(validate.rules).enum = {defined_only: true}]; - } - - message PrefixRoutes { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.network.redis_proxy.v2.RedisProxy.PrefixRoutes"; - - message Route { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.network.redis_proxy.v2.RedisProxy.PrefixRoutes.Route"; - - // The router is capable of shadowing traffic from one cluster to another. The current - // implementation is "fire and forget," meaning Envoy will not wait for the shadow cluster to - // respond before returning the response from the primary cluster. 
All normal statistics are - // collected for the shadow cluster making this feature useful for testing. - message RequestMirrorPolicy { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.network.redis_proxy.v2.RedisProxy.PrefixRoutes.Route." - "RequestMirrorPolicy"; - - // Specifies the cluster that requests will be mirrored to. The cluster must - // exist in the cluster manager configuration. - string cluster = 1 [(validate.rules).string = {min_len: 1}]; - - // If not specified or the runtime key is not present, all requests to the target cluster - // will be mirrored. - // - // If specified, Envoy will lookup the runtime key to get the percentage of requests to the - // mirror. - config.core.v3.RuntimeFractionalPercent runtime_fraction = 2; - - // Set this to TRUE to only mirror write commands, this is effectively replicating the - // writes in a "fire and forget" manner. - bool exclude_read_commands = 3; - } - - // String prefix that must match the beginning of the keys. Envoy will always favor the - // longest match. - string prefix = 1 [(validate.rules).string = {max_bytes: 1000}]; - - // Indicates if the prefix needs to be removed from the key when forwarded. - bool remove_prefix = 2; - - // Upstream cluster to forward the command to. - string cluster = 3 [(validate.rules).string = {min_len: 1}]; - - // Indicates that the route has a request mirroring policy. - repeated RequestMirrorPolicy request_mirror_policy = 4; - } - - // List of prefix routes. - repeated Route routes = 1; - - // Indicates that prefix matching should be case insensitive. - bool case_insensitive = 2; - - // Optional catch-all route to forward commands that doesn't match any of the routes. The - // catch-all route becomes required when no routes are specified. 
- Route catch_all_route = 4; - - string hidden_envoy_deprecated_catch_all_cluster = 3 [ - deprecated = true, - (envoy.annotations.deprecated_at_minor_version) = "3.0", - (envoy.annotations.disallowed_by_default) = true - ]; - } - - // RedisFault defines faults used for fault injection. - message RedisFault { - enum RedisFaultType { - // Delays requests. This is the base fault; other faults can have delays added. - DELAY = 0; - - // Returns errors on requests. - ERROR = 1; - } - - // Fault type. - RedisFaultType fault_type = 1 [(validate.rules).enum = {defined_only: true}]; - - // Percentage of requests fault applies to. - config.core.v3.RuntimeFractionalPercent fault_enabled = 2 - [(validate.rules).message = {required: true}]; - - // Delay for all faults. If not set, defaults to zero - google.protobuf.Duration delay = 3; - - // Commands fault is restricted to, if any. If not set, fault applies to all commands - // other than auth and ping (due to special handling of those commands in Envoy). - repeated string commands = 4; - } - - // The prefix to use when emitting :ref:`statistics `. - string stat_prefix = 1 [(validate.rules).string = {min_len: 1}]; - - // Network settings for the connection pool to the upstream clusters. - ConnPoolSettings settings = 3 [(validate.rules).message = {required: true}]; - - // Indicates that latency stat should be computed in microseconds. By default it is computed in - // milliseconds. This does not apply to upstream command stats currently. - bool latency_in_micros = 4; - - // List of **unique** prefixes used to separate keys from different workloads to different - // clusters. Envoy will always favor the longest match first in case of overlap. A catch-all - // cluster can be used to forward commands when there is no match. Time complexity of the - // lookups are in O(min(longest key prefix, key length)). - // - // Example: - // - // .. 
code-block:: yaml - // - // prefix_routes: - // routes: - // - prefix: "ab" - // cluster: "cluster_a" - // - prefix: "abc" - // cluster: "cluster_b" - // - // When using the above routes, the following prefixes would be sent to: - // - // * ``get abc:users`` would retrieve the key 'abc:users' from cluster_b. - // * ``get ab:users`` would retrieve the key 'ab:users' from cluster_a. - // * ``get z:users`` would return a NoUpstreamHost error. A :ref:`catch-all - // route` - // would have retrieved the key from that cluster instead. - // - // See the :ref:`configuration section - // ` of the architecture overview for recommendations on - // configuring the backing clusters. - PrefixRoutes prefix_routes = 5; - - // Authenticate Redis client connections locally by forcing downstream clients to issue a `Redis - // AUTH command `_ with this password before enabling any other - // command. If an AUTH command's password matches this password, an "OK" response will be returned - // to the client. If the AUTH command password does not match this password, then an "ERR invalid - // password" error will be returned. If any other command is received before AUTH when this - // password is set, then a "NOAUTH Authentication required." error response will be sent to the - // client. If an AUTH command is received when the password is not set, then an "ERR Client sent - // AUTH, but no password is set" error will be returned. - config.core.v3.DataSource downstream_auth_password = 6 [(udpa.annotations.sensitive) = true]; - - // List of faults to inject. Faults currently come in two flavors: - // - Delay, which delays a request. - // - Error, which responds to a request with an error. Errors can also have delays attached. - // - // Example: - // - // .. 
code-block:: yaml - // - // faults: - // - fault_type: ERROR - // fault_enabled: - // default_value: - // numerator: 10 - // denominator: HUNDRED - // runtime_key: "bogus_key" - // commands: - // - GET - // - fault_type: DELAY - // fault_enabled: - // default_value: - // numerator: 10 - // denominator: HUNDRED - // runtime_key: "bogus_key" - // delay: 2s - // - // See the :ref:`fault injection section - // ` for more information on how to configure this. - repeated RedisFault faults = 8; - - // If a username is provided an ACL style AUTH command will be required with a username and password. - // Authenticate Redis client connections locally by forcing downstream clients to issue a `Redis - // AUTH command `_ with this username and the *downstream_auth_password* - // before enabling any other command. If an AUTH command's username and password matches this username - // and the *downstream_auth_password* , an "OK" response will be returned to the client. If the AUTH - // command username or password does not match this username or the *downstream_auth_password*, then an - // "WRONGPASS invalid username-password pair" error will be returned. If any other command is received before AUTH when this - // password is set, then a "NOAUTH Authentication required." error response will be sent to the - // client. If an AUTH command is received when the password is not set, then an "ERR Client sent - // AUTH, but no ACL is set" error will be returned. - config.core.v3.DataSource downstream_auth_username = 7 [(udpa.annotations.sensitive) = true]; - - string hidden_envoy_deprecated_cluster = 2 [ - deprecated = true, - (envoy.annotations.deprecated_at_minor_version) = "3.0", - (envoy.annotations.disallowed_by_default) = true - ]; -} - -// RedisProtocolOptions specifies Redis upstream protocol options. This object is used in -// :ref:`typed_extension_protocol_options`, -// keyed by the name `envoy.filters.network.redis_proxy`. 
-message RedisProtocolOptions { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.network.redis_proxy.v2.RedisProtocolOptions"; - - // Upstream server password as defined by the `requirepass` directive - // `_ in the server's configuration file. - config.core.v3.DataSource auth_password = 1 [(udpa.annotations.sensitive) = true]; - - // Upstream server username as defined by the `user` directive - // `_ in the server's configuration file. - config.core.v3.DataSource auth_username = 2 [(udpa.annotations.sensitive) = true]; -} diff --git a/generated_api_shadow/envoy/extensions/filters/network/sni_cluster/v3/BUILD b/generated_api_shadow/envoy/extensions/filters/network/sni_cluster/v3/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/network/sni_cluster/v3/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/extensions/filters/network/sni_cluster/v3/sni_cluster.proto b/generated_api_shadow/envoy/extensions/filters/network/sni_cluster/v3/sni_cluster.proto deleted file mode 100644 index 3d6f0ee234abb..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/network/sni_cluster/v3/sni_cluster.proto +++ /dev/null @@ -1,20 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.network.sni_cluster.v3; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.network.sni_cluster.v3"; -option java_outer_classname = "SniClusterProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: SNI Cluster Filter] 
-// Set the upstream cluster name from the SNI field in the TLS connection. -// [#extension: envoy.filters.network.sni_cluster] - -message SniCluster { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.network.sni_cluster.v2.SniCluster"; -} diff --git a/generated_api_shadow/envoy/extensions/filters/network/sni_dynamic_forward_proxy/v3alpha/BUILD b/generated_api_shadow/envoy/extensions/filters/network/sni_dynamic_forward_proxy/v3alpha/BUILD deleted file mode 100644 index 05f25a2fe5d91..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/network/sni_dynamic_forward_proxy/v3alpha/BUILD +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/extensions/common/dynamic_forward_proxy/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/filters/network/sni_dynamic_forward_proxy/v3alpha/sni_dynamic_forward_proxy.proto b/generated_api_shadow/envoy/extensions/filters/network/sni_dynamic_forward_proxy/v3alpha/sni_dynamic_forward_proxy.proto deleted file mode 100644 index 7f7eb57d5be64..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/network/sni_dynamic_forward_proxy/v3alpha/sni_dynamic_forward_proxy.proto +++ /dev/null @@ -1,36 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.network.sni_dynamic_forward_proxy.v3alpha; - -import "envoy/extensions/common/dynamic_forward_proxy/v3/dns_cache.proto"; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.network.sni_dynamic_forward_proxy.v3alpha"; -option java_outer_classname = "SniDynamicForwardProxyProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).work_in_progress = 
true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: SNI dynamic forward proxy] - -// Configuration for the SNI-based dynamic forward proxy filter. See the -// :ref:`architecture overview ` for -// more information. Note this filter must be configured along with -// :ref:`TLS inspector listener filter ` -// to work. -// [#extension: envoy.filters.network.sni_dynamic_forward_proxy] -message FilterConfig { - // The DNS cache configuration that the filter will attach to. Note this - // configuration must match that of associated :ref:`dynamic forward proxy - // cluster configuration - // `. - common.dynamic_forward_proxy.v3.DnsCacheConfig dns_cache_config = 1 - [(validate.rules).message = {required: true}]; - - oneof port_specifier { - // The port number to connect to the upstream. - uint32 port_value = 2 [(validate.rules).uint32 = {lte: 65535 gt: 0}]; - } -} diff --git a/generated_api_shadow/envoy/extensions/filters/network/tcp_proxy/v3/BUILD b/generated_api_shadow/envoy/extensions/filters/network/tcp_proxy/v3/BUILD deleted file mode 100644 index d317ad9266de3..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/network/tcp_proxy/v3/BUILD +++ /dev/null @@ -1,16 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/annotations:pkg", - "//envoy/config/accesslog/v3:pkg", - "//envoy/config/core/v3:pkg", - "//envoy/config/filter/network/tcp_proxy/v2:pkg", - "//envoy/type/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/filters/network/tcp_proxy/v3/tcp_proxy.proto b/generated_api_shadow/envoy/extensions/filters/network/tcp_proxy/v3/tcp_proxy.proto deleted file mode 100644 index f00298a3edd4e..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/network/tcp_proxy/v3/tcp_proxy.proto +++ /dev/null @@ -1,179 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.network.tcp_proxy.v3; - -import "envoy/config/accesslog/v3/accesslog.proto"; -import "envoy/config/core/v3/address.proto"; -import "envoy/config/core/v3/base.proto"; -import "envoy/type/v3/hash_policy.proto"; - -import "google/protobuf/duration.proto"; -import "google/protobuf/wrappers.proto"; - -import "envoy/annotations/deprecation.proto"; -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.network.tcp_proxy.v3"; -option java_outer_classname = "TcpProxyProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: TCP Proxy] -// TCP Proxy :ref:`configuration overview `. -// [#extension: envoy.filters.network.tcp_proxy] - -// [#next-free-field: 14] -message TcpProxy { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.network.tcp_proxy.v2.TcpProxy"; - - // Allows for specification of multiple upstream clusters along with weights - // that indicate the percentage of traffic to be forwarded to each cluster. 
- // The router selects an upstream cluster based on these weights. - message WeightedCluster { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.network.tcp_proxy.v2.TcpProxy.WeightedCluster"; - - message ClusterWeight { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.network.tcp_proxy.v2.TcpProxy.WeightedCluster.ClusterWeight"; - - // Name of the upstream cluster. - string name = 1 [(validate.rules).string = {min_len: 1}]; - - // When a request matches the route, the choice of an upstream cluster is - // determined by its weight. The sum of weights across all entries in the - // clusters array determines the total weight. - uint32 weight = 2 [(validate.rules).uint32 = {gte: 1}]; - - // Optional endpoint metadata match criteria used by the subset load balancer. Only endpoints - // in the upstream cluster with metadata matching what is set in this field will be considered - // for load balancing. Note that this will be merged with what's provided in - // :ref:`TcpProxy.metadata_match - // `, with values - // here taking precedence. The filter name should be specified as *envoy.lb*. - config.core.v3.Metadata metadata_match = 3; - } - - // Specifies one or more upstream clusters associated with the route. - repeated ClusterWeight clusters = 1 [(validate.rules).repeated = {min_items: 1}]; - } - - // Configuration for tunneling TCP over other transports or application layers. - // Tunneling is supported over both HTTP/1.1 and HTTP/2. Upstream protocol is - // determined by the cluster configuration. - message TunnelingConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.network.tcp_proxy.v2.TcpProxy.TunnelingConfig"; - - // The hostname to send in the synthesized CONNECT headers to the upstream proxy. - string hostname = 1 [(validate.rules).string = {min_len: 1}]; - - // Use POST method instead of CONNECT method to tunnel the TCP stream. 
- // The 'protocol: bytestream' header is also NOT set for HTTP/2 to comply with the spec. - // - // The upstream proxy is expected to convert POST payload as raw TCP. - bool use_post = 2; - - // Additional request headers to upstream proxy. This is mainly used to - // trigger upstream to convert POST requests back to CONNECT requests. - // - // Neither *:-prefixed* pseudo-headers nor the Host: header can be overridden. - repeated config.core.v3.HeaderValueOption headers_to_add = 3 - [(validate.rules).repeated = {max_items: 1000}]; - } - - message DeprecatedV1 { - option deprecated = true; - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.network.tcp_proxy.v2.TcpProxy.DeprecatedV1"; - - // [#next-free-field: 6] - message TCPRoute { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.network.tcp_proxy.v2.TcpProxy.DeprecatedV1.TCPRoute"; - - string cluster = 1 [(validate.rules).string = {min_bytes: 1}]; - - repeated config.core.v3.CidrRange destination_ip_list = 2; - - string destination_ports = 3; - - repeated config.core.v3.CidrRange source_ip_list = 4; - - string source_ports = 5; - } - - repeated TCPRoute routes = 1 [(validate.rules).repeated = {min_items: 1}]; - } - - // The prefix to use when emitting :ref:`statistics - // `. - string stat_prefix = 1 [(validate.rules).string = {min_len: 1}]; - - oneof cluster_specifier { - option (validate.required) = true; - - // The upstream cluster to connect to. - string cluster = 2; - - // Multiple upstream clusters can be specified for a given route. The - // request is routed to one of the upstream clusters based on weights - // assigned to each cluster. - WeightedCluster weighted_clusters = 10; - } - - // Optional endpoint metadata match criteria. Only endpoints in the upstream - // cluster with metadata matching that set in metadata_match will be - // considered. The filter name should be specified as *envoy.lb*. 
- config.core.v3.Metadata metadata_match = 9; - - // The idle timeout for connections managed by the TCP proxy filter. The idle timeout - // is defined as the period in which there are no bytes sent or received on either - // the upstream or downstream connection. If not set, the default idle timeout is 1 hour. If set - // to 0s, the timeout will be disabled. - // - // .. warning:: - // Disabling this timeout has a highly likelihood of yielding connection leaks due to lost TCP - // FIN packets, etc. - google.protobuf.Duration idle_timeout = 8; - - // [#not-implemented-hide:] The idle timeout for connections managed by the TCP proxy - // filter. The idle timeout is defined as the period in which there is no - // active traffic. If not set, there is no idle timeout. When the idle timeout - // is reached the connection will be closed. The distinction between - // downstream_idle_timeout/upstream_idle_timeout provides a means to set - // timeout based on the last byte sent on the downstream/upstream connection. - google.protobuf.Duration downstream_idle_timeout = 3; - - // [#not-implemented-hide:] - google.protobuf.Duration upstream_idle_timeout = 4; - - // Configuration for :ref:`access logs ` - // emitted by the this tcp_proxy. - repeated config.accesslog.v3.AccessLog access_log = 5; - - // The maximum number of unsuccessful connection attempts that will be made before - // giving up. If the parameter is not specified, 1 connection attempt will be made. - google.protobuf.UInt32Value max_connect_attempts = 7 [(validate.rules).uint32 = {gte: 1}]; - - // Optional configuration for TCP proxy hash policy. If hash_policy is not set, the hash-based - // load balancing algorithms will select a host randomly. Currently the number of hash policies is - // limited to 1. - repeated type.v3.HashPolicy hash_policy = 11 [(validate.rules).repeated = {max_items: 1}]; - - // If set, this configures tunneling, e.g. configuration options to tunnel TCP payload over - // HTTP CONNECT. 
If this message is absent, the payload will be proxied upstream as per usual. - TunnelingConfig tunneling_config = 12; - - // The maximum duration of a connection. The duration is defined as the period since a connection - // was established. If not set, there is no max duration. When max_downstream_connection_duration - // is reached the connection will be closed. Duration must be at least 1ms. - google.protobuf.Duration max_downstream_connection_duration = 13 - [(validate.rules).duration = {gte {nanos: 1000000}}]; - - DeprecatedV1 hidden_envoy_deprecated_deprecated_v1 = 6 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; -} diff --git a/generated_api_shadow/envoy/extensions/filters/network/thrift_proxy/filters/ratelimit/v3/BUILD b/generated_api_shadow/envoy/extensions/filters/network/thrift_proxy/filters/ratelimit/v3/BUILD deleted file mode 100644 index 0bad14913d217..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/network/thrift_proxy/filters/ratelimit/v3/BUILD +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/ratelimit/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/filters/network/thrift_proxy/filters/ratelimit/v3/rate_limit.proto b/generated_api_shadow/envoy/extensions/filters/network/thrift_proxy/filters/ratelimit/v3/rate_limit.proto deleted file mode 100644 index 8583bbe4b468c..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/network/thrift_proxy/filters/ratelimit/v3/rate_limit.proto +++ /dev/null @@ -1,56 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.network.thrift_proxy.filters.ratelimit.v3; - -import "envoy/config/ratelimit/v3/rls.proto"; - -import "google/protobuf/duration.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.network.thrift_proxy.filters.ratelimit.v3"; -option java_outer_classname = "RateLimitProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Rate limit] -// Rate limit :ref:`configuration overview `. -// [#extension: envoy.filters.thrift.ratelimit] - -// [#next-free-field: 6] -message RateLimit { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.thrift.rate_limit.v2alpha1.RateLimit"; - - // The rate limit domain to use in the rate limit service request. - string domain = 1 [(validate.rules).string = {min_len: 1}]; - - // Specifies the rate limit configuration stage. Each configured rate limit filter performs a - // rate limit check using descriptors configured in the - // :ref:`envoy_v3_api_msg_extensions.filters.network.thrift_proxy.v3.RouteAction` for the request. 
- // Only those entries with a matching stage number are used for a given filter. If not set, the - // default stage number is 0. - // - // .. note:: - // - // The filter supports a range of 0 - 10 inclusively for stage numbers. - uint32 stage = 2 [(validate.rules).uint32 = {lte: 10}]; - - // The timeout in milliseconds for the rate limit service RPC. If not - // set, this defaults to 20ms. - google.protobuf.Duration timeout = 3; - - // The filter's behaviour in case the rate limiting service does - // not respond back. When it is set to true, Envoy will not allow traffic in case of - // communication failure between rate limiting service and the proxy. - // Defaults to false. - bool failure_mode_deny = 4; - - // Configuration for an external rate limit service provider. If not - // specified, any calls to the rate limit service will immediately return - // success. - config.ratelimit.v3.RateLimitServiceConfig rate_limit_service = 5 - [(validate.rules).message = {required: true}]; -} diff --git a/generated_api_shadow/envoy/extensions/filters/network/thrift_proxy/router/v3/BUILD b/generated_api_shadow/envoy/extensions/filters/network/thrift_proxy/router/v3/BUILD deleted file mode 100644 index c24f669b9bbde..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/network/thrift_proxy/router/v3/BUILD +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/filter/thrift/router/v2alpha1:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/filters/network/thrift_proxy/router/v3/router.proto b/generated_api_shadow/envoy/extensions/filters/network/thrift_proxy/router/v3/router.proto deleted file mode 100644 index 860622cb61e42..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/network/thrift_proxy/router/v3/router.proto +++ /dev/null @@ -1,20 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.network.thrift_proxy.router.v3; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.network.thrift_proxy.router.v3"; -option java_outer_classname = "RouterProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Router] -// Thrift router :ref:`configuration overview `. -// [#extension: envoy.filters.thrift.router] - -message Router { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.thrift.router.v2alpha1.Router"; -} diff --git a/generated_api_shadow/envoy/extensions/filters/network/thrift_proxy/v3/BUILD b/generated_api_shadow/envoy/extensions/filters/network/thrift_proxy/v3/BUILD deleted file mode 100644 index cdb143507f644..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/network/thrift_proxy/v3/BUILD +++ /dev/null @@ -1,15 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/annotations:pkg", - "//envoy/config/core/v3:pkg", - "//envoy/config/filter/network/thrift_proxy/v2alpha1:pkg", - "//envoy/config/route/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/filters/network/thrift_proxy/v3/route.proto b/generated_api_shadow/envoy/extensions/filters/network/thrift_proxy/v3/route.proto deleted file mode 100644 index b79c9bc9619ea..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/network/thrift_proxy/v3/route.proto +++ /dev/null @@ -1,183 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.network.thrift_proxy.v3; - -import "envoy/config/core/v3/base.proto"; -import "envoy/config/route/v3/route_components.proto"; - -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.network.thrift_proxy.v3"; -option java_outer_classname = "RouteProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Thrift Proxy Route Configuration] -// Thrift Proxy :ref:`configuration overview `. - -message RouteConfiguration { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.network.thrift_proxy.v2alpha1.RouteConfiguration"; - - // The name of the route configuration. Reserved for future use in asynchronous route discovery. - string name = 1; - - // The list of routes that will be matched, in order, against incoming requests. The first route - // that matches will be used. 
- repeated Route routes = 2; -} - -message Route { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.network.thrift_proxy.v2alpha1.Route"; - - // Route matching parameters. - RouteMatch match = 1 [(validate.rules).message = {required: true}]; - - // Route request to some upstream cluster. - RouteAction route = 2 [(validate.rules).message = {required: true}]; -} - -message RouteMatch { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.network.thrift_proxy.v2alpha1.RouteMatch"; - - oneof match_specifier { - option (validate.required) = true; - - // If specified, the route must exactly match the request method name. As a special case, an - // empty string matches any request method name. - string method_name = 1; - - // If specified, the route must have the service name as the request method name prefix. As a - // special case, an empty string matches any service name. Only relevant when service - // multiplexing. - string service_name = 2; - } - - // Inverts whatever matching is done in the :ref:`method_name - // ` or - // :ref:`service_name - // ` fields. - // Cannot be combined with wildcard matching as that would result in routes never being matched. - // - // .. note:: - // - // This does not invert matching done as part of the :ref:`headers field - // ` field. To - // invert header matching, see :ref:`invert_match - // `. - bool invert = 3; - - // Specifies a set of headers that the route should match on. The router will check the request’s - // headers against all the specified headers in the route config. A match will happen if all the - // headers in the route are present in the request with the same values (or based on presence if - // the value field is not in the config). Note that this only applies for Thrift transports and/or - // protocols that support headers. 
- repeated config.route.v3.HeaderMatcher headers = 4; -} - -// [#next-free-field: 8] -message RouteAction { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.network.thrift_proxy.v2alpha1.RouteAction"; - - // The router is capable of shadowing traffic from one cluster to another. The current - // implementation is "fire and forget," meaning Envoy will not wait for the shadow cluster to - // respond before returning the response from the primary cluster. All normal statistics are - // collected for the shadow cluster making this feature useful for testing. - // - // .. note:: - // - // Shadowing will not be triggered if the primary cluster does not exist. - message RequestMirrorPolicy { - // Specifies the cluster that requests will be mirrored to. The cluster must - // exist in the cluster manager configuration when the route configuration is loaded. - // If it disappears at runtime, the shadow request will silently be ignored. - string cluster = 1 [(validate.rules).string = {min_len: 1}]; - - // If not specified, all requests to the target cluster will be mirrored. - // - // For some fraction N/D, a random number in the range [0,D) is selected. If the - // number is <= the value of the numerator N, or if the key is not present, the default - // value, the request will be mirrored. - config.core.v3.RuntimeFractionalPercent runtime_fraction = 2; - } - - oneof cluster_specifier { - option (validate.required) = true; - - // Indicates a single upstream cluster to which the request should be routed - // to. - string cluster = 1 [(validate.rules).string = {min_len: 1}]; - - // Multiple upstream clusters can be specified for a given route. The - // request is routed to one of the upstream clusters based on weights - // assigned to each cluster. - WeightedCluster weighted_clusters = 2; - - // Envoy will determine the cluster to route to by reading the value of the - // Thrift header named by cluster_header from the request headers. 
If the - // header is not found or the referenced cluster does not exist Envoy will - // respond with an unknown method exception or an internal error exception, - // respectively. - string cluster_header = 6 - [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_VALUE strict: false}]; - } - - // Optional endpoint metadata match criteria used by the subset load balancer. Only endpoints in - // the upstream cluster with metadata matching what is set in this field will be considered. - // Note that this will be merged with what's provided in :ref:`WeightedCluster.metadata_match - // `, - // with values there taking precedence. Keys and values should be provided under the "envoy.lb" - // metadata key. - config.core.v3.Metadata metadata_match = 3; - - // Specifies a set of rate limit configurations that could be applied to the route. - // N.B. Thrift service or method name matching can be achieved by specifying a RequestHeaders - // action with the header name ":method-name". - repeated config.route.v3.RateLimit rate_limits = 4; - - // Strip the service prefix from the method name, if there's a prefix. For - // example, the method call Service:method would end up being just method. - bool strip_service_name = 5; - - // Indicates that the route has request mirroring policies. - repeated RequestMirrorPolicy request_mirror_policies = 7; -} - -// Allows for specification of multiple upstream clusters along with weights that indicate the -// percentage of traffic to be forwarded to each cluster. The router selects an upstream cluster -// based on these weights. -message WeightedCluster { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.network.thrift_proxy.v2alpha1.WeightedCluster"; - - message ClusterWeight { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.network.thrift_proxy.v2alpha1.WeightedCluster.ClusterWeight"; - - // Name of the upstream cluster. 
- string name = 1 [(validate.rules).string = {min_len: 1}]; - - // When a request matches the route, the choice of an upstream cluster is determined by its - // weight. The sum of weights across all entries in the clusters array determines the total - // weight. - google.protobuf.UInt32Value weight = 2 [(validate.rules).uint32 = {gte: 1}]; - - // Optional endpoint metadata match criteria used by the subset load balancer. Only endpoints in - // the upstream cluster with metadata matching what is set in this field, combined with what's - // provided in :ref:`RouteAction's metadata_match - // `, - // will be considered. Values here will take precedence. Keys and values should be provided - // under the "envoy.lb" metadata key. - config.core.v3.Metadata metadata_match = 3; - } - - // Specifies one or more upstream clusters associated with the route. - repeated ClusterWeight clusters = 1 [(validate.rules).repeated = {min_items: 1}]; -} diff --git a/generated_api_shadow/envoy/extensions/filters/network/thrift_proxy/v3/thrift_proxy.proto b/generated_api_shadow/envoy/extensions/filters/network/thrift_proxy/v3/thrift_proxy.proto deleted file mode 100644 index a03251a2ee3b0..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/network/thrift_proxy/v3/thrift_proxy.proto +++ /dev/null @@ -1,141 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.network.thrift_proxy.v3; - -import "envoy/extensions/filters/network/thrift_proxy/v3/route.proto"; - -import "google/protobuf/any.proto"; -import "google/protobuf/struct.proto"; -import "google/protobuf/wrappers.proto"; - -import "envoy/annotations/deprecation.proto"; -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.network.thrift_proxy.v3"; -option java_outer_classname = "ThriftProxyProto"; -option java_multiple_files = true; -option 
(udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Thrift Proxy] -// Thrift Proxy :ref:`configuration overview `. -// [#extension: envoy.filters.network.thrift_proxy] - -// Thrift transport types supported by Envoy. -enum TransportType { - // For downstream connections, the Thrift proxy will attempt to determine which transport to use. - // For upstream connections, the Thrift proxy will use same transport as the downstream - // connection. - AUTO_TRANSPORT = 0; - - // The Thrift proxy will use the Thrift framed transport. - FRAMED = 1; - - // The Thrift proxy will use the Thrift unframed transport. - UNFRAMED = 2; - - // The Thrift proxy will assume the client is using the Thrift header transport. - HEADER = 3; -} - -// Thrift Protocol types supported by Envoy. -enum ProtocolType { - // For downstream connections, the Thrift proxy will attempt to determine which protocol to use. - // Note that the older, non-strict (or lax) binary protocol is not included in automatic protocol - // detection. For upstream connections, the Thrift proxy will use the same protocol as the - // downstream connection. - AUTO_PROTOCOL = 0; - - // The Thrift proxy will use the Thrift binary protocol. - BINARY = 1; - - // The Thrift proxy will use Thrift non-strict binary protocol. - LAX_BINARY = 2; - - // The Thrift proxy will use the Thrift compact protocol. - COMPACT = 3; - - // The Thrift proxy will use the Thrift "Twitter" protocol implemented by the finagle library. - TWITTER = 4; -} - -// [#next-free-field: 8] -message ThriftProxy { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.network.thrift_proxy.v2alpha1.ThriftProxy"; - - // Supplies the type of transport that the Thrift proxy should use. Defaults to - // :ref:`AUTO_TRANSPORT`. - TransportType transport = 2 [(validate.rules).enum = {defined_only: true}]; - - // Supplies the type of protocol that the Thrift proxy should use. 
Defaults to - // :ref:`AUTO_PROTOCOL`. - ProtocolType protocol = 3 [(validate.rules).enum = {defined_only: true}]; - - // The human readable prefix to use when emitting statistics. - string stat_prefix = 1 [(validate.rules).string = {min_len: 1}]; - - // The route table for the connection manager is static and is specified in this property. - RouteConfiguration route_config = 4; - - // A list of individual Thrift filters that make up the filter chain for requests made to the - // Thrift proxy. Order matters as the filters are processed sequentially. For backwards - // compatibility, if no thrift_filters are specified, a default Thrift router filter - // (`envoy.filters.thrift.router`) is used. - // [#extension-category: envoy.thrift_proxy.filters] - repeated ThriftFilter thrift_filters = 5; - - // If set to true, Envoy will try to skip decode data after metadata in the Thrift message. - // This mode will only work if the upstream and downstream protocols are the same and the transport - // is the same, the transport type is framed and the protocol is not Twitter. Otherwise Envoy will - // fallback to decode the data. - bool payload_passthrough = 6; - - // Optional maximum requests for a single downstream connection. If not specified, there is no limit. - google.protobuf.UInt32Value max_requests_per_connection = 7; -} - -// ThriftFilter configures a Thrift filter. -message ThriftFilter { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.network.thrift_proxy.v2alpha1.ThriftFilter"; - - // The name of the filter to instantiate. The name must match a supported - // filter. The built-in filters are: - // - // [#comment:TODO(zuercher): Auto generate the following list] - // * :ref:`envoy.filters.thrift.router ` - // * :ref:`envoy.filters.thrift.rate_limit ` - string name = 1 [(validate.rules).string = {min_len: 1}]; - - // Filter specific configuration which depends on the filter being instantiated. 
See the supported - // filters for further documentation. - oneof config_type { - google.protobuf.Any typed_config = 3; - - google.protobuf.Struct hidden_envoy_deprecated_config = 2 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - } -} - -// ThriftProtocolOptions specifies Thrift upstream protocol options. This object is used in -// in -// :ref:`typed_extension_protocol_options`, -// keyed by the name `envoy.filters.network.thrift_proxy`. -message ThriftProtocolOptions { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.network.thrift_proxy.v2alpha1.ThriftProtocolOptions"; - - // Supplies the type of transport that the Thrift proxy should use for upstream connections. - // Selecting - // :ref:`AUTO_TRANSPORT`, - // which is the default, causes the proxy to use the same transport as the downstream connection. - TransportType transport = 1 [(validate.rules).enum = {defined_only: true}]; - - // Supplies the type of protocol that the Thrift proxy should use for upstream connections. - // Selecting - // :ref:`AUTO_PROTOCOL`, - // which is the default, causes the proxy to use the same protocol as the downstream connection. - ProtocolType protocol = 2 [(validate.rules).enum = {defined_only: true}]; -} diff --git a/generated_api_shadow/envoy/extensions/filters/network/wasm/v3/BUILD b/generated_api_shadow/envoy/extensions/filters/network/wasm/v3/BUILD deleted file mode 100644 index c37174bdefc46..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/network/wasm/v3/BUILD +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/extensions/wasm/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/filters/network/wasm/v3/wasm.proto b/generated_api_shadow/envoy/extensions/filters/network/wasm/v3/wasm.proto deleted file mode 100644 index 1b27e18e3c314..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/network/wasm/v3/wasm.proto +++ /dev/null @@ -1,21 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.network.wasm.v3; - -import "envoy/extensions/wasm/v3/wasm.proto"; - -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.network.wasm.v3"; -option java_outer_classname = "WasmProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Wasm] -// [#extension: envoy.filters.network.wasm] -// Wasm :ref:`configuration overview `. - -message Wasm { - // General Plugin configuration. - envoy.extensions.wasm.v3.PluginConfig config = 1; -} diff --git a/generated_api_shadow/envoy/extensions/filters/network/zookeeper_proxy/v3/BUILD b/generated_api_shadow/envoy/extensions/filters/network/zookeeper_proxy/v3/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/network/zookeeper_proxy/v3/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/extensions/filters/network/zookeeper_proxy/v3/zookeeper_proxy.proto b/generated_api_shadow/envoy/extensions/filters/network/zookeeper_proxy/v3/zookeeper_proxy.proto deleted file mode 100644 index eb2c202c58f1a..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/network/zookeeper_proxy/v3/zookeeper_proxy.proto +++ /dev/null @@ -1,41 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.network.zookeeper_proxy.v3; - -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.network.zookeeper_proxy.v3"; -option java_outer_classname = "ZookeeperProxyProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: ZooKeeper proxy] -// ZooKeeper Proxy :ref:`configuration overview `. -// [#extension: envoy.filters.network.zookeeper_proxy] - -message ZooKeeperProxy { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.network.zookeeper_proxy.v1alpha1.ZooKeeperProxy"; - - // The human readable prefix to use when emitting :ref:`statistics - // `. - string stat_prefix = 1 [(validate.rules).string = {min_len: 1}]; - - // [#not-implemented-hide:] The optional path to use for writing ZooKeeper access logs. - // If the access log field is empty, access logs will not be written. - string access_log = 2; - - // Messages — requests, responses and events — that are bigger than this value will - // be ignored. If it is not set, the default value is 1Mb. 
- // - // The value here should match the jute.maxbuffer property in your cluster configuration: - // - // https://zookeeper.apache.org/doc/r3.4.10/zookeeperAdmin.html#Unsafe+Options - // - // if that is set. If it isn't, ZooKeeper's default is also 1Mb. - google.protobuf.UInt32Value max_packet_bytes = 3; -} diff --git a/generated_api_shadow/envoy/extensions/filters/udp/dns_filter/v3alpha/BUILD b/generated_api_shadow/envoy/extensions/filters/udp/dns_filter/v3alpha/BUILD deleted file mode 100644 index 1f8dbc5af5610..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/udp/dns_filter/v3alpha/BUILD +++ /dev/null @@ -1,14 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/annotations:pkg", - "//envoy/config/core/v3:pkg", - "//envoy/data/dns/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/filters/udp/dns_filter/v3alpha/dns_filter.proto b/generated_api_shadow/envoy/extensions/filters/udp/dns_filter/v3alpha/dns_filter.proto deleted file mode 100644 index 39f44724c430f..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/udp/dns_filter/v3alpha/dns_filter.proto +++ /dev/null @@ -1,90 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.udp.dns_filter.v3alpha; - -import "envoy/config/core/v3/address.proto"; -import "envoy/config/core/v3/base.proto"; -import "envoy/config/core/v3/resolver.proto"; -import "envoy/data/dns/v3/dns_table.proto"; - -import "google/protobuf/duration.proto"; - -import "envoy/annotations/deprecation.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.udp.dns_filter.v3alpha"; -option java_outer_classname = "DnsFilterProto"; -option java_multiple_files = true; -option 
(udpa.annotations.file_status).work_in_progress = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: DNS Filter] -// DNS Filter :ref:`configuration overview `. -// [#extension: envoy.filters.udp_listener.dns_filter] - -// Configuration for the DNS filter. -message DnsFilterConfig { - // This message contains the configuration for the DNS Filter operating - // in a server context. This message will contain the virtual hosts and - // associated addresses with which Envoy will respond to queries - message ServerContextConfig { - oneof config_source { - option (validate.required) = true; - - // Load the configuration specified from the control plane - data.dns.v3.DnsTable inline_dns_table = 1; - - // Seed the filter configuration from an external path. This source - // is a yaml formatted file that contains the DnsTable driving Envoy's - // responses to DNS queries - config.core.v3.DataSource external_dns_table = 2; - } - } - - // This message contains the configuration for the DNS Filter operating - // in a client context. This message will contain the timeouts, retry, - // and forwarding configuration for Envoy to make DNS requests to other - // resolvers - // - // [#next-free-field: 6] - message ClientContextConfig { - // Sets the maximum time we will wait for the upstream query to complete - // We allow 5s for the upstream resolution to complete, so the minimum - // value here is 1. Note that the total latency for a failed query is the - // number of retries multiplied by the resolver_timeout. - google.protobuf.Duration resolver_timeout = 1 [(validate.rules).duration = {gte {seconds: 1}}]; - - // This field was used for `dns_resolution_config` in Envoy 1.19.0 and - // 1.19.1. - // Control planes that need to set this field for Envoy 1.19.0 and - // 1.19.1 clients should fork the protobufs and change the field type - // to `DnsResolutionConfig`. 
- // Control planes that need to simultaneously support Envoy 1.18.x and - // Envoy 1.19.x should avoid Envoy 1.19.0 and 1.19.1. - // - // [#not-implemented-hide:] - repeated config.core.v3.Address upstream_resolvers = 2 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - - // DNS resolution configuration which includes the underlying dns resolver addresses and options. - config.core.v3.DnsResolutionConfig dns_resolution_config = 5; - - // Controls how many outstanding external lookup contexts the filter tracks. - // The context structure allows the filter to respond to every query even if the external - // resolution times out or is otherwise unsuccessful - uint64 max_pending_lookups = 3 [(validate.rules).uint64 = {gte: 1}]; - } - - // The stat prefix used when emitting DNS filter statistics - string stat_prefix = 1 [(validate.rules).string = {min_len: 1}]; - - // Server context configuration contains the data that the filter uses to respond - // to DNS requests. - ServerContextConfig server_config = 2; - - // Client context configuration controls Envoy's behavior when it must use external - // resolvers to answer a query. This object is optional and if omitted instructs - // the filter to resolve queries from the data in the server_config - ClientContextConfig client_config = 3; -} diff --git a/generated_api_shadow/envoy/extensions/filters/udp/udp_proxy/v3/BUILD b/generated_api_shadow/envoy/extensions/filters/udp/udp_proxy/v3/BUILD deleted file mode 100644 index 1c1a6f6b44235..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/udp/udp_proxy/v3/BUILD +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/core/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/filters/udp/udp_proxy/v3/udp_proxy.proto b/generated_api_shadow/envoy/extensions/filters/udp/udp_proxy/v3/udp_proxy.proto deleted file mode 100644 index 9d410e28afe3d..0000000000000 --- a/generated_api_shadow/envoy/extensions/filters/udp/udp_proxy/v3/udp_proxy.proto +++ /dev/null @@ -1,85 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.udp.udp_proxy.v3; - -import "envoy/config/core/v3/udp_socket_config.proto"; - -import "google/protobuf/duration.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.udp.udp_proxy.v3"; -option java_outer_classname = "UdpProxyProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: UDP proxy] -// UDP proxy :ref:`configuration overview `. -// [#extension: envoy.filters.udp_listener.udp_proxy] - -// Configuration for the UDP proxy filter. -// [#next-free-field: 7] -message UdpProxyConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.filter.udp.udp_proxy.v2alpha.UdpProxyConfig"; - - // Specifies the UDP hash policy. - // The packets can be routed by hash policy. - message HashPolicy { - oneof policy_specifier { - option (validate.required) = true; - - // The source IP will be used to compute the hash used by hash-based load balancing algorithms. - bool source_ip = 1 [(validate.rules).bool = {const: true}]; - - // A given key will be used to compute the hash used by hash-based load balancing algorithms. 
- // In certain cases there is a need to direct different UDP streams jointly towards the selected set of endpoints. - // A possible use-case is VoIP telephony, where media (RTP) and its corresponding control (RTCP) belong to the same logical session, - // although they travel in separate streams. To ensure that these pair of streams are load-balanced on session level - // (instead of individual stream level), dynamically created listeners can use the same hash key for each stream in the session. - string key = 2 [(validate.rules).string = {min_len: 1}]; - } - } - - // The stat prefix used when emitting UDP proxy filter stats. - string stat_prefix = 1 [(validate.rules).string = {min_len: 1}]; - - oneof route_specifier { - option (validate.required) = true; - - // The upstream cluster to connect to. - string cluster = 2 [(validate.rules).string = {min_len: 1}]; - } - - // The idle timeout for sessions. Idle is defined as no datagrams between received or sent by - // the session. The default if not specified is 1 minute. - google.protobuf.Duration idle_timeout = 3; - - // Use the remote downstream IP address as the sender IP address when sending packets to upstream hosts. - // This option requires Envoy to be run with the *CAP_NET_ADMIN* capability on Linux. - // And the IPv6 stack must be enabled on Linux kernel. - // This option does not preserve the remote downstream port. - // If this option is enabled, the IP address of sent datagrams will be changed to the remote downstream IP address. - // This means that Envoy will not receive packets that are sent by upstream hosts because the upstream hosts - // will send the packets with the remote downstream IP address as the destination. All packets will be routed - // to the remote downstream directly if there are route rules on the upstream host side. - // There are two options to return the packets back to the remote downstream. - // The first one is to use DSR (Direct Server Return). 
- // The other one is to configure routing rules on the upstream hosts to forward - // all packets back to Envoy and configure iptables rules on the host running Envoy to - // forward all packets from upstream hosts to the Envoy process so that Envoy can forward the packets to the downstream. - // If the platform does not support this option, Envoy will raise a configuration error. - bool use_original_src_ip = 4; - - // Optional configuration for UDP proxy hash policies. If hash_policies is not set, the hash-based - // load balancing algorithms will select a host randomly. Currently the number of hash policies is - // limited to 1. - repeated HashPolicy hash_policies = 5 [(validate.rules).repeated = {max_items: 1}]; - - // UDP socket configuration for upstream sockets. The default for - // :ref:`prefer_gro ` is true for upstream - // sockets as the assumption is datagrams will be received from a single source. - config.core.v3.UdpSocketConfig upstream_socket_config = 6; -} diff --git a/generated_api_shadow/envoy/extensions/formatter/metadata/v3/BUILD b/generated_api_shadow/envoy/extensions/formatter/metadata/v3/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/extensions/formatter/metadata/v3/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/extensions/formatter/metadata/v3/metadata.proto b/generated_api_shadow/envoy/extensions/formatter/metadata/v3/metadata.proto deleted file mode 100644 index 9b110a4893812..0000000000000 --- a/generated_api_shadow/envoy/extensions/formatter/metadata/v3/metadata.proto +++ /dev/null @@ -1,56 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.formatter.metadata.v3; - -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.formatter.metadata.v3"; -option java_outer_classname = "MetadataProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Formatter extension for printing various types of metadata] -// [#extension: envoy.formatter.metadata] - -// Metadata formatter extension implements METADATA command operator that -// prints all types of metadata. The first parameter taken by METADATA operator defines -// type of metadata. The following types of metadata are supported (case sensitive): -// -// * DYNAMIC -// * CLUSTER -// * ROUTE -// -// See :ref:`here ` for more information on access log configuration. - -// %METADATA(TYPE:NAMESPACE:KEY):Z% -// :ref:`Metadata ` info, -// where TYPE is type of metadata (see above for supported types), -// NAMESPACE is the filter namespace used when setting the metadata, KEY is an optional -// lookup up key in the namespace with the option of specifying nested keys separated by ':', -// and Z is an optional parameter denoting string truncation up to Z characters long. -// The data will be logged as a JSON string. 
For example, for the following ROUTE metadata: -// -// ``com.test.my_filter: {"test_key": "foo", "test_object": {"inner_key": "bar"}}`` -// -// * %METADATA(ROUTE:com.test.my_filter)% will log: ``{"test_key": "foo", "test_object": {"inner_key": "bar"}}`` -// * %METADATA(ROUTE:com.test.my_filter:test_key)% will log: ``foo`` -// * %METADATA(ROUTE:com.test.my_filter:test_object)% will log: ``{"inner_key": "bar"}`` -// * %METADATA(ROUTE:com.test.my_filter:test_object:inner_key)% will log: ``bar`` -// * %METADATA(ROUTE:com.unknown_filter)% will log: ``-`` -// * %METADATA(ROUTE:com.test.my_filter:unknown_key)% will log: ``-`` -// * %METADATA(ROUTE:com.test.my_filter):25% will log (truncation at 25 characters): ``{"test_key": "foo", "test`` -// -// .. note:: -// -// For typed JSON logs, this operator renders a single value with string, numeric, or boolean type -// when the referenced key is a simple value. If the referenced key is a struct or list value, a -// JSON struct or list is rendered. Structs and lists may be nested. In any event, the maximum -// length is ignored. -// -// .. note:: -// -// METADATA(DYNAMIC:NAMESPACE:KEY):Z is equivalent to :ref:`DYNAMIC_METADATA(NAMESPACE:KEY):Z` -// METADATA(CLUSTER:NAMESPACE:KEY):Z is equivalent to :ref:`CLUSTER_METADATA(NAMASPACE:KEY):Z` - -message Metadata { -} diff --git a/generated_api_shadow/envoy/extensions/formatter/req_without_query/v3/BUILD b/generated_api_shadow/envoy/extensions/formatter/req_without_query/v3/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/extensions/formatter/req_without_query/v3/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/extensions/formatter/req_without_query/v3/req_without_query.proto b/generated_api_shadow/envoy/extensions/formatter/req_without_query/v3/req_without_query.proto deleted file mode 100644 index e1b6c32a97e66..0000000000000 --- a/generated_api_shadow/envoy/extensions/formatter/req_without_query/v3/req_without_query.proto +++ /dev/null @@ -1,29 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.formatter.req_without_query.v3; - -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.formatter.req_without_query.v3"; -option java_outer_classname = "ReqWithoutQueryProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Formatter extension for printing request without query string] -// [#extension: envoy.formatter.req_without_query] - -// ReqWithoutQuery formatter extension implements REQ_WITHOUT_QUERY command operator that -// works the same way as :ref:`REQ ` except that it will -// remove the query string. It is used to avoid logging any sensitive information into -// the access log. -// See :ref:`here ` for more information on access log configuration. - -// %REQ_WITHOUT_QUERY(X?Y):Z% -// An HTTP request header where X is the main HTTP header, Y is the alternative one, and Z is an -// optional parameter denoting string truncation up to Z characters long. The value is taken from -// the HTTP request header named X first and if it's not set, then request header Y is used. If -// none of the headers are present '-' symbol will be in the log. - -// Configuration for the request without query formatter. 
-message ReqWithoutQuery { -} diff --git a/generated_api_shadow/envoy/extensions/health_checkers/redis/v3/BUILD b/generated_api_shadow/envoy/extensions/health_checkers/redis/v3/BUILD deleted file mode 100644 index 1cb4c6154f26e..0000000000000 --- a/generated_api_shadow/envoy/extensions/health_checkers/redis/v3/BUILD +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/health_checker/redis/v2:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/health_checkers/redis/v3/redis.proto b/generated_api_shadow/envoy/extensions/health_checkers/redis/v3/redis.proto deleted file mode 100644 index 10f5c2b30b038..0000000000000 --- a/generated_api_shadow/envoy/extensions/health_checkers/redis/v3/redis.proto +++ /dev/null @@ -1,26 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.health_checkers.redis.v3; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.health_checkers.redis.v3"; -option java_outer_classname = "RedisProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Redis] -// Redis health checker :ref:`configuration overview `. -// [#extension: envoy.health_checkers.redis] - -message Redis { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.health_checker.redis.v2.Redis"; - - // If set, optionally perform ``EXISTS `` instead of ``PING``. A return value - // from Redis of 0 (does not exist) is considered a passing healthcheck. A return value other - // than 0 is considered a failure. 
This allows the user to mark a Redis instance for maintenance - // by setting the specified key to any value and waiting for traffic to drain. - string key = 1; -} diff --git a/generated_api_shadow/envoy/extensions/http/header_formatters/preserve_case/v3/BUILD b/generated_api_shadow/envoy/extensions/http/header_formatters/preserve_case/v3/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/extensions/http/header_formatters/preserve_case/v3/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/extensions/http/header_formatters/preserve_case/v3/preserve_case.proto b/generated_api_shadow/envoy/extensions/http/header_formatters/preserve_case/v3/preserve_case.proto deleted file mode 100644 index 64bdd497ecab0..0000000000000 --- a/generated_api_shadow/envoy/extensions/http/header_formatters/preserve_case/v3/preserve_case.proto +++ /dev/null @@ -1,19 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.http.header_formatters.preserve_case.v3; - -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.http.header_formatters.preserve_case.v3"; -option java_outer_classname = "PreserveCaseProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Preserve case header formatter] -// [#extension: envoy.http.stateful_header_formatters.preserve_case] - -// Configuration for the preserve case header formatter. -// See the :ref:`header casing ` configuration guide for more -// information. 
-message PreserveCaseFormatterConfig { -} diff --git a/generated_api_shadow/envoy/extensions/http/original_ip_detection/custom_header/v3/BUILD b/generated_api_shadow/envoy/extensions/http/original_ip_detection/custom_header/v3/BUILD deleted file mode 100644 index 9a76b7e148e03..0000000000000 --- a/generated_api_shadow/envoy/extensions/http/original_ip_detection/custom_header/v3/BUILD +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/type/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/http/original_ip_detection/custom_header/v3/custom_header.proto b/generated_api_shadow/envoy/extensions/http/original_ip_detection/custom_header/v3/custom_header.proto deleted file mode 100644 index 5ea93d7548438..0000000000000 --- a/generated_api_shadow/envoy/extensions/http/original_ip_detection/custom_header/v3/custom_header.proto +++ /dev/null @@ -1,43 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.http.original_ip_detection.custom_header.v3; - -import "envoy/type/v3/http_status.proto"; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.http.original_ip_detection.custom_header.v3"; -option java_outer_classname = "CustomHeaderProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Custom header original IP detection extension] - -// This extension allows for the original downstream remote IP to be detected -// by reading the value from a configured header name. If the value is successfully parsed -// as an IP, it'll be treated as the effective downstream remote address and seen as such -// by all filters. 
See :ref:`original_ip_detection_extensions -// ` -// for an overview of how extensions operate and what happens when an extension fails -// to detect the remote IP. -// -// [#extension: envoy.http.original_ip_detection.custom_header] -message CustomHeaderConfig { - // The header name containing the original downstream remote address, if present. - // - // Note: in the case of a multi-valued header, only the first value is tried and the rest are ignored. - string header_name = 1 - [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: true}]; - - // If set to true, the extension could decide that the detected address should be treated as - // trusted by the HCM. If the address is considered :ref:`trusted`, - // it might be used as input to determine if the request is internal (among other things). - bool allow_extension_to_set_address_as_trusted = 2; - - // If this is set, the request will be rejected when detection fails using it as the HTTP response status. - // - // .. note:: - // If this is set to < 400 or > 511, the default status 403 will be used instead. - type.v3.HttpStatus reject_with_status = 3; -} diff --git a/generated_api_shadow/envoy/extensions/http/original_ip_detection/xff/v3/BUILD b/generated_api_shadow/envoy/extensions/http/original_ip_detection/xff/v3/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/extensions/http/original_ip_detection/xff/v3/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/extensions/http/original_ip_detection/xff/v3/xff.proto b/generated_api_shadow/envoy/extensions/http/original_ip_detection/xff/v3/xff.proto deleted file mode 100644 index 6864788f9f185..0000000000000 --- a/generated_api_shadow/envoy/extensions/http/original_ip_detection/xff/v3/xff.proto +++ /dev/null @@ -1,25 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.http.original_ip_detection.xff.v3; - -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.http.original_ip_detection.xff.v3"; -option java_outer_classname = "XffProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: XFF original IP detection extension] - -// This extension allows for the original downstream remote IP to be detected -// by reading the :ref:`config_http_conn_man_headers_x-forwarded-for` header. -// -// [#extension: envoy.http.original_ip_detection.xff] -message XffConfig { - // The number of additional ingress proxy hops from the right side of the - // :ref:`config_http_conn_man_headers_x-forwarded-for` HTTP header to trust when - // determining the origin client's IP address. The default is zero if this option - // is not specified. See the documentation for - // :ref:`config_http_conn_man_headers_x-forwarded-for` for more information. 
- uint32 xff_num_trusted_hops = 1; -} diff --git a/generated_api_shadow/envoy/extensions/internal_redirect/allow_listed_routes/v3/BUILD b/generated_api_shadow/envoy/extensions/internal_redirect/allow_listed_routes/v3/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/extensions/internal_redirect/allow_listed_routes/v3/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/extensions/internal_redirect/allow_listed_routes/v3/allow_listed_routes_config.proto b/generated_api_shadow/envoy/extensions/internal_redirect/allow_listed_routes/v3/allow_listed_routes_config.proto deleted file mode 100644 index 90da16095fa95..0000000000000 --- a/generated_api_shadow/envoy/extensions/internal_redirect/allow_listed_routes/v3/allow_listed_routes_config.proto +++ /dev/null @@ -1,23 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.internal_redirect.allow_listed_routes.v3; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.internal_redirect.allow_listed_routes.v3"; -option java_outer_classname = "AllowListedRoutesConfigProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Allow listed routes internal redirect predicate] - -// An internal redirect predicate that accepts only explicitly allowed target routes. -// [#extension: envoy.internal_redirect_predicates.allow_listed_routes] -message AllowListedRoutesConfig { - // The list of routes that's allowed as redirect target by this predicate, - // identified by the route's :ref:`name `. - // Empty route names are not allowed. 
- repeated string allowed_route_names = 1 - [(validate.rules).repeated = {items {string {min_len: 1}}}]; -} diff --git a/generated_api_shadow/envoy/extensions/internal_redirect/previous_routes/v3/BUILD b/generated_api_shadow/envoy/extensions/internal_redirect/previous_routes/v3/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/extensions/internal_redirect/previous_routes/v3/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/extensions/internal_redirect/previous_routes/v3/previous_routes_config.proto b/generated_api_shadow/envoy/extensions/internal_redirect/previous_routes/v3/previous_routes_config.proto deleted file mode 100644 index c8b03e07b4b66..0000000000000 --- a/generated_api_shadow/envoy/extensions/internal_redirect/previous_routes/v3/previous_routes_config.proto +++ /dev/null @@ -1,18 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.internal_redirect.previous_routes.v3; - -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.internal_redirect.previous_routes.v3"; -option java_outer_classname = "PreviousRoutesConfigProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Previous routes internal redirect predicate] - -// An internal redirect predicate that rejects redirect targets that are pointing -// to a route that has been followed by a previous redirect from the current route. 
-// [#extension: envoy.internal_redirect_predicates.previous_routes] -message PreviousRoutesConfig { -} diff --git a/generated_api_shadow/envoy/extensions/internal_redirect/safe_cross_scheme/v3/BUILD b/generated_api_shadow/envoy/extensions/internal_redirect/safe_cross_scheme/v3/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/extensions/internal_redirect/safe_cross_scheme/v3/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/extensions/internal_redirect/safe_cross_scheme/v3/safe_cross_scheme_config.proto b/generated_api_shadow/envoy/extensions/internal_redirect/safe_cross_scheme/v3/safe_cross_scheme_config.proto deleted file mode 100644 index e3638adb9fdb1..0000000000000 --- a/generated_api_shadow/envoy/extensions/internal_redirect/safe_cross_scheme/v3/safe_cross_scheme_config.proto +++ /dev/null @@ -1,21 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.internal_redirect.safe_cross_scheme.v3; - -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.internal_redirect.safe_cross_scheme.v3"; -option java_outer_classname = "SafeCrossSchemeConfigProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: SafeCrossScheme internal redirect predicate] - -// An internal redirect predicate that checks the scheme between the -// downstream url and the redirect target url and allows a) same scheme -// redirect and b) safe cross scheme redirect, which means if the downstream -// scheme is HTTPS, both HTTPS and HTTP redirect targets are allowed, but if the -// downstream scheme is HTTP, only HTTP redirect targets are 
allowed. -// [#extension: envoy.internal_redirect_predicates.safe_cross_scheme] -message SafeCrossSchemeConfig { -} diff --git a/generated_api_shadow/envoy/extensions/key_value/file_based/v3/BUILD b/generated_api_shadow/envoy/extensions/key_value/file_based/v3/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/extensions/key_value/file_based/v3/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/extensions/key_value/file_based/v3/config.proto b/generated_api_shadow/envoy/extensions/key_value/file_based/v3/config.proto deleted file mode 100644 index 0eff4feb8f941..0000000000000 --- a/generated_api_shadow/envoy/extensions/key_value/file_based/v3/config.proto +++ /dev/null @@ -1,27 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.key_value.file_based.v3; - -import "google/protobuf/duration.proto"; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.key_value.file_based.v3"; -option java_outer_classname = "ConfigProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: File Based Key Value Store storage plugin] - -// [#alpha:] -// [#extension: envoy.key_value.file_based] -// This is configuration to flush a key value store out to disk. -message FileBasedKeyValueStoreConfig { - // The filename to read the keys and values from, and write the keys and - // values to. - string filename = 1 [(validate.rules).string = {min_len: 1}]; - - // The interval at which the key value store should be flushed to the file. 
- google.protobuf.Duration flush_interval = 2; -} diff --git a/generated_api_shadow/envoy/extensions/matching/common_inputs/environment_variable/v3/BUILD b/generated_api_shadow/envoy/extensions/matching/common_inputs/environment_variable/v3/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/extensions/matching/common_inputs/environment_variable/v3/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/extensions/matching/common_inputs/environment_variable/v3/input.proto b/generated_api_shadow/envoy/extensions/matching/common_inputs/environment_variable/v3/input.proto deleted file mode 100644 index 6bbe86e688644..0000000000000 --- a/generated_api_shadow/envoy/extensions/matching/common_inputs/environment_variable/v3/input.proto +++ /dev/null @@ -1,20 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.matching.common_inputs.environment_variable.v3; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.matching.common_inputs.environment_variable.v3"; -option java_outer_classname = "InputProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Environment Variable Input] -// [#extension: envoy.matching.common_inputs.environment_variable] - -// Reads an environment variable to provide an input for matching. -message Config { - // Name of the environment variable to read from. 
- string name = 1 [(validate.rules).string = {min_len: 1}]; -} diff --git a/generated_api_shadow/envoy/extensions/matching/input_matchers/consistent_hashing/v3/BUILD b/generated_api_shadow/envoy/extensions/matching/input_matchers/consistent_hashing/v3/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/extensions/matching/input_matchers/consistent_hashing/v3/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/extensions/matching/input_matchers/consistent_hashing/v3/consistent_hashing.proto b/generated_api_shadow/envoy/extensions/matching/input_matchers/consistent_hashing/v3/consistent_hashing.proto deleted file mode 100644 index c44b0b89d57bd..0000000000000 --- a/generated_api_shadow/envoy/extensions/matching/input_matchers/consistent_hashing/v3/consistent_hashing.proto +++ /dev/null @@ -1,37 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.matching.input_matchers.consistent_hashing.v3; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.matching.input_matchers.consistent_hashing.v3"; -option java_outer_classname = "ConsistentHashingProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Consistent Hashing Matcher] -// [#extension: envoy.matching.input_matchers.consistent_hashing] - -// The consistent hashing matchers computes a consistent hash from the input and matches if the resulting hash -// is within the configured threshold. -// More specifically, this matcher evaluates to true if hash(input, seed) % modulo >= threshold. 
-// Note that the consistency of the match result relies on the internal hash function (xxhash) remaining -// unchanged. While this is unlikely to happen intentionally, this could cause inconsistent match results -// between deployments. -message ConsistentHashing { - // The threshold the resulting hash must be over in order for this matcher to evaluate to true. - // This value must be below the configured modulo value. - // Setting this to 0 is equivalent to this matcher always matching. - uint32 threshold = 1; - - // The value to use for the modulus in the calculation. This effectively bounds the hash output, - // specifying the range of possible values. - // This value must be above the configured threshold. - uint32 modulo = 2 [(validate.rules).uint32 = {gt: 0}]; - - // Optional seed passed through the hash function. This allows using additional information when computing - // the hash value: by changing the seed value, a different partition of matching and non-matching inputs will - // be created that remains consistent for that seed value. - uint64 seed = 3; -} diff --git a/generated_api_shadow/envoy/extensions/matching/input_matchers/ip/v3/BUILD b/generated_api_shadow/envoy/extensions/matching/input_matchers/ip/v3/BUILD deleted file mode 100644 index 1c1a6f6b44235..0000000000000 --- a/generated_api_shadow/envoy/extensions/matching/input_matchers/ip/v3/BUILD +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/core/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/matching/input_matchers/ip/v3/ip.proto b/generated_api_shadow/envoy/extensions/matching/input_matchers/ip/v3/ip.proto deleted file mode 100644 index 3c7cb4eb5f19a..0000000000000 --- a/generated_api_shadow/envoy/extensions/matching/input_matchers/ip/v3/ip.proto +++ /dev/null @@ -1,38 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.matching.input_matchers.ip.v3; - -import "envoy/config/core/v3/address.proto"; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.matching.input_matchers.ip.v3"; -option java_outer_classname = "IpProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: IP matcher] -// [#extension: envoy.matching.input_matchers.ip] - -// This input matcher matches IPv4 or IPv6 addresses against a list of CIDR -// ranges. It returns true if and only if the input IP belongs to at least one -// of these CIDR ranges. Internally, it uses a Level-Compressed trie, as -// described in the paper `IP-address lookup using LC-tries -// `_ -// by S. Nilsson and G. Karlsson. For "big" lists of IPs, this matcher is more -// efficient than multiple single IP matcher, that would have a linear cost. -message Ip { - // Match if the IP belongs to any of these CIDR ranges. - repeated config.core.v3.CidrRange cidr_ranges = 1 [(validate.rules).repeated = {min_items: 1}]; - - // The human readable prefix to use when emitting statistics for the IP input - // matcher. Names in the table below are concatenated to this prefix. - // - // .. 
csv-table:: - // :header: Name, Type, Description - // :widths: 1, 1, 2 - // - // ip_parsing_failed, Counter, Total number of IP addresses the matcher was unable to parse - string stat_prefix = 2 [(validate.rules).string = {min_len: 1}]; -} diff --git a/generated_api_shadow/envoy/extensions/network/socket_interface/v3/BUILD b/generated_api_shadow/envoy/extensions/network/socket_interface/v3/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/extensions/network/socket_interface/v3/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/extensions/network/socket_interface/v3/default_socket_interface.proto b/generated_api_shadow/envoy/extensions/network/socket_interface/v3/default_socket_interface.proto deleted file mode 100644 index d2c747ec49fb1..0000000000000 --- a/generated_api_shadow/envoy/extensions/network/socket_interface/v3/default_socket_interface.proto +++ /dev/null @@ -1,17 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.network.socket_interface.v3; - -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.network.socket_interface.v3"; -option java_outer_classname = "DefaultSocketInterfaceProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Default Socket Interface configuration] - -// Configuration for default socket interface that relies on OS dependent syscall to create -// sockets. 
-message DefaultSocketInterface { -} diff --git a/generated_api_shadow/envoy/extensions/quic/crypto_stream/v3/BUILD b/generated_api_shadow/envoy/extensions/quic/crypto_stream/v3/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/extensions/quic/crypto_stream/v3/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/extensions/quic/crypto_stream/v3/crypto_stream.proto b/generated_api_shadow/envoy/extensions/quic/crypto_stream/v3/crypto_stream.proto deleted file mode 100644 index 6313f79861e84..0000000000000 --- a/generated_api_shadow/envoy/extensions/quic/crypto_stream/v3/crypto_stream.proto +++ /dev/null @@ -1,17 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.quic.crypto_stream.v3; - -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.quic.crypto_stream.v3"; -option java_outer_classname = "CryptoStreamProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: QUIC server crypto stream config] -// [#extension: envoy.quic.crypto_stream.server.quiche] - -// Configuration for the default QUIC server crypto stream provided by QUICHE. -message CryptoServerStreamConfig { -} diff --git a/generated_api_shadow/envoy/extensions/quic/proof_source/v3/BUILD b/generated_api_shadow/envoy/extensions/quic/proof_source/v3/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/extensions/quic/proof_source/v3/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/extensions/quic/proof_source/v3/proof_source.proto b/generated_api_shadow/envoy/extensions/quic/proof_source/v3/proof_source.proto deleted file mode 100644 index 1459142d40914..0000000000000 --- a/generated_api_shadow/envoy/extensions/quic/proof_source/v3/proof_source.proto +++ /dev/null @@ -1,17 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.quic.proof_source.v3; - -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.quic.proof_source.v3"; -option java_outer_classname = "ProofSourceProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: QUIC proof source config] -// [#extension: envoy.quic.proof_source.filter_chain] - -// Configuration for the default QUIC proof source. -message ProofSourceConfig { -} diff --git a/generated_api_shadow/envoy/extensions/rate_limit_descriptors/expr/v3/BUILD b/generated_api_shadow/envoy/extensions/rate_limit_descriptors/expr/v3/BUILD deleted file mode 100644 index facd82ce6de26..0000000000000 --- a/generated_api_shadow/envoy/extensions/rate_limit_descriptors/expr/v3/BUILD +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "@com_github_cncf_udpa//udpa/annotations:pkg", - "@com_google_googleapis//google/api/expr/v1alpha1:syntax_proto", - ], -) diff --git a/generated_api_shadow/envoy/extensions/rate_limit_descriptors/expr/v3/expr.proto b/generated_api_shadow/envoy/extensions/rate_limit_descriptors/expr/v3/expr.proto deleted file mode 100644 index 76d3505cba04a..0000000000000 --- a/generated_api_shadow/envoy/extensions/rate_limit_descriptors/expr/v3/expr.proto +++ /dev/null @@ -1,41 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.rate_limit_descriptors.expr.v3; - -import "google/api/expr/v1alpha1/syntax.proto"; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.rate_limit_descriptors.expr.v3"; -option java_outer_classname = "ExprProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Rate limit descriptor expression] -// [#extension: envoy.rate_limit_descriptors.expr] - -// The following descriptor entry is appended with a value computed -// from a symbolic Common Expression Language expression. -// See :ref:`attributes ` for the set of -// available attributes. -// -// .. code-block:: cpp -// -// ("", "") -message Descriptor { - // The key to use in the descriptor entry. - string descriptor_key = 1 [(validate.rules).string = {min_len: 1}]; - - // If set to true, Envoy skips the descriptor if the expression evaluates to an error. - // By default, the rate limit is not applied when an expression produces an error. - bool skip_if_error = 2; - - oneof expr_specifier { - // Expression in a text form, e.g. "connection.requested_server_name". - string text = 3 [(validate.rules).string = {min_len: 1}]; - - // Parsed expression in AST form. 
- google.api.expr.v1alpha1.Expr parsed = 4; - } -} diff --git a/generated_api_shadow/envoy/extensions/request_id/uuid/v3/BUILD b/generated_api_shadow/envoy/extensions/request_id/uuid/v3/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/extensions/request_id/uuid/v3/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/extensions/request_id/uuid/v3/uuid.proto b/generated_api_shadow/envoy/extensions/request_id/uuid/v3/uuid.proto deleted file mode 100644 index 5c3f00da28d71..0000000000000 --- a/generated_api_shadow/envoy/extensions/request_id/uuid/v3/uuid.proto +++ /dev/null @@ -1,48 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.request_id.uuid.v3; - -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.request_id.uuid.v3"; -option java_outer_classname = "UuidProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: UUID] -// [#extension: envoy.request_id.uuid] - -// Configuration for the default UUID request ID extension which has the following behavior: -// -// 1. Request ID is propagated using the :ref:`x-request-id -// ` header. -// -// 2. Request ID is a universally unique identifier `(UUID4) -// `_. -// -// 3. Tracing decision (sampled, forced, etc) is set in 14th nibble of the UUID. By default this will -// overwrite existing UUIDs received in the *x-request-id* header if the trace sampling decision -// is changed. The 14th nibble of the UUID4 has been chosen because it is fixed to '4' by the -// standard. 
Thus, '4' indicates a default UUID and no trace status. This nibble is swapped to: -// -// a. '9': Sampled. -// b. 'a': Force traced due to server-side override. -// c. 'b': Force traced due to client-side request ID joining. -// -// See the :ref:`x-request-id ` documentation for -// more information. -message UuidRequestIdConfig { - // Whether the implementation alters the UUID to contain the trace sampling decision as per the - // `UuidRequestIdConfig` message documentation. This defaults to true. If disabled no - // modification to the UUID will be performed. It is important to note that if disabled, - // stable sampling of traces, access logs, etc. will no longer work and only random sampling will - // be possible. - google.protobuf.BoolValue pack_trace_reason = 1; - - // Set whether to use :ref:`x-request-id` for sampling or not. - // This defaults to true. See the :ref:`context propagation ` - // overview for more information. - google.protobuf.BoolValue use_request_id_for_trace_sampling = 2; -} diff --git a/generated_api_shadow/envoy/extensions/resource_monitors/fixed_heap/v3/BUILD b/generated_api_shadow/envoy/extensions/resource_monitors/fixed_heap/v3/BUILD deleted file mode 100644 index 3fb51ff1ccaa9..0000000000000 --- a/generated_api_shadow/envoy/extensions/resource_monitors/fixed_heap/v3/BUILD +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/resource_monitor/fixed_heap/v2alpha:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/resource_monitors/fixed_heap/v3/fixed_heap.proto b/generated_api_shadow/envoy/extensions/resource_monitors/fixed_heap/v3/fixed_heap.proto deleted file mode 100644 index 48aaa0a0268e4..0000000000000 --- a/generated_api_shadow/envoy/extensions/resource_monitors/fixed_heap/v3/fixed_heap.proto +++ /dev/null @@ -1,25 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.resource_monitors.fixed_heap.v3; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.resource_monitors.fixed_heap.v3"; -option java_outer_classname = "FixedHeapProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Fixed heap] -// [#extension: envoy.resource_monitors.fixed_heap] - -// The fixed heap resource monitor reports the Envoy process memory pressure, computed as a -// fraction of currently reserved heap memory divided by a statically configured maximum -// specified in the FixedHeapConfig. 
-message FixedHeapConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.resource_monitor.fixed_heap.v2alpha.FixedHeapConfig"; - - uint64 max_heap_size_bytes = 1 [(validate.rules).uint64 = {gt: 0}]; -} diff --git a/generated_api_shadow/envoy/extensions/resource_monitors/injected_resource/v3/BUILD b/generated_api_shadow/envoy/extensions/resource_monitors/injected_resource/v3/BUILD deleted file mode 100644 index 975b8fcbd5a32..0000000000000 --- a/generated_api_shadow/envoy/extensions/resource_monitors/injected_resource/v3/BUILD +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/resource_monitor/injected_resource/v2alpha:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/resource_monitors/injected_resource/v3/injected_resource.proto b/generated_api_shadow/envoy/extensions/resource_monitors/injected_resource/v3/injected_resource.proto deleted file mode 100644 index 643ea68651c73..0000000000000 --- a/generated_api_shadow/envoy/extensions/resource_monitors/injected_resource/v3/injected_resource.proto +++ /dev/null @@ -1,26 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.resource_monitors.injected_resource.v3; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.resource_monitors.injected_resource.v3"; -option java_outer_classname = "InjectedResourceProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Injected resource] -// [#extension: envoy.resource_monitors.injected_resource] - -// The injected resource monitor allows injecting a synthetic resource 
pressure into Envoy -// via a text file, which must contain a floating-point number in the range [0..1] representing -// the resource pressure and be updated atomically by a symbolic link swap. -// This is intended primarily for integration tests to force Envoy into an overloaded state. -message InjectedResourceConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.resource_monitor.injected_resource.v2alpha.InjectedResourceConfig"; - - string filename = 1 [(validate.rules).string = {min_len: 1}]; -} diff --git a/generated_api_shadow/envoy/extensions/retry/host/omit_canary_hosts/v3/BUILD b/generated_api_shadow/envoy/extensions/retry/host/omit_canary_hosts/v3/BUILD deleted file mode 100644 index 0eab79b89fdaf..0000000000000 --- a/generated_api_shadow/envoy/extensions/retry/host/omit_canary_hosts/v3/BUILD +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/retry/omit_canary_hosts/v2:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/retry/host/omit_canary_hosts/v3/omit_canary_hosts.proto b/generated_api_shadow/envoy/extensions/retry/host/omit_canary_hosts/v3/omit_canary_hosts.proto deleted file mode 100644 index 930cced837036..0000000000000 --- a/generated_api_shadow/envoy/extensions/retry/host/omit_canary_hosts/v3/omit_canary_hosts.proto +++ /dev/null @@ -1,19 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.retry.host.omit_canary_hosts.v3; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.retry.host.omit_canary_hosts.v3"; -option java_outer_classname = "OmitCanaryHostsProto"; -option java_multiple_files = true; -option 
(udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Omit Canary Hosts Predicate] -// [#extension: envoy.retry_host_predicates.omit_canary_hosts] - -message OmitCanaryHostsPredicate { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.retry.omit_canary_hosts.v2.OmitCanaryHostsPredicate"; -} diff --git a/generated_api_shadow/envoy/extensions/retry/host/omit_host_metadata/v3/BUILD b/generated_api_shadow/envoy/extensions/retry/host/omit_host_metadata/v3/BUILD deleted file mode 100644 index 1c1a6f6b44235..0000000000000 --- a/generated_api_shadow/envoy/extensions/retry/host/omit_host_metadata/v3/BUILD +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/core/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/retry/host/omit_host_metadata/v3/omit_host_metadata_config.proto b/generated_api_shadow/envoy/extensions/retry/host/omit_host_metadata/v3/omit_host_metadata_config.proto deleted file mode 100644 index fb7adf4402880..0000000000000 --- a/generated_api_shadow/envoy/extensions/retry/host/omit_host_metadata/v3/omit_host_metadata_config.proto +++ /dev/null @@ -1,29 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.retry.host.omit_host_metadata.v3; - -import "envoy/config/core/v3/base.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.retry.host.omit_host_metadata.v3"; -option java_outer_classname = "OmitHostMetadataConfigProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Omit host metadata retry predicate] - -// A retry host predicate that can 
be used to reject a host based on -// predefined metadata match criteria. -// [#extension: envoy.retry_host_predicates.omit_host_metadata] -message OmitHostMetadataConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.retry.omit_host_metadata.v2.OmitHostMetadataConfig"; - - // Retry host predicate metadata match criteria. The hosts in - // the upstream cluster with matching metadata will be omitted while - // attempting a retry of a failed request. The metadata should be specified - // under the *envoy.lb* key. - config.core.v3.Metadata metadata_match = 1; -} diff --git a/generated_api_shadow/envoy/extensions/retry/host/previous_hosts/v3/BUILD b/generated_api_shadow/envoy/extensions/retry/host/previous_hosts/v3/BUILD deleted file mode 100644 index 88d9a6e255a3a..0000000000000 --- a/generated_api_shadow/envoy/extensions/retry/host/previous_hosts/v3/BUILD +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/retry/previous_hosts/v2:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/retry/host/previous_hosts/v3/previous_hosts.proto b/generated_api_shadow/envoy/extensions/retry/host/previous_hosts/v3/previous_hosts.proto deleted file mode 100644 index addce657fefed..0000000000000 --- a/generated_api_shadow/envoy/extensions/retry/host/previous_hosts/v3/previous_hosts.proto +++ /dev/null @@ -1,19 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.retry.host.previous_hosts.v3; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.retry.host.previous_hosts.v3"; -option java_outer_classname = "PreviousHostsProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Previous Hosts Predicate] -// [#extension: envoy.retry_host_predicates.previous_hosts] - -message PreviousHostsPredicate { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.retry.previous_hosts.v2.PreviousHostsPredicate"; -} diff --git a/generated_api_shadow/envoy/extensions/retry/priority/previous_priorities/v3/BUILD b/generated_api_shadow/envoy/extensions/retry/priority/previous_priorities/v3/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/extensions/retry/priority/previous_priorities/v3/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/extensions/retry/priority/previous_priorities/v3/previous_priorities_config.proto b/generated_api_shadow/envoy/extensions/retry/priority/previous_priorities/v3/previous_priorities_config.proto deleted file mode 100644 index b6a4bbecbae8c..0000000000000 --- a/generated_api_shadow/envoy/extensions/retry/priority/previous_priorities/v3/previous_priorities_config.proto +++ /dev/null @@ -1,57 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.retry.priority.previous_priorities.v3; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.retry.priority.previous_priorities.v3"; -option java_outer_classname = "PreviousPrioritiesConfigProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Previous priorities retry selector] - -// A retry host selector that attempts to spread retries between priorities, even if certain -// priorities would not normally be attempted due to higher priorities being available. -// -// As priorities get excluded, load will be distributed amongst the remaining healthy priorities -// based on the relative health of the priorities, matching how load is distributed during regular -// host selection. For example, given priority healths of {100, 50, 50}, the original load will be -// {100, 0, 0} (since P0 has capacity to handle 100% of the traffic). If P0 is excluded, the load -// changes to {0, 50, 50}, because P1 is only able to handle 50% of the traffic, causing the -// remaining to spill over to P2. 
-// -// Each priority attempted will be excluded until there are no healthy priorities left, at which -// point the list of attempted priorities will be reset, essentially starting from the beginning. -// For example, given three priorities P0, P1, P2 with healthy % of 100, 0 and 50 respectively, the -// following sequence of priorities would be selected (assuming update_frequency = 1): -// Attempt 1: P0 (P0 is 100% healthy) -// Attempt 2: P2 (P0 already attempted, P2 only healthy priority) -// Attempt 3: P0 (no healthy priorities, reset) -// Attempt 4: P2 -// -// In the case of all upstream hosts being unhealthy, no adjustments will be made to the original -// priority load, so behavior should be identical to not using this plugin. -// -// Using this PriorityFilter requires rebuilding the priority load, which runs in O(# of -// priorities), which might incur significant overhead for clusters with many priorities. -// [#extension: envoy.retry_priorities.previous_priorities] -message PreviousPrioritiesConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.retry.previous_priorities.PreviousPrioritiesConfig"; - - // How often the priority load should be updated based on previously attempted priorities. Useful - // to allow each priorities to receive more than one request before being excluded or to reduce - // the number of times that the priority load has to be recomputed. - // - // For example, by setting this to 2, then the first two attempts (initial attempt and first - // retry) will use the unmodified priority load. The third and fourth attempt will use priority - // load which excludes the priorities routed to with the first two attempts, and the fifth and - // sixth attempt will use the priority load excluding the priorities used for the first four - // attempts. - // - // Must be greater than 0. 
- int32 update_frequency = 1 [(validate.rules).int32 = {gt: 0}]; -} diff --git a/generated_api_shadow/envoy/extensions/stat_sinks/graphite_statsd/v3/BUILD b/generated_api_shadow/envoy/extensions/stat_sinks/graphite_statsd/v3/BUILD deleted file mode 100644 index 1c1a6f6b44235..0000000000000 --- a/generated_api_shadow/envoy/extensions/stat_sinks/graphite_statsd/v3/BUILD +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/core/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/stat_sinks/graphite_statsd/v3/graphite_statsd.proto b/generated_api_shadow/envoy/extensions/stat_sinks/graphite_statsd/v3/graphite_statsd.proto deleted file mode 100644 index 72306389bfeca..0000000000000 --- a/generated_api_shadow/envoy/extensions/stat_sinks/graphite_statsd/v3/graphite_statsd.proto +++ /dev/null @@ -1,44 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.stat_sinks.graphite_statsd.v3; - -import "envoy/config/core/v3/address.proto"; - -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.stat_sinks.graphite_statsd.v3"; -option java_outer_classname = "GraphiteStatsdProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Graphite+Statsd] -// Stats configuration proto schema for ``envoy.stat_sinks.graphite_statsd`` sink. -// The sink emits stats with `Graphite `_ -// compatible tags. Tags are configurable via :ref:`StatsConfig -// `. 
-// [#extension: envoy.stat_sinks.graphite_statsd] - -message GraphiteStatsdSink { - oneof statsd_specifier { - option (validate.required) = true; - - // The UDP address of a running Graphite-compliant listener. If specified, - // statistics will be flushed to this address. - config.core.v3.Address address = 1; - } - - // Optional custom metric name prefix. See :ref:`StatsdSink's prefix field - // ` for more details. - string prefix = 3; - - // Optional max datagram size to use when sending UDP messages. By default Envoy - // will emit one metric per datagram. By specifying a max-size larger than a single - // metric, Envoy will emit multiple, new-line separated metrics. The max datagram - // size should not exceed your network's MTU. - // - // Note that this value may not be respected if smaller than a single metric. - google.protobuf.UInt64Value max_bytes_per_datagram = 4 [(validate.rules).uint64 = {gt: 0}]; -} diff --git a/generated_api_shadow/envoy/extensions/stat_sinks/wasm/v3/BUILD b/generated_api_shadow/envoy/extensions/stat_sinks/wasm/v3/BUILD deleted file mode 100644 index c37174bdefc46..0000000000000 --- a/generated_api_shadow/envoy/extensions/stat_sinks/wasm/v3/BUILD +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/extensions/wasm/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/stat_sinks/wasm/v3/wasm.proto b/generated_api_shadow/envoy/extensions/stat_sinks/wasm/v3/wasm.proto deleted file mode 100644 index 9d61eda713c78..0000000000000 --- a/generated_api_shadow/envoy/extensions/stat_sinks/wasm/v3/wasm.proto +++ /dev/null @@ -1,21 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.stat_sinks.wasm.v3; - -import "envoy/extensions/wasm/v3/wasm.proto"; - -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.stat_sinks.wasm.v3"; -option java_outer_classname = "WasmProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Wasm] -// Wasm :ref:`configuration overview `. -// [#extension: envoy.stat_sinks.wasm] - -message Wasm { - // General Plugin configuration. - envoy.extensions.wasm.v3.PluginConfig config = 1; -} diff --git a/generated_api_shadow/envoy/extensions/transport_sockets/alts/v3/BUILD b/generated_api_shadow/envoy/extensions/transport_sockets/alts/v3/BUILD deleted file mode 100644 index 8a8435d89897d..0000000000000 --- a/generated_api_shadow/envoy/extensions/transport_sockets/alts/v3/BUILD +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/transport_socket/alts/v2alpha:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/transport_sockets/alts/v3/alts.proto b/generated_api_shadow/envoy/extensions/transport_sockets/alts/v3/alts.proto deleted file mode 100644 index 93c6f9b834efa..0000000000000 --- a/generated_api_shadow/envoy/extensions/transport_sockets/alts/v3/alts.proto +++ /dev/null @@ -1,31 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.transport_sockets.alts.v3; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.transport_sockets.alts.v3"; -option java_outer_classname = "AltsProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: ALTS] -// [#extension: envoy.transport_sockets.alts] - -// Configuration for ALTS transport socket. This provides Google's ALTS protocol to Envoy. -// Store the peer identity in dynamic metadata, namespace is "envoy.transport_socket.peer_information", key is "peer_identity". -// https://cloud.google.com/security/encryption-in-transit/application-layer-transport-security/ -message Alts { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.transport_socket.alts.v2alpha.Alts"; - - // The location of a handshaker service, this is usually 169.254.169.254:8080 - // on GCE. - string handshaker_service = 1 [(validate.rules).string = {min_len: 1}]; - - // The acceptable service accounts from peer, peers not in the list will be rejected in the - // handshake validation step. If empty, no validation will be performed. 
- repeated string peer_service_accounts = 2; -} diff --git a/generated_api_shadow/envoy/extensions/transport_sockets/proxy_protocol/v3/BUILD b/generated_api_shadow/envoy/extensions/transport_sockets/proxy_protocol/v3/BUILD deleted file mode 100644 index 1c1a6f6b44235..0000000000000 --- a/generated_api_shadow/envoy/extensions/transport_sockets/proxy_protocol/v3/BUILD +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/core/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/transport_sockets/proxy_protocol/v3/upstream_proxy_protocol.proto b/generated_api_shadow/envoy/extensions/transport_sockets/proxy_protocol/v3/upstream_proxy_protocol.proto deleted file mode 100644 index 687226574d29b..0000000000000 --- a/generated_api_shadow/envoy/extensions/transport_sockets/proxy_protocol/v3/upstream_proxy_protocol.proto +++ /dev/null @@ -1,26 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.transport_sockets.proxy_protocol.v3; - -import "envoy/config/core/v3/base.proto"; -import "envoy/config/core/v3/proxy_protocol.proto"; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.transport_sockets.proxy_protocol.v3"; -option java_outer_classname = "UpstreamProxyProtocolProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Upstream Proxy Protocol] -// [#extension: envoy.transport_sockets.upstream_proxy_protocol] - -// Configuration for PROXY protocol socket -message ProxyProtocolUpstreamTransport { - // The PROXY protocol settings - config.core.v3.ProxyProtocolConfig config = 1; - - // The underlying transport socket being wrapped. 
- config.core.v3.TransportSocket transport_socket = 2 [(validate.rules).message = {required: true}]; -} diff --git a/generated_api_shadow/envoy/extensions/transport_sockets/quic/v3/BUILD b/generated_api_shadow/envoy/extensions/transport_sockets/quic/v3/BUILD deleted file mode 100644 index 3ca8242f77801..0000000000000 --- a/generated_api_shadow/envoy/extensions/transport_sockets/quic/v3/BUILD +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/extensions/transport_sockets/tls/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/transport_sockets/quic/v3/quic_transport.proto b/generated_api_shadow/envoy/extensions/transport_sockets/quic/v3/quic_transport.proto deleted file mode 100644 index 25122b09c5972..0000000000000 --- a/generated_api_shadow/envoy/extensions/transport_sockets/quic/v3/quic_transport.proto +++ /dev/null @@ -1,27 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.transport_sockets.quic.v3; - -import "envoy/extensions/transport_sockets/tls/v3/tls.proto"; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.transport_sockets.quic.v3"; -option java_outer_classname = "QuicTransportProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: quic transport] -// [#comment:#extension: envoy.transport_sockets.quic] - -// Configuration for Downstream QUIC transport socket. This provides Google's implementation of Google QUIC and IETF QUIC to Envoy. 
-message QuicDownstreamTransport { - tls.v3.DownstreamTlsContext downstream_tls_context = 1 - [(validate.rules).message = {required: true}]; -} - -// Configuration for Upstream QUIC transport socket. This provides Google's implementation of Google QUIC and IETF QUIC to Envoy. -message QuicUpstreamTransport { - tls.v3.UpstreamTlsContext upstream_tls_context = 1 [(validate.rules).message = {required: true}]; -} diff --git a/generated_api_shadow/envoy/extensions/transport_sockets/raw_buffer/v3/BUILD b/generated_api_shadow/envoy/extensions/transport_sockets/raw_buffer/v3/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/extensions/transport_sockets/raw_buffer/v3/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/extensions/transport_sockets/raw_buffer/v3/raw_buffer.proto b/generated_api_shadow/envoy/extensions/transport_sockets/raw_buffer/v3/raw_buffer.proto deleted file mode 100644 index 85406c1f77135..0000000000000 --- a/generated_api_shadow/envoy/extensions/transport_sockets/raw_buffer/v3/raw_buffer.proto +++ /dev/null @@ -1,20 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.transport_sockets.raw_buffer.v3; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.transport_sockets.raw_buffer.v3"; -option java_outer_classname = "RawBufferProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Raw Buffer] -// [#extension: envoy.transport_sockets.raw_buffer] - -// Configuration for raw buffer transport socket. 
-message RawBuffer { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.transport_socket.raw_buffer.v2.RawBuffer"; -} diff --git a/generated_api_shadow/envoy/extensions/transport_sockets/s2a/v3alpha/BUILD b/generated_api_shadow/envoy/extensions/transport_sockets/s2a/v3alpha/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/extensions/transport_sockets/s2a/v3alpha/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/extensions/transport_sockets/s2a/v3alpha/s2a.proto b/generated_api_shadow/envoy/extensions/transport_sockets/s2a/v3alpha/s2a.proto deleted file mode 100644 index b32b84653e690..0000000000000 --- a/generated_api_shadow/envoy/extensions/transport_sockets/s2a/v3alpha/s2a.proto +++ /dev/null @@ -1,22 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.transport_sockets.s2a.v3alpha; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.transport_sockets.s2a.v3alpha"; -option java_outer_classname = "S2aProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).work_in_progress = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#not-implemented-hide:] -// Configuration for S2A transport socket. This allows Envoy clients to -// configure how to offload mTLS handshakes to the S2A service. -// https://github.com/google/s2a-core#readme -message S2AConfiguration { - // The address of the S2A. This can be an IP address or a hostname, - // followed by a port number. 
- string s2a_address = 1 [(validate.rules).string = {min_len: 1}]; -} diff --git a/generated_api_shadow/envoy/extensions/transport_sockets/starttls/v3/BUILD b/generated_api_shadow/envoy/extensions/transport_sockets/starttls/v3/BUILD deleted file mode 100644 index 7ae3c01a99470..0000000000000 --- a/generated_api_shadow/envoy/extensions/transport_sockets/starttls/v3/BUILD +++ /dev/null @@ -1,13 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/extensions/transport_sockets/raw_buffer/v3:pkg", - "//envoy/extensions/transport_sockets/tls/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/transport_sockets/starttls/v3/starttls.proto b/generated_api_shadow/envoy/extensions/transport_sockets/starttls/v3/starttls.proto deleted file mode 100644 index 69254819baf7b..0000000000000 --- a/generated_api_shadow/envoy/extensions/transport_sockets/starttls/v3/starttls.proto +++ /dev/null @@ -1,51 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.transport_sockets.starttls.v3; - -import "envoy/extensions/transport_sockets/raw_buffer/v3/raw_buffer.proto"; -import "envoy/extensions/transport_sockets/tls/v3/tls.proto"; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.transport_sockets.starttls.v3"; -option java_outer_classname = "StarttlsProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: StartTls] -// [#extension: envoy.transport_sockets.starttls] - -// StartTls transport socket addresses situations when a protocol starts in clear-text and -// negotiates an in-band switch to TLS. StartTls transport socket is protocol agnostic. 
In the -// case of downstream StartTls a network filter is required which understands protocol exchange -// and a state machine to signal to the StartTls transport socket when a switch to TLS is -// required. Similarly, upstream StartTls requires the owner of an upstream transport socket to -// manage the state machine necessary to properly coordinate negotiation with the upstream and -// signal to the transport socket when a switch to secure transport is required. - -// Configuration for a downstream StartTls transport socket. -// StartTls transport socket wraps two sockets: -// * raw_buffer socket which is used at the beginning of the session -// * TLS socket used when a protocol negotiates a switch to encrypted traffic. -message StartTlsConfig { - // (optional) Configuration for clear-text socket used at the beginning of the session. - raw_buffer.v3.RawBuffer cleartext_socket_config = 1; - - // Configuration for a downstream TLS socket. - transport_sockets.tls.v3.DownstreamTlsContext tls_socket_config = 2 - [(validate.rules).message = {required: true}]; -} - -// Configuration for an upstream StartTls transport socket. -// StartTls transport socket wraps two sockets: -// * raw_buffer socket which is used at the beginning of the session -// * TLS socket used when a protocol negotiates a switch to encrypted traffic. -message UpstreamStartTlsConfig { - // (optional) Configuration for clear-text socket used at the beginning of the session. - raw_buffer.v3.RawBuffer cleartext_socket_config = 1; - - // Configuration for an upstream TLS socket. 
- transport_sockets.tls.v3.UpstreamTlsContext tls_socket_config = 2 - [(validate.rules).message = {required: true}]; -} diff --git a/generated_api_shadow/envoy/extensions/transport_sockets/tap/v3/BUILD b/generated_api_shadow/envoy/extensions/transport_sockets/tap/v3/BUILD deleted file mode 100644 index b97db3d63736c..0000000000000 --- a/generated_api_shadow/envoy/extensions/transport_sockets/tap/v3/BUILD +++ /dev/null @@ -1,13 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/core/v3:pkg", - "//envoy/extensions/common/tap/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/transport_sockets/tap/v3/tap.proto b/generated_api_shadow/envoy/extensions/transport_sockets/tap/v3/tap.proto deleted file mode 100644 index ef61575f67f72..0000000000000 --- a/generated_api_shadow/envoy/extensions/transport_sockets/tap/v3/tap.proto +++ /dev/null @@ -1,32 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.transport_sockets.tap.v3; - -import "envoy/config/core/v3/base.proto"; -import "envoy/extensions/common/tap/v3/common.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.transport_sockets.tap.v3"; -option java_outer_classname = "TapProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Tap] -// [#extension: envoy.transport_sockets.tap] - -// Configuration for tap transport socket. This wraps another transport socket, providing the -// ability to interpose and record in plain text any traffic that is surfaced to Envoy. 
-message Tap { - option (udpa.annotations.versioning).previous_message_type = - "envoy.config.transport_socket.tap.v2alpha.Tap"; - - // Common configuration for the tap transport socket. - common.tap.v3.CommonExtensionConfig common_config = 1 - [(validate.rules).message = {required: true}]; - - // The underlying transport socket being wrapped. - config.core.v3.TransportSocket transport_socket = 2 [(validate.rules).message = {required: true}]; -} diff --git a/generated_api_shadow/envoy/extensions/transport_sockets/tls/v3/BUILD b/generated_api_shadow/envoy/extensions/transport_sockets/tls/v3/BUILD deleted file mode 100644 index 47b9b9ae57e96..0000000000000 --- a/generated_api_shadow/envoy/extensions/transport_sockets/tls/v3/BUILD +++ /dev/null @@ -1,15 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/annotations:pkg", - "//envoy/api/v2/auth:pkg", - "//envoy/config/core/v3:pkg", - "//envoy/type/matcher/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/transport_sockets/tls/v3/cert.proto b/generated_api_shadow/envoy/extensions/transport_sockets/tls/v3/cert.proto deleted file mode 100644 index b451d45381ca4..0000000000000 --- a/generated_api_shadow/envoy/extensions/transport_sockets/tls/v3/cert.proto +++ /dev/null @@ -1,11 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.transport_sockets.tls.v3; - -import public "envoy/extensions/transport_sockets/tls/v3/common.proto"; -import public "envoy/extensions/transport_sockets/tls/v3/secret.proto"; -import public "envoy/extensions/transport_sockets/tls/v3/tls.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.transport_sockets.tls.v3"; -option java_outer_classname = "CertProto"; -option java_multiple_files = true; diff --git 
a/generated_api_shadow/envoy/extensions/transport_sockets/tls/v3/common.proto b/generated_api_shadow/envoy/extensions/transport_sockets/tls/v3/common.proto deleted file mode 100644 index 1a86020683507..0000000000000 --- a/generated_api_shadow/envoy/extensions/transport_sockets/tls/v3/common.proto +++ /dev/null @@ -1,441 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.transport_sockets.tls.v3; - -import "envoy/config/core/v3/base.proto"; -import "envoy/config/core/v3/extension.proto"; -import "envoy/type/matcher/v3/string.proto"; - -import "google/protobuf/any.proto"; -import "google/protobuf/struct.proto"; -import "google/protobuf/wrappers.proto"; - -import "envoy/annotations/deprecation.proto"; -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/sensitive.proto"; -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.transport_sockets.tls.v3"; -option java_outer_classname = "CommonProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Common TLS configuration] - -message TlsParameters { - option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.auth.TlsParameters"; - - enum TlsProtocol { - // Envoy will choose the optimal TLS version. - TLS_AUTO = 0; - - // TLS 1.0 - TLSv1_0 = 1; - - // TLS 1.1 - TLSv1_1 = 2; - - // TLS 1.2 - TLSv1_2 = 3; - - // TLS 1.3 - TLSv1_3 = 4; - } - - // Minimum TLS protocol version. By default, it's ``TLSv1_2`` for clients and ``TLSv1_0`` for - // servers. - TlsProtocol tls_minimum_protocol_version = 1 [(validate.rules).enum = {defined_only: true}]; - - // Maximum TLS protocol version. By default, it's ``TLSv1_2`` for clients and ``TLSv1_3`` for - // servers. 
- TlsProtocol tls_maximum_protocol_version = 2 [(validate.rules).enum = {defined_only: true}]; - - // If specified, the TLS listener will only support the specified `cipher list - // `_ - // when negotiating TLS 1.0-1.2 (this setting has no effect when negotiating TLS 1.3). - // - // If not specified, a default list will be used. Defaults are different for server (downstream) and - // client (upstream) TLS configurations. - // - // In non-FIPS builds, the default server cipher list is: - // - // .. code-block:: none - // - // [ECDHE-ECDSA-AES128-GCM-SHA256|ECDHE-ECDSA-CHACHA20-POLY1305] - // [ECDHE-RSA-AES128-GCM-SHA256|ECDHE-RSA-CHACHA20-POLY1305] - // ECDHE-ECDSA-AES128-SHA - // ECDHE-RSA-AES128-SHA - // AES128-GCM-SHA256 - // AES128-SHA - // ECDHE-ECDSA-AES256-GCM-SHA384 - // ECDHE-RSA-AES256-GCM-SHA384 - // ECDHE-ECDSA-AES256-SHA - // ECDHE-RSA-AES256-SHA - // AES256-GCM-SHA384 - // AES256-SHA - // - // In builds using :ref:`BoringSSL FIPS `, the default server cipher list is: - // - // .. code-block:: none - // - // ECDHE-ECDSA-AES128-GCM-SHA256 - // ECDHE-RSA-AES128-GCM-SHA256 - // ECDHE-ECDSA-AES128-SHA - // ECDHE-RSA-AES128-SHA - // AES128-GCM-SHA256 - // AES128-SHA - // ECDHE-ECDSA-AES256-GCM-SHA384 - // ECDHE-RSA-AES256-GCM-SHA384 - // ECDHE-ECDSA-AES256-SHA - // ECDHE-RSA-AES256-SHA - // AES256-GCM-SHA384 - // AES256-SHA - // - // In non-FIPS builds, the default client cipher list is: - // - // .. code-block:: none - // - // [ECDHE-ECDSA-AES128-GCM-SHA256|ECDHE-ECDSA-CHACHA20-POLY1305] - // [ECDHE-RSA-AES128-GCM-SHA256|ECDHE-RSA-CHACHA20-POLY1305] - // ECDHE-ECDSA-AES256-GCM-SHA384 - // ECDHE-RSA-AES256-GCM-SHA384 - // - // In builds using :ref:`BoringSSL FIPS `, the default client cipher list is: - // - // .. 
code-block:: none - // - // ECDHE-ECDSA-AES128-GCM-SHA256 - // ECDHE-RSA-AES128-GCM-SHA256 - // ECDHE-ECDSA-AES256-GCM-SHA384 - // ECDHE-RSA-AES256-GCM-SHA384 - repeated string cipher_suites = 3; - - // If specified, the TLS connection will only support the specified ECDH - // curves. If not specified, the default curves will be used. - // - // In non-FIPS builds, the default curves are: - // - // .. code-block:: none - // - // X25519 - // P-256 - // - // In builds using :ref:`BoringSSL FIPS `, the default curve is: - // - // .. code-block:: none - // - // P-256 - repeated string ecdh_curves = 4; -} - -// BoringSSL private key method configuration. The private key methods are used for external -// (potentially asynchronous) signing and decryption operations. Some use cases for private key -// methods would be TPM support and TLS acceleration. -message PrivateKeyProvider { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.auth.PrivateKeyProvider"; - - // Private key method provider name. The name must match a - // supported private key method provider type. - string provider_name = 1 [(validate.rules).string = {min_len: 1}]; - - // Private key method provider specific configuration. - oneof config_type { - google.protobuf.Any typed_config = 3 [(udpa.annotations.sensitive) = true]; - - google.protobuf.Struct hidden_envoy_deprecated_config = 2 [ - deprecated = true, - (udpa.annotations.sensitive) = true, - (envoy.annotations.deprecated_at_minor_version) = "3.0" - ]; - } -} - -// [#next-free-field: 8] -message TlsCertificate { - option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.auth.TlsCertificate"; - - // The TLS certificate chain. - // - // If *certificate_chain* is a filesystem path, a watch will be added to the - // parent directory for any file moves to support rotation. This currently - // only applies to dynamic secrets, when the *TlsCertificate* is delivered via - // SDS. 
- config.core.v3.DataSource certificate_chain = 1; - - // The TLS private key. - // - // If *private_key* is a filesystem path, a watch will be added to the parent - // directory for any file moves to support rotation. This currently only - // applies to dynamic secrets, when the *TlsCertificate* is delivered via SDS. - config.core.v3.DataSource private_key = 2 [(udpa.annotations.sensitive) = true]; - - // If specified, updates of file-based *certificate_chain* and *private_key* - // sources will be triggered by this watch. The certificate/key pair will be - // read together and validated for atomic read consistency (i.e. no - // intervening modification occurred between cert/key read, verified by file - // hash comparisons). This allows explicit control over the path watched, by - // default the parent directories of the filesystem paths in - // *certificate_chain* and *private_key* are watched if this field is not - // specified. This only applies when a *TlsCertificate* is delivered by SDS - // with references to filesystem paths. See the :ref:`SDS key rotation - // ` documentation for further details. - config.core.v3.WatchedDirectory watched_directory = 7; - - // BoringSSL private key method provider. This is an alternative to :ref:`private_key - // ` field. This can't be - // marked as ``oneof`` due to API compatibility reasons. Setting both :ref:`private_key - // ` and - // :ref:`private_key_provider - // ` fields will result in an - // error. - PrivateKeyProvider private_key_provider = 6; - - // The password to decrypt the TLS private key. If this field is not set, it is assumed that the - // TLS private key is not password encrypted. - config.core.v3.DataSource password = 3 [(udpa.annotations.sensitive) = true]; - - // The OCSP response to be stapled with this certificate during the handshake. - // The response must be DER-encoded and may only be provided via ``filename`` or - // ``inline_bytes``. The response may pertain to only one certificate. 
- config.core.v3.DataSource ocsp_staple = 4; - - // [#not-implemented-hide:] - repeated config.core.v3.DataSource signed_certificate_timestamp = 5; -} - -message TlsSessionTicketKeys { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.auth.TlsSessionTicketKeys"; - - // Keys for encrypting and decrypting TLS session tickets. The - // first key in the array contains the key to encrypt all new sessions created by this context. - // All keys are candidates for decrypting received tickets. This allows for easy rotation of keys - // by, for example, putting the new key first, and the previous key second. - // - // If :ref:`session_ticket_keys ` - // is not specified, the TLS library will still support resuming sessions via tickets, but it will - // use an internally-generated and managed key, so sessions cannot be resumed across hot restarts - // or on different hosts. - // - // Each key must contain exactly 80 bytes of cryptographically-secure random data. For - // example, the output of ``openssl rand 80``. - // - // .. attention:: - // - // Using this feature has serious security considerations and risks. Improper handling of keys - // may result in loss of secrecy in connections, even if ciphers supporting perfect forward - // secrecy are used. See https://www.imperialviolet.org/2013/06/27/botchingpfs.html for some - // discussion. To minimize the risk, you must: - // - // * Keep the session ticket keys at least as secure as your TLS certificate private keys - // * Rotate session ticket keys at least daily, and preferably hourly - // * Always generate keys using a cryptographically-secure random data source - repeated config.core.v3.DataSource keys = 1 - [(validate.rules).repeated = {min_items: 1}, (udpa.annotations.sensitive) = true]; -} - -// Indicates a certificate to be obtained from a named CertificateProvider plugin instance. -// The plugin instances are defined in the client's bootstrap file. 
-// The plugin allows certificates to be fetched/refreshed over the network asynchronously with -// respect to the TLS handshake. -// [#not-implemented-hide:] -message CertificateProviderPluginInstance { - // Provider instance name. If not present, defaults to "default". - // - // Instance names should generally be defined not in terms of the underlying provider - // implementation (e.g., "file_watcher") but rather in terms of the function of the - // certificates (e.g., "foo_deployment_identity"). - string instance_name = 1; - - // Opaque name used to specify certificate instances or types. For example, "ROOTCA" to specify - // a root-certificate (validation context) or "example.com" to specify a certificate for a - // particular domain. Not all provider instances will actually use this field, so the value - // defaults to the empty string. - string certificate_name = 2; -} - -// [#next-free-field: 14] -message CertificateValidationContext { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.auth.CertificateValidationContext"; - - // Peer certificate verification mode. - enum TrustChainVerification { - // Perform default certificate verification (e.g., against CA / verification lists) - VERIFY_TRUST_CHAIN = 0; - - // Connections where the certificate fails verification will be permitted. - // For HTTP connections, the result of certificate verification can be used in route matching. ( - // see :ref:`validated ` ). - ACCEPT_UNTRUSTED = 1; - } - - reserved 5; - - // TLS certificate data containing certificate authority certificates to use in verifying - // a presented peer certificate (e.g. server certificate for clusters or client certificate - // for listeners). If not specified and a peer certificate is presented it will not be - // verified. 
By default, a client certificate is optional, unless one of the additional - // options (:ref:`require_client_certificate - // `, - // :ref:`verify_certificate_spki - // `, - // :ref:`verify_certificate_hash - // `, or - // :ref:`match_subject_alt_names - // `) is also - // specified. - // - // It can optionally contain certificate revocation lists, in which case Envoy will verify - // that the presented peer certificate has not been revoked by one of the included CRLs. Note - // that if a CRL is provided for any certificate authority in a trust chain, a CRL must be - // provided for all certificate authorities in that chain. Failure to do so will result in - // verification failure for both revoked and unrevoked certificates from that chain. - // - // See :ref:`the TLS overview ` for a list of common - // system CA locations. - // - // If *trusted_ca* is a filesystem path, a watch will be added to the parent - // directory for any file moves to support rotation. This currently only - // applies to dynamic secrets, when the *CertificateValidationContext* is - // delivered via SDS. - // - // Only one of *trusted_ca* and *ca_certificate_provider_instance* may be specified. - // - // [#next-major-version: This field and watched_directory below should ideally be moved into a - // separate sub-message, since there's no point in specifying the latter field without this one.] - config.core.v3.DataSource trusted_ca = 1 - [(udpa.annotations.field_migrate).oneof_promotion = "ca_cert_source"]; - - // Certificate provider instance for fetching TLS certificates. - // - // Only one of *trusted_ca* and *ca_certificate_provider_instance* may be specified. - // [#not-implemented-hide:] - CertificateProviderPluginInstance ca_certificate_provider_instance = 13 - [(udpa.annotations.field_migrate).oneof_promotion = "ca_cert_source"]; - - // If specified, updates of a file-based *trusted_ca* source will be triggered - // by this watch. 
This allows explicit control over the path watched, by - // default the parent directory of the filesystem path in *trusted_ca* is - // watched if this field is not specified. This only applies when a - // *CertificateValidationContext* is delivered by SDS with references to - // filesystem paths. See the :ref:`SDS key rotation ` - // documentation for further details. - config.core.v3.WatchedDirectory watched_directory = 11; - - // An optional list of base64-encoded SHA-256 hashes. If specified, Envoy will verify that the - // SHA-256 of the DER-encoded Subject Public Key Information (SPKI) of the presented certificate - // matches one of the specified values. - // - // A base64-encoded SHA-256 of the Subject Public Key Information (SPKI) of the certificate - // can be generated with the following command: - // - // .. code-block:: bash - // - // $ openssl x509 -in path/to/client.crt -noout -pubkey - // | openssl pkey -pubin -outform DER - // | openssl dgst -sha256 -binary - // | openssl enc -base64 - // NvqYIYSbgK2vCJpQhObf77vv+bQWtc5ek5RIOwPiC9A= - // - // This is the format used in HTTP Public Key Pinning. - // - // When both: - // :ref:`verify_certificate_hash - // ` and - // :ref:`verify_certificate_spki - // ` are specified, - // a hash matching value from either of the lists will result in the certificate being accepted. - // - // .. attention:: - // - // This option is preferred over :ref:`verify_certificate_hash - // `, - // because SPKI is tied to a private key, so it doesn't change when the certificate - // is renewed using the same private key. - repeated string verify_certificate_spki = 3 - [(validate.rules).repeated = {items {string {min_len: 44 max_bytes: 44}}}]; - - // An optional list of hex-encoded SHA-256 hashes. If specified, Envoy will verify that - // the SHA-256 of the DER-encoded presented certificate matches one of the specified values. 
- // - // A hex-encoded SHA-256 of the certificate can be generated with the following command: - // - // .. code-block:: bash - // - // $ openssl x509 -in path/to/client.crt -outform DER | openssl dgst -sha256 | cut -d" " -f2 - // df6ff72fe9116521268f6f2dd4966f51df479883fe7037b39f75916ac3049d1a - // - // A long hex-encoded and colon-separated SHA-256 (a.k.a. "fingerprint") of the certificate - // can be generated with the following command: - // - // .. code-block:: bash - // - // $ openssl x509 -in path/to/client.crt -noout -fingerprint -sha256 | cut -d"=" -f2 - // DF:6F:F7:2F:E9:11:65:21:26:8F:6F:2D:D4:96:6F:51:DF:47:98:83:FE:70:37:B3:9F:75:91:6A:C3:04:9D:1A - // - // Both of those formats are acceptable. - // - // When both: - // :ref:`verify_certificate_hash - // ` and - // :ref:`verify_certificate_spki - // ` are specified, - // a hash matching value from either of the lists will result in the certificate being accepted. - repeated string verify_certificate_hash = 2 - [(validate.rules).repeated = {items {string {min_len: 64 max_bytes: 95}}}]; - - // An optional list of Subject Alternative name matchers. If specified, Envoy will verify that the - // Subject Alternative Name of the presented certificate matches one of the specified matchers. - // - // When a certificate has wildcard DNS SAN entries, to match a specific client, it should be - // configured with exact match type in the :ref:`string matcher `. - // For example if the certificate has "\*.example.com" as DNS SAN entry, to allow only "api.example.com", - // it should be configured as shown below. - // - // .. code-block:: yaml - // - // match_subject_alt_names: - // exact: "api.example.com" - // - // .. attention:: - // - // Subject Alternative Names are easily spoofable and verifying only them is insecure, - // therefore this option must be used together with :ref:`trusted_ca - // `. 
- repeated type.matcher.v3.StringMatcher match_subject_alt_names = 9; - - // [#not-implemented-hide:] Must present signed certificate time-stamp. - google.protobuf.BoolValue require_signed_certificate_timestamp = 6; - - // An optional `certificate revocation list - // `_ - // (in PEM format). If specified, Envoy will verify that the presented peer - // certificate has not been revoked by this CRL. If this DataSource contains - // multiple CRLs, all of them will be used. Note that if a CRL is provided - // for any certificate authority in a trust chain, a CRL must be provided - // for all certificate authorities in that chain. Failure to do so will - // result in verification failure for both revoked and unrevoked certificates - // from that chain. - config.core.v3.DataSource crl = 7; - - // If specified, Envoy will not reject expired certificates. - bool allow_expired_certificate = 8; - - // Certificate trust chain verification mode. - TrustChainVerification trust_chain_verification = 10 - [(validate.rules).enum = {defined_only: true}]; - - // The configuration of an extension specific certificate validator. - // If specified, all validation is done by the specified validator, - // and the behavior of all other validation settings is defined by the specified validator (and may be entirely ignored, unused, and unvalidated). - // Refer to the documentation for the specified validator. If you do not want a custom validation algorithm, do not set this field. 
- // [#extension-category: envoy.tls.cert_validator] - config.core.v3.TypedExtensionConfig custom_validator_config = 12; - - repeated string hidden_envoy_deprecated_verify_subject_alt_name = 4 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; -} diff --git a/generated_api_shadow/envoy/extensions/transport_sockets/tls/v3/secret.proto b/generated_api_shadow/envoy/extensions/transport_sockets/tls/v3/secret.proto deleted file mode 100644 index f7c849c0334e1..0000000000000 --- a/generated_api_shadow/envoy/extensions/transport_sockets/tls/v3/secret.proto +++ /dev/null @@ -1,55 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.transport_sockets.tls.v3; - -import "envoy/config/core/v3/base.proto"; -import "envoy/config/core/v3/config_source.proto"; -import "envoy/extensions/transport_sockets/tls/v3/common.proto"; - -import "udpa/annotations/sensitive.proto"; -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.transport_sockets.tls.v3"; -option java_outer_classname = "SecretProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Secrets configuration] - -message GenericSecret { - option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.auth.GenericSecret"; - - // Secret of generic type and is available to filters. - config.core.v3.DataSource secret = 1 [(udpa.annotations.sensitive) = true]; -} - -message SdsSecretConfig { - option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.auth.SdsSecretConfig"; - - // Name by which the secret can be uniquely referred to. When both name and config are specified, - // then secret can be fetched and/or reloaded via SDS. When only name is specified, then secret - // will be loaded from static resources. 
- string name = 1 [(validate.rules).string = {min_len: 1}]; - - config.core.v3.ConfigSource sds_config = 2; -} - -// [#next-free-field: 6] -message Secret { - option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.auth.Secret"; - - // Name (FQDN, UUID, SPKI, SHA256, etc.) by which the secret can be uniquely referred to. - string name = 1; - - oneof type { - TlsCertificate tls_certificate = 2; - - TlsSessionTicketKeys session_ticket_keys = 3; - - CertificateValidationContext validation_context = 4; - - GenericSecret generic_secret = 5; - } -} diff --git a/generated_api_shadow/envoy/extensions/transport_sockets/tls/v3/tls.proto b/generated_api_shadow/envoy/extensions/transport_sockets/tls/v3/tls.proto deleted file mode 100644 index f680207955a8c..0000000000000 --- a/generated_api_shadow/envoy/extensions/transport_sockets/tls/v3/tls.proto +++ /dev/null @@ -1,302 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.transport_sockets.tls.v3; - -import "envoy/config/core/v3/extension.proto"; -import "envoy/extensions/transport_sockets/tls/v3/common.proto"; -import "envoy/extensions/transport_sockets/tls/v3/secret.proto"; - -import "google/protobuf/duration.proto"; -import "google/protobuf/wrappers.proto"; - -import "envoy/annotations/deprecation.proto"; -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.transport_sockets.tls.v3"; -option java_outer_classname = "TlsProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: TLS transport socket] -// [#extension: envoy.transport_sockets.tls] -// The TLS contexts below provide the transport socket configuration for upstream/downstream TLS. 
- -message UpstreamTlsContext { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.auth.UpstreamTlsContext"; - - // Common TLS context settings. - // - // .. attention:: - // - // Server certificate verification is not enabled by default. Configure - // :ref:`trusted_ca` to enable - // verification. - CommonTlsContext common_tls_context = 1; - - // SNI string to use when creating TLS backend connections. - string sni = 2 [(validate.rules).string = {max_bytes: 255}]; - - // If true, server-initiated TLS renegotiation will be allowed. - // - // .. attention:: - // - // TLS renegotiation is considered insecure and shouldn't be used unless absolutely necessary. - bool allow_renegotiation = 3; - - // Maximum number of session keys (Pre-Shared Keys for TLSv1.3+, Session IDs and Session Tickets - // for TLSv1.2 and older) to store for the purpose of session resumption. - // - // Defaults to 1, setting this to 0 disables session resumption. - google.protobuf.UInt32Value max_session_keys = 4; -} - -// [#next-free-field: 9] -message DownstreamTlsContext { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.auth.DownstreamTlsContext"; - - enum OcspStaplePolicy { - // OCSP responses are optional. If an OCSP response is absent - // or expired, the associated certificate will be used for - // connections without an OCSP staple. - LENIENT_STAPLING = 0; - - // OCSP responses are optional. If an OCSP response is absent, - // the associated certificate will be used without an - // OCSP staple. If a response is provided but is expired, - // the associated certificate will not be used for - // subsequent connections. If no suitable certificate is found, - // the connection is rejected. - STRICT_STAPLING = 1; - - // OCSP responses are required. Configuration will fail if - // a certificate is provided without an OCSP response. If a - // response expires, the associated certificate will not be - // used connections. 
If no suitable certificate is found, the - // connection is rejected. - MUST_STAPLE = 2; - } - - // Common TLS context settings. - CommonTlsContext common_tls_context = 1; - - // If specified, Envoy will reject connections without a valid client - // certificate. - google.protobuf.BoolValue require_client_certificate = 2; - - // If specified, Envoy will reject connections without a valid and matching SNI. - // [#not-implemented-hide:] - google.protobuf.BoolValue require_sni = 3; - - oneof session_ticket_keys_type { - // TLS session ticket key settings. - TlsSessionTicketKeys session_ticket_keys = 4; - - // Config for fetching TLS session ticket keys via SDS API. - SdsSecretConfig session_ticket_keys_sds_secret_config = 5; - - // Config for controlling stateless TLS session resumption: setting this to true will cause the TLS - // server to not issue TLS session tickets for the purposes of stateless TLS session resumption. - // If set to false, the TLS server will issue TLS session tickets and encrypt/decrypt them using - // the keys specified through either :ref:`session_ticket_keys ` - // or :ref:`session_ticket_keys_sds_secret_config `. - // If this config is set to false and no keys are explicitly configured, the TLS server will issue - // TLS session tickets and encrypt/decrypt them using an internally-generated and managed key, with the - // implication that sessions cannot be resumed across hot restarts or on different hosts. - bool disable_stateless_session_resumption = 7; - } - - // If specified, session_timeout will change maximum lifetime (in seconds) of TLS session - // Currently this value is used as a hint to `TLS session ticket lifetime (for TLSv1.2) - // ` - // only seconds could be specified (fractional seconds are going to be ignored). 
- google.protobuf.Duration session_timeout = 6 [(validate.rules).duration = { - lt {seconds: 4294967296} - gte {} - }]; - - // Config for whether to use certificates if they do not have - // an accompanying OCSP response or if the response expires at runtime. - // Defaults to LENIENT_STAPLING - OcspStaplePolicy ocsp_staple_policy = 8 [(validate.rules).enum = {defined_only: true}]; -} - -// TLS context shared by both client and server TLS contexts. -// [#next-free-field: 15] -message CommonTlsContext { - option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.auth.CommonTlsContext"; - - // Config for Certificate provider to get certificates. This provider should allow certificates to be - // fetched/refreshed over the network asynchronously with respect to the TLS handshake. - // - // DEPRECATED: This message is not currently used, but if we ever do need it, we will want to - // move it out of CommonTlsContext and into common.proto, similar to the existing - // CertificateProviderPluginInstance message. - // - // [#not-implemented-hide:] - message CertificateProvider { - // opaque name used to specify certificate instances or types. For example, "ROOTCA" to specify - // a root-certificate (validation context) or "TLS" to specify a new tls-certificate. - string name = 1 [(validate.rules).string = {min_len: 1}]; - - // Provider specific config. - // Note: an implementation is expected to dedup multiple instances of the same config - // to maintain a single certificate-provider instance. The sharing can happen, for - // example, among multiple clusters or between the tls_certificate and validation_context - // certificate providers of a cluster. - // This config could be supplied inline or (in future) a named xDS resource. 
- oneof config { - option (validate.required) = true; - - config.core.v3.TypedExtensionConfig typed_config = 2; - } - } - - // Similar to CertificateProvider above, but allows the provider instances to be configured on - // the client side instead of being sent from the control plane. - // - // DEPRECATED: This message was moved outside of CommonTlsContext - // and now lives in common.proto. - // - // [#not-implemented-hide:] - message CertificateProviderInstance { - // Provider instance name. This name must be defined in the client's configuration (e.g., a - // bootstrap file) to correspond to a provider instance (i.e., the same data in the typed_config - // field that would be sent in the CertificateProvider message if the config was sent by the - // control plane). If not present, defaults to "default". - // - // Instance names should generally be defined not in terms of the underlying provider - // implementation (e.g., "file_watcher") but rather in terms of the function of the - // certificates (e.g., "foo_deployment_identity"). - string instance_name = 1; - - // Opaque name used to specify certificate instances or types. For example, "ROOTCA" to specify - // a root-certificate (validation context) or "example.com" to specify a certificate for a - // particular domain. Not all provider instances will actually use this field, so the value - // defaults to the empty string. - string certificate_name = 2; - } - - message CombinedCertificateValidationContext { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.auth.CommonTlsContext.CombinedCertificateValidationContext"; - - // How to validate peer certificates. - CertificateValidationContext default_validation_context = 1 - [(validate.rules).message = {required: true}]; - - // Config for fetching validation context via SDS API. Note SDS API allows certificates to be - // fetched/refreshed over the network asynchronously with respect to the TLS handshake. 
- SdsSecretConfig validation_context_sds_secret_config = 2 - [(validate.rules).message = {required: true}]; - - // Certificate provider for fetching CA certs. This will populate the - // *default_validation_context.trusted_ca* field. - // [#not-implemented-hide:] - CertificateProvider validation_context_certificate_provider = 3 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - - // Certificate provider instance for fetching CA certs. This will populate the - // *default_validation_context.trusted_ca* field. - // [#not-implemented-hide:] - CertificateProviderInstance validation_context_certificate_provider_instance = 4 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - } - - reserved 5; - - // TLS protocol versions, cipher suites etc. - TlsParameters tls_params = 1; - - // :ref:`Multiple TLS certificates ` can be associated with the - // same context to allow both RSA and ECDSA certificates. - // - // Only a single TLS certificate is supported in client contexts. In server contexts, the first - // RSA certificate is used for clients that only support RSA and the first ECDSA certificate is - // used for clients that support ECDSA. - // - // Only one of *tls_certificates*, *tls_certificate_sds_secret_configs*, - // and *tls_certificate_provider_instance* may be used. - // [#next-major-version: These mutually exclusive fields should ideally be in a oneof, but it's - // not legal to put a repeated field in a oneof. In the next major version, we should rework - // this to avoid this problem.] - repeated TlsCertificate tls_certificates = 2; - - // Configs for fetching TLS certificates via SDS API. Note SDS API allows certificates to be - // fetched/refreshed over the network asynchronously with respect to the TLS handshake. - // - // The same number and types of certificates as :ref:`tls_certificates ` - // are valid in the the certificates fetched through this setting. 
- // - // Only one of *tls_certificates*, *tls_certificate_sds_secret_configs*, - // and *tls_certificate_provider_instance* may be used. - // [#next-major-version: These mutually exclusive fields should ideally be in a oneof, but it's - // not legal to put a repeated field in a oneof. In the next major version, we should rework - // this to avoid this problem.] - repeated SdsSecretConfig tls_certificate_sds_secret_configs = 6 - [(validate.rules).repeated = {max_items: 2}]; - - // Certificate provider instance for fetching TLS certs. - // - // Only one of *tls_certificates*, *tls_certificate_sds_secret_configs*, - // and *tls_certificate_provider_instance* may be used. - // [#not-implemented-hide:] - CertificateProviderPluginInstance tls_certificate_provider_instance = 14; - - // Certificate provider for fetching TLS certificates. - // [#not-implemented-hide:] - CertificateProvider tls_certificate_certificate_provider = 9 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - - // Certificate provider instance for fetching TLS certificates. - // [#not-implemented-hide:] - CertificateProviderInstance tls_certificate_certificate_provider_instance = 11 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - - oneof validation_context_type { - // How to validate peer certificates. - CertificateValidationContext validation_context = 3; - - // Config for fetching validation context via SDS API. Note SDS API allows certificates to be - // fetched/refreshed over the network asynchronously with respect to the TLS handshake. - SdsSecretConfig validation_context_sds_secret_config = 7; - - // Combined certificate validation context holds a default CertificateValidationContext - // and SDS config. When SDS server returns dynamic CertificateValidationContext, both dynamic - // and default CertificateValidationContext are merged into a new CertificateValidationContext - // for validation. 
This merge is done by Message::MergeFrom(), so dynamic - // CertificateValidationContext overwrites singular fields in default - // CertificateValidationContext, and concatenates repeated fields to default - // CertificateValidationContext, and logical OR is applied to boolean fields. - CombinedCertificateValidationContext combined_validation_context = 8; - - // Certificate provider for fetching validation context. - // [#not-implemented-hide:] - CertificateProvider validation_context_certificate_provider = 10 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - - // Certificate provider instance for fetching validation context. - // [#not-implemented-hide:] - CertificateProviderInstance validation_context_certificate_provider_instance = 12 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - } - - // Supplies the list of ALPN protocols that the listener should expose. In - // practice this is likely to be set to one of two values (see the - // :ref:`codec_type - // ` - // parameter in the HTTP connection manager for more information): - // - // * "h2,http/1.1" If the listener is going to support both HTTP/2 and HTTP/1.1. - // * "http/1.1" If the listener is only going to support HTTP/1.1. - // - // There is no default for this parameter. If empty, Envoy will not expose ALPN. - repeated string alpn_protocols = 4; - - // Custom TLS handshaker. If empty, defaults to native TLS handshaking - // behavior. 
- config.core.v3.TypedExtensionConfig custom_handshaker = 13; -} diff --git a/generated_api_shadow/envoy/extensions/transport_sockets/tls/v3/tls_spiffe_validator_config.proto b/generated_api_shadow/envoy/extensions/transport_sockets/tls/v3/tls_spiffe_validator_config.proto deleted file mode 100644 index cfb5e5c07e90c..0000000000000 --- a/generated_api_shadow/envoy/extensions/transport_sockets/tls/v3/tls_spiffe_validator_config.proto +++ /dev/null @@ -1,59 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.transport_sockets.tls.v3; - -import "envoy/config/core/v3/base.proto"; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.transport_sockets.tls.v3"; -option java_outer_classname = "TlsSpiffeValidatorConfigProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: SPIFFE Certificate Validator] -// [#extension: envoy.tls.cert_validator.spiffe] - -// Configuration specific to the `SPIFFE `_ certificate validator. -// -// Example: -// -// .. validated-code-block:: yaml -// :type-name: envoy.extensions.transport_sockets.tls.v3.CertificateValidationContext -// -// custom_validator_config: -// name: envoy.tls.cert_validator.spiffe -// typed_config: -// "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.SPIFFECertValidatorConfig -// trust_domains: -// - name: foo.com -// trust_bundle: -// filename: "foo.pem" -// - name: envoy.com -// trust_bundle: -// filename: "envoy.pem" -// -// In this example, a presented peer certificate whose SAN matches `spiffe//foo.com/**` is validated against -// the "foo.pem" x.509 certificate. All the trust bundles are isolated from each other, so no trust domain can mint -// a SVID belonging to another trust domain. 
That means, in this example, a SVID signed by `envoy.com`'s CA with `spiffe//foo.com/**` -// SAN would be rejected since Envoy selects the trust bundle according to the presented SAN before validate the certificate. -// -// Note that SPIFFE validator inherits and uses the following options from :ref:`CertificateValidationContext `. -// -// - :ref:`allow_expired_certificate ` to allow expired certificates. -// - :ref:`match_subject_alt_names ` to match **URI** SAN of certificates. Unlike the default validator, SPIFFE validator only matches **URI** SAN (which equals to SVID in SPIFFE terminology) and ignore other SAN types. -// -message SPIFFECertValidatorConfig { - message TrustDomain { - // Name of the trust domain, `example.com`, `foo.bar.gov` for example. - // Note that this must *not* have "spiffe://" prefix. - string name = 1 [(validate.rules).string = {min_len: 1}]; - - // Specify a data source holding x.509 trust bundle used for validating incoming SVID(s) in this trust domain. - config.core.v3.DataSource trust_bundle = 2; - } - - // This field specifies trust domains used for validating incoming X.509-SVID(s). - repeated TrustDomain trust_domains = 1 [(validate.rules).repeated = {min_items: 1}]; -} diff --git a/generated_api_shadow/envoy/extensions/upstreams/http/generic/v3/BUILD b/generated_api_shadow/envoy/extensions/upstreams/http/generic/v3/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/extensions/upstreams/http/generic/v3/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/extensions/upstreams/http/generic/v3/generic_connection_pool.proto b/generated_api_shadow/envoy/extensions/upstreams/http/generic/v3/generic_connection_pool.proto deleted file mode 100644 index 44e207172c9b1..0000000000000 --- a/generated_api_shadow/envoy/extensions/upstreams/http/generic/v3/generic_connection_pool.proto +++ /dev/null @@ -1,18 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.upstreams.http.generic.v3; - -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.upstreams.http.generic.v3"; -option java_outer_classname = "GenericConnectionPoolProtoOuterClass"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Generic Connection Pool] - -// A connection pool which forwards downstream HTTP as TCP or HTTP to upstream, -// based on CONNECT configuration. -// [#extension: envoy.upstreams.http.generic] -message GenericConnectionPoolProto { -} diff --git a/generated_api_shadow/envoy/extensions/upstreams/http/http/v3/BUILD b/generated_api_shadow/envoy/extensions/upstreams/http/http/v3/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/extensions/upstreams/http/http/v3/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/extensions/upstreams/http/http/v3/http_connection_pool.proto b/generated_api_shadow/envoy/extensions/upstreams/http/http/v3/http_connection_pool.proto deleted file mode 100644 index 8318f3c666d90..0000000000000 --- a/generated_api_shadow/envoy/extensions/upstreams/http/http/v3/http_connection_pool.proto +++ /dev/null @@ -1,17 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.upstreams.http.http.v3; - -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.upstreams.http.http.v3"; -option java_outer_classname = "HttpConnectionPoolProtoOuterClass"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Http Connection Pool] - -// A connection pool which forwards downstream HTTP as HTTP to upstream. -// [#extension: envoy.upstreams.http.http] -message HttpConnectionPoolProto { -} diff --git a/generated_api_shadow/envoy/extensions/upstreams/http/tcp/v3/BUILD b/generated_api_shadow/envoy/extensions/upstreams/http/tcp/v3/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/extensions/upstreams/http/tcp/v3/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/extensions/upstreams/http/tcp/v3/tcp_connection_pool.proto b/generated_api_shadow/envoy/extensions/upstreams/http/tcp/v3/tcp_connection_pool.proto deleted file mode 100644 index 7c1d633432e9b..0000000000000 --- a/generated_api_shadow/envoy/extensions/upstreams/http/tcp/v3/tcp_connection_pool.proto +++ /dev/null @@ -1,17 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.upstreams.http.tcp.v3; - -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.upstreams.http.tcp.v3"; -option java_outer_classname = "TcpConnectionPoolProtoOuterClass"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Tcp Connection Pool] - -// A connection pool which forwards downstream HTTP as TCP to upstream, -// [#extension: envoy.upstreams.http.tcp] -message TcpConnectionPoolProto { -} diff --git a/generated_api_shadow/envoy/extensions/upstreams/http/v3/BUILD b/generated_api_shadow/envoy/extensions/upstreams/http/v3/BUILD deleted file mode 100644 index 1c1a6f6b44235..0000000000000 --- a/generated_api_shadow/envoy/extensions/upstreams/http/v3/BUILD +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/core/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/upstreams/http/v3/http_protocol_options.proto b/generated_api_shadow/envoy/extensions/upstreams/http/v3/http_protocol_options.proto deleted file mode 100644 index 271dcfbe49cec..0000000000000 --- a/generated_api_shadow/envoy/extensions/upstreams/http/v3/http_protocol_options.proto +++ /dev/null @@ -1,151 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.upstreams.http.v3; - -import "envoy/config/core/v3/protocol.proto"; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.upstreams.http.v3"; -option java_outer_classname = "HttpProtocolOptionsProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: HTTP Protocol Options] -// [#extension: envoy.upstreams.http.http_protocol_options] - -// HttpProtocolOptions specifies Http upstream protocol options. This object -// is used in -// :ref:`typed_extension_protocol_options`, -// keyed by the name `envoy.extensions.upstreams.http.v3.HttpProtocolOptions`. -// -// This controls what protocol(s) should be used for upstream and how said protocol(s) are configured. -// -// This replaces the prior pattern of explicit protocol configuration directly -// in the cluster. So a configuration like this, explicitly configuring the use of HTTP/2 upstream: -// -// .. code:: -// -// clusters: -// - name: some_service -// connect_timeout: 5s -// upstream_http_protocol_options: -// auto_sni: true -// common_http_protocol_options: -// idle_timeout: 1s -// http2_protocol_options: -// max_concurrent_streams: 100 -// .... [further cluster config] -// -// Would now look like this: -// -// .. 
code:: -// -// clusters: -// - name: some_service -// connect_timeout: 5s -// typed_extension_protocol_options: -// envoy.extensions.upstreams.http.v3.HttpProtocolOptions: -// "@type": type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions -// upstream_http_protocol_options: -// auto_sni: true -// common_http_protocol_options: -// idle_timeout: 1s -// explicit_http_config: -// http2_protocol_options: -// max_concurrent_streams: 100 -// .... [further cluster config] -// [#next-free-field: 6] -message HttpProtocolOptions { - // If this is used, the cluster will only operate on one of the possible upstream protocols. - // Note that HTTP/2 or above should generally be used for upstream gRPC clusters. - message ExplicitHttpConfig { - oneof protocol_config { - option (validate.required) = true; - - config.core.v3.Http1ProtocolOptions http_protocol_options = 1; - - config.core.v3.Http2ProtocolOptions http2_protocol_options = 2; - - // .. warning:: - // QUIC support is currently alpha and should be used with caution. Please - // see :ref:`here ` for details. - config.core.v3.Http3ProtocolOptions http3_protocol_options = 3; - } - } - - // If this is used, the cluster can use either of the configured protocols, and - // will use whichever protocol was used by the downstream connection. - // - // If HTTP/3 is configured for downstream and not configured for upstream, - // HTTP/3 requests will fail over to HTTP/2. - message UseDownstreamHttpConfig { - config.core.v3.Http1ProtocolOptions http_protocol_options = 1; - - config.core.v3.Http2ProtocolOptions http2_protocol_options = 2; - - // .. warning:: - // QUIC support is currently alpha and should be used with caution. Please - // see :ref:`here ` for details. - config.core.v3.Http3ProtocolOptions http3_protocol_options = 3; - } - - // If this is used, the cluster can use either HTTP/1 or HTTP/2, and will use whichever - // protocol is negotiated by ALPN with the upstream. 
- // Clusters configured with *AutoHttpConfig* will use the highest available - // protocol; HTTP/2 if supported, otherwise HTTP/1. - // If the upstream does not support ALPN, *AutoHttpConfig* will fail over to HTTP/1. - // This can only be used with transport sockets which support ALPN. Using a - // transport socket which does not support ALPN will result in configuration - // failure. The transport layer may be configured with custom ALPN, but the default ALPN - // for the cluster (or if custom ALPN fails) will be "h2,http/1.1". - message AutoHttpConfig { - config.core.v3.Http1ProtocolOptions http_protocol_options = 1; - - config.core.v3.Http2ProtocolOptions http2_protocol_options = 2; - - // Unlike HTTP/1 and HTTP/2, HTTP/3 will not be configured unless it is - // present, and (soon) only if there is an indication of server side - // support. - // See :ref:`here ` for more information on - // when HTTP/3 will be used, and when Envoy will fail over to TCP. - // - // .. warning:: - // QUIC support is currently alpha and should be used with caution. Please - // see :ref:`here ` for details. - // AutoHttpConfig config is undergoing especially rapid change and as it - // is alpha is not guaranteed to be API-stable. - config.core.v3.Http3ProtocolOptions http3_protocol_options = 3; - - // [#not-implemented-hide:] - // The presence of alternate protocols cache options causes the use of the - // alternate protocols cache, which is responsible for parsing and caching - // HTTP Alt-Svc headers. This enables the use of HTTP/3 for origins that - // advertise supporting it. - // TODO(RyanTheOptimist): Make this field required when HTTP/3 is enabled. - config.core.v3.AlternateProtocolsCacheOptions alternate_protocols_cache_options = 4; - } - - // This contains options common across HTTP/1 and HTTP/2 - config.core.v3.HttpProtocolOptions common_http_protocol_options = 1; - - // This contains common protocol options which are only applied upstream. 
- config.core.v3.UpstreamHttpProtocolOptions upstream_http_protocol_options = 2; - - // This controls the actual protocol to be used upstream. - oneof upstream_protocol_options { - option (validate.required) = true; - - // To explicitly configure either HTTP/1 or HTTP/2 (but not both!) use *explicit_http_config*. - // If the *explicit_http_config* is empty, HTTP/1.1 is used. - ExplicitHttpConfig explicit_http_config = 3; - - // This allows switching on protocol based on what protocol the downstream - // connection used. - UseDownstreamHttpConfig use_downstream_protocol_config = 4; - - // This allows switching on protocol based on ALPN - AutoHttpConfig auto_config = 5; - } -} diff --git a/generated_api_shadow/envoy/extensions/upstreams/tcp/generic/v3/BUILD b/generated_api_shadow/envoy/extensions/upstreams/tcp/generic/v3/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/extensions/upstreams/tcp/generic/v3/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/extensions/upstreams/tcp/generic/v3/generic_connection_pool.proto b/generated_api_shadow/envoy/extensions/upstreams/tcp/generic/v3/generic_connection_pool.proto deleted file mode 100644 index 5754491b91d19..0000000000000 --- a/generated_api_shadow/envoy/extensions/upstreams/tcp/generic/v3/generic_connection_pool.proto +++ /dev/null @@ -1,18 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.upstreams.tcp.generic.v3; - -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.upstreams.tcp.generic.v3"; -option java_outer_classname = "GenericConnectionPoolProtoOuterClass"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Generic Connection Pool] - -// A connection pool which forwards downstream TCP as TCP or HTTP to upstream, -// based on CONNECT configuration. -// [#extension: envoy.upstreams.tcp.generic] -message GenericConnectionPoolProto { -} diff --git a/generated_api_shadow/envoy/extensions/wasm/v3/BUILD b/generated_api_shadow/envoy/extensions/wasm/v3/BUILD deleted file mode 100644 index 1c1a6f6b44235..0000000000000 --- a/generated_api_shadow/envoy/extensions/wasm/v3/BUILD +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/config/core/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/extensions/wasm/v3/wasm.proto b/generated_api_shadow/envoy/extensions/wasm/v3/wasm.proto deleted file mode 100644 index b4566c826ed08..0000000000000 --- a/generated_api_shadow/envoy/extensions/wasm/v3/wasm.proto +++ /dev/null @@ -1,165 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.wasm.v3; - -import "envoy/config/core/v3/base.proto"; - -import "google/protobuf/any.proto"; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.wasm.v3"; -option java_outer_classname = "WasmProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Wasm] -// [#extension: envoy.bootstrap.wasm] - -// Configuration for restricting Proxy-Wasm capabilities available to modules. -message CapabilityRestrictionConfig { - // The Proxy-Wasm capabilities which will be allowed. Capabilities are mapped by - // name. The *SanitizationConfig* which each capability maps to is currently unimplemented and ignored, - // and so should be left empty. - // - // The capability names are given in the - // `Proxy-Wasm ABI `_. - // Additionally, the following WASI capabilities from - // `this list `_ - // are implemented and can be allowed: - // *fd_write*, *fd_read*, *fd_seek*, *fd_close*, *fd_fdstat_get*, *environ_get*, *environ_sizes_get*, - // *args_get*, *args_sizes_get*, *proc_exit*, *clock_time_get*, *random_get*. - map allowed_capabilities = 1; -} - -// Configuration for sanitization of inputs to an allowed capability. -// -// NOTE: This is currently unimplemented. -message SanitizationConfig { -} - -// Configuration for a Wasm VM. 
-// [#next-free-field: 8] -message VmConfig { - // An ID which will be used along with a hash of the wasm code (or the name of the registered Null - // VM plugin) to determine which VM will be used for the plugin. All plugins which use the same - // *vm_id* and code will use the same VM. May be left blank. Sharing a VM between plugins can - // reduce memory utilization and make sharing of data easier which may have security implications. - // [#comment: TODO: add ref for details.] - string vm_id = 1; - - // The Wasm runtime type. - // Available Wasm runtime types are registered as extensions. The following runtimes are included - // in Envoy code base: - // - // .. _extension_envoy.wasm.runtime.null: - // - // **envoy.wasm.runtime.null**: Null sandbox, the Wasm module must be compiled and linked into the - // Envoy binary. The registered name is given in the *code* field as *inline_string*. - // - // .. _extension_envoy.wasm.runtime.v8: - // - // **envoy.wasm.runtime.v8**: `V8 `_-based WebAssembly runtime. - // - // .. _extension_envoy.wasm.runtime.wamr: - // - // **envoy.wasm.runtime.wamr**: `WAMR `_-based WebAssembly runtime. - // This runtime is not enabled in the official build. - // - // .. _extension_envoy.wasm.runtime.wavm: - // - // **envoy.wasm.runtime.wavm**: `WAVM `_-based WebAssembly runtime. - // This runtime is not enabled in the official build. - // - // .. _extension_envoy.wasm.runtime.wasmtime: - // - // **envoy.wasm.runtime.wasmtime**: `Wasmtime `_-based WebAssembly runtime. - // This runtime is not enabled in the official build. - // - // [#extension-category: envoy.wasm.runtime] - string runtime = 2 [(validate.rules).string = {min_len: 1}]; - - // The Wasm code that Envoy will execute. - config.core.v3.AsyncDataSource code = 3; - - // The Wasm configuration used in initialization of a new VM - // (proxy_on_start). `google.protobuf.Struct` is serialized as JSON before - // passing it to the plugin. 
`google.protobuf.BytesValue` and - // `google.protobuf.StringValue` are passed directly without the wrapper. - google.protobuf.Any configuration = 4; - - // Allow the wasm file to include pre-compiled code on VMs which support it. - // Warning: this should only be enable for trusted sources as the precompiled code is not - // verified. - bool allow_precompiled = 5; - - // If true and the code needs to be remotely fetched and it is not in the cache then NACK the configuration - // update and do a background fetch to fill the cache, otherwise fetch the code asynchronously and enter - // warming state. - bool nack_on_code_cache_miss = 6; - - // Specifies environment variables to be injected to this VM which will be available through - // WASI's ``environ_get`` and ``environ_get_sizes`` system calls. Note that these functions are mostly implicitly - // called in your language's standard library, so you do not need to call them directly and you can access to env - // vars just like when you do on native platforms. - // Warning: Envoy rejects the configuration if there's conflict of key space. - EnvironmentVariables environment_variables = 7; -} - -message EnvironmentVariables { - // The keys of *Envoy's* environment variables exposed to this VM. In other words, if a key exists in Envoy's environment - // variables, then that key-value pair will be injected. Note that if a key does not exist, it will be ignored. - repeated string host_env_keys = 1; - - // Explicitly given key-value pairs to be injected to this VM in the form of "KEY=VALUE". - map key_values = 2; -} - -// Base Configuration for Wasm Plugins e.g. filters and services. -// [#next-free-field: 7] -message PluginConfig { - // A unique name for a filters/services in a VM for use in identifying the filter/service if - // multiple filters/services are handled by the same *vm_id* and *root_id* and for - // logging/debugging. 
- string name = 1; - - // A unique ID for a set of filters/services in a VM which will share a RootContext and Contexts - // if applicable (e.g. an Wasm HttpFilter and an Wasm AccessLog). If left blank, all - // filters/services with a blank root_id with the same *vm_id* will share Context(s). - string root_id = 2; - - // Configuration for finding or starting VM. - oneof vm { - VmConfig vm_config = 3; - // TODO: add referential VM configurations. - } - - // Filter/service configuration used to configure or reconfigure a plugin - // (proxy_on_configuration). - // `google.protobuf.Struct` is serialized as JSON before - // passing it to the plugin. `google.protobuf.BytesValue` and - // `google.protobuf.StringValue` are passed directly without the wrapper. - google.protobuf.Any configuration = 4; - - // If there is a fatal error on the VM (e.g. exception, abort(), on_start or on_configure return false), - // then all plugins associated with the VM will either fail closed (by default), e.g. by returning an HTTP 503 error, - // or fail open (if 'fail_open' is set to true) by bypassing the filter. Note: when on_start or on_configure return false - // during xDS updates the xDS configuration will be rejected and when on_start or on_configuration return false on initial - // startup the proxy will not start. - bool fail_open = 5; - - // Configuration for restricting Proxy-Wasm capabilities available to modules. - CapabilityRestrictionConfig capability_restriction_config = 6; -} - -// WasmService is configured as a built-in *envoy.wasm_service* :ref:`WasmService -// ` This opaque configuration will be used to create a Wasm Service. -message WasmService { - // General plugin configuration. - PluginConfig config = 1; - - // If true, create a single VM rather than creating one VM per worker. Such a singleton can - // not be used with filters. 
- bool singleton = 2; -} diff --git a/generated_api_shadow/envoy/extensions/watchdog/profile_action/v3alpha/BUILD b/generated_api_shadow/envoy/extensions/watchdog/profile_action/v3alpha/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/extensions/watchdog/profile_action/v3alpha/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/extensions/watchdog/profile_action/v3alpha/profile_action.proto b/generated_api_shadow/envoy/extensions/watchdog/profile_action/v3alpha/profile_action.proto deleted file mode 100644 index d73f0b5dfb9c5..0000000000000 --- a/generated_api_shadow/envoy/extensions/watchdog/profile_action/v3alpha/profile_action.proto +++ /dev/null @@ -1,31 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.watchdog.profile_action.v3alpha; - -import "google/protobuf/duration.proto"; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.watchdog.profile_action.v3alpha"; -option java_outer_classname = "ProfileActionProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).work_in_progress = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Watchdog Action that does CPU profiling.] -// [#extension: envoy.watchdog.profile_action] - -// Configuration for the profile watchdog action. -message ProfileActionConfig { - // How long the profile should last. If not set defaults to 5 seconds. - google.protobuf.Duration profile_duration = 1; - - // File path to the directory to output profiles. 
- string profile_path = 2 [(validate.rules).string = {min_len: 1}]; - - // Limits the max number of profiles that can be generated by this action - // over its lifetime to avoid filling the disk. - // If not set (i.e. it's 0), a default of 10 will be used. - uint64 max_profiles = 3; -} diff --git a/generated_api_shadow/envoy/service/README.md b/generated_api_shadow/envoy/service/README.md deleted file mode 100644 index 831b740a0ea80..0000000000000 --- a/generated_api_shadow/envoy/service/README.md +++ /dev/null @@ -1,3 +0,0 @@ -Protocol buffer definitions for gRPC and REST services. - -Visibility should be constrained to none (default). diff --git a/generated_api_shadow/envoy/service/accesslog/v2/BUILD b/generated_api_shadow/envoy/service/accesslog/v2/BUILD deleted file mode 100644 index 1253698c39d51..0000000000000 --- a/generated_api_shadow/envoy/service/accesslog/v2/BUILD +++ /dev/null @@ -1,14 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - has_services = True, - deps = [ - "//envoy/api/v2/core:pkg", - "//envoy/data/accesslog/v2:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/service/accesslog/v2/als.proto b/generated_api_shadow/envoy/service/accesslog/v2/als.proto deleted file mode 100644 index bbd871ff83a4a..0000000000000 --- a/generated_api_shadow/envoy/service/accesslog/v2/als.proto +++ /dev/null @@ -1,72 +0,0 @@ -syntax = "proto3"; - -package envoy.service.accesslog.v2; - -import "envoy/api/v2/core/base.proto"; -import "envoy/data/accesslog/v2/accesslog.proto"; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.service.accesslog.v2"; -option java_outer_classname = "AlsProto"; -option java_multiple_files = true; -option java_generic_services = true; -option 
(udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: gRPC Access Log Service (ALS)] - -// Service for streaming access logs from Envoy to an access log server. -service AccessLogService { - // Envoy will connect and send StreamAccessLogsMessage messages forever. It does not expect any - // response to be sent as nothing would be done in the case of failure. The server should - // disconnect if it expects Envoy to reconnect. In the future we may decide to add a different - // API for "critical" access logs in which Envoy will buffer access logs for some period of time - // until it gets an ACK so it could then retry. This API is designed for high throughput with the - // expectation that it might be lossy. - rpc StreamAccessLogs(stream StreamAccessLogsMessage) returns (StreamAccessLogsResponse) { - } -} - -// Empty response for the StreamAccessLogs API. Will never be sent. See below. -message StreamAccessLogsResponse { -} - -// Stream message for the StreamAccessLogs API. Envoy will open a stream to the server and stream -// access logs without ever expecting a response. -message StreamAccessLogsMessage { - message Identifier { - // The node sending the access log messages over the stream. - api.v2.core.Node node = 1 [(validate.rules).message = {required: true}]; - - // The friendly name of the log configured in :ref:`CommonGrpcAccessLogConfig - // `. - string log_name = 2 [(validate.rules).string = {min_bytes: 1}]; - } - - // Wrapper for batches of HTTP access log entries. - message HTTPAccessLogEntries { - repeated data.accesslog.v2.HTTPAccessLogEntry log_entry = 1 - [(validate.rules).repeated = {min_items: 1}]; - } - - // Wrapper for batches of TCP access log entries. - message TCPAccessLogEntries { - repeated data.accesslog.v2.TCPAccessLogEntry log_entry = 1 - [(validate.rules).repeated = {min_items: 1}]; - } - - // Identifier data that will only be sent in the first message on the stream. 
This is effectively - // structured metadata and is a performance optimization. - Identifier identifier = 1; - - // Batches of log entries of a single type. Generally speaking, a given stream should only - // ever include one type of log entry. - oneof log_entries { - option (validate.required) = true; - - HTTPAccessLogEntries http_logs = 2; - - TCPAccessLogEntries tcp_logs = 3; - } -} diff --git a/generated_api_shadow/envoy/service/accesslog/v3/BUILD b/generated_api_shadow/envoy/service/accesslog/v3/BUILD deleted file mode 100644 index d44839fbe0952..0000000000000 --- a/generated_api_shadow/envoy/service/accesslog/v3/BUILD +++ /dev/null @@ -1,15 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - has_services = True, - deps = [ - "//envoy/config/core/v3:pkg", - "//envoy/data/accesslog/v3:pkg", - "//envoy/service/accesslog/v2:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/service/accesslog/v3/als.proto b/generated_api_shadow/envoy/service/accesslog/v3/als.proto deleted file mode 100644 index 94a290ad4a325..0000000000000 --- a/generated_api_shadow/envoy/service/accesslog/v3/als.proto +++ /dev/null @@ -1,87 +0,0 @@ -syntax = "proto3"; - -package envoy.service.accesslog.v3; - -import "envoy/config/core/v3/base.proto"; -import "envoy/data/accesslog/v3/accesslog.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.service.accesslog.v3"; -option java_outer_classname = "AlsProto"; -option java_multiple_files = true; -option java_generic_services = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: gRPC Access Log Service (ALS)] - -// Service for streaming access logs from Envoy to an 
access log server. -service AccessLogService { - // Envoy will connect and send StreamAccessLogsMessage messages forever. It does not expect any - // response to be sent as nothing would be done in the case of failure. The server should - // disconnect if it expects Envoy to reconnect. In the future we may decide to add a different - // API for "critical" access logs in which Envoy will buffer access logs for some period of time - // until it gets an ACK so it could then retry. This API is designed for high throughput with the - // expectation that it might be lossy. - rpc StreamAccessLogs(stream StreamAccessLogsMessage) returns (StreamAccessLogsResponse) { - } -} - -// Empty response for the StreamAccessLogs API. Will never be sent. See below. -message StreamAccessLogsResponse { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.accesslog.v2.StreamAccessLogsResponse"; -} - -// Stream message for the StreamAccessLogs API. Envoy will open a stream to the server and stream -// access logs without ever expecting a response. -message StreamAccessLogsMessage { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.accesslog.v2.StreamAccessLogsMessage"; - - message Identifier { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.accesslog.v2.StreamAccessLogsMessage.Identifier"; - - // The node sending the access log messages over the stream. - config.core.v3.Node node = 1 [(validate.rules).message = {required: true}]; - - // The friendly name of the log configured in :ref:`CommonGrpcAccessLogConfig - // `. - string log_name = 2 [(validate.rules).string = {min_len: 1}]; - } - - // Wrapper for batches of HTTP access log entries. 
- message HTTPAccessLogEntries { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.accesslog.v2.StreamAccessLogsMessage.HTTPAccessLogEntries"; - - repeated data.accesslog.v3.HTTPAccessLogEntry log_entry = 1 - [(validate.rules).repeated = {min_items: 1}]; - } - - // Wrapper for batches of TCP access log entries. - message TCPAccessLogEntries { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.accesslog.v2.StreamAccessLogsMessage.TCPAccessLogEntries"; - - repeated data.accesslog.v3.TCPAccessLogEntry log_entry = 1 - [(validate.rules).repeated = {min_items: 1}]; - } - - // Identifier data that will only be sent in the first message on the stream. This is effectively - // structured metadata and is a performance optimization. - Identifier identifier = 1; - - // Batches of log entries of a single type. Generally speaking, a given stream should only - // ever include one type of log entry. - oneof log_entries { - option (validate.required) = true; - - HTTPAccessLogEntries http_logs = 2; - - TCPAccessLogEntries tcp_logs = 3; - } -} diff --git a/generated_api_shadow/envoy/service/auth/v2/BUILD b/generated_api_shadow/envoy/service/auth/v2/BUILD deleted file mode 100644 index fa00ca5127dea..0000000000000 --- a/generated_api_shadow/envoy/service/auth/v2/BUILD +++ /dev/null @@ -1,14 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - has_services = True, - deps = [ - "//envoy/api/v2/core:pkg", - "//envoy/type:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/service/auth/v2/attribute_context.proto b/generated_api_shadow/envoy/service/auth/v2/attribute_context.proto deleted file mode 100644 index 8e0170067d24e..0000000000000 --- a/generated_api_shadow/envoy/service/auth/v2/attribute_context.proto +++ /dev/null @@ -1,160 +0,0 @@ -syntax = "proto3"; - -package envoy.service.auth.v2; - -import "envoy/api/v2/core/address.proto"; -import "envoy/api/v2/core/base.proto"; - -import "google/protobuf/timestamp.proto"; - -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.service.auth.v2"; -option java_outer_classname = "AttributeContextProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Attribute Context ] - -// See :ref:`network filter configuration overview ` -// and :ref:`HTTP filter configuration overview `. - -// An attribute is a piece of metadata that describes an activity on a network. -// For example, the size of an HTTP request, or the status code of an HTTP response. -// -// Each attribute has a type and a name, which is logically defined as a proto message field -// of the `AttributeContext`. The `AttributeContext` is a collection of individual attributes -// supported by Envoy authorization system. 
-// [#comment: The following items are left out of this proto -// Request.Auth field for jwt tokens -// Request.Api for api management -// Origin peer that originated the request -// Caching Protocol -// request_context return values to inject back into the filter chain -// peer.claims -- from X.509 extensions -// Configuration -// - field mask to send -// - which return values from request_context are copied back -// - which return values are copied into request_headers] -// [#next-free-field: 12] -message AttributeContext { - // This message defines attributes for a node that handles a network request. - // The node can be either a service or an application that sends, forwards, - // or receives the request. Service peers should fill in the `service`, - // `principal`, and `labels` as appropriate. - // [#next-free-field: 6] - message Peer { - // The address of the peer, this is typically the IP address. - // It can also be UDS path, or others. - api.v2.core.Address address = 1; - - // The canonical service name of the peer. - // It should be set to :ref:`the HTTP x-envoy-downstream-service-cluster - // ` - // If a more trusted source of the service name is available through mTLS/secure naming, it - // should be used. - string service = 2; - - // The labels associated with the peer. - // These could be pod labels for Kubernetes or tags for VMs. - // The source of the labels could be an X.509 certificate or other configuration. - map labels = 3; - - // The authenticated identity of this peer. - // For example, the identity associated with the workload such as a service account. - // If an X.509 certificate is used to assert the identity this field should be sourced from - // `URI Subject Alternative Names`, `DNS Subject Alternate Names` or `Subject` in that order. - // The primary identity should be the principal. The principal format is issuer specific. 
- // - // Example: - // * SPIFFE format is `spiffe://trust-domain/path` - // * Google account format is `https://accounts.google.com/{userid}` - string principal = 4; - - // The X.509 certificate used to authenticate the identify of this peer. - // When present, the certificate contents are encoded in URL and PEM format. - string certificate = 5; - } - - // Represents a network request, such as an HTTP request. - message Request { - // The timestamp when the proxy receives the first byte of the request. - google.protobuf.Timestamp time = 1; - - // Represents an HTTP request or an HTTP-like request. - HttpRequest http = 2; - } - - // This message defines attributes for an HTTP request. - // HTTP/1.x, HTTP/2, gRPC are all considered as HTTP requests. - // [#next-free-field: 12] - message HttpRequest { - // The unique ID for a request, which can be propagated to downstream - // systems. The ID should have low probability of collision - // within a single day for a specific service. - // For HTTP requests, it should be X-Request-ID or equivalent. - string id = 1; - - // The HTTP request method, such as `GET`, `POST`. - string method = 2; - - // The HTTP request headers. If multiple headers share the same key, they - // must be merged according to the HTTP spec. All header keys must be - // lower-cased, because HTTP header keys are case-insensitive. - map headers = 3; - - // The request target, as it appears in the first line of the HTTP request. This includes - // the URL path and query-string. No decoding is performed. - string path = 4; - - // The HTTP request `Host` or 'Authority` header value. - string host = 5; - - // The HTTP URL scheme, such as `http` and `https`. This is set for HTTP/2 - // requests only. For HTTP/1.1, use "x-forwarded-for" header value to lookup - // the scheme of the request. - string scheme = 6; - - // This field is always empty, and exists for compatibility reasons. The HTTP URL query is - // included in `path` field. 
- string query = 7; - - // This field is always empty, and exists for compatibility reasons. The URL fragment is - // not submitted as part of HTTP requests; it is unknowable. - string fragment = 8; - - // The HTTP request size in bytes. If unknown, it must be -1. - int64 size = 9; - - // The network protocol used with the request, such as "HTTP/1.0", "HTTP/1.1", or "HTTP/2". - // - // See :repo:`headers.h:ProtocolStrings ` for a list of all - // possible values. - string protocol = 10; - - // The HTTP request body. - string body = 11; - } - - // The source of a network activity, such as starting a TCP connection. - // In a multi hop network activity, the source represents the sender of the - // last hop. - Peer source = 1; - - // The destination of a network activity, such as accepting a TCP connection. - // In a multi hop network activity, the destination represents the receiver of - // the last hop. - Peer destination = 2; - - // Represents a network request, such as an HTTP request. - Request request = 4; - - // This is analogous to http_request.headers, however these contents will not be sent to the - // upstream server. Context_extensions provide an extension mechanism for sending additional - // information to the auth server without modifying the proto definition. It maps to the - // internal opaque context in the filter chain. - map context_extensions = 10; - - // Dynamic metadata associated with the request. 
- api.v2.core.Metadata metadata_context = 11; -} diff --git a/generated_api_shadow/envoy/service/auth/v2/external_auth.proto b/generated_api_shadow/envoy/service/auth/v2/external_auth.proto deleted file mode 100644 index 7dbfd35569681..0000000000000 --- a/generated_api_shadow/envoy/service/auth/v2/external_auth.proto +++ /dev/null @@ -1,82 +0,0 @@ -syntax = "proto3"; - -package envoy.service.auth.v2; - -import "envoy/api/v2/core/base.proto"; -import "envoy/service/auth/v2/attribute_context.proto"; -import "envoy/type/http_status.proto"; - -import "google/rpc/status.proto"; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.service.auth.v2"; -option java_outer_classname = "ExternalAuthProto"; -option java_multiple_files = true; -option java_generic_services = true; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Authorization Service ] - -// The authorization service request messages used by external authorization :ref:`network filter -// ` and :ref:`HTTP filter `. - -// A generic interface for performing authorization check on incoming -// requests to a networked service. -service Authorization { - // Performs authorization check based on the attributes associated with the - // incoming request, and returns status `OK` or not `OK`. - rpc Check(CheckRequest) returns (CheckResponse) { - } -} - -message CheckRequest { - // The request attributes. - AttributeContext attributes = 1; -} - -// HTTP attributes for a denied response. -message DeniedHttpResponse { - // This field allows the authorization service to send a HTTP response status - // code to the downstream client other than 403 (Forbidden). - type.HttpStatus status = 1 [(validate.rules).message = {required: true}]; - - // This field allows the authorization service to send HTTP response headers - // to the downstream client. 
Note that the `append` field in `HeaderValueOption` defaults to - // false when used in this message. - repeated api.v2.core.HeaderValueOption headers = 2; - - // This field allows the authorization service to send a response body data - // to the downstream client. - string body = 3; -} - -// HTTP attributes for an ok response. -message OkHttpResponse { - // HTTP entity headers in addition to the original request headers. This allows the authorization - // service to append, to add or to override headers from the original request before - // dispatching it to the upstream. Note that the `append` field in `HeaderValueOption` defaults to - // false when used in this message. By setting the `append` field to `true`, - // the filter will append the correspondent header value to the matched request header. - // By leaving `append` as false, the filter will either add a new header, or override an existing - // one if there is a match. - repeated api.v2.core.HeaderValueOption headers = 2; -} - -// Intended for gRPC and Network Authorization servers `only`. -message CheckResponse { - // Status `OK` allows the request. Any other status indicates the request should be denied. - google.rpc.Status status = 1; - - // An message that contains HTTP response attributes. This message is - // used when the authorization service needs to send custom responses to the - // downstream client or, to modify/add request headers being dispatched to the upstream. - oneof http_response { - // Supplies http attributes for a denied response. - DeniedHttpResponse denied_response = 2; - - // Supplies http attributes for an ok response. 
- OkHttpResponse ok_response = 3; - } -} diff --git a/generated_api_shadow/envoy/service/auth/v2alpha/BUILD b/generated_api_shadow/envoy/service/auth/v2alpha/BUILD deleted file mode 100644 index e72b2a63b2e17..0000000000000 --- a/generated_api_shadow/envoy/service/auth/v2alpha/BUILD +++ /dev/null @@ -1,10 +0,0 @@ -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -api_proto_package( - has_services = True, - deps = ["//envoy/service/auth/v2:pkg"], -) diff --git a/generated_api_shadow/envoy/service/auth/v2alpha/external_auth.proto b/generated_api_shadow/envoy/service/auth/v2alpha/external_auth.proto deleted file mode 100644 index 85e9c12c6afb4..0000000000000 --- a/generated_api_shadow/envoy/service/auth/v2alpha/external_auth.proto +++ /dev/null @@ -1,23 +0,0 @@ -syntax = "proto3"; - -package envoy.service.auth.v2alpha; - -option java_multiple_files = true; -option java_generic_services = true; -option java_outer_classname = "CertsProto"; -option java_package = "io.envoyproxy.envoy.service.auth.v2alpha"; - -import "envoy/service/auth/v2/external_auth.proto"; - -// [#protodoc-title: Authorization Service ] - -// The authorization service request messages used by external authorization :ref:`network filter -// ` and :ref:`HTTP filter `. - -// A generic interface for performing authorization check on incoming -// requests to a networked service. -service Authorization { - // Performs authorization check based on the attributes associated with the - // incoming request, and returns status `OK` or not `OK`. - rpc Check(v2.CheckRequest) returns (v2.CheckResponse); -} diff --git a/generated_api_shadow/envoy/service/auth/v3/BUILD b/generated_api_shadow/envoy/service/auth/v3/BUILD deleted file mode 100644 index 0774dda23e421..0000000000000 --- a/generated_api_shadow/envoy/service/auth/v3/BUILD +++ /dev/null @@ -1,16 +0,0 @@ -# DO NOT EDIT. 
This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - has_services = True, - deps = [ - "//envoy/annotations:pkg", - "//envoy/config/core/v3:pkg", - "//envoy/service/auth/v2:pkg", - "//envoy/type/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/service/auth/v3/attribute_context.proto b/generated_api_shadow/envoy/service/auth/v3/attribute_context.proto deleted file mode 100644 index 452a1e1ad9a5f..0000000000000 --- a/generated_api_shadow/envoy/service/auth/v3/attribute_context.proto +++ /dev/null @@ -1,177 +0,0 @@ -syntax = "proto3"; - -package envoy.service.auth.v3; - -import "envoy/config/core/v3/address.proto"; -import "envoy/config/core/v3/base.proto"; - -import "google/protobuf/timestamp.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; - -option java_package = "io.envoyproxy.envoy.service.auth.v3"; -option java_outer_classname = "AttributeContextProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Attribute Context ] - -// See :ref:`network filter configuration overview ` -// and :ref:`HTTP filter configuration overview `. - -// An attribute is a piece of metadata that describes an activity on a network. -// For example, the size of an HTTP request, or the status code of an HTTP response. -// -// Each attribute has a type and a name, which is logically defined as a proto message field -// of the `AttributeContext`. The `AttributeContext` is a collection of individual attributes -// supported by Envoy authorization system. 
-// [#comment: The following items are left out of this proto -// Request.Auth field for jwt tokens -// Request.Api for api management -// Origin peer that originated the request -// Caching Protocol -// request_context return values to inject back into the filter chain -// peer.claims -- from X.509 extensions -// Configuration -// - field mask to send -// - which return values from request_context are copied back -// - which return values are copied into request_headers] -// [#next-free-field: 12] -message AttributeContext { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.auth.v2.AttributeContext"; - - // This message defines attributes for a node that handles a network request. - // The node can be either a service or an application that sends, forwards, - // or receives the request. Service peers should fill in the `service`, - // `principal`, and `labels` as appropriate. - // [#next-free-field: 6] - message Peer { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.auth.v2.AttributeContext.Peer"; - - // The address of the peer, this is typically the IP address. - // It can also be UDS path, or others. - config.core.v3.Address address = 1; - - // The canonical service name of the peer. - // It should be set to :ref:`the HTTP x-envoy-downstream-service-cluster - // ` - // If a more trusted source of the service name is available through mTLS/secure naming, it - // should be used. - string service = 2; - - // The labels associated with the peer. - // These could be pod labels for Kubernetes or tags for VMs. - // The source of the labels could be an X.509 certificate or other configuration. - map labels = 3; - - // The authenticated identity of this peer. - // For example, the identity associated with the workload such as a service account. 
- // If an X.509 certificate is used to assert the identity this field should be sourced from - // `URI Subject Alternative Names`, `DNS Subject Alternate Names` or `Subject` in that order. - // The primary identity should be the principal. The principal format is issuer specific. - // - // Example: - // * SPIFFE format is `spiffe://trust-domain/path` - // * Google account format is `https://accounts.google.com/{userid}` - string principal = 4; - - // The X.509 certificate used to authenticate the identify of this peer. - // When present, the certificate contents are encoded in URL and PEM format. - string certificate = 5; - } - - // Represents a network request, such as an HTTP request. - message Request { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.auth.v2.AttributeContext.Request"; - - // The timestamp when the proxy receives the first byte of the request. - google.protobuf.Timestamp time = 1; - - // Represents an HTTP request or an HTTP-like request. - HttpRequest http = 2; - } - - // This message defines attributes for an HTTP request. - // HTTP/1.x, HTTP/2, gRPC are all considered as HTTP requests. - // [#next-free-field: 13] - message HttpRequest { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.auth.v2.AttributeContext.HttpRequest"; - - // The unique ID for a request, which can be propagated to downstream - // systems. The ID should have low probability of collision - // within a single day for a specific service. - // For HTTP requests, it should be X-Request-ID or equivalent. - string id = 1; - - // The HTTP request method, such as `GET`, `POST`. - string method = 2; - - // The HTTP request headers. If multiple headers share the same key, they - // must be merged according to the HTTP spec. All header keys must be - // lower-cased, because HTTP header keys are case-insensitive. - map headers = 3; - - // The request target, as it appears in the first line of the HTTP request. 
This includes - // the URL path and query-string. No decoding is performed. - string path = 4; - - // The HTTP request `Host` or 'Authority` header value. - string host = 5; - - // The HTTP URL scheme, such as `http` and `https`. - string scheme = 6; - - // This field is always empty, and exists for compatibility reasons. The HTTP URL query is - // included in `path` field. - string query = 7; - - // This field is always empty, and exists for compatibility reasons. The URL fragment is - // not submitted as part of HTTP requests; it is unknowable. - string fragment = 8; - - // The HTTP request size in bytes. If unknown, it must be -1. - int64 size = 9; - - // The network protocol used with the request, such as "HTTP/1.0", "HTTP/1.1", or "HTTP/2". - // - // See :repo:`headers.h:ProtocolStrings ` for a list of all - // possible values. - string protocol = 10; - - // The HTTP request body. - string body = 11; - - // The HTTP request body in bytes. This is used instead of - // :ref:`body ` when - // :ref:`pack_as_bytes ` - // is set to true. - bytes raw_body = 12; - } - - // The source of a network activity, such as starting a TCP connection. - // In a multi hop network activity, the source represents the sender of the - // last hop. - Peer source = 1; - - // The destination of a network activity, such as accepting a TCP connection. - // In a multi hop network activity, the destination represents the receiver of - // the last hop. - Peer destination = 2; - - // Represents a network request, such as an HTTP request. - Request request = 4; - - // This is analogous to http_request.headers, however these contents will not be sent to the - // upstream server. Context_extensions provide an extension mechanism for sending additional - // information to the auth server without modifying the proto definition. It maps to the - // internal opaque context in the filter chain. - map context_extensions = 10; - - // Dynamic metadata associated with the request. 
- config.core.v3.Metadata metadata_context = 11; -} diff --git a/generated_api_shadow/envoy/service/auth/v3/external_auth.proto b/generated_api_shadow/envoy/service/auth/v3/external_auth.proto deleted file mode 100644 index 31adbc161b881..0000000000000 --- a/generated_api_shadow/envoy/service/auth/v3/external_auth.proto +++ /dev/null @@ -1,136 +0,0 @@ -syntax = "proto3"; - -package envoy.service.auth.v3; - -import "envoy/config/core/v3/base.proto"; -import "envoy/service/auth/v3/attribute_context.proto"; -import "envoy/type/v3/http_status.proto"; - -import "google/protobuf/struct.proto"; -import "google/rpc/status.proto"; - -import "envoy/annotations/deprecation.proto"; -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.service.auth.v3"; -option java_outer_classname = "ExternalAuthProto"; -option java_multiple_files = true; -option java_generic_services = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Authorization Service ] - -// The authorization service request messages used by external authorization :ref:`network filter -// ` and :ref:`HTTP filter `. - -// A generic interface for performing authorization check on incoming -// requests to a networked service. -service Authorization { - // Performs authorization check based on the attributes associated with the - // incoming request, and returns status `OK` or not `OK`. - rpc Check(CheckRequest) returns (CheckResponse) { - } -} - -message CheckRequest { - option (udpa.annotations.versioning).previous_message_type = "envoy.service.auth.v2.CheckRequest"; - - // The request attributes. - AttributeContext attributes = 1; -} - -// HTTP attributes for a denied response. 
-message DeniedHttpResponse { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.auth.v2.DeniedHttpResponse"; - - // This field allows the authorization service to send an HTTP response status code to the - // downstream client. If not set, Envoy sends ``403 Forbidden`` HTTP status code by default. - type.v3.HttpStatus status = 1; - - // This field allows the authorization service to send HTTP response headers - // to the downstream client. Note that the :ref:`append field in HeaderValueOption ` defaults to - // false when used in this message. - repeated config.core.v3.HeaderValueOption headers = 2; - - // This field allows the authorization service to send a response body data - // to the downstream client. - string body = 3; -} - -// HTTP attributes for an OK response. -// [#next-free-field: 7] -message OkHttpResponse { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.auth.v2.OkHttpResponse"; - - // HTTP entity headers in addition to the original request headers. This allows the authorization - // service to append, to add or to override headers from the original request before - // dispatching it to the upstream. Note that the :ref:`append field in HeaderValueOption ` defaults to - // false when used in this message. By setting the `append` field to `true`, - // the filter will append the correspondent header value to the matched request header. - // By leaving `append` as false, the filter will either add a new header, or override an existing - // one if there is a match. - repeated config.core.v3.HeaderValueOption headers = 2; - - // HTTP entity headers to remove from the original request before dispatching - // it to the upstream. This allows the authorization service to act on auth - // related headers (like `Authorization`), process them, and consume them. 
- // Under this model, the upstream will either receive the request (if it's - // authorized) or not receive it (if it's not), but will not see headers - // containing authorization credentials. - // - // Pseudo headers (such as `:authority`, `:method`, `:path` etc), as well as - // the header `Host`, may not be removed as that would make the request - // malformed. If mentioned in `headers_to_remove` these special headers will - // be ignored. - // - // When using the HTTP service this must instead be set by the HTTP - // authorization service as a comma separated list like so: - // ``x-envoy-auth-headers-to-remove: one-auth-header, another-auth-header``. - repeated string headers_to_remove = 5; - - // This field has been deprecated in favor of :ref:`CheckResponse.dynamic_metadata - // `. Until it is removed, - // setting this field overrides :ref:`CheckResponse.dynamic_metadata - // `. - google.protobuf.Struct dynamic_metadata = 3 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - - // This field allows the authorization service to send HTTP response headers - // to the downstream client on success. Note that the :ref:`append field in HeaderValueOption ` - // defaults to false when used in this message. - repeated config.core.v3.HeaderValueOption response_headers_to_add = 6; -} - -// Intended for gRPC and Network Authorization servers `only`. -message CheckResponse { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.auth.v2.CheckResponse"; - - // Status `OK` allows the request. Any other status indicates the request should be denied, and - // for HTTP filter, if not overridden by :ref:`denied HTTP response status ` - // Envoy sends ``403 Forbidden`` HTTP status code by default. - google.rpc.Status status = 1; - - // An message that contains HTTP response attributes. 
This message is - // used when the authorization service needs to send custom responses to the - // downstream client or, to modify/add request headers being dispatched to the upstream. - oneof http_response { - // Supplies http attributes for a denied response. - DeniedHttpResponse denied_response = 2; - - // Supplies http attributes for an ok response. - OkHttpResponse ok_response = 3; - } - - // Optional response metadata that will be emitted as dynamic metadata to be consumed by the next - // filter. This metadata lives in a namespace specified by the canonical name of extension filter - // that requires it: - // - // - :ref:`envoy.filters.http.ext_authz ` for HTTP filter. - // - :ref:`envoy.filters.network.ext_authz ` for network filter. - google.protobuf.Struct dynamic_metadata = 4; -} diff --git a/generated_api_shadow/envoy/service/cluster/v3/BUILD b/generated_api_shadow/envoy/service/cluster/v3/BUILD deleted file mode 100644 index d3be4fae57fa4..0000000000000 --- a/generated_api_shadow/envoy/service/cluster/v3/BUILD +++ /dev/null @@ -1,15 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - has_services = True, - deps = [ - "//envoy/annotations:pkg", - "//envoy/api/v2:pkg", - "//envoy/service/discovery/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/service/cluster/v3/cds.proto b/generated_api_shadow/envoy/service/cluster/v3/cds.proto deleted file mode 100644 index 100ecad39a968..0000000000000 --- a/generated_api_shadow/envoy/service/cluster/v3/cds.proto +++ /dev/null @@ -1,43 +0,0 @@ -syntax = "proto3"; - -package envoy.service.cluster.v3; - -import "envoy/service/discovery/v3/discovery.proto"; - -import "google/api/annotations.proto"; - -import "envoy/annotations/resource.proto"; -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; - -option java_package = "io.envoyproxy.envoy.service.cluster.v3"; -option java_outer_classname = "CdsProto"; -option java_multiple_files = true; -option java_generic_services = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: CDS] - -// Return list of all clusters this proxy will load balance to. -service ClusterDiscoveryService { - option (envoy.annotations.resource).type = "envoy.config.cluster.v3.Cluster"; - - rpc StreamClusters(stream discovery.v3.DiscoveryRequest) - returns (stream discovery.v3.DiscoveryResponse) { - } - - rpc DeltaClusters(stream discovery.v3.DeltaDiscoveryRequest) - returns (stream discovery.v3.DeltaDiscoveryResponse) { - } - - rpc FetchClusters(discovery.v3.DiscoveryRequest) returns (discovery.v3.DiscoveryResponse) { - option (google.api.http).post = "/v3/discovery:clusters"; - option (google.api.http).body = "*"; - } -} - -// [#not-implemented-hide:] Not configuration. Workaround c++ protobuf issue with importing -// services: https://github.com/google/protobuf/issues/4221 and protoxform to upgrade the file. 
-message CdsDummy { - option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.CdsDummy"; -} diff --git a/generated_api_shadow/envoy/service/discovery/v2/BUILD b/generated_api_shadow/envoy/service/discovery/v2/BUILD deleted file mode 100644 index ec687f7534368..0000000000000 --- a/generated_api_shadow/envoy/service/discovery/v2/BUILD +++ /dev/null @@ -1,16 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - has_services = True, - deps = [ - "//envoy/annotations:pkg", - "//envoy/api/v2:pkg", - "//envoy/api/v2/core:pkg", - "//envoy/api/v2/endpoint:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/service/discovery/v2/ads.proto b/generated_api_shadow/envoy/service/discovery/v2/ads.proto deleted file mode 100644 index d70e0cdc8e149..0000000000000 --- a/generated_api_shadow/envoy/service/discovery/v2/ads.proto +++ /dev/null @@ -1,42 +0,0 @@ -syntax = "proto3"; - -package envoy.service.discovery.v2; - -import "envoy/api/v2/discovery.proto"; - -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.service.discovery.v2"; -option java_outer_classname = "AdsProto"; -option java_multiple_files = true; -option java_generic_services = true; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Aggregated Discovery Service (ADS)] - -// [#not-implemented-hide:] Discovery services for endpoints, clusters, routes, -// and listeners are retained in the package `envoy.api.v2` for backwards -// compatibility with existing management servers. New development in discovery -// services should proceed in the package `envoy.service.discovery.v2`. 
- -// See https://github.com/lyft/envoy-api#apis for a description of the role of -// ADS and how it is intended to be used by a management server. ADS requests -// have the same structure as their singleton xDS counterparts, but can -// multiplex many resource types on a single stream. The type_url in the -// DiscoveryRequest/DiscoveryResponse provides sufficient information to recover -// the multiplexed singleton APIs at the Envoy instance and management server. -service AggregatedDiscoveryService { - // This is a gRPC-only API. - rpc StreamAggregatedResources(stream api.v2.DiscoveryRequest) - returns (stream api.v2.DiscoveryResponse) { - } - - rpc DeltaAggregatedResources(stream api.v2.DeltaDiscoveryRequest) - returns (stream api.v2.DeltaDiscoveryResponse) { - } -} - -// [#not-implemented-hide:] Not configuration. Workaround c++ protobuf issue with importing -// services: https://github.com/google/protobuf/issues/4221 -message AdsDummy { -} diff --git a/generated_api_shadow/envoy/service/discovery/v2/hds.proto b/generated_api_shadow/envoy/service/discovery/v2/hds.proto deleted file mode 100644 index 76f91c5a456de..0000000000000 --- a/generated_api_shadow/envoy/service/discovery/v2/hds.proto +++ /dev/null @@ -1,138 +0,0 @@ -syntax = "proto3"; - -package envoy.service.discovery.v2; - -import "envoy/api/v2/core/base.proto"; -import "envoy/api/v2/core/health_check.proto"; -import "envoy/api/v2/endpoint/endpoint_components.proto"; - -import "google/api/annotations.proto"; -import "google/protobuf/duration.proto"; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.service.discovery.v2"; -option java_outer_classname = "HdsProto"; -option java_multiple_files = true; -option java_generic_services = true; -option (udpa.annotations.file_migrate).move_to_package = "envoy.service.health.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Health 
Discovery Service (HDS)] - -// HDS is Health Discovery Service. It compliments Envoy’s health checking -// service by designating this Envoy to be a healthchecker for a subset of hosts -// in the cluster. The status of these health checks will be reported to the -// management server, where it can be aggregated etc and redistributed back to -// Envoy through EDS. -service HealthDiscoveryService { - // 1. Envoy starts up and if its can_healthcheck option in the static - // bootstrap config is enabled, sends HealthCheckRequest to the management - // server. It supplies its capabilities (which protocol it can health check - // with, what zone it resides in, etc.). - // 2. In response to (1), the management server designates this Envoy as a - // healthchecker to health check a subset of all upstream hosts for a given - // cluster (for example upstream Host 1 and Host 2). It streams - // HealthCheckSpecifier messages with cluster related configuration for all - // clusters this Envoy is designated to health check. Subsequent - // HealthCheckSpecifier message will be sent on changes to: - // a. Endpoints to health checks - // b. Per cluster configuration change - // 3. Envoy creates a health probe based on the HealthCheck config and sends - // it to endpoint(ip:port) of Host 1 and 2. Based on the HealthCheck - // configuration Envoy waits upon the arrival of the probe response and - // looks at the content of the response to decide whether the endpoint is - // healthy or not. If a response hasn't been received within the timeout - // interval, the endpoint health status is considered TIMEOUT. - // 4. Envoy reports results back in an EndpointHealthResponse message. - // Envoy streams responses as often as the interval configured by the - // management server in HealthCheckSpecifier. - // 5. The management Server collects health statuses for all endpoints in the - // cluster (for all clusters) and uses this information to construct - // EndpointDiscoveryResponse messages. 
- // 6. Once Envoy has a list of upstream endpoints to send traffic to, it load - // balances traffic to them without additional health checking. It may - // use inline healthcheck (i.e. consider endpoint UNHEALTHY if connection - // failed to a particular endpoint to account for health status propagation - // delay between HDS and EDS). - // By default, can_healthcheck is true. If can_healthcheck is false, Cluster - // configuration may not contain HealthCheck message. - // TODO(htuch): How is can_healthcheck communicated to CDS to ensure the above - // invariant? - // TODO(htuch): Add @amb67's diagram. - rpc StreamHealthCheck(stream HealthCheckRequestOrEndpointHealthResponse) - returns (stream HealthCheckSpecifier) { - } - - // TODO(htuch): Unlike the gRPC version, there is no stream-based binding of - // request/response. Should we add an identifier to the HealthCheckSpecifier - // to bind with the response? - rpc FetchHealthCheck(HealthCheckRequestOrEndpointHealthResponse) returns (HealthCheckSpecifier) { - option (google.api.http).post = "/v2/discovery:health_check"; - option (google.api.http).body = "*"; - } -} - -// Defines supported protocols etc, so the management server can assign proper -// endpoints to healthcheck. -message Capability { - // Different Envoy instances may have different capabilities (e.g. Redis) - // and/or have ports enabled for different protocols. 
- enum Protocol { - HTTP = 0; - TCP = 1; - REDIS = 2; - } - - repeated Protocol health_check_protocols = 1; -} - -message HealthCheckRequest { - api.v2.core.Node node = 1; - - Capability capability = 2; -} - -message EndpointHealth { - api.v2.endpoint.Endpoint endpoint = 1; - - api.v2.core.HealthStatus health_status = 2; -} - -message EndpointHealthResponse { - repeated EndpointHealth endpoints_health = 1; -} - -message HealthCheckRequestOrEndpointHealthResponse { - oneof request_type { - HealthCheckRequest health_check_request = 1; - - EndpointHealthResponse endpoint_health_response = 2; - } -} - -message LocalityEndpoints { - api.v2.core.Locality locality = 1; - - repeated api.v2.endpoint.Endpoint endpoints = 2; -} - -// The cluster name and locality is provided to Envoy for the endpoints that it -// health checks to support statistics reporting, logging and debugging by the -// Envoy instance (outside of HDS). For maximum usefulness, it should match the -// same cluster structure as that provided by EDS. -message ClusterHealthCheck { - string cluster_name = 1; - - repeated api.v2.core.HealthCheck health_checks = 2; - - repeated LocalityEndpoints locality_endpoints = 3; -} - -message HealthCheckSpecifier { - repeated ClusterHealthCheck cluster_health_checks = 1; - - // The default is 1 second. 
- google.protobuf.Duration interval = 2; -} diff --git a/generated_api_shadow/envoy/service/discovery/v2/rtds.proto b/generated_api_shadow/envoy/service/discovery/v2/rtds.proto deleted file mode 100644 index 713ac277072bf..0000000000000 --- a/generated_api_shadow/envoy/service/discovery/v2/rtds.proto +++ /dev/null @@ -1,54 +0,0 @@ -syntax = "proto3"; - -package envoy.service.discovery.v2; - -import "envoy/api/v2/discovery.proto"; - -import "google/api/annotations.proto"; -import "google/protobuf/struct.proto"; - -import "envoy/annotations/resource.proto"; -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.service.discovery.v2"; -option java_outer_classname = "RtdsProto"; -option java_multiple_files = true; -option java_generic_services = true; -option (udpa.annotations.file_migrate).move_to_package = "envoy.service.runtime.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Runtime Discovery Service (RTDS)] -// RTDS :ref:`configuration overview ` - -// Discovery service for Runtime resources. -service RuntimeDiscoveryService { - option (envoy.annotations.resource).type = "envoy.service.discovery.v2.Runtime"; - - rpc StreamRuntime(stream api.v2.DiscoveryRequest) returns (stream api.v2.DiscoveryResponse) { - } - - rpc DeltaRuntime(stream api.v2.DeltaDiscoveryRequest) - returns (stream api.v2.DeltaDiscoveryResponse) { - } - - rpc FetchRuntime(api.v2.DiscoveryRequest) returns (api.v2.DiscoveryResponse) { - option (google.api.http).post = "/v2/discovery:runtime"; - option (google.api.http).body = "*"; - } -} - -// [#not-implemented-hide:] Not configuration. Workaround c++ protobuf issue with importing -// services: https://github.com/google/protobuf/issues/4221 -message RtdsDummy { -} - -// RTDS resource type. This describes a layer in the runtime virtual filesystem. 
-message Runtime { - // Runtime resource name. This makes the Runtime a self-describing xDS - // resource. - string name = 1 [(validate.rules).string = {min_bytes: 1}]; - - google.protobuf.Struct layer = 2; -} diff --git a/generated_api_shadow/envoy/service/discovery/v2/sds.proto b/generated_api_shadow/envoy/service/discovery/v2/sds.proto deleted file mode 100644 index 4d01d475c59bc..0000000000000 --- a/generated_api_shadow/envoy/service/discovery/v2/sds.proto +++ /dev/null @@ -1,41 +0,0 @@ -syntax = "proto3"; - -package envoy.service.discovery.v2; - -import "envoy/api/v2/discovery.proto"; - -import "google/api/annotations.proto"; - -import "envoy/annotations/resource.proto"; -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.service.discovery.v2"; -option java_outer_classname = "SdsProto"; -option java_multiple_files = true; -option java_generic_services = true; -option (udpa.annotations.file_migrate).move_to_package = "envoy.service.secret.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Secret Discovery Service (SDS)] - -service SecretDiscoveryService { - option (envoy.annotations.resource).type = "envoy.api.v2.auth.Secret"; - - rpc DeltaSecrets(stream api.v2.DeltaDiscoveryRequest) - returns (stream api.v2.DeltaDiscoveryResponse) { - } - - rpc StreamSecrets(stream api.v2.DiscoveryRequest) returns (stream api.v2.DiscoveryResponse) { - } - - rpc FetchSecrets(api.v2.DiscoveryRequest) returns (api.v2.DiscoveryResponse) { - option (google.api.http).post = "/v2/discovery:secrets"; - option (google.api.http).body = "*"; - } -} - -// [#not-implemented-hide:] Not configuration. 
Workaround c++ protobuf issue with importing -// services: https://github.com/google/protobuf/issues/4221 -message SdsDummy { -} diff --git a/generated_api_shadow/envoy/service/discovery/v3/BUILD b/generated_api_shadow/envoy/service/discovery/v3/BUILD deleted file mode 100644 index 074bab85eb710..0000000000000 --- a/generated_api_shadow/envoy/service/discovery/v3/BUILD +++ /dev/null @@ -1,15 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - has_services = True, - deps = [ - "//envoy/api/v2:pkg", - "//envoy/config/core/v3:pkg", - "//envoy/service/discovery/v2:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/service/discovery/v3/ads.proto b/generated_api_shadow/envoy/service/discovery/v3/ads.proto deleted file mode 100644 index 03021559ab669..0000000000000 --- a/generated_api_shadow/envoy/service/discovery/v3/ads.proto +++ /dev/null @@ -1,44 +0,0 @@ -syntax = "proto3"; - -package envoy.service.discovery.v3; - -import "envoy/service/discovery/v3/discovery.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; - -option java_package = "io.envoyproxy.envoy.service.discovery.v3"; -option java_outer_classname = "AdsProto"; -option java_multiple_files = true; -option java_generic_services = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Aggregated Discovery Service (ADS)] - -// [#not-implemented-hide:] Discovery services for endpoints, clusters, routes, -// and listeners are retained in the package `envoy.api.v2` for backwards -// compatibility with existing management servers. New development in discovery -// services should proceed in the package `envoy.service.discovery.v2`. 
- -// See https://github.com/lyft/envoy-api#apis for a description of the role of -// ADS and how it is intended to be used by a management server. ADS requests -// have the same structure as their singleton xDS counterparts, but can -// multiplex many resource types on a single stream. The type_url in the -// DiscoveryRequest/DiscoveryResponse provides sufficient information to recover -// the multiplexed singleton APIs at the Envoy instance and management server. -service AggregatedDiscoveryService { - // This is a gRPC-only API. - rpc StreamAggregatedResources(stream DiscoveryRequest) returns (stream DiscoveryResponse) { - } - - rpc DeltaAggregatedResources(stream DeltaDiscoveryRequest) - returns (stream DeltaDiscoveryResponse) { - } -} - -// [#not-implemented-hide:] Not configuration. Workaround c++ protobuf issue with importing -// services: https://github.com/google/protobuf/issues/4221 -message AdsDummy { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.discovery.v2.AdsDummy"; -} diff --git a/generated_api_shadow/envoy/service/discovery/v3/discovery.proto b/generated_api_shadow/envoy/service/discovery/v3/discovery.proto deleted file mode 100644 index 4a474d0fe2608..0000000000000 --- a/generated_api_shadow/envoy/service/discovery/v3/discovery.proto +++ /dev/null @@ -1,279 +0,0 @@ -syntax = "proto3"; - -package envoy.service.discovery.v3; - -import "envoy/config/core/v3/base.proto"; - -import "google/protobuf/any.proto"; -import "google/protobuf/duration.proto"; -import "google/rpc/status.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; - -option java_package = "io.envoyproxy.envoy.service.discovery.v3"; -option java_outer_classname = "DiscoveryProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Common discovery API components] - -// A DiscoveryRequest requests a set of versioned resources of the 
same type for -// a given Envoy node on some API. -// [#next-free-field: 7] -message DiscoveryRequest { - option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.DiscoveryRequest"; - - // The version_info provided in the request messages will be the version_info - // received with the most recent successfully processed response or empty on - // the first request. It is expected that no new request is sent after a - // response is received until the Envoy instance is ready to ACK/NACK the new - // configuration. ACK/NACK takes place by returning the new API config version - // as applied or the previous API config version respectively. Each type_url - // (see below) has an independent version associated with it. - string version_info = 1; - - // The node making the request. - config.core.v3.Node node = 2; - - // List of resources to subscribe to, e.g. list of cluster names or a route - // configuration name. If this is empty, all resources for the API are - // returned. LDS/CDS may have empty resource_names, which will cause all - // resources for the Envoy instance to be returned. The LDS and CDS responses - // will then imply a number of resources that need to be fetched via EDS/RDS, - // which will be explicitly enumerated in resource_names. - repeated string resource_names = 3; - - // Type of the resource that is being requested, e.g. - // "type.googleapis.com/envoy.api.v2.ClusterLoadAssignment". This is implicit - // in requests made via singleton xDS APIs such as CDS, LDS, etc. but is - // required for ADS. - string type_url = 4; - - // nonce corresponding to DiscoveryResponse being ACK/NACKed. See above - // discussion on version_info and the DiscoveryResponse nonce comment. This - // may be empty only if 1) this is a non-persistent-stream xDS such as HTTP, - // or 2) the client has not yet accepted an update in this xDS stream (unlike - // delta, where it is populated only for new explicit ACKs). 
- string response_nonce = 5; - - // This is populated when the previous :ref:`DiscoveryResponse ` - // failed to update configuration. The *message* field in *error_details* provides the Envoy - // internal exception related to the failure. It is only intended for consumption during manual - // debugging, the string provided is not guaranteed to be stable across Envoy versions. - google.rpc.Status error_detail = 6; -} - -// [#next-free-field: 7] -message DiscoveryResponse { - option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.DiscoveryResponse"; - - // The version of the response data. - string version_info = 1; - - // The response resources. These resources are typed and depend on the API being called. - repeated google.protobuf.Any resources = 2; - - // [#not-implemented-hide:] - // Canary is used to support two Envoy command line flags: - // - // * --terminate-on-canary-transition-failure. When set, Envoy is able to - // terminate if it detects that configuration is stuck at canary. Consider - // this example sequence of updates: - // - Management server applies a canary config successfully. - // - Management server rolls back to a production config. - // - Envoy rejects the new production config. - // Since there is no sensible way to continue receiving configuration - // updates, Envoy will then terminate and apply production config from a - // clean slate. - // * --dry-run-canary. When set, a canary response will never be applied, only - // validated via a dry run. - bool canary = 3; - - // Type URL for resources. Identifies the xDS API when muxing over ADS. - // Must be consistent with the type_url in the 'resources' repeated Any (if non-empty). - string type_url = 4; - - // For gRPC based subscriptions, the nonce provides a way to explicitly ack a - // specific DiscoveryResponse in a following DiscoveryRequest. 
Additional - // messages may have been sent by Envoy to the management server for the - // previous version on the stream prior to this DiscoveryResponse, that were - // unprocessed at response send time. The nonce allows the management server - // to ignore any further DiscoveryRequests for the previous version until a - // DiscoveryRequest bearing the nonce. The nonce is optional and is not - // required for non-stream based xDS implementations. - string nonce = 5; - - // The control plane instance that sent the response. - config.core.v3.ControlPlane control_plane = 6; -} - -// DeltaDiscoveryRequest and DeltaDiscoveryResponse are used in a new gRPC -// endpoint for Delta xDS. -// -// With Delta xDS, the DeltaDiscoveryResponses do not need to include a full -// snapshot of the tracked resources. Instead, DeltaDiscoveryResponses are a -// diff to the state of a xDS client. -// In Delta XDS there are per-resource versions, which allow tracking state at -// the resource granularity. -// An xDS Delta session is always in the context of a gRPC bidirectional -// stream. This allows the xDS server to keep track of the state of xDS clients -// connected to it. -// -// In Delta xDS the nonce field is required and used to pair -// DeltaDiscoveryResponse to a DeltaDiscoveryRequest ACK or NACK. -// Optionally, a response message level system_version_info is present for -// debugging purposes only. -// -// DeltaDiscoveryRequest plays two independent roles. Any DeltaDiscoveryRequest -// can be either or both of: [1] informing the server of what resources the -// client has gained/lost interest in (using resource_names_subscribe and -// resource_names_unsubscribe), or [2] (N)ACKing an earlier resource update from -// the server (using response_nonce, with presence of error_detail making it a NACK). 
-// Additionally, the first message (for a given type_url) of a reconnected gRPC stream -// has a third role: informing the server of the resources (and their versions) -// that the client already possesses, using the initial_resource_versions field. -// -// As with state-of-the-world, when multiple resource types are multiplexed (ADS), -// all requests/acknowledgments/updates are logically walled off by type_url: -// a Cluster ACK exists in a completely separate world from a prior Route NACK. -// In particular, initial_resource_versions being sent at the "start" of every -// gRPC stream actually entails a message for each type_url, each with its own -// initial_resource_versions. -// [#next-free-field: 8] -message DeltaDiscoveryRequest { - option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.DeltaDiscoveryRequest"; - - // The node making the request. - config.core.v3.Node node = 1; - - // Type of the resource that is being requested, e.g. - // "type.googleapis.com/envoy.api.v2.ClusterLoadAssignment". This does not need to be set if - // resources are only referenced via *xds_resource_subscribe* and - // *xds_resources_unsubscribe*. - string type_url = 2; - - // DeltaDiscoveryRequests allow the client to add or remove individual - // resources to the set of tracked resources in the context of a stream. - // All resource names in the resource_names_subscribe list are added to the - // set of tracked resources and all resource names in the resource_names_unsubscribe - // list are removed from the set of tracked resources. - // - // *Unlike* state-of-the-world xDS, an empty resource_names_subscribe or - // resource_names_unsubscribe list simply means that no resources are to be - // added or removed to the resource list. - // *Like* state-of-the-world xDS, the server must send updates for all tracked - // resources, but can also send updates for resources the client has not subscribed to. 
- // - // NOTE: the server must respond with all resources listed in resource_names_subscribe, - // even if it believes the client has the most recent version of them. The reason: - // the client may have dropped them, but then regained interest before it had a chance - // to send the unsubscribe message. See DeltaSubscriptionStateTest.RemoveThenAdd. - // - // These two fields can be set in any DeltaDiscoveryRequest, including ACKs - // and initial_resource_versions. - // - // A list of Resource names to add to the list of tracked resources. - repeated string resource_names_subscribe = 3; - - // A list of Resource names to remove from the list of tracked resources. - repeated string resource_names_unsubscribe = 4; - - // Informs the server of the versions of the resources the xDS client knows of, to enable the - // client to continue the same logical xDS session even in the face of gRPC stream reconnection. - // It will not be populated: [1] in the very first stream of a session, since the client will - // not yet have any resources, [2] in any message after the first in a stream (for a given - // type_url), since the server will already be correctly tracking the client's state. - // (In ADS, the first message *of each type_url* of a reconnected stream populates this map.) - // The map's keys are names of xDS resources known to the xDS client. - // The map's values are opaque resource versions. - map initial_resource_versions = 5; - - // When the DeltaDiscoveryRequest is a ACK or NACK message in response - // to a previous DeltaDiscoveryResponse, the response_nonce must be the - // nonce in the DeltaDiscoveryResponse. - // Otherwise (unlike in DiscoveryRequest) response_nonce must be omitted. - string response_nonce = 6; - - // This is populated when the previous :ref:`DiscoveryResponse ` - // failed to update configuration. The *message* field in *error_details* - // provides the Envoy internal exception related to the failure. 
- google.rpc.Status error_detail = 7; -} - -// [#next-free-field: 8] -message DeltaDiscoveryResponse { - option (udpa.annotations.versioning).previous_message_type = - "envoy.api.v2.DeltaDiscoveryResponse"; - - // The version of the response data (used for debugging). - string system_version_info = 1; - - // The response resources. These are typed resources, whose types must match - // the type_url field. - repeated Resource resources = 2; - - // field id 3 IS available! - - // Type URL for resources. Identifies the xDS API when muxing over ADS. - // Must be consistent with the type_url in the Any within 'resources' if 'resources' is non-empty. - string type_url = 4; - - // Resources names of resources that have be deleted and to be removed from the xDS Client. - // Removed resources for missing resources can be ignored. - repeated string removed_resources = 6; - - // The nonce provides a way for DeltaDiscoveryRequests to uniquely - // reference a DeltaDiscoveryResponse when (N)ACKing. The nonce is required. - string nonce = 5; - - // [#not-implemented-hide:] - // The control plane instance that sent the response. - config.core.v3.ControlPlane control_plane = 7; -} - -// [#next-free-field: 8] -message Resource { - option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.Resource"; - - // Cache control properties for the resource. - // [#not-implemented-hide:] - message CacheControl { - // If true, xDS proxies may not cache this resource. - // Note that this does not apply to clients other than xDS proxies, which must cache resources - // for their own use, regardless of the value of this field. - bool do_not_cache = 1; - } - - // The resource's name, to distinguish it from others of the same type of resource. - string name = 3; - - // The aliases are a list of other names that this resource can go by. - repeated string aliases = 4; - - // The resource level version. It allows xDS to track the state of individual - // resources. 
- string version = 1; - - // The resource being tracked. - google.protobuf.Any resource = 2; - - // Time-to-live value for the resource. For each resource, a timer is started. The timer is - // reset each time the resource is received with a new TTL. If the resource is received with - // no TTL set, the timer is removed for the resource. Upon expiration of the timer, the - // configuration for the resource will be removed. - // - // The TTL can be refreshed or changed by sending a response that doesn't change the resource - // version. In this case the resource field does not need to be populated, which allows for - // light-weight "heartbeat" updates to keep a resource with a TTL alive. - // - // The TTL feature is meant to support configurations that should be removed in the event of - // a management server failure. For example, the feature may be used for fault injection - // testing where the fault injection should be terminated in the event that Envoy loses contact - // with the management server. - google.protobuf.Duration ttl = 6; - - // Cache control properties for the resource. - // [#not-implemented-hide:] - CacheControl cache_control = 7; -} diff --git a/generated_api_shadow/envoy/service/endpoint/v3/BUILD b/generated_api_shadow/envoy/service/endpoint/v3/BUILD deleted file mode 100644 index d3be4fae57fa4..0000000000000 --- a/generated_api_shadow/envoy/service/endpoint/v3/BUILD +++ /dev/null @@ -1,15 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - has_services = True, - deps = [ - "//envoy/annotations:pkg", - "//envoy/api/v2:pkg", - "//envoy/service/discovery/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/service/endpoint/v3/eds.proto b/generated_api_shadow/envoy/service/endpoint/v3/eds.proto deleted file mode 100644 index 7f560b87b79e5..0000000000000 --- a/generated_api_shadow/envoy/service/endpoint/v3/eds.proto +++ /dev/null @@ -1,45 +0,0 @@ -syntax = "proto3"; - -package envoy.service.endpoint.v3; - -import "envoy/service/discovery/v3/discovery.proto"; - -import "google/api/annotations.proto"; - -import "envoy/annotations/resource.proto"; -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; - -option java_package = "io.envoyproxy.envoy.service.endpoint.v3"; -option java_outer_classname = "EdsProto"; -option java_multiple_files = true; -option java_generic_services = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: EDS] -// Endpoint discovery :ref:`architecture overview ` - -service EndpointDiscoveryService { - option (envoy.annotations.resource).type = "envoy.config.endpoint.v3.ClusterLoadAssignment"; - - // The resource_names field in DiscoveryRequest specifies a list of clusters - // to subscribe to updates for. - rpc StreamEndpoints(stream discovery.v3.DiscoveryRequest) - returns (stream discovery.v3.DiscoveryResponse) { - } - - rpc DeltaEndpoints(stream discovery.v3.DeltaDiscoveryRequest) - returns (stream discovery.v3.DeltaDiscoveryResponse) { - } - - rpc FetchEndpoints(discovery.v3.DiscoveryRequest) returns (discovery.v3.DiscoveryResponse) { - option (google.api.http).post = "/v3/discovery:endpoints"; - option (google.api.http).body = "*"; - } -} - -// [#not-implemented-hide:] Not configuration. 
Workaround c++ protobuf issue with importing -// services: https://github.com/google/protobuf/issues/4221 and protoxform to upgrade the file. -message EdsDummy { - option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.EdsDummy"; -} diff --git a/generated_api_shadow/envoy/service/endpoint/v3/leds.proto b/generated_api_shadow/envoy/service/endpoint/v3/leds.proto deleted file mode 100644 index 89172f487eba0..0000000000000 --- a/generated_api_shadow/envoy/service/endpoint/v3/leds.proto +++ /dev/null @@ -1,37 +0,0 @@ -syntax = "proto3"; - -package envoy.service.endpoint.v3; - -import "envoy/service/discovery/v3/discovery.proto"; - -import "envoy/annotations/resource.proto"; -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.service.endpoint.v3"; -option java_outer_classname = "LedsProto"; -option java_multiple_files = true; -option java_generic_services = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#not-implemented-hide:] -// [#protodoc-title: LEDS] -// Locality-Endpoint discovery -// [#comment:TODO(adisuissa): Link to unified matching docs: -// :ref:`architecture overview`] - -service LocalityEndpointDiscoveryService { - option (envoy.annotations.resource).type = "envoy.config.endpoint.v3.LbEndpoint"; - - // State-of-the-World (DiscoveryRequest) and REST are not supported. - - // The resource_names_subscribe resource_names_unsubscribe fields in DeltaDiscoveryRequest - // specify a list of glob collections to subscribe to updates for. - rpc DeltaLocalityEndpoints(stream discovery.v3.DeltaDiscoveryRequest) - returns (stream discovery.v3.DeltaDiscoveryResponse) { - } -} - -// [#not-implemented-hide:] Not configuration. Workaround c++ protobuf issue with importing -// services: https://github.com/google/protobuf/issues/4221 and protoxform to upgrade the file. 
-message LedsDummy { -} diff --git a/generated_api_shadow/envoy/service/event_reporting/v2alpha/BUILD b/generated_api_shadow/envoy/service/event_reporting/v2alpha/BUILD deleted file mode 100644 index 4f58bd462f66c..0000000000000 --- a/generated_api_shadow/envoy/service/event_reporting/v2alpha/BUILD +++ /dev/null @@ -1,13 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - has_services = True, - deps = [ - "//envoy/api/v2/core:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/service/event_reporting/v2alpha/event_reporting_service.proto b/generated_api_shadow/envoy/service/event_reporting/v2alpha/event_reporting_service.proto deleted file mode 100644 index 8d07f04640caf..0000000000000 --- a/generated_api_shadow/envoy/service/event_reporting/v2alpha/event_reporting_service.proto +++ /dev/null @@ -1,62 +0,0 @@ -syntax = "proto3"; - -package envoy.service.event_reporting.v2alpha; - -import "envoy/api/v2/core/base.proto"; - -import "google/protobuf/any.proto"; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.service.event_reporting.v2alpha"; -option java_outer_classname = "EventReportingServiceProto"; -option java_multiple_files = true; -option java_generic_services = true; -option (udpa.annotations.file_migrate).move_to_package = "envoy.service.event_reporting.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: gRPC Event Reporting Service] - -// [#not-implemented-hide:] -// Service for streaming different types of events from Envoy to a server. The examples of -// such events may be health check or outlier detection events. 
-service EventReportingService { - // Envoy will connect and send StreamEventsRequest messages forever. - // The management server may send StreamEventsResponse to configure event stream. See below. - // This API is designed for high throughput with the expectation that it might be lossy. - rpc StreamEvents(stream StreamEventsRequest) returns (stream StreamEventsResponse) { - } -} - -// [#not-implemented-hide:] -// An events envoy sends to the management server. -message StreamEventsRequest { - message Identifier { - // The node sending the event messages over the stream. - api.v2.core.Node node = 1 [(validate.rules).message = {required: true}]; - } - - // Identifier data that will only be sent in the first message on the stream. This is effectively - // structured metadata and is a performance optimization. - Identifier identifier = 1; - - // Batch of events. When the stream is already active, it will be the events occurred - // since the last message had been sent. If the server receives unknown event type, it should - // silently ignore it. - // - // The following events are supported: - // - // * :ref:`HealthCheckEvent ` - // * :ref:`OutlierDetectionEvent ` - repeated google.protobuf.Any events = 2 [(validate.rules).repeated = {min_items: 1}]; -} - -// [#not-implemented-hide:] -// The management server may send envoy a StreamEventsResponse to tell which events the server -// is interested in. In future, with aggregated event reporting service, this message will -// contain, for example, clusters the envoy should send events for, or event types the server -// wants to process. -message StreamEventsResponse { -} diff --git a/generated_api_shadow/envoy/service/event_reporting/v3/BUILD b/generated_api_shadow/envoy/service/event_reporting/v3/BUILD deleted file mode 100644 index 7753cfeb3d6e5..0000000000000 --- a/generated_api_shadow/envoy/service/event_reporting/v3/BUILD +++ /dev/null @@ -1,13 +0,0 @@ -# DO NOT EDIT. 
This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - has_services = True, - deps = [ - "//envoy/config/core/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/service/event_reporting/v3/event_reporting_service.proto b/generated_api_shadow/envoy/service/event_reporting/v3/event_reporting_service.proto deleted file mode 100644 index 30c161a1c5309..0000000000000 --- a/generated_api_shadow/envoy/service/event_reporting/v3/event_reporting_service.proto +++ /dev/null @@ -1,69 +0,0 @@ -syntax = "proto3"; - -package envoy.service.event_reporting.v3; - -import "envoy/config/core/v3/base.proto"; - -import "google/protobuf/any.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.service.event_reporting.v3"; -option java_outer_classname = "EventReportingServiceProto"; -option java_multiple_files = true; -option java_generic_services = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: gRPC Event Reporting Service] - -// [#not-implemented-hide:] -// Service for streaming different types of events from Envoy to a server. The examples of -// such events may be health check or outlier detection events. -service EventReportingService { - // Envoy will connect and send StreamEventsRequest messages forever. - // The management server may send StreamEventsResponse to configure event stream. See below. - // This API is designed for high throughput with the expectation that it might be lossy. - rpc StreamEvents(stream StreamEventsRequest) returns (stream StreamEventsResponse) { - } -} - -// [#not-implemented-hide:] -// An events envoy sends to the management server. 
-message StreamEventsRequest { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.event_reporting.v2alpha.StreamEventsRequest"; - - message Identifier { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.event_reporting.v2alpha.StreamEventsRequest.Identifier"; - - // The node sending the event messages over the stream. - config.core.v3.Node node = 1 [(validate.rules).message = {required: true}]; - } - - // Identifier data that will only be sent in the first message on the stream. This is effectively - // structured metadata and is a performance optimization. - Identifier identifier = 1; - - // Batch of events. When the stream is already active, it will be the events occurred - // since the last message had been sent. If the server receives unknown event type, it should - // silently ignore it. - // - // The following events are supported: - // - // * :ref:`HealthCheckEvent ` - // * :ref:`OutlierDetectionEvent ` - repeated google.protobuf.Any events = 2 [(validate.rules).repeated = {min_items: 1}]; -} - -// [#not-implemented-hide:] -// The management server may send envoy a StreamEventsResponse to tell which events the server -// is interested in. In future, with aggregated event reporting service, this message will -// contain, for example, clusters the envoy should send events for, or event types the server -// wants to process. -message StreamEventsResponse { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.event_reporting.v2alpha.StreamEventsResponse"; -} diff --git a/generated_api_shadow/envoy/service/ext_proc/v3alpha/BUILD b/generated_api_shadow/envoy/service/ext_proc/v3alpha/BUILD deleted file mode 100644 index 4f3730e2af32e..0000000000000 --- a/generated_api_shadow/envoy/service/ext_proc/v3alpha/BUILD +++ /dev/null @@ -1,15 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - has_services = True, - deps = [ - "//envoy/config/core/v3:pkg", - "//envoy/extensions/filters/http/ext_proc/v3alpha:pkg", - "//envoy/type/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/service/ext_proc/v3alpha/external_processor.proto b/generated_api_shadow/envoy/service/ext_proc/v3alpha/external_processor.proto deleted file mode 100644 index 09572331aa42a..0000000000000 --- a/generated_api_shadow/envoy/service/ext_proc/v3alpha/external_processor.proto +++ /dev/null @@ -1,331 +0,0 @@ -syntax = "proto3"; - -package envoy.service.ext_proc.v3alpha; - -import "envoy/config/core/v3/base.proto"; -import "envoy/extensions/filters/http/ext_proc/v3alpha/processing_mode.proto"; -import "envoy/type/v3/http_status.proto"; - -import "google/protobuf/struct.proto"; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.service.ext_proc.v3alpha"; -option java_outer_classname = "ExternalProcessorProto"; -option java_multiple_files = true; -option java_generic_services = true; -option (udpa.annotations.file_status).work_in_progress = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: External Processing Service] - -// A service that can access and modify HTTP requests and responses -// as part of a filter chain. -// The overall external processing protocol works like this: -// -// 1. Envoy sends to the service information about the HTTP request. -// 2. The service sends back a ProcessingResponse message that directs Envoy -// to either stop processing, continue without it, or send it the -// next chunk of the message body. -// 3. If so requested, Envoy sends the server chunks of the message body, -// or the entire body at once. 
In either case, the server sends back -// a ProcessingResponse after each message it receives. -// 4. If so requested, Envoy sends the server the HTTP trailers, -// and the server sends back a ProcessingResponse. -// 5. At this point, request processing is done, and we pick up again -// at step 1 when Envoy receives a response from the upstream server. -// 6. At any point above, if the server closes the gRPC stream cleanly, -// then Envoy proceeds without consulting the server. -// 7. At any point above, if the server closes the gRPC stream with an error, -// then Envoy returns a 500 error to the client, unless the filter -// was configured to ignore errors. -// -// In other words, the process is a request/response conversation, but -// using a gRPC stream to make it easier for the server to -// maintain state. - -service ExternalProcessor { - // This begins the bidirectional stream that Envoy will use to - // give the server control over what the filter does. The actual - // protocol is described by the ProcessingRequest and ProcessingResponse - // messages below. - rpc Process(stream ProcessingRequest) returns (stream ProcessingResponse) { - } -} - -// This represents the different types of messages that Envoy can send -// to an external processing server. -// [#next-free-field: 8] -message ProcessingRequest { - // Specify whether the filter that sent this request is running in synchronous - // or asynchronous mode. The choice of synchronous or asynchronous mode - // can be set in the filter configuration, and defaults to false. - // - // * A value of "false" indicates that the server must respond - // to this message by either sending back a matching ProcessingResponse message, - // or by closing the stream. - // * A value of "true" indicates that the server must not respond to this - // message, although it may still close the stream to indicate that no more messages - // are needed. 
- // - bool async_mode = 1; - - // Each request message will include one of the following sub-messages. Which - // ones are set for a particular HTTP request/response depend on the - // processing mode. - oneof request { - option (validate.required) = true; - - // Information about the HTTP request headers, as well as peer info and additional - // properties. Unless "async_mode" is true, the server must send back a - // HeaderResponse message, an ImmediateResponse message, or close the stream. - HttpHeaders request_headers = 2; - - // Information about the HTTP response headers, as well as peer info and additional - // properties. Unless "async_mode" is true, the server must send back a - // HeaderResponse message or close the stream. - HttpHeaders response_headers = 3; - - // A chunk of the HTTP request body. Unless "async_mode" is true, the server must send back - // a BodyResponse message, an ImmediateResponse message, or close the stream. - HttpBody request_body = 4; - - // A chunk of the HTTP request body. Unless "async_mode" is true, the server must send back - // a BodyResponse message or close the stream. - HttpBody response_body = 5; - - // The HTTP trailers for the request path. Unless "async_mode" is true, the server - // must send back a TrailerResponse message or close the stream. - // - // This message is only sent if the trailers processing mode is set to "SEND". - // If there are no trailers on the original downstream request, then this message - // will only be sent (with empty trailers waiting to be populated) if the - // processing mode is set before the request headers are sent, such as - // in the filter configuration. - HttpTrailers request_trailers = 6; - - // The HTTP trailers for the response path. Unless "async_mode" is true, the server - // must send back a TrailerResponse message or close the stream. - // - // This message is only sent if the trailers processing mode is set to "SEND". 
- // If there are no trailers on the original downstream request, then this message - // will only be sent (with empty trailers waiting to be populated) if the - // processing mode is set before the request headers are sent, such as - // in the filter configuration. - HttpTrailers response_trailers = 7; - } -} - -// For every ProcessingRequest received by the server with the "async_mode" field -// set to false, the server must send back exactly one ProcessingResponse message. -// [#next-free-field: 10] -message ProcessingResponse { - oneof response { - option (validate.required) = true; - - // The server must send back this message in response to a message with the - // "request_headers" field set. - HeadersResponse request_headers = 1; - - // The server must send back this message in response to a message with the - // "response_headers" field set. - HeadersResponse response_headers = 2; - - // The server must send back this message in response to a message with - // the "request_body" field set. - BodyResponse request_body = 3; - - // The server must send back this message in response to a message with - // the "response_body" field set. - BodyResponse response_body = 4; - - // The server must send back this message in response to a message with - // the "request_trailers" field set. - TrailersResponse request_trailers = 5; - - // The server must send back this message in response to a message with - // the "response_trailers" field set. - TrailersResponse response_trailers = 6; - - // If specified, attempt to create a locally generated response, send it - // downstream, and stop processing additional filters and ignore any - // additional messages received from the remote server for this request or - // response. If a response has already started -- for example, if this - // message is sent response to a "response_body" message -- then - // this will either ship the reply directly to the downstream codec, - // or reset the stream. 
- ImmediateResponse immediate_response = 7; - } - - // [#not-implemented-hide:] - // Optional metadata that will be emitted as dynamic metadata to be consumed by the next - // filter. This metadata will be placed in the namespace "envoy.filters.http.ext_proc". - google.protobuf.Struct dynamic_metadata = 8; - - // Override how parts of the HTTP request and response are processed - // for the duration of this particular request/response only. Servers - // may use this to intelligently control how requests are processed - // based on the headers and other metadata that they see. - envoy.extensions.filters.http.ext_proc.v3alpha.ProcessingMode mode_override = 9; -} - -// The following are messages that are sent to the server. - -// This message is sent to the external server when the HTTP request and responses -// are first received. -message HttpHeaders { - // The HTTP request headers. All header keys will be - // lower-cased, because HTTP header keys are case-insensitive. - config.core.v3.HeaderMap headers = 1; - - // [#not-implemented-hide:] - // The values of properties selected by the "request_attributes" - // or "response_attributes" list in the configuration. Each entry - // in the list is populated - // from the standard :ref:`attributes ` - // supported across Envoy. - map attributes = 2; - - // If true, then there is no message body associated with this - // request or response. - bool end_of_stream = 3; -} - -// This message contains the message body that Envoy sends to the external server. -message HttpBody { - bytes body = 1; - - bool end_of_stream = 2; -} - -// This message contains the trailers. -message HttpTrailers { - config.core.v3.HeaderMap trailers = 1; -} - -// The following are messages that may be sent back by the server. - -// This message must be sent in response to an HttpHeaders message. -message HeadersResponse { - CommonResponse response = 1; -} - -// This message must be sent in response to an HttpTrailers message. 
-message TrailersResponse { - // Instructions on how to manipulate the trailers - HeaderMutation header_mutation = 1; -} - -// This message must be sent in response to an HttpBody message. -message BodyResponse { - CommonResponse response = 1; -} - -// This message contains common fields between header and body responses. -// [#next-free-field: 6] -message CommonResponse { - enum ResponseStatus { - // Apply the mutation instructions in this message to the - // request or response, and then continue processing the filter - // stream as normal. This is the default. - CONTINUE = 0; - - // Apply the specified header mutation, replace the body with the body - // specified in the body mutation (if present), and do not send any - // further messages for this request or response even if the processing - // mode is configured to do so. - // - // When used in response to a request_headers or response_headers message, - // this status makes it possible to either completely replace the body - // while discarding the original body, or to add a body to a message that - // formerly did not have one. - // - // In other words, this response makes it possible to turn an HTTP GET - // into a POST, PUT, or PATCH. - CONTINUE_AND_REPLACE = 1; - } - - // If set, provide additional direction on how the Envoy proxy should - // handle the rest of the HTTP filter chain. - ResponseStatus status = 1 [(validate.rules).enum = {defined_only: true}]; - - // Instructions on how to manipulate the headers. When responding to an - // HttpBody request, header mutations will only take effect if - // the current processing mode for the body is BUFFERED. - HeaderMutation header_mutation = 2; - - // Replace the body of the last message sent to the remote server on this - // stream. If responding to an HttpBody request, simply replace or clear - // the body chunk that was sent with that request. Body mutations only take - // effect in response to "body" messages and are ignored otherwise. 
- BodyMutation body_mutation = 3; - - // [#not-implemented-hide:] - // Add new trailers to the message. This may be used when responding to either a - // HttpHeaders or HttpBody message, but only if this message is returned - // along with the CONTINUE_AND_REPLACE status. - config.core.v3.HeaderMap trailers = 4; - - // Clear the route cache for the current request. - // This is necessary if the remote server - // modified headers that are used to calculate the route. - bool clear_route_cache = 5; -} - -// This message causes the filter to attempt to create a locally -// generated response, send it downstream, stop processing -// additional filters, and ignore any additional messages received -// from the remote server for this request or response. If a response -// has already started, then this will either ship the reply directly -// to the downstream codec, or reset the stream. -// [#next-free-field: 6] -message ImmediateResponse { - // The response code to return - type.v3.HttpStatus status = 1 [(validate.rules).message = {required: true}]; - - // Apply changes to the default headers, which will include content-type. - HeaderMutation headers = 2; - - // The message body to return with the response which is sent using the - // text/plain content type, or encoded in the grpc-message header. - string body = 3; - - // If set, then include a gRPC status trailer. - GrpcStatus grpc_status = 4; - - // A string detailing why this local reply was sent, which may be included - // in log and debug output. - string details = 5; -} - -// This message specifies a gRPC status for an ImmediateResponse message. -message GrpcStatus { - // The actual gRPC status - uint32 status = 1; -} - -// Change HTTP headers or trailers by appending, replacing, or removing -// headers. -message HeaderMutation { - // Add or replace HTTP headers. 
Attempts to set the value of - // any "x-envoy" header, and attempts to set the ":method", - // ":authority", ":scheme", or "host" headers will be ignored. - repeated config.core.v3.HeaderValueOption set_headers = 1; - - // Remove these HTTP headers. Attempts to remove system headers -- - // any header starting with ":", plus "host" -- will be ignored. - repeated string remove_headers = 2; -} - -// Replace the entire message body chunk received in the corresponding -// HttpBody message with this new body, or clear the body. -message BodyMutation { - oneof mutation { - // The entire body to replace - bytes body = 1; - - // Clear the corresponding body chunk - bool clear_body = 2; - } -} diff --git a/generated_api_shadow/envoy/service/extension/v3/BUILD b/generated_api_shadow/envoy/service/extension/v3/BUILD deleted file mode 100644 index 9f2ae1e747c54..0000000000000 --- a/generated_api_shadow/envoy/service/extension/v3/BUILD +++ /dev/null @@ -1,14 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - has_services = True, - deps = [ - "//envoy/annotations:pkg", - "//envoy/service/discovery/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/service/extension/v3/config_discovery.proto b/generated_api_shadow/envoy/service/extension/v3/config_discovery.proto deleted file mode 100644 index cf83adbd26444..0000000000000 --- a/generated_api_shadow/envoy/service/extension/v3/config_discovery.proto +++ /dev/null @@ -1,43 +0,0 @@ -syntax = "proto3"; - -package envoy.service.extension.v3; - -import "envoy/service/discovery/v3/discovery.proto"; - -import "google/api/annotations.proto"; - -import "envoy/annotations/resource.proto"; -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.service.extension.v3"; -option java_outer_classname = "ConfigDiscoveryProto"; -option java_multiple_files = true; -option java_generic_services = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Extension Config Discovery Service (ECDS)] - -// Return extension configurations. -service ExtensionConfigDiscoveryService { - option (envoy.annotations.resource).type = "envoy.config.core.v3.TypedExtensionConfig"; - - rpc StreamExtensionConfigs(stream discovery.v3.DiscoveryRequest) - returns (stream discovery.v3.DiscoveryResponse) { - } - - rpc DeltaExtensionConfigs(stream discovery.v3.DeltaDiscoveryRequest) - returns (stream discovery.v3.DeltaDiscoveryResponse) { - } - - rpc FetchExtensionConfigs(discovery.v3.DiscoveryRequest) - returns (discovery.v3.DiscoveryResponse) { - option (google.api.http).post = "/v3/discovery:extension_configs"; - option (google.api.http).body = "*"; - } -} - -// [#not-implemented-hide:] Not configuration. 
Workaround c++ protobuf issue -// with importing services: https://github.com/google/protobuf/issues/4221 and -// protoxform to upgrade the file. -message EcdsDummy { -} diff --git a/generated_api_shadow/envoy/service/health/v3/BUILD b/generated_api_shadow/envoy/service/health/v3/BUILD deleted file mode 100644 index 30ba155208b5d..0000000000000 --- a/generated_api_shadow/envoy/service/health/v3/BUILD +++ /dev/null @@ -1,17 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - has_services = True, - deps = [ - "//envoy/annotations:pkg", - "//envoy/config/cluster/v3:pkg", - "//envoy/config/core/v3:pkg", - "//envoy/config/endpoint/v3:pkg", - "//envoy/service/discovery/v2:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/service/health/v3/hds.proto b/generated_api_shadow/envoy/service/health/v3/hds.proto deleted file mode 100644 index 51266a64fa959..0000000000000 --- a/generated_api_shadow/envoy/service/health/v3/hds.proto +++ /dev/null @@ -1,193 +0,0 @@ -syntax = "proto3"; - -package envoy.service.health.v3; - -import "envoy/config/cluster/v3/cluster.proto"; -import "envoy/config/core/v3/base.proto"; -import "envoy/config/core/v3/health_check.proto"; -import "envoy/config/endpoint/v3/endpoint_components.proto"; - -import "google/api/annotations.proto"; -import "google/protobuf/duration.proto"; - -import "envoy/annotations/deprecation.proto"; -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; - -option java_package = "io.envoyproxy.envoy.service.health.v3"; -option java_outer_classname = "HdsProto"; -option java_multiple_files = true; -option java_generic_services = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Health Discovery Service (HDS)] - -// HDS is Health 
Discovery Service. It compliments Envoy’s health checking -// service by designating this Envoy to be a healthchecker for a subset of hosts -// in the cluster. The status of these health checks will be reported to the -// management server, where it can be aggregated etc and redistributed back to -// Envoy through EDS. -service HealthDiscoveryService { - // 1. Envoy starts up and if its can_healthcheck option in the static - // bootstrap config is enabled, sends HealthCheckRequest to the management - // server. It supplies its capabilities (which protocol it can health check - // with, what zone it resides in, etc.). - // 2. In response to (1), the management server designates this Envoy as a - // healthchecker to health check a subset of all upstream hosts for a given - // cluster (for example upstream Host 1 and Host 2). It streams - // HealthCheckSpecifier messages with cluster related configuration for all - // clusters this Envoy is designated to health check. Subsequent - // HealthCheckSpecifier message will be sent on changes to: - // a. Endpoints to health checks - // b. Per cluster configuration change - // 3. Envoy creates a health probe based on the HealthCheck config and sends - // it to endpoint(ip:port) of Host 1 and 2. Based on the HealthCheck - // configuration Envoy waits upon the arrival of the probe response and - // looks at the content of the response to decide whether the endpoint is - // healthy or not. If a response hasn't been received within the timeout - // interval, the endpoint health status is considered TIMEOUT. - // 4. Envoy reports results back in an EndpointHealthResponse message. - // Envoy streams responses as often as the interval configured by the - // management server in HealthCheckSpecifier. - // 5. The management Server collects health statuses for all endpoints in the - // cluster (for all clusters) and uses this information to construct - // EndpointDiscoveryResponse messages. - // 6. 
Once Envoy has a list of upstream endpoints to send traffic to, it load - // balances traffic to them without additional health checking. It may - // use inline healthcheck (i.e. consider endpoint UNHEALTHY if connection - // failed to a particular endpoint to account for health status propagation - // delay between HDS and EDS). - // By default, can_healthcheck is true. If can_healthcheck is false, Cluster - // configuration may not contain HealthCheck message. - // TODO(htuch): How is can_healthcheck communicated to CDS to ensure the above - // invariant? - // TODO(htuch): Add @amb67's diagram. - rpc StreamHealthCheck(stream HealthCheckRequestOrEndpointHealthResponse) - returns (stream HealthCheckSpecifier) { - } - - // TODO(htuch): Unlike the gRPC version, there is no stream-based binding of - // request/response. Should we add an identifier to the HealthCheckSpecifier - // to bind with the response? - rpc FetchHealthCheck(HealthCheckRequestOrEndpointHealthResponse) returns (HealthCheckSpecifier) { - option (google.api.http).post = "/v3/discovery:health_check"; - option (google.api.http).body = "*"; - } -} - -// Defines supported protocols etc, so the management server can assign proper -// endpoints to healthcheck. -message Capability { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.discovery.v2.Capability"; - - // Different Envoy instances may have different capabilities (e.g. Redis) - // and/or have ports enabled for different protocols. 
- enum Protocol { - HTTP = 0; - TCP = 1; - REDIS = 2; - } - - repeated Protocol health_check_protocols = 1; -} - -message HealthCheckRequest { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.discovery.v2.HealthCheckRequest"; - - config.core.v3.Node node = 1; - - Capability capability = 2; -} - -message EndpointHealth { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.discovery.v2.EndpointHealth"; - - config.endpoint.v3.Endpoint endpoint = 1; - - config.core.v3.HealthStatus health_status = 2; -} - -// Group endpoint health by locality under each cluster. -message LocalityEndpointsHealth { - config.core.v3.Locality locality = 1; - - repeated EndpointHealth endpoints_health = 2; -} - -// The health status of endpoints in a cluster. The cluster name and locality -// should match the corresponding fields in ClusterHealthCheck message. -message ClusterEndpointsHealth { - string cluster_name = 1; - - repeated LocalityEndpointsHealth locality_endpoints_health = 2; -} - -message EndpointHealthResponse { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.discovery.v2.EndpointHealthResponse"; - - // Deprecated - Flat list of endpoint health information. - repeated EndpointHealth endpoints_health = 1 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - - // Organize Endpoint health information by cluster. 
- repeated ClusterEndpointsHealth cluster_endpoints_health = 2; -} - -message HealthCheckRequestOrEndpointHealthResponse { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.discovery.v2.HealthCheckRequestOrEndpointHealthResponse"; - - oneof request_type { - HealthCheckRequest health_check_request = 1; - - EndpointHealthResponse endpoint_health_response = 2; - } -} - -message LocalityEndpoints { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.discovery.v2.LocalityEndpoints"; - - config.core.v3.Locality locality = 1; - - repeated config.endpoint.v3.Endpoint endpoints = 2; -} - -// The cluster name and locality is provided to Envoy for the endpoints that it -// health checks to support statistics reporting, logging and debugging by the -// Envoy instance (outside of HDS). For maximum usefulness, it should match the -// same cluster structure as that provided by EDS. -message ClusterHealthCheck { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.discovery.v2.ClusterHealthCheck"; - - string cluster_name = 1; - - repeated config.core.v3.HealthCheck health_checks = 2; - - repeated LocalityEndpoints locality_endpoints = 3; - - // Optional map that gets filtered by :ref:`health_checks.transport_socket_match_criteria ` - // on connection when health checking. For more details, see - // :ref:`config.cluster.v3.Cluster.transport_socket_matches `. - repeated config.cluster.v3.Cluster.TransportSocketMatch transport_socket_matches = 4; -} - -message HealthCheckSpecifier { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.discovery.v2.HealthCheckSpecifier"; - - repeated ClusterHealthCheck cluster_health_checks = 1; - - // The default is 1 second. - google.protobuf.Duration interval = 2; -} - -// [#not-implemented-hide:] Not configuration. 
Workaround c++ protobuf issue with importing -// services: https://github.com/google/protobuf/issues/4221 and protoxform to upgrade the file. -message HdsDummy { -} diff --git a/generated_api_shadow/envoy/service/listener/v3/BUILD b/generated_api_shadow/envoy/service/listener/v3/BUILD deleted file mode 100644 index d3be4fae57fa4..0000000000000 --- a/generated_api_shadow/envoy/service/listener/v3/BUILD +++ /dev/null @@ -1,15 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - has_services = True, - deps = [ - "//envoy/annotations:pkg", - "//envoy/api/v2:pkg", - "//envoy/service/discovery/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/service/listener/v3/lds.proto b/generated_api_shadow/envoy/service/listener/v3/lds.proto deleted file mode 100644 index 5b8c0d5207258..0000000000000 --- a/generated_api_shadow/envoy/service/listener/v3/lds.proto +++ /dev/null @@ -1,47 +0,0 @@ -syntax = "proto3"; - -package envoy.service.listener.v3; - -import "envoy/service/discovery/v3/discovery.proto"; - -import "google/api/annotations.proto"; - -import "envoy/annotations/resource.proto"; -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; - -option java_package = "io.envoyproxy.envoy.service.listener.v3"; -option java_outer_classname = "LdsProto"; -option java_multiple_files = true; -option java_generic_services = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Listener] -// Listener :ref:`configuration overview ` - -// The Envoy instance initiates an RPC at startup to discover a list of -// listeners. Updates are delivered via streaming from the LDS server and -// consist of a complete update of all listeners. 
Existing connections will be -// allowed to drain from listeners that are no longer present. -service ListenerDiscoveryService { - option (envoy.annotations.resource).type = "envoy.config.listener.v3.Listener"; - - rpc DeltaListeners(stream discovery.v3.DeltaDiscoveryRequest) - returns (stream discovery.v3.DeltaDiscoveryResponse) { - } - - rpc StreamListeners(stream discovery.v3.DiscoveryRequest) - returns (stream discovery.v3.DiscoveryResponse) { - } - - rpc FetchListeners(discovery.v3.DiscoveryRequest) returns (discovery.v3.DiscoveryResponse) { - option (google.api.http).post = "/v3/discovery:listeners"; - option (google.api.http).body = "*"; - } -} - -// [#not-implemented-hide:] Not configuration. Workaround c++ protobuf issue with importing -// services: https://github.com/google/protobuf/issues/4221 and protoxform to upgrade the file. -message LdsDummy { - option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.LdsDummy"; -} diff --git a/generated_api_shadow/envoy/service/load_stats/v2/BUILD b/generated_api_shadow/envoy/service/load_stats/v2/BUILD deleted file mode 100644 index 1263251505f6b..0000000000000 --- a/generated_api_shadow/envoy/service/load_stats/v2/BUILD +++ /dev/null @@ -1,14 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - has_services = True, - deps = [ - "//envoy/api/v2/core:pkg", - "//envoy/api/v2/endpoint:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/service/load_stats/v2/lrs.proto b/generated_api_shadow/envoy/service/load_stats/v2/lrs.proto deleted file mode 100644 index 7ab87c2dfb04f..0000000000000 --- a/generated_api_shadow/envoy/service/load_stats/v2/lrs.proto +++ /dev/null @@ -1,88 +0,0 @@ -syntax = "proto3"; - -package envoy.service.load_stats.v2; - -import "envoy/api/v2/core/base.proto"; -import "envoy/api/v2/endpoint/load_report.proto"; - -import "google/protobuf/duration.proto"; - -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.service.load_stats.v2"; -option java_outer_classname = "LrsProto"; -option java_multiple_files = true; -option java_generic_services = true; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Load reporting service] - -service LoadReportingService { - // Advanced API to allow for multi-dimensional load balancing by remote - // server. For receiving LB assignments, the steps are: - // 1, The management server is configured with per cluster/zone/load metric - // capacity configuration. The capacity configuration definition is - // outside of the scope of this document. - // 2. Envoy issues a standard {Stream,Fetch}Endpoints request for the clusters - // to balance. - // - // Independently, Envoy will initiate a StreamLoadStats bidi stream with a - // management server: - // 1. Once a connection establishes, the management server publishes a - // LoadStatsResponse for all clusters it is interested in learning load - // stats about. - // 2. 
For each cluster, Envoy load balances incoming traffic to upstream hosts - // based on per-zone weights and/or per-instance weights (if specified) - // based on intra-zone LbPolicy. This information comes from the above - // {Stream,Fetch}Endpoints. - // 3. When upstream hosts reply, they optionally add header with ASCII representation of EndpointLoadMetricStats. - // 4. Envoy aggregates load reports over the period of time given to it in - // LoadStatsResponse.load_reporting_interval. This includes aggregation - // stats Envoy maintains by itself (total_requests, rpc_errors etc.) as - // well as load metrics from upstream hosts. - // 5. When the timer of load_reporting_interval expires, Envoy sends new - // LoadStatsRequest filled with load reports for each cluster. - // 6. The management server uses the load reports from all reported Envoys - // from around the world, computes global assignment and prepares traffic - // assignment destined for each zone Envoys are located in. Goto 2. - rpc StreamLoadStats(stream LoadStatsRequest) returns (stream LoadStatsResponse) { - } -} - -// A load report Envoy sends to the management server. -// [#not-implemented-hide:] Not configuration. TBD how to doc proto APIs. -message LoadStatsRequest { - // Node identifier for Envoy instance. - api.v2.core.Node node = 1; - - // A list of load stats to report. - repeated api.v2.endpoint.ClusterStats cluster_stats = 2; -} - -// The management server sends envoy a LoadStatsResponse with all clusters it -// is interested in learning load stats about. -// [#not-implemented-hide:] Not configuration. TBD how to doc proto APIs. -message LoadStatsResponse { - // Clusters to report stats for. - // Not populated if *send_all_clusters* is true. - repeated string clusters = 1; - - // If true, the client should send all clusters it knows about. - // Only clients that advertise the "envoy.lrs.supports_send_all_clusters" capability in their - // :ref:`client_features` field will honor this field. 
- bool send_all_clusters = 4; - - // The minimum interval of time to collect stats over. This is only a minimum for two reasons: - // 1. There may be some delay from when the timer fires until stats sampling occurs. - // 2. For clusters that were already feature in the previous *LoadStatsResponse*, any traffic - // that is observed in between the corresponding previous *LoadStatsRequest* and this - // *LoadStatsResponse* will also be accumulated and billed to the cluster. This avoids a period - // of inobservability that might otherwise exists between the messages. New clusters are not - // subject to this consideration. - google.protobuf.Duration load_reporting_interval = 2; - - // Set to *true* if the management server supports endpoint granularity - // report. - bool report_endpoint_granularity = 3; -} diff --git a/generated_api_shadow/envoy/service/load_stats/v3/BUILD b/generated_api_shadow/envoy/service/load_stats/v3/BUILD deleted file mode 100644 index d69e005bae22f..0000000000000 --- a/generated_api_shadow/envoy/service/load_stats/v3/BUILD +++ /dev/null @@ -1,15 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - has_services = True, - deps = [ - "//envoy/config/core/v3:pkg", - "//envoy/config/endpoint/v3:pkg", - "//envoy/service/load_stats/v2:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/service/load_stats/v3/lrs.proto b/generated_api_shadow/envoy/service/load_stats/v3/lrs.proto deleted file mode 100644 index 0b565ebe72368..0000000000000 --- a/generated_api_shadow/envoy/service/load_stats/v3/lrs.proto +++ /dev/null @@ -1,102 +0,0 @@ -syntax = "proto3"; - -package envoy.service.load_stats.v3; - -import "envoy/config/core/v3/base.proto"; -import "envoy/config/endpoint/v3/load_report.proto"; - -import "google/protobuf/duration.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; - -option java_package = "io.envoyproxy.envoy.service.load_stats.v3"; -option java_outer_classname = "LrsProto"; -option java_multiple_files = true; -option java_generic_services = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Load Reporting service (LRS)] - -// Load Reporting Service is an Envoy API to emit load reports. Envoy will initiate a bi-directional -// stream with a management server. Upon connecting, the management server can send a -// :ref:`LoadStatsResponse ` to a node it is -// interested in getting the load reports for. Envoy in this node will start sending -// :ref:`LoadStatsRequest `. This is done periodically -// based on the :ref:`load reporting interval ` -// For details, take a look at the :ref:`Load Reporting Service sandbox example `. - -service LoadReportingService { - // Advanced API to allow for multi-dimensional load balancing by remote - // server. 
For receiving LB assignments, the steps are: - // 1, The management server is configured with per cluster/zone/load metric - // capacity configuration. The capacity configuration definition is - // outside of the scope of this document. - // 2. Envoy issues a standard {Stream,Fetch}Endpoints request for the clusters - // to balance. - // - // Independently, Envoy will initiate a StreamLoadStats bidi stream with a - // management server: - // 1. Once a connection establishes, the management server publishes a - // LoadStatsResponse for all clusters it is interested in learning load - // stats about. - // 2. For each cluster, Envoy load balances incoming traffic to upstream hosts - // based on per-zone weights and/or per-instance weights (if specified) - // based on intra-zone LbPolicy. This information comes from the above - // {Stream,Fetch}Endpoints. - // 3. When upstream hosts reply, they optionally add header with ASCII representation of EndpointLoadMetricStats. - // 4. Envoy aggregates load reports over the period of time given to it in - // LoadStatsResponse.load_reporting_interval. This includes aggregation - // stats Envoy maintains by itself (total_requests, rpc_errors etc.) as - // well as load metrics from upstream hosts. - // 5. When the timer of load_reporting_interval expires, Envoy sends new - // LoadStatsRequest filled with load reports for each cluster. - // 6. The management server uses the load reports from all reported Envoys - // from around the world, computes global assignment and prepares traffic - // assignment destined for each zone Envoys are located in. Goto 2. - rpc StreamLoadStats(stream LoadStatsRequest) returns (stream LoadStatsResponse) { - } -} - -// A load report Envoy sends to the management server. -message LoadStatsRequest { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.load_stats.v2.LoadStatsRequest"; - - // Node identifier for Envoy instance. 
- config.core.v3.Node node = 1; - - // A list of load stats to report. - repeated config.endpoint.v3.ClusterStats cluster_stats = 2; -} - -// The management server sends envoy a LoadStatsResponse with all clusters it -// is interested in learning load stats about. -message LoadStatsResponse { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.load_stats.v2.LoadStatsResponse"; - - // Clusters to report stats for. - // Not populated if *send_all_clusters* is true. - repeated string clusters = 1; - - // If true, the client should send all clusters it knows about. - // Only clients that advertise the "envoy.lrs.supports_send_all_clusters" capability in their - // :ref:`client_features` field will honor this field. - bool send_all_clusters = 4; - - // The minimum interval of time to collect stats over. This is only a minimum for two reasons: - // - // 1. There may be some delay from when the timer fires until stats sampling occurs. - // 2. For clusters that were already feature in the previous *LoadStatsResponse*, any traffic - // that is observed in between the corresponding previous *LoadStatsRequest* and this - // *LoadStatsResponse* will also be accumulated and billed to the cluster. This avoids a period - // of inobservability that might otherwise exists between the messages. New clusters are not - // subject to this consideration. - google.protobuf.Duration load_reporting_interval = 2; - - // Set to *true* if the management server supports endpoint granularity - // report. - bool report_endpoint_granularity = 3; -} diff --git a/generated_api_shadow/envoy/service/metrics/v2/BUILD b/generated_api_shadow/envoy/service/metrics/v2/BUILD deleted file mode 100644 index 79fc6928c032a..0000000000000 --- a/generated_api_shadow/envoy/service/metrics/v2/BUILD +++ /dev/null @@ -1,14 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - has_services = True, - deps = [ - "//envoy/api/v2/core:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - "@prometheus_metrics_model//:client_model", - ], -) diff --git a/generated_api_shadow/envoy/service/metrics/v2/metrics_service.proto b/generated_api_shadow/envoy/service/metrics/v2/metrics_service.proto deleted file mode 100644 index 78d6e47e20ab1..0000000000000 --- a/generated_api_shadow/envoy/service/metrics/v2/metrics_service.proto +++ /dev/null @@ -1,44 +0,0 @@ -syntax = "proto3"; - -package envoy.service.metrics.v2; - -import "envoy/api/v2/core/base.proto"; - -import "io/prometheus/client/metrics.proto"; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.service.metrics.v2"; -option java_outer_classname = "MetricsServiceProto"; -option java_multiple_files = true; -option java_generic_services = true; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Metrics service] - -// Service for streaming metrics to server that consumes the metrics data. It uses Prometheus metric -// data model as a standard to represent metrics information. -service MetricsService { - // Envoy will connect and send StreamMetricsMessage messages forever. It does not expect any - // response to be sent as nothing would be done in the case of failure. - rpc StreamMetrics(stream StreamMetricsMessage) returns (StreamMetricsResponse) { - } -} - -message StreamMetricsResponse { -} - -message StreamMetricsMessage { - message Identifier { - // The node sending metrics over the stream. - api.v2.core.Node node = 1 [(validate.rules).message = {required: true}]; - } - - // Identifier data effectively is a structured metadata. As a performance optimization this will - // only be sent in the first message on the stream. 
- Identifier identifier = 1; - - // A list of metric entries - repeated io.prometheus.client.MetricFamily envoy_metrics = 2; -} diff --git a/generated_api_shadow/envoy/service/metrics/v3/BUILD b/generated_api_shadow/envoy/service/metrics/v3/BUILD deleted file mode 100644 index b266dfc5558d2..0000000000000 --- a/generated_api_shadow/envoy/service/metrics/v3/BUILD +++ /dev/null @@ -1,15 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - has_services = True, - deps = [ - "//envoy/config/core/v3:pkg", - "//envoy/service/metrics/v2:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - "@prometheus_metrics_model//:client_model", - ], -) diff --git a/generated_api_shadow/envoy/service/metrics/v3/metrics_service.proto b/generated_api_shadow/envoy/service/metrics/v3/metrics_service.proto deleted file mode 100644 index e86bda356f7d2..0000000000000 --- a/generated_api_shadow/envoy/service/metrics/v3/metrics_service.proto +++ /dev/null @@ -1,53 +0,0 @@ -syntax = "proto3"; - -package envoy.service.metrics.v3; - -import "envoy/config/core/v3/base.proto"; - -import "io/prometheus/client/metrics.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.service.metrics.v3"; -option java_outer_classname = "MetricsServiceProto"; -option java_multiple_files = true; -option java_generic_services = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Metrics service] - -// Service for streaming metrics to server that consumes the metrics data. It uses Prometheus metric -// data model as a standard to represent metrics information. -service MetricsService { - // Envoy will connect and send StreamMetricsMessage messages forever. 
It does not expect any - // response to be sent as nothing would be done in the case of failure. - rpc StreamMetrics(stream StreamMetricsMessage) returns (StreamMetricsResponse) { - } -} - -message StreamMetricsResponse { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.metrics.v2.StreamMetricsResponse"; -} - -message StreamMetricsMessage { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.metrics.v2.StreamMetricsMessage"; - - message Identifier { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.metrics.v2.StreamMetricsMessage.Identifier"; - - // The node sending metrics over the stream. - config.core.v3.Node node = 1 [(validate.rules).message = {required: true}]; - } - - // Identifier data effectively is a structured metadata. As a performance optimization this will - // only be sent in the first message on the stream. - Identifier identifier = 1; - - // A list of metric entries - repeated io.prometheus.client.MetricFamily envoy_metrics = 2; -} diff --git a/generated_api_shadow/envoy/service/ratelimit/v2/BUILD b/generated_api_shadow/envoy/service/ratelimit/v2/BUILD deleted file mode 100644 index eedc3e62b3b20..0000000000000 --- a/generated_api_shadow/envoy/service/ratelimit/v2/BUILD +++ /dev/null @@ -1,14 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - has_services = True, - deps = [ - "//envoy/api/v2/core:pkg", - "//envoy/api/v2/ratelimit:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/service/ratelimit/v2/rls.proto b/generated_api_shadow/envoy/service/ratelimit/v2/rls.proto deleted file mode 100644 index cee8cd7bc3d5d..0000000000000 --- a/generated_api_shadow/envoy/service/ratelimit/v2/rls.proto +++ /dev/null @@ -1,115 +0,0 @@ -syntax = "proto3"; - -package envoy.service.ratelimit.v2; - -import "envoy/api/v2/core/base.proto"; -import "envoy/api/v2/ratelimit/ratelimit.proto"; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.service.ratelimit.v2"; -option java_outer_classname = "RlsProto"; -option java_multiple_files = true; -option java_generic_services = true; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Rate Limit Service (RLS)] - -service RateLimitService { - // Determine whether rate limiting should take place. - rpc ShouldRateLimit(RateLimitRequest) returns (RateLimitResponse) { - } -} - -// Main message for a rate limit request. The rate limit service is designed to be fully generic -// in the sense that it can operate on arbitrary hierarchical key/value pairs. The loaded -// configuration will parse the request and find the most specific limit to apply. In addition, -// a RateLimitRequest can contain multiple "descriptors" to limit on. When multiple descriptors -// are provided, the server will limit on *ALL* of them and return an OVER_LIMIT response if any -// of them are over limit. This enables more complex application level rate limiting scenarios -// if desired. -message RateLimitRequest { - // All rate limit requests must specify a domain. 
This enables the configuration to be per - // application without fear of overlap. E.g., "envoy". - string domain = 1; - - // All rate limit requests must specify at least one RateLimitDescriptor. Each descriptor is - // processed by the service (see below). If any of the descriptors are over limit, the entire - // request is considered to be over limit. - repeated api.v2.ratelimit.RateLimitDescriptor descriptors = 2; - - // Rate limit requests can optionally specify the number of hits a request adds to the matched - // limit. If the value is not set in the message, a request increases the matched limit by 1. - uint32 hits_addend = 3; -} - -// A response from a ShouldRateLimit call. -message RateLimitResponse { - enum Code { - // The response code is not known. - UNKNOWN = 0; - - // The response code to notify that the number of requests are under limit. - OK = 1; - - // The response code to notify that the number of requests are over limit. - OVER_LIMIT = 2; - } - - // Defines an actual rate limit in terms of requests per unit of time and the unit itself. - message RateLimit { - enum Unit { - // The time unit is not known. - UNKNOWN = 0; - - // The time unit representing a second. - SECOND = 1; - - // The time unit representing a minute. - MINUTE = 2; - - // The time unit representing an hour. - HOUR = 3; - - // The time unit representing a day. - DAY = 4; - } - - // A name or description of this limit. - string name = 3; - - // The number of requests per unit of time. - uint32 requests_per_unit = 1; - - // The unit of time. - Unit unit = 2; - } - - message DescriptorStatus { - // The response code for an individual descriptor. - Code code = 1; - - // The current limit as configured by the server. Useful for debugging, etc. - RateLimit current_limit = 2; - - // The limit remaining in the current time unit. 
- uint32 limit_remaining = 3; - } - - // The overall response code which takes into account all of the descriptors that were passed - // in the RateLimitRequest message. - Code overall_code = 1; - - // A list of DescriptorStatus messages which matches the length of the descriptor list passed - // in the RateLimitRequest. This can be used by the caller to determine which individual - // descriptors failed and/or what the currently configured limits are for all of them. - repeated DescriptorStatus statuses = 2; - - // A list of headers to add to the response - repeated api.v2.core.HeaderValue headers = 3 - [(udpa.annotations.field_migrate).rename = "response_headers_to_add"]; - - // A list of headers to add to the request when forwarded - repeated api.v2.core.HeaderValue request_headers_to_add = 4; -} diff --git a/generated_api_shadow/envoy/service/ratelimit/v3/BUILD b/generated_api_shadow/envoy/service/ratelimit/v3/BUILD deleted file mode 100644 index 222b9ac522924..0000000000000 --- a/generated_api_shadow/envoy/service/ratelimit/v3/BUILD +++ /dev/null @@ -1,15 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - has_services = True, - deps = [ - "//envoy/config/core/v3:pkg", - "//envoy/extensions/common/ratelimit/v3:pkg", - "//envoy/service/ratelimit/v2:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/service/ratelimit/v3/rls.proto b/generated_api_shadow/envoy/service/ratelimit/v3/rls.proto deleted file mode 100644 index ab8e0ffc0eba7..0000000000000 --- a/generated_api_shadow/envoy/service/ratelimit/v3/rls.proto +++ /dev/null @@ -1,196 +0,0 @@ -syntax = "proto3"; - -package envoy.service.ratelimit.v3; - -import "envoy/config/core/v3/base.proto"; -import "envoy/extensions/common/ratelimit/v3/ratelimit.proto"; - -import "google/protobuf/duration.proto"; -import "google/protobuf/struct.proto"; -import "google/protobuf/timestamp.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.service.ratelimit.v3"; -option java_outer_classname = "RlsProto"; -option java_multiple_files = true; -option java_generic_services = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Rate Limit Service (RLS)] - -service RateLimitService { - // Determine whether rate limiting should take place. - rpc ShouldRateLimit(RateLimitRequest) returns (RateLimitResponse) { - } -} - -// Main message for a rate limit request. The rate limit service is designed to be fully generic -// in the sense that it can operate on arbitrary hierarchical key/value pairs. The loaded -// configuration will parse the request and find the most specific limit to apply. In addition, -// a RateLimitRequest can contain multiple "descriptors" to limit on. 
When multiple descriptors -// are provided, the server will limit on *ALL* of them and return an OVER_LIMIT response if any -// of them are over limit. This enables more complex application level rate limiting scenarios -// if desired. -message RateLimitRequest { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.ratelimit.v2.RateLimitRequest"; - - // All rate limit requests must specify a domain. This enables the configuration to be per - // application without fear of overlap. E.g., "envoy". - string domain = 1; - - // All rate limit requests must specify at least one RateLimitDescriptor. Each descriptor is - // processed by the service (see below). If any of the descriptors are over limit, the entire - // request is considered to be over limit. - repeated envoy.extensions.common.ratelimit.v3.RateLimitDescriptor descriptors = 2; - - // Rate limit requests can optionally specify the number of hits a request adds to the matched - // limit. If the value is not set in the message, a request increases the matched limit by 1. - uint32 hits_addend = 3; -} - -// A response from a ShouldRateLimit call. -// [#next-free-field: 7] -message RateLimitResponse { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.ratelimit.v2.RateLimitResponse"; - - enum Code { - // The response code is not known. - UNKNOWN = 0; - - // The response code to notify that the number of requests are under limit. - OK = 1; - - // The response code to notify that the number of requests are over limit. - OVER_LIMIT = 2; - } - - // Defines an actual rate limit in terms of requests per unit of time and the unit itself. - message RateLimit { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.ratelimit.v2.RateLimitResponse.RateLimit"; - - // Identifies the unit of of time for rate limit. - // [#comment: replace by envoy/type/v3/ratelimit_unit.proto in v4] - enum Unit { - // The time unit is not known. 
- UNKNOWN = 0; - - // The time unit representing a second. - SECOND = 1; - - // The time unit representing a minute. - MINUTE = 2; - - // The time unit representing an hour. - HOUR = 3; - - // The time unit representing a day. - DAY = 4; - } - - // A name or description of this limit. - string name = 3; - - // The number of requests per unit of time. - uint32 requests_per_unit = 1; - - // The unit of time. - Unit unit = 2; - } - - // Cacheable quota for responses, see documentation for the :ref:`quota - // ` field. - // [#not-implemented-hide:] - message Quota { - // Number of matching requests granted in quota. Must be 1 or more. - uint32 requests = 1 [(validate.rules).uint32 = {gt: 0}]; - - oneof expiration_specifier { - // Point in time at which the quota expires. - google.protobuf.Timestamp valid_until = 2; - } - } - - // [#next-free-field: 6] - message DescriptorStatus { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.ratelimit.v2.RateLimitResponse.DescriptorStatus"; - - // The response code for an individual descriptor. - Code code = 1; - - // The current limit as configured by the server. Useful for debugging, etc. - RateLimit current_limit = 2; - - // The limit remaining in the current time unit. - uint32 limit_remaining = 3; - - // Duration until reset of the current limit window. - google.protobuf.Duration duration_until_reset = 4; - - // Quota granted for the descriptor. This is a certain number of requests over a period of time. - // The client may cache this result and apply the effective RateLimitResponse to future matching - // requests containing a matching descriptor without querying rate limit service. - // - // Quota is available for a request if its descriptor set has cached quota available for all - // descriptors. - // - // If quota is available, a RLS request will not be made and the quota will be reduced by 1 for - // all matching descriptors. 
- // - // If there is not sufficient quota, there are three cases: - // 1. A cached entry exists for a RLS descriptor that is out-of-quota, but not expired. - // In this case, the request will be treated as OVER_LIMIT. - // 2. Some RLS descriptors have a cached entry that has valid quota but some RLS descriptors - // have no cached entry. This will trigger a new RLS request. - // When the result is returned, a single unit will be consumed from the quota for all - // matching descriptors. - // If the server did not provide a quota, such as the quota message is empty for some of - // the descriptors, then the request admission is determined by the - // :ref:`overall_code `. - // 3. All RLS descriptors lack a cached entry, this will trigger a new RLS request, - // When the result is returned, a single unit will be consumed from the quota for all - // matching descriptors. - // If the server did not provide a quota, such as the quota message is empty for some of - // the descriptors, then the request admission is determined by the - // :ref:`overall_code `. - // - // When quota expires due to timeout, a new RLS request will also be made. - // The implementation may choose to preemptively query the rate limit server for more quota on or - // before expiration or before the available quota runs out. - // [#not-implemented-hide:] - Quota quota = 5; - } - - // The overall response code which takes into account all of the descriptors that were passed - // in the RateLimitRequest message. - Code overall_code = 1; - - // A list of DescriptorStatus messages which matches the length of the descriptor list passed - // in the RateLimitRequest. This can be used by the caller to determine which individual - // descriptors failed and/or what the currently configured limits are for all of them. 
- repeated DescriptorStatus statuses = 2; - - // A list of headers to add to the response - repeated config.core.v3.HeaderValue response_headers_to_add = 3; - - // A list of headers to add to the request when forwarded - repeated config.core.v3.HeaderValue request_headers_to_add = 4; - - // A response body to send to the downstream client when the response code is not OK. - bytes raw_body = 5; - - // Optional response metadata that will be emitted as dynamic metadata to be consumed by the next - // filter. This metadata lives in a namespace specified by the canonical name of extension filter - // that requires it: - // - // - :ref:`envoy.filters.http.ratelimit ` for HTTP filter. - // - :ref:`envoy.filters.network.ratelimit ` for network filter. - // - :ref:`envoy.filters.thrift.rate_limit ` for Thrift filter. - google.protobuf.Struct dynamic_metadata = 6; -} diff --git a/generated_api_shadow/envoy/service/route/v3/BUILD b/generated_api_shadow/envoy/service/route/v3/BUILD deleted file mode 100644 index d3be4fae57fa4..0000000000000 --- a/generated_api_shadow/envoy/service/route/v3/BUILD +++ /dev/null @@ -1,15 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - has_services = True, - deps = [ - "//envoy/annotations:pkg", - "//envoy/api/v2:pkg", - "//envoy/service/discovery/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/service/route/v3/rds.proto b/generated_api_shadow/envoy/service/route/v3/rds.proto deleted file mode 100644 index 62a7da4094936..0000000000000 --- a/generated_api_shadow/envoy/service/route/v3/rds.proto +++ /dev/null @@ -1,65 +0,0 @@ -syntax = "proto3"; - -package envoy.service.route.v3; - -import "envoy/service/discovery/v3/discovery.proto"; - -import "google/api/annotations.proto"; - -import "envoy/annotations/resource.proto"; -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; - -option java_package = "io.envoyproxy.envoy.service.route.v3"; -option java_outer_classname = "RdsProto"; -option java_multiple_files = true; -option java_generic_services = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: RDS] - -// The resource_names field in DiscoveryRequest specifies a route configuration. -// This allows an Envoy configuration with multiple HTTP listeners (and -// associated HTTP connection manager filters) to use different route -// configurations. Each listener will bind its HTTP connection manager filter to -// a route table via this identifier. 
-service RouteDiscoveryService { - option (envoy.annotations.resource).type = "envoy.config.route.v3.RouteConfiguration"; - - rpc StreamRoutes(stream discovery.v3.DiscoveryRequest) - returns (stream discovery.v3.DiscoveryResponse) { - } - - rpc DeltaRoutes(stream discovery.v3.DeltaDiscoveryRequest) - returns (stream discovery.v3.DeltaDiscoveryResponse) { - } - - rpc FetchRoutes(discovery.v3.DiscoveryRequest) returns (discovery.v3.DiscoveryResponse) { - option (google.api.http).post = "/v3/discovery:routes"; - option (google.api.http).body = "*"; - } -} - -// Virtual Host Discovery Service (VHDS) is used to dynamically update the list of virtual hosts for -// a given RouteConfiguration. If VHDS is configured a virtual host list update will be triggered -// during the processing of an HTTP request if a route for the request cannot be resolved. The -// :ref:`resource_names_subscribe ` -// field contains a list of virtual host names or aliases to track. The contents of an alias would -// be the contents of a *host* or *authority* header used to make an http request. An xDS server -// will match an alias to a virtual host based on the content of :ref:`domains' -// ` field. The *resource_names_unsubscribe* field -// contains a list of virtual host names that have been :ref:`unsubscribed -// ` from the routing table associated with the RouteConfiguration. -service VirtualHostDiscoveryService { - option (envoy.annotations.resource).type = "envoy.config.route.v3.VirtualHost"; - - rpc DeltaVirtualHosts(stream discovery.v3.DeltaDiscoveryRequest) - returns (stream discovery.v3.DeltaDiscoveryResponse) { - } -} - -// [#not-implemented-hide:] Not configuration. Workaround c++ protobuf issue with importing -// services: https://github.com/google/protobuf/issues/4221 and protoxform to upgrade the file. 
-message RdsDummy { - option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.RdsDummy"; -} diff --git a/generated_api_shadow/envoy/service/route/v3/srds.proto b/generated_api_shadow/envoy/service/route/v3/srds.proto deleted file mode 100644 index 64fe45fee1fab..0000000000000 --- a/generated_api_shadow/envoy/service/route/v3/srds.proto +++ /dev/null @@ -1,50 +0,0 @@ -syntax = "proto3"; - -package envoy.service.route.v3; - -import "envoy/service/discovery/v3/discovery.proto"; - -import "google/api/annotations.proto"; - -import "envoy/annotations/resource.proto"; -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; - -option java_package = "io.envoyproxy.envoy.service.route.v3"; -option java_outer_classname = "SrdsProto"; -option java_multiple_files = true; -option java_generic_services = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: SRDS] -// * Routing :ref:`architecture overview ` - -// The Scoped Routes Discovery Service (SRDS) API distributes -// :ref:`ScopedRouteConfiguration` -// resources. Each ScopedRouteConfiguration resource represents a "routing -// scope" containing a mapping that allows the HTTP connection manager to -// dynamically assign a routing table (specified via a -// :ref:`RouteConfiguration` message) to each -// HTTP request. 
-service ScopedRoutesDiscoveryService { - option (envoy.annotations.resource).type = "envoy.config.route.v3.ScopedRouteConfiguration"; - - rpc StreamScopedRoutes(stream discovery.v3.DiscoveryRequest) - returns (stream discovery.v3.DiscoveryResponse) { - } - - rpc DeltaScopedRoutes(stream discovery.v3.DeltaDiscoveryRequest) - returns (stream discovery.v3.DeltaDiscoveryResponse) { - } - - rpc FetchScopedRoutes(discovery.v3.DiscoveryRequest) returns (discovery.v3.DiscoveryResponse) { - option (google.api.http).post = "/v3/discovery:scoped-routes"; - option (google.api.http).body = "*"; - } -} - -// [#not-implemented-hide:] Not configuration. Workaround c++ protobuf issue with importing -// services: https://github.com/google/protobuf/issues/4221 and protoxform to upgrade the file. -message SrdsDummy { - option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.SrdsDummy"; -} diff --git a/generated_api_shadow/envoy/service/runtime/v3/BUILD b/generated_api_shadow/envoy/service/runtime/v3/BUILD deleted file mode 100644 index fb6a1656ca9bf..0000000000000 --- a/generated_api_shadow/envoy/service/runtime/v3/BUILD +++ /dev/null @@ -1,15 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - has_services = True, - deps = [ - "//envoy/annotations:pkg", - "//envoy/service/discovery/v2:pkg", - "//envoy/service/discovery/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/service/runtime/v3/rtds.proto b/generated_api_shadow/envoy/service/runtime/v3/rtds.proto deleted file mode 100644 index 796b6fac24e67..0000000000000 --- a/generated_api_shadow/envoy/service/runtime/v3/rtds.proto +++ /dev/null @@ -1,58 +0,0 @@ -syntax = "proto3"; - -package envoy.service.runtime.v3; - -import "envoy/service/discovery/v3/discovery.proto"; - -import "google/api/annotations.proto"; -import "google/protobuf/struct.proto"; - -import "envoy/annotations/resource.proto"; -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.service.runtime.v3"; -option java_outer_classname = "RtdsProto"; -option java_multiple_files = true; -option java_generic_services = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Runtime Discovery Service (RTDS)] -// RTDS :ref:`configuration overview ` - -// Discovery service for Runtime resources. -service RuntimeDiscoveryService { - option (envoy.annotations.resource).type = "envoy.service.runtime.v3.Runtime"; - - rpc StreamRuntime(stream discovery.v3.DiscoveryRequest) - returns (stream discovery.v3.DiscoveryResponse) { - } - - rpc DeltaRuntime(stream discovery.v3.DeltaDiscoveryRequest) - returns (stream discovery.v3.DeltaDiscoveryResponse) { - } - - rpc FetchRuntime(discovery.v3.DiscoveryRequest) returns (discovery.v3.DiscoveryResponse) { - option (google.api.http).post = "/v3/discovery:runtime"; - option (google.api.http).body = "*"; - } -} - -// [#not-implemented-hide:] Not configuration. 
Workaround c++ protobuf issue with importing -// services: https://github.com/google/protobuf/issues/4221 -message RtdsDummy { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.discovery.v2.RtdsDummy"; -} - -// RTDS resource type. This describes a layer in the runtime virtual filesystem. -message Runtime { - option (udpa.annotations.versioning).previous_message_type = "envoy.service.discovery.v2.Runtime"; - - // Runtime resource name. This makes the Runtime a self-describing xDS - // resource. - string name = 1 [(validate.rules).string = {min_len: 1}]; - - google.protobuf.Struct layer = 2; -} diff --git a/generated_api_shadow/envoy/service/secret/v3/BUILD b/generated_api_shadow/envoy/service/secret/v3/BUILD deleted file mode 100644 index fb6a1656ca9bf..0000000000000 --- a/generated_api_shadow/envoy/service/secret/v3/BUILD +++ /dev/null @@ -1,15 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - has_services = True, - deps = [ - "//envoy/annotations:pkg", - "//envoy/service/discovery/v2:pkg", - "//envoy/service/discovery/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/service/secret/v3/sds.proto b/generated_api_shadow/envoy/service/secret/v3/sds.proto deleted file mode 100644 index 3c9441d7c7608..0000000000000 --- a/generated_api_shadow/envoy/service/secret/v3/sds.proto +++ /dev/null @@ -1,43 +0,0 @@ -syntax = "proto3"; - -package envoy.service.secret.v3; - -import "envoy/service/discovery/v3/discovery.proto"; - -import "google/api/annotations.proto"; - -import "envoy/annotations/resource.proto"; -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; - -option java_package = "io.envoyproxy.envoy.service.secret.v3"; -option java_outer_classname = "SdsProto"; -option java_multiple_files = 
true; -option java_generic_services = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Secret Discovery Service (SDS)] - -service SecretDiscoveryService { - option (envoy.annotations.resource).type = "envoy.extensions.transport_sockets.tls.v3.Secret"; - - rpc DeltaSecrets(stream discovery.v3.DeltaDiscoveryRequest) - returns (stream discovery.v3.DeltaDiscoveryResponse) { - } - - rpc StreamSecrets(stream discovery.v3.DiscoveryRequest) - returns (stream discovery.v3.DiscoveryResponse) { - } - - rpc FetchSecrets(discovery.v3.DiscoveryRequest) returns (discovery.v3.DiscoveryResponse) { - option (google.api.http).post = "/v3/discovery:secrets"; - option (google.api.http).body = "*"; - } -} - -// [#not-implemented-hide:] Not configuration. Workaround c++ protobuf issue with importing -// services: https://github.com/google/protobuf/issues/4221 -message SdsDummy { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.discovery.v2.SdsDummy"; -} diff --git a/generated_api_shadow/envoy/service/status/v2/BUILD b/generated_api_shadow/envoy/service/status/v2/BUILD deleted file mode 100644 index 39c38eb10a7cb..0000000000000 --- a/generated_api_shadow/envoy/service/status/v2/BUILD +++ /dev/null @@ -1,15 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - has_services = True, - deps = [ - "//envoy/admin/v2alpha:pkg", - "//envoy/api/v2/core:pkg", - "//envoy/type/matcher:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/service/status/v2/csds.proto b/generated_api_shadow/envoy/service/status/v2/csds.proto deleted file mode 100644 index 10f603cedb15a..0000000000000 --- a/generated_api_shadow/envoy/service/status/v2/csds.proto +++ /dev/null @@ -1,88 +0,0 @@ -syntax = "proto3"; - -package envoy.service.status.v2; - -import "envoy/admin/v2alpha/config_dump.proto"; -import "envoy/api/v2/core/base.proto"; -import "envoy/type/matcher/node.proto"; - -import "google/api/annotations.proto"; - -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.service.status.v2"; -option java_outer_classname = "CsdsProto"; -option java_multiple_files = true; -option java_generic_services = true; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Client Status Discovery Service (CSDS)] - -// CSDS is Client Status Discovery Service. It can be used to get the status of -// an xDS-compliant client from the management server's point of view. In the -// future, it can potentially be used as an interface to get the current -// state directly from the client. -service ClientStatusDiscoveryService { - rpc StreamClientStatus(stream ClientStatusRequest) returns (stream ClientStatusResponse) { - } - - rpc FetchClientStatus(ClientStatusRequest) returns (ClientStatusResponse) { - option (google.api.http).post = "/v2/discovery:client_status"; - option (google.api.http).body = "*"; - } -} - -// Status of a config. -enum ConfigStatus { - // Status info is not available/unknown. - UNKNOWN = 0; - - // Management server has sent the config to client and received ACK. 
- SYNCED = 1; - - // Config is not sent. - NOT_SENT = 2; - - // Management server has sent the config to client but hasn’t received - // ACK/NACK. - STALE = 3; - - // Management server has sent the config to client but received NACK. - ERROR = 4; -} - -// Request for client status of clients identified by a list of NodeMatchers. -message ClientStatusRequest { - // Management server can use these match criteria to identify clients. - // The match follows OR semantics. - repeated type.matcher.NodeMatcher node_matchers = 1; -} - -// Detailed config (per xDS) with status. -// [#next-free-field: 6] -message PerXdsConfig { - ConfigStatus status = 1; - - oneof per_xds_config { - admin.v2alpha.ListenersConfigDump listener_config = 2; - - admin.v2alpha.ClustersConfigDump cluster_config = 3; - - admin.v2alpha.RoutesConfigDump route_config = 4; - - admin.v2alpha.ScopedRoutesConfigDump scoped_route_config = 5; - } -} - -// All xds configs for a particular client. -message ClientConfig { - // Node for a particular client. - api.v2.core.Node node = 1; - - repeated PerXdsConfig xds_config = 2; -} - -message ClientStatusResponse { - // Client configs for the clients specified in the ClientStatusRequest. - repeated ClientConfig config = 1; -} diff --git a/generated_api_shadow/envoy/service/status/v3/BUILD b/generated_api_shadow/envoy/service/status/v3/BUILD deleted file mode 100644 index a73963967ef77..0000000000000 --- a/generated_api_shadow/envoy/service/status/v3/BUILD +++ /dev/null @@ -1,17 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - has_services = True, - deps = [ - "//envoy/admin/v3:pkg", - "//envoy/annotations:pkg", - "//envoy/config/core/v3:pkg", - "//envoy/service/status/v2:pkg", - "//envoy/type/matcher/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/service/status/v3/csds.proto b/generated_api_shadow/envoy/service/status/v3/csds.proto deleted file mode 100644 index 1d940d6a2dfe1..0000000000000 --- a/generated_api_shadow/envoy/service/status/v3/csds.proto +++ /dev/null @@ -1,191 +0,0 @@ -syntax = "proto3"; - -package envoy.service.status.v3; - -import "envoy/admin/v3/config_dump.proto"; -import "envoy/config/core/v3/base.proto"; -import "envoy/type/matcher/v3/node.proto"; - -import "google/api/annotations.proto"; -import "google/protobuf/any.proto"; -import "google/protobuf/timestamp.proto"; - -import "envoy/annotations/deprecation.proto"; -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; - -option java_package = "io.envoyproxy.envoy.service.status.v3"; -option java_outer_classname = "CsdsProto"; -option java_multiple_files = true; -option java_generic_services = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Client Status Discovery Service (CSDS)] - -// CSDS is Client Status Discovery Service. It can be used to get the status of -// an xDS-compliant client from the management server's point of view. It can -// also be used to get the current xDS states directly from the client. 
-service ClientStatusDiscoveryService { - rpc StreamClientStatus(stream ClientStatusRequest) returns (stream ClientStatusResponse) { - } - - rpc FetchClientStatus(ClientStatusRequest) returns (ClientStatusResponse) { - option (google.api.http).post = "/v3/discovery:client_status"; - option (google.api.http).body = "*"; - } -} - -// Status of a config from a management server view. -enum ConfigStatus { - // Status info is not available/unknown. - UNKNOWN = 0; - - // Management server has sent the config to client and received ACK. - SYNCED = 1; - - // Config is not sent. - NOT_SENT = 2; - - // Management server has sent the config to client but hasn’t received - // ACK/NACK. - STALE = 3; - - // Management server has sent the config to client but received NACK. The - // attached config dump will be the latest config (the rejected one), since - // it is the persisted version in the management server. - ERROR = 4; -} - -// Config status from a client-side view. -enum ClientConfigStatus { - // Config status is not available/unknown. - CLIENT_UNKNOWN = 0; - - // Client requested the config but hasn't received any config from management - // server yet. - CLIENT_REQUESTED = 1; - - // Client received the config and replied with ACK. - CLIENT_ACKED = 2; - - // Client received the config and replied with NACK. Notably, the attached - // config dump is not the NACKed version, but the most recent accepted one. If - // no config is accepted yet, the attached config dump will be empty. - CLIENT_NACKED = 3; -} - -// Request for client status of clients identified by a list of NodeMatchers. -message ClientStatusRequest { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.status.v2.ClientStatusRequest"; - - // Management server can use these match criteria to identify clients. - // The match follows OR semantics. - repeated type.matcher.v3.NodeMatcher node_matchers = 1; - - // The node making the csds request. 
- config.core.v3.Node node = 2; -} - -// Detailed config (per xDS) with status. -// [#next-free-field: 8] -message PerXdsConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.status.v2.PerXdsConfig"; - - // Config status generated by management servers. Will not be present if the - // CSDS server is an xDS client. - ConfigStatus status = 1; - - // Client config status is populated by xDS clients. Will not be present if - // the CSDS server is an xDS server. No matter what the client config status - // is, xDS clients should always dump the most recent accepted xDS config. - // - // .. attention:: - // This field is deprecated. Use :ref:`ClientResourceStatus - // ` for per-resource - // config status instead. - ClientConfigStatus client_status = 7 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - - oneof per_xds_config { - admin.v3.ListenersConfigDump listener_config = 2; - - admin.v3.ClustersConfigDump cluster_config = 3; - - admin.v3.RoutesConfigDump route_config = 4; - - admin.v3.ScopedRoutesConfigDump scoped_route_config = 5; - - admin.v3.EndpointsConfigDump endpoint_config = 6; - } -} - -// All xds configs for a particular client. -message ClientConfig { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.status.v2.ClientConfig"; - - // GenericXdsConfig is used to specify the config status and the dump - // of any xDS resource identified by their type URL. It is the generalized - // version of the now deprecated ListenersConfigDump, ClustersConfigDump etc - // [#next-free-field: 10] - message GenericXdsConfig { - // Type_url represents the fully qualified name of xDS resource type - // like envoy.v3.Cluster, envoy.v3.ClusterLoadAssignment etc. - string type_url = 1; - - // Name of the xDS resource - string name = 2; - - // This is the :ref:`version_info ` - // in the last processed xDS discovery response. 
If there are only - // static bootstrap listeners, this field will be "" - string version_info = 3; - - // The xDS resource config. Actual content depends on the type - google.protobuf.Any xds_config = 4; - - // Timestamp when the xDS resource was last updated - google.protobuf.Timestamp last_updated = 5; - - // Per xDS resource config status. It is generated by management servers. - // It will not be present if the CSDS server is an xDS client. - ConfigStatus config_status = 6; - - // Per xDS resource status from the view of a xDS client - admin.v3.ClientResourceStatus client_status = 7; - - // Set if the last update failed, cleared after the next successful - // update. The *error_state* field contains the rejected version of - // this particular resource along with the reason and timestamp. For - // successfully updated or acknowledged resource, this field should - // be empty. - // [#not-implemented-hide:] - admin.v3.UpdateFailureState error_state = 8; - - // Is static resource is true if it is specified in the config supplied - // through the file at the startup. - bool is_static_resource = 9; - } - - // Node for a particular client. - config.core.v3.Node node = 1; - - // This field is deprecated in favor of generic_xds_configs which is - // much simpler and uniform in structure. - repeated PerXdsConfig xds_config = 2 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - - // Represents generic xDS config and the exact config structure depends on - // the type URL (like Cluster if it is CDS) - repeated GenericXdsConfig generic_xds_configs = 3; -} - -message ClientStatusResponse { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.status.v2.ClientStatusResponse"; - - // Client configs for the clients specified in the ClientStatusRequest. 
- repeated ClientConfig config = 1; -} diff --git a/generated_api_shadow/envoy/service/tap/v2alpha/BUILD b/generated_api_shadow/envoy/service/tap/v2alpha/BUILD deleted file mode 100644 index 8e0561a169c5a..0000000000000 --- a/generated_api_shadow/envoy/service/tap/v2alpha/BUILD +++ /dev/null @@ -1,15 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - has_services = True, - deps = [ - "//envoy/api/v2/core:pkg", - "//envoy/api/v2/route:pkg", - "//envoy/data/tap/v2alpha:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/service/tap/v2alpha/common.proto b/generated_api_shadow/envoy/service/tap/v2alpha/common.proto deleted file mode 100644 index 990a3826481bd..0000000000000 --- a/generated_api_shadow/envoy/service/tap/v2alpha/common.proto +++ /dev/null @@ -1,205 +0,0 @@ -syntax = "proto3"; - -package envoy.service.tap.v2alpha; - -import "envoy/api/v2/core/base.proto"; -import "envoy/api/v2/core/grpc_service.proto"; -import "envoy/api/v2/route/route_components.proto"; - -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.service.tap.v2alpha"; -option java_outer_classname = "CommonProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = "envoy.config.tap.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Common tap configuration] - -// Tap configuration. -message TapConfig { - // [#comment:TODO(mattklein123): Rate limiting] - - // The match configuration. If the configuration matches the data source being tapped, a tap will - // occur, with the result written to the configured output. 
- MatchPredicate match_config = 1 [(validate.rules).message = {required: true}]; - - // The tap output configuration. If a match configuration matches a data source being tapped, - // a tap will occur and the data will be written to the configured output. - OutputConfig output_config = 2 [(validate.rules).message = {required: true}]; - - // [#not-implemented-hide:] Specify if Tap matching is enabled. The % of requests\connections for - // which the tap matching is enabled. When not enabled, the request\connection will not be - // recorded. - // - // .. note:: - // - // This field defaults to 100/:ref:`HUNDRED - // `. - api.v2.core.RuntimeFractionalPercent tap_enabled = 3; -} - -// Tap match configuration. This is a recursive structure which allows complex nested match -// configurations to be built using various logical operators. -// [#next-free-field: 9] -message MatchPredicate { - // A set of match configurations used for logical operations. - message MatchSet { - // The list of rules that make up the set. - repeated MatchPredicate rules = 1 [(validate.rules).repeated = {min_items: 2}]; - } - - oneof rule { - option (validate.required) = true; - - // A set that describes a logical OR. If any member of the set matches, the match configuration - // matches. - MatchSet or_match = 1; - - // A set that describes a logical AND. If all members of the set match, the match configuration - // matches. - MatchSet and_match = 2; - - // A negation match. The match configuration will match if the negated match condition matches. - MatchPredicate not_match = 3; - - // The match configuration will always match. - bool any_match = 4 [(validate.rules).bool = {const: true}]; - - // HTTP request headers match configuration. - HttpHeadersMatch http_request_headers_match = 5; - - // HTTP request trailers match configuration. - HttpHeadersMatch http_request_trailers_match = 6; - - // HTTP response headers match configuration. 
- HttpHeadersMatch http_response_headers_match = 7; - - // HTTP response trailers match configuration. - HttpHeadersMatch http_response_trailers_match = 8; - } -} - -// HTTP headers match configuration. -message HttpHeadersMatch { - // HTTP headers to match. - repeated api.v2.route.HeaderMatcher headers = 1; -} - -// Tap output configuration. -message OutputConfig { - // Output sinks for tap data. Currently a single sink is allowed in the list. Once multiple - // sink types are supported this constraint will be relaxed. - repeated OutputSink sinks = 1 [(validate.rules).repeated = {min_items: 1 max_items: 1}]; - - // For buffered tapping, the maximum amount of received body that will be buffered prior to - // truncation. If truncation occurs, the :ref:`truncated - // ` field will be set. If not specified, the - // default is 1KiB. - google.protobuf.UInt32Value max_buffered_rx_bytes = 2; - - // For buffered tapping, the maximum amount of transmitted body that will be buffered prior to - // truncation. If truncation occurs, the :ref:`truncated - // ` field will be set. If not specified, the - // default is 1KiB. - google.protobuf.UInt32Value max_buffered_tx_bytes = 3; - - // Indicates whether taps produce a single buffered message per tap, or multiple streamed - // messages per tap in the emitted :ref:`TraceWrapper - // ` messages. Note that streamed tapping does not - // mean that no buffering takes place. Buffering may be required if data is processed before a - // match can be determined. See the HTTP tap filter :ref:`streaming - // ` documentation for more information. - bool streaming = 4; -} - -// Tap output sink configuration. -message OutputSink { - // Output format. All output is in the form of one or more :ref:`TraceWrapper - // ` messages. This enumeration indicates - // how those messages are written. Note that not all sinks support all output formats. See - // individual sink documentation for more information. 
- enum Format { - // Each message will be written as JSON. Any :ref:`body ` - // data will be present in the :ref:`as_bytes - // ` field. This means that body data will be - // base64 encoded as per the `proto3 JSON mappings - // `_. - JSON_BODY_AS_BYTES = 0; - - // Each message will be written as JSON. Any :ref:`body ` - // data will be present in the :ref:`as_string - // ` field. This means that body data will be - // string encoded as per the `proto3 JSON mappings - // `_. This format type is - // useful when it is known that that body is human readable (e.g., JSON over HTTP) and the - // user wishes to view it directly without being forced to base64 decode the body. - JSON_BODY_AS_STRING = 1; - - // Binary proto format. Note that binary proto is not self-delimiting. If a sink writes - // multiple binary messages without any length information the data stream will not be - // useful. However, for certain sinks that are self-delimiting (e.g., one message per file) - // this output format makes consumption simpler. - PROTO_BINARY = 2; - - // Messages are written as a sequence tuples, where each tuple is the message length encoded - // as a `protobuf 32-bit varint - // `_ - // followed by the binary message. The messages can be read back using the language specific - // protobuf coded stream implementation to obtain the message length and the message. - PROTO_BINARY_LENGTH_DELIMITED = 3; - - // Text proto format. - PROTO_TEXT = 4; - } - - // Sink output format. - Format format = 1 [(validate.rules).enum = {defined_only: true}]; - - oneof output_sink_type { - option (validate.required) = true; - - // Tap output will be streamed out the :http:post:`/tap` admin endpoint. - // - // .. attention:: - // - // It is only allowed to specify the streaming admin output sink if the tap is being - // configured from the :http:post:`/tap` admin endpoint. 
Thus, if an extension has - // been configured to receive tap configuration from some other source (e.g., static - // file, XDS, etc.) configuring the streaming admin output type will fail. - StreamingAdminSink streaming_admin = 2; - - // Tap output will be written to a file per tap sink. - FilePerTapSink file_per_tap = 3; - - // [#not-implemented-hide:] - // GrpcService to stream data to. The format argument must be PROTO_BINARY. - StreamingGrpcSink streaming_grpc = 4; - } -} - -// Streaming admin sink configuration. -message StreamingAdminSink { -} - -// The file per tap sink outputs a discrete file for every tapped stream. -message FilePerTapSink { - // Path prefix. The output file will be of the form _.pb, where is an - // identifier distinguishing the recorded trace for stream instances (the Envoy - // connection ID, HTTP stream ID, etc.). - string path_prefix = 1 [(validate.rules).string = {min_bytes: 1}]; -} - -// [#not-implemented-hide:] Streaming gRPC sink configuration sends the taps to an external gRPC -// server. -message StreamingGrpcSink { - // Opaque identifier, that will be sent back to the streaming grpc server. - string tap_id = 1; - - // The gRPC server that hosts the Tap Sink Service. 
- api.v2.core.GrpcService grpc_service = 2 [(validate.rules).message = {required: true}]; -} diff --git a/generated_api_shadow/envoy/service/tap/v2alpha/tap.proto b/generated_api_shadow/envoy/service/tap/v2alpha/tap.proto deleted file mode 100644 index 9fd18eae5d361..0000000000000 --- a/generated_api_shadow/envoy/service/tap/v2alpha/tap.proto +++ /dev/null @@ -1,55 +0,0 @@ -syntax = "proto3"; - -package envoy.service.tap.v2alpha; - -import "envoy/api/v2/core/base.proto"; -import "envoy/data/tap/v2alpha/wrapper.proto"; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.service.tap.v2alpha"; -option java_outer_classname = "TapProto"; -option java_multiple_files = true; -option java_generic_services = true; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Tap Sink Service] - -// [#not-implemented-hide:] A tap service to receive incoming taps. Envoy will call -// StreamTaps to deliver captured taps to the server -service TapSinkService { - // Envoy will connect and send StreamTapsRequest messages forever. It does not expect any - // response to be sent as nothing would be done in the case of failure. The server should - // disconnect if it expects Envoy to reconnect. - rpc StreamTaps(stream StreamTapsRequest) returns (StreamTapsResponse) { - } -} - -// [#not-implemented-hide:] Stream message for the Tap API. Envoy will open a stream to the server -// and stream taps without ever expecting a response. -message StreamTapsRequest { - message Identifier { - // The node sending taps over the stream. - api.v2.core.Node node = 1 [(validate.rules).message = {required: true}]; - - // The opaque identifier that was set in the :ref:`output config - // `. - string tap_id = 2; - } - - // Identifier data effectively is a structured metadata. As a performance optimization this will - // only be sent in the first message on the stream. 
- Identifier identifier = 1; - - // The trace id. this can be used to merge together a streaming trace. Note that the trace_id - // is not guaranteed to be spatially or temporally unique. - uint64 trace_id = 2; - - // The trace data. - data.tap.v2alpha.TraceWrapper trace = 3; -} - -// [#not-implemented-hide:] -message StreamTapsResponse { -} diff --git a/generated_api_shadow/envoy/service/tap/v3/BUILD b/generated_api_shadow/envoy/service/tap/v3/BUILD deleted file mode 100644 index 5ee1ce553f48b..0000000000000 --- a/generated_api_shadow/envoy/service/tap/v3/BUILD +++ /dev/null @@ -1,15 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - has_services = True, - deps = [ - "//envoy/config/core/v3:pkg", - "//envoy/data/tap/v3:pkg", - "//envoy/service/tap/v2alpha:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/service/tap/v3/tap.proto b/generated_api_shadow/envoy/service/tap/v3/tap.proto deleted file mode 100644 index 5d9866e570747..0000000000000 --- a/generated_api_shadow/envoy/service/tap/v3/tap.proto +++ /dev/null @@ -1,64 +0,0 @@ -syntax = "proto3"; - -package envoy.service.tap.v3; - -import "envoy/config/core/v3/base.proto"; -import "envoy/data/tap/v3/wrapper.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.service.tap.v3"; -option java_outer_classname = "TapProto"; -option java_multiple_files = true; -option java_generic_services = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Tap Sink Service] - -// [#not-implemented-hide:] A tap service to receive incoming taps. 
Envoy will call -// StreamTaps to deliver captured taps to the server -service TapSinkService { - // Envoy will connect and send StreamTapsRequest messages forever. It does not expect any - // response to be sent as nothing would be done in the case of failure. The server should - // disconnect if it expects Envoy to reconnect. - rpc StreamTaps(stream StreamTapsRequest) returns (StreamTapsResponse) { - } -} - -// [#not-implemented-hide:] Stream message for the Tap API. Envoy will open a stream to the server -// and stream taps without ever expecting a response. -message StreamTapsRequest { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.tap.v2alpha.StreamTapsRequest"; - - message Identifier { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.tap.v2alpha.StreamTapsRequest.Identifier"; - - // The node sending taps over the stream. - config.core.v3.Node node = 1 [(validate.rules).message = {required: true}]; - - // The opaque identifier that was set in the :ref:`output config - // `. - string tap_id = 2; - } - - // Identifier data effectively is a structured metadata. As a performance optimization this will - // only be sent in the first message on the stream. - Identifier identifier = 1; - - // The trace id. this can be used to merge together a streaming trace. Note that the trace_id - // is not guaranteed to be spatially or temporally unique. - uint64 trace_id = 2; - - // The trace data. - data.tap.v3.TraceWrapper trace = 3; -} - -// [#not-implemented-hide:] -message StreamTapsResponse { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.tap.v2alpha.StreamTapsResponse"; -} diff --git a/generated_api_shadow/envoy/service/trace/v2/BUILD b/generated_api_shadow/envoy/service/trace/v2/BUILD deleted file mode 100644 index 7e6d2b11bf163..0000000000000 --- a/generated_api_shadow/envoy/service/trace/v2/BUILD +++ /dev/null @@ -1,14 +0,0 @@ -# DO NOT EDIT. 
This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - has_services = True, - deps = [ - "//envoy/api/v2/core:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - "@opencensus_proto//opencensus/proto/trace/v1:trace_proto", - ], -) diff --git a/generated_api_shadow/envoy/service/trace/v2/trace_service.proto b/generated_api_shadow/envoy/service/trace/v2/trace_service.proto deleted file mode 100644 index 4e07f9e1f609b..0000000000000 --- a/generated_api_shadow/envoy/service/trace/v2/trace_service.proto +++ /dev/null @@ -1,46 +0,0 @@ -syntax = "proto3"; - -package envoy.service.trace.v2; - -import "envoy/api/v2/core/base.proto"; - -import "opencensus/proto/trace/v1/trace.proto"; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.service.trace.v2"; -option java_outer_classname = "TraceServiceProto"; -option java_multiple_files = true; -option java_generic_services = true; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Trace service] - -// Service for streaming traces to server that consumes the trace data. It -// uses OpenCensus data model as a standard to represent trace information. -service TraceService { - // Envoy will connect and send StreamTracesMessage messages forever. It does - // not expect any response to be sent as nothing would be done in the case - // of failure. - rpc StreamTraces(stream StreamTracesMessage) returns (StreamTracesResponse) { - } -} - -message StreamTracesResponse { -} - -message StreamTracesMessage { - message Identifier { - // The node sending the access log messages over the stream. - api.v2.core.Node node = 1 [(validate.rules).message = {required: true}]; - } - - // Identifier data effectively is a structured metadata. 
- // As a performance optimization this will only be sent in the first message - // on the stream. - Identifier identifier = 1; - - // A list of Span entries - repeated opencensus.proto.trace.v1.Span spans = 2; -} diff --git a/generated_api_shadow/envoy/service/trace/v3/BUILD b/generated_api_shadow/envoy/service/trace/v3/BUILD deleted file mode 100644 index a00d454ff9749..0000000000000 --- a/generated_api_shadow/envoy/service/trace/v3/BUILD +++ /dev/null @@ -1,14 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - has_services = True, - deps = [ - "//envoy/config/core/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - "@opencensus_proto//opencensus/proto/trace/v1:trace_proto", - ], -) diff --git a/generated_api_shadow/envoy/service/trace/v3/trace_service.proto b/generated_api_shadow/envoy/service/trace/v3/trace_service.proto deleted file mode 100644 index 65970593d7867..0000000000000 --- a/generated_api_shadow/envoy/service/trace/v3/trace_service.proto +++ /dev/null @@ -1,55 +0,0 @@ -syntax = "proto3"; - -package envoy.service.trace.v3; - -import "envoy/config/core/v3/base.proto"; - -import "opencensus/proto/trace/v1/trace.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.service.trace.v3"; -option java_outer_classname = "TraceServiceProto"; -option java_multiple_files = true; -option java_generic_services = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Trace service] - -// Service for streaming traces to server that consumes the trace data. It -// uses OpenCensus data model as a standard to represent trace information. -service TraceService { - // Envoy will connect and send StreamTracesMessage messages forever. 
It does - // not expect any response to be sent as nothing would be done in the case - // of failure. - rpc StreamTraces(stream StreamTracesMessage) returns (StreamTracesResponse) { - } -} - -message StreamTracesResponse { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.trace.v2.StreamTracesResponse"; -} - -message StreamTracesMessage { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.trace.v2.StreamTracesMessage"; - - message Identifier { - option (udpa.annotations.versioning).previous_message_type = - "envoy.service.trace.v2.StreamTracesMessage.Identifier"; - - // The node sending the access log messages over the stream. - config.core.v3.Node node = 1 [(validate.rules).message = {required: true}]; - } - - // Identifier data effectively is a structured metadata. - // As a performance optimization this will only be sent in the first message - // on the stream. - Identifier identifier = 1; - - // A list of Span entries - repeated opencensus.proto.trace.v1.Span spans = 2; -} diff --git a/generated_api_shadow/envoy/type/BUILD b/generated_api_shadow/envoy/type/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/type/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/type/hash_policy.proto b/generated_api_shadow/envoy/type/hash_policy.proto deleted file mode 100644 index b6aeb31fcbfde..0000000000000 --- a/generated_api_shadow/envoy/type/hash_policy.proto +++ /dev/null @@ -1,27 +0,0 @@ -syntax = "proto3"; - -package envoy.type; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.type"; -option java_outer_classname = "HashPolicyProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Hash Policy] - -// Specifies the hash policy -message HashPolicy { - // The source IP will be used to compute the hash used by hash-based load balancing - // algorithms. - message SourceIp { - } - - oneof policy_specifier { - option (validate.required) = true; - - SourceIp source_ip = 1; - } -} diff --git a/generated_api_shadow/envoy/type/http.proto b/generated_api_shadow/envoy/type/http.proto deleted file mode 100644 index c1c787411fad8..0000000000000 --- a/generated_api_shadow/envoy/type/http.proto +++ /dev/null @@ -1,23 +0,0 @@ -syntax = "proto3"; - -package envoy.type; - -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.type"; -option java_outer_classname = "HttpProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: HTTP] - -enum CodecClientType { - HTTP1 = 0; - - HTTP2 = 1; - - // [#not-implemented-hide:] QUIC implementation is not production ready yet. Use this enum with - // caution to prevent accidental execution of QUIC code. I.e. `!= HTTP2` is no longer sufficient - // to distinguish HTTP1 and HTTP2 traffic. 
- HTTP3 = 2; -} diff --git a/generated_api_shadow/envoy/type/http/v3/BUILD b/generated_api_shadow/envoy/type/http/v3/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/type/http/v3/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/type/http/v3/path_transformation.proto b/generated_api_shadow/envoy/type/http/v3/path_transformation.proto deleted file mode 100644 index 0b3d72009f5ff..0000000000000 --- a/generated_api_shadow/envoy/type/http/v3/path_transformation.proto +++ /dev/null @@ -1,57 +0,0 @@ -syntax = "proto3"; - -package envoy.type.http.v3; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.type.http.v3"; -option java_outer_classname = "PathTransformationProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Path Transformations API] - -// PathTransformation defines an API to apply a sequence of operations that can be used to alter -// text before it is used for matching or routing. Multiple actions can be applied in the same -// Transformation, forming a sequential pipeline. The transformations will be performed in the order -// that they appear. -// -// This API is a work in progress. - -message PathTransformation { - // A type of operation to alter text. - message Operation { - // Should text be normalized according to RFC 3986? This typically is used for path headers - // before any processing of requests by HTTP filters or routing. This applies percent-encoded - // normalization and path segment normalization. Fails on characters disallowed in URLs - // (e.g. NULLs). 
See `Normalization and Comparison - // `_ for details of normalization. Note that - // this options does not perform `case normalization - // `_ - message NormalizePathRFC3986 { - } - - // Determines if adjacent slashes are merged into one. A common use case is for a request path - // header. Using this option in `:ref: PathNormalizationOptions - // ` - // will allow incoming requests with path `//dir///file` to match against route with `prefix` - // match set to `/dir`. When using for header transformations, note that slash merging is not - // part of `HTTP spec `_ and is provided for convenience. - message MergeSlashes { - } - - oneof operation_specifier { - option (validate.required) = true; - - // Enable path normalization per RFC 3986. - NormalizePathRFC3986 normalize_path_rfc_3986 = 2; - - // Enable merging adjacent slashes. - MergeSlashes merge_slashes = 3; - } - } - - // A list of operations to apply. Transformations will be performed in the order that they appear. - repeated Operation operations = 1; -} diff --git a/generated_api_shadow/envoy/type/http_status.proto b/generated_api_shadow/envoy/type/http_status.proto deleted file mode 100644 index 99b44a98c2512..0000000000000 --- a/generated_api_shadow/envoy/type/http_status.proto +++ /dev/null @@ -1,139 +0,0 @@ -syntax = "proto3"; - -package envoy.type; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.type"; -option java_outer_classname = "HttpStatusProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: HTTP status codes] - -// HTTP response codes supported in Envoy. -// For more details: https://www.iana.org/assignments/http-status-codes/http-status-codes.xhtml -enum StatusCode { - // Empty - This code not part of the HTTP status code specification, but it is needed for proto - // `enum` type. 
- Empty = 0; - - Continue = 100; - - OK = 200; - - Created = 201; - - Accepted = 202; - - NonAuthoritativeInformation = 203; - - NoContent = 204; - - ResetContent = 205; - - PartialContent = 206; - - MultiStatus = 207; - - AlreadyReported = 208; - - IMUsed = 226; - - MultipleChoices = 300; - - MovedPermanently = 301; - - Found = 302; - - SeeOther = 303; - - NotModified = 304; - - UseProxy = 305; - - TemporaryRedirect = 307; - - PermanentRedirect = 308; - - BadRequest = 400; - - Unauthorized = 401; - - PaymentRequired = 402; - - Forbidden = 403; - - NotFound = 404; - - MethodNotAllowed = 405; - - NotAcceptable = 406; - - ProxyAuthenticationRequired = 407; - - RequestTimeout = 408; - - Conflict = 409; - - Gone = 410; - - LengthRequired = 411; - - PreconditionFailed = 412; - - PayloadTooLarge = 413; - - URITooLong = 414; - - UnsupportedMediaType = 415; - - RangeNotSatisfiable = 416; - - ExpectationFailed = 417; - - MisdirectedRequest = 421; - - UnprocessableEntity = 422; - - Locked = 423; - - FailedDependency = 424; - - UpgradeRequired = 426; - - PreconditionRequired = 428; - - TooManyRequests = 429; - - RequestHeaderFieldsTooLarge = 431; - - InternalServerError = 500; - - NotImplemented = 501; - - BadGateway = 502; - - ServiceUnavailable = 503; - - GatewayTimeout = 504; - - HTTPVersionNotSupported = 505; - - VariantAlsoNegotiates = 506; - - InsufficientStorage = 507; - - LoopDetected = 508; - - NotExtended = 510; - - NetworkAuthenticationRequired = 511; -} - -// HTTP status. -message HttpStatus { - // Supplies HTTP response code. - StatusCode code = 1 [(validate.rules).enum = {defined_only: true not_in: 0}]; -} diff --git a/generated_api_shadow/envoy/type/matcher/BUILD b/generated_api_shadow/envoy/type/matcher/BUILD deleted file mode 100644 index 29613b4c3487b..0000000000000 --- a/generated_api_shadow/envoy/type/matcher/BUILD +++ /dev/null @@ -1,13 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/annotations:pkg", - "//envoy/type:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/type/matcher/metadata.proto b/generated_api_shadow/envoy/type/matcher/metadata.proto deleted file mode 100644 index ed58d04adb021..0000000000000 --- a/generated_api_shadow/envoy/type/matcher/metadata.proto +++ /dev/null @@ -1,98 +0,0 @@ -syntax = "proto3"; - -package envoy.type.matcher; - -import "envoy/type/matcher/value.proto"; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.type.matcher"; -option java_outer_classname = "MetadataProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Metadata matcher] - -// MetadataMatcher provides a general interface to check if a given value is matched in -// :ref:`Metadata `. It uses `filter` and `path` to retrieve the value -// from the Metadata and then check if it's matched to the specified value. -// -// For example, for the following Metadata: -// -// .. code-block:: yaml -// -// filter_metadata: -// envoy.filters.http.rbac: -// fields: -// a: -// struct_value: -// fields: -// b: -// struct_value: -// fields: -// c: -// string_value: pro -// t: -// list_value: -// values: -// - string_value: m -// - string_value: n -// -// The following MetadataMatcher is matched as the path [a, b, c] will retrieve a string value "pro" -// from the Metadata which is matched to the specified prefix match. -// -// .. code-block:: yaml -// -// filter: envoy.filters.http.rbac -// path: -// - key: a -// - key: b -// - key: c -// value: -// string_match: -// prefix: pr -// -// The following MetadataMatcher is matched as the code will match one of the string values in the -// list at the path [a, t]. 
-// -// .. code-block:: yaml -// -// filter: envoy.filters.http.rbac -// path: -// - key: a -// - key: t -// value: -// list_match: -// one_of: -// string_match: -// exact: m -// -// An example use of MetadataMatcher is specifying additional metadata in envoy.filters.http.rbac to -// enforce access control based on dynamic metadata in a request. See :ref:`Permission -// ` and :ref:`Principal -// `. - -// [#next-major-version: MetadataMatcher should use StructMatcher] -message MetadataMatcher { - // Specifies the segment in a path to retrieve value from Metadata. - // Note: Currently it's not supported to retrieve a value from a list in Metadata. This means that - // if the segment key refers to a list, it has to be the last segment in a path. - message PathSegment { - oneof segment { - option (validate.required) = true; - - // If specified, use the key to retrieve the value in a Struct. - string key = 1 [(validate.rules).string = {min_len: 1}]; - } - } - - // The filter name to retrieve the Struct from the Metadata. - string filter = 1 [(validate.rules).string = {min_len: 1}]; - - // The path to retrieve the Value from the Struct. - repeated PathSegment path = 2 [(validate.rules).repeated = {min_items: 1}]; - - // The MetadataMatcher is matched if the value retrieved by path is matched to this value. 
- ValueMatcher value = 3 [(validate.rules).message = {required: true}]; -} diff --git a/generated_api_shadow/envoy/type/matcher/node.proto b/generated_api_shadow/envoy/type/matcher/node.proto deleted file mode 100644 index c9e84a46279ab..0000000000000 --- a/generated_api_shadow/envoy/type/matcher/node.proto +++ /dev/null @@ -1,25 +0,0 @@ -syntax = "proto3"; - -package envoy.type.matcher; - -import "envoy/type/matcher/string.proto"; -import "envoy/type/matcher/struct.proto"; - -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.type.matcher"; -option java_outer_classname = "NodeProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Node matcher] - -// Specifies the way to match a Node. -// The match follows AND semantics. -message NodeMatcher { - // Specifies match criteria on the node id. - StringMatcher node_id = 1; - - // Specifies match criteria on the node metadata. - repeated StructMatcher node_metadatas = 2; -} diff --git a/generated_api_shadow/envoy/type/matcher/number.proto b/generated_api_shadow/envoy/type/matcher/number.proto deleted file mode 100644 index e488f16a4a0c9..0000000000000 --- a/generated_api_shadow/envoy/type/matcher/number.proto +++ /dev/null @@ -1,29 +0,0 @@ -syntax = "proto3"; - -package envoy.type.matcher; - -import "envoy/type/range.proto"; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.type.matcher"; -option java_outer_classname = "NumberProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Number matcher] - -// Specifies the way to match a double value. -message DoubleMatcher { - oneof match_pattern { - option (validate.required) = true; - - // If specified, the input double value must be in the range specified here. 
- // Note: The range is using half-open interval semantics [start, end). - DoubleRange range = 1; - - // If specified, the input double value must be equal to the value specified here. - double exact = 2; - } -} diff --git a/generated_api_shadow/envoy/type/matcher/path.proto b/generated_api_shadow/envoy/type/matcher/path.proto deleted file mode 100644 index 860a1c69f18a8..0000000000000 --- a/generated_api_shadow/envoy/type/matcher/path.proto +++ /dev/null @@ -1,27 +0,0 @@ -syntax = "proto3"; - -package envoy.type.matcher; - -import "envoy/type/matcher/string.proto"; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.type.matcher"; -option java_outer_classname = "PathProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Path matcher] - -// Specifies the way to match a path on HTTP request. -message PathMatcher { - oneof rule { - option (validate.required) = true; - - // The `path` must match the URL path portion of the :path header. The query and fragment - // string (if present) are removed in the URL path portion. - // For example, the path */data* will match the *:path* header */data#fragment?param=value*. 
- StringMatcher path = 1 [(validate.rules).message = {required: true}]; - } -} diff --git a/generated_api_shadow/envoy/type/matcher/regex.proto b/generated_api_shadow/envoy/type/matcher/regex.proto deleted file mode 100644 index 6c499235bbe23..0000000000000 --- a/generated_api_shadow/envoy/type/matcher/regex.proto +++ /dev/null @@ -1,78 +0,0 @@ -syntax = "proto3"; - -package envoy.type.matcher; - -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.type.matcher"; -option java_outer_classname = "RegexProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Regex matcher] - -// A regex matcher designed for safety when used with untrusted input. -message RegexMatcher { - // Google's `RE2 `_ regex engine. The regex string must adhere to - // the documented `syntax `_. The engine is designed - // to complete execution in linear time as well as limit the amount of memory used. - // - // Envoy supports program size checking via runtime. The runtime keys `re2.max_program_size.error_level` - // and `re2.max_program_size.warn_level` can be set to integers as the maximum program size or - // complexity that a compiled regex can have before an exception is thrown or a warning is - // logged, respectively. `re2.max_program_size.error_level` defaults to 100, and - // `re2.max_program_size.warn_level` has no default if unset (will not check/log a warning). - // - // Envoy emits two stats for tracking the program size of regexes: the histogram `re2.program_size`, - // which records the program size, and the counter `re2.exceeded_warn_level`, which is incremented - // each time the program size exceeds the warn level threshold. - message GoogleRE2 { - // This field controls the RE2 "program size" which is a rough estimate of how complex a - // compiled regex is to evaluate. 
A regex that has a program size greater than the configured - // value will fail to compile. In this case, the configured max program size can be increased - // or the regex can be simplified. If not specified, the default is 100. - // - // This field is deprecated; regexp validation should be performed on the management server - // instead of being done by each individual client. - google.protobuf.UInt32Value max_program_size = 1 [deprecated = true]; - } - - oneof engine_type { - option (validate.required) = true; - - // Google's RE2 regex engine. - GoogleRE2 google_re2 = 1 [(validate.rules).message = {required: true}]; - } - - // The regex match string. The string must be supported by the configured engine. - string regex = 2 [(validate.rules).string = {min_len: 1}]; -} - -// Describes how to match a string and then produce a new string using a regular -// expression and a substitution string. -message RegexMatchAndSubstitute { - // The regular expression used to find portions of a string (hereafter called - // the "subject string") that should be replaced. When a new string is - // produced during the substitution operation, the new string is initially - // the same as the subject string, but then all matches in the subject string - // are replaced by the substitution string. If replacing all matches isn't - // desired, regular expression anchors can be used to ensure a single match, - // so as to replace just one occurrence of a pattern. Capture groups can be - // used in the pattern to extract portions of the subject string, and then - // referenced in the substitution string. - RegexMatcher pattern = 1; - - // The string that should be substituted into matching portions of the - // subject string during a substitution operation to produce a new string. - // Capture groups in the pattern can be referenced in the substitution - // string. Note, however, that the syntax for referring to capture groups is - // defined by the chosen regular expression engine. 
Google's `RE2 - // `_ regular expression engine uses a - // backslash followed by the capture group number to denote a numbered - // capture group. E.g., ``\1`` refers to capture group 1, and ``\2`` refers - // to capture group 2. - string substitution = 2; -} diff --git a/generated_api_shadow/envoy/type/matcher/string.proto b/generated_api_shadow/envoy/type/matcher/string.proto deleted file mode 100644 index 499eaf21775f8..0000000000000 --- a/generated_api_shadow/envoy/type/matcher/string.proto +++ /dev/null @@ -1,79 +0,0 @@ -syntax = "proto3"; - -package envoy.type.matcher; - -import "envoy/type/matcher/regex.proto"; - -import "envoy/annotations/deprecation.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.type.matcher"; -option java_outer_classname = "StringProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: String matcher] - -// Specifies the way to match a string. -// [#next-free-field: 7] -message StringMatcher { - oneof match_pattern { - option (validate.required) = true; - - // The input string must match exactly the string specified here. - // - // Examples: - // - // * *abc* only matches the value *abc*. - string exact = 1; - - // The input string must have the prefix specified here. - // Note: empty prefix is not allowed, please use regex instead. - // - // Examples: - // - // * *abc* matches the value *abc.xyz* - string prefix = 2 [(validate.rules).string = {min_len: 1}]; - - // The input string must have the suffix specified here. - // Note: empty prefix is not allowed, please use regex instead. - // - // Examples: - // - // * *abc* matches the value *xyz.abc* - string suffix = 3 [(validate.rules).string = {min_len: 1}]; - - // The input string must match the regular expression specified here. - // The regex grammar is defined `here - // `_. 
- // - // Examples: - // - // * The regex ``\d{3}`` matches the value *123* - // * The regex ``\d{3}`` does not match the value *1234* - // * The regex ``\d{3}`` does not match the value *123.456* - // - // .. attention:: - // This field has been deprecated in favor of `safe_regex` as it is not safe for use with - // untrusted input in all cases. - string regex = 4 [ - deprecated = true, - (validate.rules).string = {max_bytes: 1024}, - (envoy.annotations.disallowed_by_default) = true - ]; - - // The input string must match the regular expression specified here. - RegexMatcher safe_regex = 5 [(validate.rules).message = {required: true}]; - } - - // If true, indicates the exact/prefix/suffix matching should be case insensitive. This has no - // effect for the safe_regex match. - // For example, the matcher *data* will match both input string *Data* and *data* if set to true. - bool ignore_case = 6; -} - -// Specifies a list of ways to match a string. -message ListStringMatcher { - repeated StringMatcher patterns = 1 [(validate.rules).repeated = {min_items: 1}]; -} diff --git a/generated_api_shadow/envoy/type/matcher/struct.proto b/generated_api_shadow/envoy/type/matcher/struct.proto deleted file mode 100644 index 10d4672e0622b..0000000000000 --- a/generated_api_shadow/envoy/type/matcher/struct.proto +++ /dev/null @@ -1,84 +0,0 @@ -syntax = "proto3"; - -package envoy.type.matcher; - -import "envoy/type/matcher/value.proto"; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.type.matcher"; -option java_outer_classname = "StructProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Struct matcher] - -// StructMatcher provides a general interface to check if a given value is matched in -// google.protobuf.Struct. 
It uses `path` to retrieve the value -// from the struct and then check if it's matched to the specified value. -// -// For example, for the following Struct: -// -// .. code-block:: yaml -// -// fields: -// a: -// struct_value: -// fields: -// b: -// struct_value: -// fields: -// c: -// string_value: pro -// t: -// list_value: -// values: -// - string_value: m -// - string_value: n -// -// The following MetadataMatcher is matched as the path [a, b, c] will retrieve a string value "pro" -// from the Metadata which is matched to the specified prefix match. -// -// .. code-block:: yaml -// -// path: -// - key: a -// - key: b -// - key: c -// value: -// string_match: -// prefix: pr -// -// The following StructMatcher is matched as the code will match one of the string values in the -// list at the path [a, t]. -// -// .. code-block:: yaml -// -// path: -// - key: a -// - key: t -// value: -// list_match: -// one_of: -// string_match: -// exact: m -// -// An example use of StructMatcher is to match metadata in envoy.v*.core.Node. -message StructMatcher { - // Specifies the segment in a path to retrieve value from Struct. - message PathSegment { - oneof segment { - option (validate.required) = true; - - // If specified, use the key to retrieve the value in a Struct. - string key = 1 [(validate.rules).string = {min_len: 1}]; - } - } - - // The path to retrieve the Value from the Struct. - repeated PathSegment path = 2 [(validate.rules).repeated = {min_items: 1}]; - - // The StructMatcher is matched if the value retrieved by path is matched to this value. - ValueMatcher value = 3 [(validate.rules).message = {required: true}]; -} diff --git a/generated_api_shadow/envoy/type/matcher/v3/BUILD b/generated_api_shadow/envoy/type/matcher/v3/BUILD deleted file mode 100644 index a117fd27e4ff0..0000000000000 --- a/generated_api_shadow/envoy/type/matcher/v3/BUILD +++ /dev/null @@ -1,14 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/annotations:pkg", - "//envoy/type/matcher:pkg", - "//envoy/type/v3:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/type/matcher/v3/http_inputs.proto b/generated_api_shadow/envoy/type/matcher/v3/http_inputs.proto deleted file mode 100644 index 403e4676f7b96..0000000000000 --- a/generated_api_shadow/envoy/type/matcher/v3/http_inputs.proto +++ /dev/null @@ -1,57 +0,0 @@ -syntax = "proto3"; - -package envoy.type.matcher.v3; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.type.matcher.v3"; -option java_outer_classname = "HttpInputsProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Common HTTP Inputs] - -// Match input indicates that matching should be done on a specific request header. -// The resulting input string will be all headers for the given key joined by a comma, -// e.g. if the request contains two 'foo' headers with value 'bar' and 'baz', the input -// string will be 'bar,baz'. -// [#comment:TODO(snowp): Link to unified matching docs.] -message HttpRequestHeaderMatchInput { - // The request header to match on. - string header_name = 1 - [(validate.rules).string = {well_known_regex: HTTP_HEADER_NAME strict: false}]; -} - -// Match input indicates that matching should be done on a specific request trailer. -// The resulting input string will be all headers for the given key joined by a comma, -// e.g. if the request contains two 'foo' headers with value 'bar' and 'baz', the input -// string will be 'bar,baz'. -// [#comment:TODO(snowp): Link to unified matching docs.] -message HttpRequestTrailerMatchInput { - // The request trailer to match on. 
- string header_name = 1 - [(validate.rules).string = {well_known_regex: HTTP_HEADER_NAME strict: false}]; -} - -// Match input indicating that matching should be done on a specific response header. -// The resulting input string will be all headers for the given key joined by a comma, -// e.g. if the response contains two 'foo' headers with value 'bar' and 'baz', the input -// string will be 'bar,baz'. -// [#comment:TODO(snowp): Link to unified matching docs.] -message HttpResponseHeaderMatchInput { - // The response header to match on. - string header_name = 1 - [(validate.rules).string = {well_known_regex: HTTP_HEADER_NAME strict: false}]; -} - -// Match input indicates that matching should be done on a specific response trailer. -// The resulting input string will be all headers for the given key joined by a comma, -// e.g. if the request contains two 'foo' headers with value 'bar' and 'baz', the input -// string will be 'bar,baz'. -// [#comment:TODO(snowp): Link to unified matching docs.] -message HttpResponseTrailerMatchInput { - // The response trailer to match on. 
- string header_name = 1 - [(validate.rules).string = {well_known_regex: HTTP_HEADER_NAME strict: false}]; -} diff --git a/generated_api_shadow/envoy/type/matcher/v3/metadata.proto b/generated_api_shadow/envoy/type/matcher/v3/metadata.proto deleted file mode 100644 index de19a2f34dbd1..0000000000000 --- a/generated_api_shadow/envoy/type/matcher/v3/metadata.proto +++ /dev/null @@ -1,107 +0,0 @@ -syntax = "proto3"; - -package envoy.type.matcher.v3; - -import "envoy/type/matcher/v3/value.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.type.matcher.v3"; -option java_outer_classname = "MetadataProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Metadata matcher] - -// MetadataMatcher provides a general interface to check if a given value is matched in -// :ref:`Metadata `. It uses `filter` and `path` to retrieve the value -// from the Metadata and then check if it's matched to the specified value. -// -// For example, for the following Metadata: -// -// .. code-block:: yaml -// -// filter_metadata: -// envoy.filters.http.rbac: -// fields: -// a: -// struct_value: -// fields: -// b: -// struct_value: -// fields: -// c: -// string_value: pro -// t: -// list_value: -// values: -// - string_value: m -// - string_value: n -// -// The following MetadataMatcher is matched as the path [a, b, c] will retrieve a string value "pro" -// from the Metadata which is matched to the specified prefix match. -// -// .. code-block:: yaml -// -// filter: envoy.filters.http.rbac -// path: -// - key: a -// - key: b -// - key: c -// value: -// string_match: -// prefix: pr -// -// The following MetadataMatcher is matched as the code will match one of the string values in the -// list at the path [a, t]. -// -// .. 
code-block:: yaml -// -// filter: envoy.filters.http.rbac -// path: -// - key: a -// - key: t -// value: -// list_match: -// one_of: -// string_match: -// exact: m -// -// An example use of MetadataMatcher is specifying additional metadata in envoy.filters.http.rbac to -// enforce access control based on dynamic metadata in a request. See :ref:`Permission -// ` and :ref:`Principal -// `. - -// [#next-major-version: MetadataMatcher should use StructMatcher] -message MetadataMatcher { - option (udpa.annotations.versioning).previous_message_type = "envoy.type.matcher.MetadataMatcher"; - - // Specifies the segment in a path to retrieve value from Metadata. - // Note: Currently it's not supported to retrieve a value from a list in Metadata. This means that - // if the segment key refers to a list, it has to be the last segment in a path. - message PathSegment { - option (udpa.annotations.versioning).previous_message_type = - "envoy.type.matcher.MetadataMatcher.PathSegment"; - - oneof segment { - option (validate.required) = true; - - // If specified, use the key to retrieve the value in a Struct. - string key = 1 [(validate.rules).string = {min_len: 1}]; - } - } - - // The filter name to retrieve the Struct from the Metadata. - string filter = 1 [(validate.rules).string = {min_len: 1}]; - - // The path to retrieve the Value from the Struct. - repeated PathSegment path = 2 [(validate.rules).repeated = {min_items: 1}]; - - // The MetadataMatcher is matched if the value retrieved by path is matched to this value. - ValueMatcher value = 3 [(validate.rules).message = {required: true}]; - - // If true, the match result will be inverted. 
- bool invert = 4; -} diff --git a/generated_api_shadow/envoy/type/matcher/v3/node.proto b/generated_api_shadow/envoy/type/matcher/v3/node.proto deleted file mode 100644 index fe507312135ff..0000000000000 --- a/generated_api_shadow/envoy/type/matcher/v3/node.proto +++ /dev/null @@ -1,28 +0,0 @@ -syntax = "proto3"; - -package envoy.type.matcher.v3; - -import "envoy/type/matcher/v3/string.proto"; -import "envoy/type/matcher/v3/struct.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; - -option java_package = "io.envoyproxy.envoy.type.matcher.v3"; -option java_outer_classname = "NodeProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Node matcher] - -// Specifies the way to match a Node. -// The match follows AND semantics. -message NodeMatcher { - option (udpa.annotations.versioning).previous_message_type = "envoy.type.matcher.NodeMatcher"; - - // Specifies match criteria on the node id. - StringMatcher node_id = 1; - - // Specifies match criteria on the node metadata. - repeated StructMatcher node_metadatas = 2; -} diff --git a/generated_api_shadow/envoy/type/matcher/v3/number.proto b/generated_api_shadow/envoy/type/matcher/v3/number.proto deleted file mode 100644 index 2379efdcbd23a..0000000000000 --- a/generated_api_shadow/envoy/type/matcher/v3/number.proto +++ /dev/null @@ -1,32 +0,0 @@ -syntax = "proto3"; - -package envoy.type.matcher.v3; - -import "envoy/type/v3/range.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.type.matcher.v3"; -option java_outer_classname = "NumberProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Number matcher] - -// Specifies the way to match a double value. 
-message DoubleMatcher { - option (udpa.annotations.versioning).previous_message_type = "envoy.type.matcher.DoubleMatcher"; - - oneof match_pattern { - option (validate.required) = true; - - // If specified, the input double value must be in the range specified here. - // Note: The range is using half-open interval semantics [start, end). - type.v3.DoubleRange range = 1; - - // If specified, the input double value must be equal to the value specified here. - double exact = 2; - } -} diff --git a/generated_api_shadow/envoy/type/matcher/v3/path.proto b/generated_api_shadow/envoy/type/matcher/v3/path.proto deleted file mode 100644 index 0ce89871c9d9f..0000000000000 --- a/generated_api_shadow/envoy/type/matcher/v3/path.proto +++ /dev/null @@ -1,30 +0,0 @@ -syntax = "proto3"; - -package envoy.type.matcher.v3; - -import "envoy/type/matcher/v3/string.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.type.matcher.v3"; -option java_outer_classname = "PathProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Path matcher] - -// Specifies the way to match a path on HTTP request. -message PathMatcher { - option (udpa.annotations.versioning).previous_message_type = "envoy.type.matcher.PathMatcher"; - - oneof rule { - option (validate.required) = true; - - // The `path` must match the URL path portion of the :path header. The query and fragment - // string (if present) are removed in the URL path portion. - // For example, the path */data* will match the *:path* header */data#fragment?param=value*. 
- StringMatcher path = 1 [(validate.rules).message = {required: true}]; - } -} diff --git a/generated_api_shadow/envoy/type/matcher/v3/regex.proto b/generated_api_shadow/envoy/type/matcher/v3/regex.proto deleted file mode 100644 index 3e7bb477ecbf0..0000000000000 --- a/generated_api_shadow/envoy/type/matcher/v3/regex.proto +++ /dev/null @@ -1,89 +0,0 @@ -syntax = "proto3"; - -package envoy.type.matcher.v3; - -import "google/protobuf/wrappers.proto"; - -import "envoy/annotations/deprecation.proto"; -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.type.matcher.v3"; -option java_outer_classname = "RegexProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Regex matcher] - -// A regex matcher designed for safety when used with untrusted input. -message RegexMatcher { - option (udpa.annotations.versioning).previous_message_type = "envoy.type.matcher.RegexMatcher"; - - // Google's `RE2 `_ regex engine. The regex string must adhere to - // the documented `syntax `_. The engine is designed - // to complete execution in linear time as well as limit the amount of memory used. - // - // Envoy supports program size checking via runtime. The runtime keys `re2.max_program_size.error_level` - // and `re2.max_program_size.warn_level` can be set to integers as the maximum program size or - // complexity that a compiled regex can have before an exception is thrown or a warning is - // logged, respectively. `re2.max_program_size.error_level` defaults to 100, and - // `re2.max_program_size.warn_level` has no default if unset (will not check/log a warning). 
- // - // Envoy emits two stats for tracking the program size of regexes: the histogram `re2.program_size`, - // which records the program size, and the counter `re2.exceeded_warn_level`, which is incremented - // each time the program size exceeds the warn level threshold. - message GoogleRE2 { - option (udpa.annotations.versioning).previous_message_type = - "envoy.type.matcher.RegexMatcher.GoogleRE2"; - - // This field controls the RE2 "program size" which is a rough estimate of how complex a - // compiled regex is to evaluate. A regex that has a program size greater than the configured - // value will fail to compile. In this case, the configured max program size can be increased - // or the regex can be simplified. If not specified, the default is 100. - // - // This field is deprecated; regexp validation should be performed on the management server - // instead of being done by each individual client. - google.protobuf.UInt32Value max_program_size = 1 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - } - - oneof engine_type { - option (validate.required) = true; - - // Google's RE2 regex engine. - GoogleRE2 google_re2 = 1 [(validate.rules).message = {required: true}]; - } - - // The regex match string. The string must be supported by the configured engine. - string regex = 2 [(validate.rules).string = {min_len: 1}]; -} - -// Describes how to match a string and then produce a new string using a regular -// expression and a substitution string. -message RegexMatchAndSubstitute { - option (udpa.annotations.versioning).previous_message_type = - "envoy.type.matcher.RegexMatchAndSubstitute"; - - // The regular expression used to find portions of a string (hereafter called - // the "subject string") that should be replaced. 
When a new string is - // produced during the substitution operation, the new string is initially - // the same as the subject string, but then all matches in the subject string - // are replaced by the substitution string. If replacing all matches isn't - // desired, regular expression anchors can be used to ensure a single match, - // so as to replace just one occurrence of a pattern. Capture groups can be - // used in the pattern to extract portions of the subject string, and then - // referenced in the substitution string. - RegexMatcher pattern = 1 [(validate.rules).message = {required: true}]; - - // The string that should be substituted into matching portions of the - // subject string during a substitution operation to produce a new string. - // Capture groups in the pattern can be referenced in the substitution - // string. Note, however, that the syntax for referring to capture groups is - // defined by the chosen regular expression engine. Google's `RE2 - // `_ regular expression engine uses a - // backslash followed by the capture group number to denote a numbered - // capture group. E.g., ``\1`` refers to capture group 1, and ``\2`` refers - // to capture group 2. 
- string substitution = 2; -} diff --git a/generated_api_shadow/envoy/type/matcher/v3/string.proto b/generated_api_shadow/envoy/type/matcher/v3/string.proto deleted file mode 100644 index 4dc7cacffae6e..0000000000000 --- a/generated_api_shadow/envoy/type/matcher/v3/string.proto +++ /dev/null @@ -1,81 +0,0 @@ -syntax = "proto3"; - -package envoy.type.matcher.v3; - -import "envoy/type/matcher/v3/regex.proto"; - -import "envoy/annotations/deprecation.proto"; -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.type.matcher.v3"; -option java_outer_classname = "StringProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: String matcher] - -// Specifies the way to match a string. -// [#next-free-field: 8] -message StringMatcher { - option (udpa.annotations.versioning).previous_message_type = "envoy.type.matcher.StringMatcher"; - - oneof match_pattern { - option (validate.required) = true; - - // The input string must match exactly the string specified here. - // - // Examples: - // - // * *abc* only matches the value *abc*. - string exact = 1; - - // The input string must have the prefix specified here. - // Note: empty prefix is not allowed, please use regex instead. - // - // Examples: - // - // * *abc* matches the value *abc.xyz* - string prefix = 2 [(validate.rules).string = {min_len: 1}]; - - // The input string must have the suffix specified here. - // Note: empty prefix is not allowed, please use regex instead. - // - // Examples: - // - // * *abc* matches the value *xyz.abc* - string suffix = 3 [(validate.rules).string = {min_len: 1}]; - - // The input string must match the regular expression specified here. - RegexMatcher safe_regex = 5 [(validate.rules).message = {required: true}]; - - // The input string must have the substring specified here. 
- // Note: empty contains match is not allowed, please use regex instead. - // - // Examples: - // - // * *abc* matches the value *xyz.abc.def* - string contains = 7 [(validate.rules).string = {min_len: 1}]; - - string hidden_envoy_deprecated_regex = 4 [ - deprecated = true, - (validate.rules).string = {max_bytes: 1024}, - (envoy.annotations.deprecated_at_minor_version) = "3.0", - (envoy.annotations.disallowed_by_default) = true - ]; - } - - // If true, indicates the exact/prefix/suffix/contains matching should be case insensitive. This - // has no effect for the safe_regex match. - // For example, the matcher *data* will match both input string *Data* and *data* if set to true. - bool ignore_case = 6; -} - -// Specifies a list of ways to match a string. -message ListStringMatcher { - option (udpa.annotations.versioning).previous_message_type = - "envoy.type.matcher.ListStringMatcher"; - - repeated StringMatcher patterns = 1 [(validate.rules).repeated = {min_items: 1}]; -} diff --git a/generated_api_shadow/envoy/type/matcher/v3/struct.proto b/generated_api_shadow/envoy/type/matcher/v3/struct.proto deleted file mode 100644 index c753d07a5c0ac..0000000000000 --- a/generated_api_shadow/envoy/type/matcher/v3/struct.proto +++ /dev/null @@ -1,90 +0,0 @@ -syntax = "proto3"; - -package envoy.type.matcher.v3; - -import "envoy/type/matcher/v3/value.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.type.matcher.v3"; -option java_outer_classname = "StructProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Struct matcher] - -// StructMatcher provides a general interface to check if a given value is matched in -// google.protobuf.Struct. It uses `path` to retrieve the value -// from the struct and then check if it's matched to the specified value. 
-// -// For example, for the following Struct: -// -// .. code-block:: yaml -// -// fields: -// a: -// struct_value: -// fields: -// b: -// struct_value: -// fields: -// c: -// string_value: pro -// t: -// list_value: -// values: -// - string_value: m -// - string_value: n -// -// The following MetadataMatcher is matched as the path [a, b, c] will retrieve a string value "pro" -// from the Metadata which is matched to the specified prefix match. -// -// .. code-block:: yaml -// -// path: -// - key: a -// - key: b -// - key: c -// value: -// string_match: -// prefix: pr -// -// The following StructMatcher is matched as the code will match one of the string values in the -// list at the path [a, t]. -// -// .. code-block:: yaml -// -// path: -// - key: a -// - key: t -// value: -// list_match: -// one_of: -// string_match: -// exact: m -// -// An example use of StructMatcher is to match metadata in envoy.v*.core.Node. -message StructMatcher { - option (udpa.annotations.versioning).previous_message_type = "envoy.type.matcher.StructMatcher"; - - // Specifies the segment in a path to retrieve value from Struct. - message PathSegment { - option (udpa.annotations.versioning).previous_message_type = - "envoy.type.matcher.StructMatcher.PathSegment"; - - oneof segment { - option (validate.required) = true; - - // If specified, use the key to retrieve the value in a Struct. - string key = 1 [(validate.rules).string = {min_len: 1}]; - } - } - - // The path to retrieve the Value from the Struct. - repeated PathSegment path = 2 [(validate.rules).repeated = {min_items: 1}]; - - // The StructMatcher is matched if the value retrieved by path is matched to this value. 
- ValueMatcher value = 3 [(validate.rules).message = {required: true}]; -} diff --git a/generated_api_shadow/envoy/type/matcher/v3/value.proto b/generated_api_shadow/envoy/type/matcher/v3/value.proto deleted file mode 100644 index 040332273ba35..0000000000000 --- a/generated_api_shadow/envoy/type/matcher/v3/value.proto +++ /dev/null @@ -1,71 +0,0 @@ -syntax = "proto3"; - -package envoy.type.matcher.v3; - -import "envoy/type/matcher/v3/number.proto"; -import "envoy/type/matcher/v3/string.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.type.matcher.v3"; -option java_outer_classname = "ValueProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Value matcher] - -// Specifies the way to match a ProtobufWkt::Value. Primitive values and ListValue are supported. -// StructValue is not supported and is always not matched. -// [#next-free-field: 7] -message ValueMatcher { - option (udpa.annotations.versioning).previous_message_type = "envoy.type.matcher.ValueMatcher"; - - // NullMatch is an empty message to specify a null value. - message NullMatch { - option (udpa.annotations.versioning).previous_message_type = - "envoy.type.matcher.ValueMatcher.NullMatch"; - } - - // Specifies how to match a value. - oneof match_pattern { - option (validate.required) = true; - - // If specified, a match occurs if and only if the target value is a NullValue. - NullMatch null_match = 1; - - // If specified, a match occurs if and only if the target value is a double value and is - // matched to this field. - DoubleMatcher double_match = 2; - - // If specified, a match occurs if and only if the target value is a string value and is - // matched to this field. 
- StringMatcher string_match = 3; - - // If specified, a match occurs if and only if the target value is a bool value and is equal - // to this field. - bool bool_match = 4; - - // If specified, value match will be performed based on whether the path is referring to a - // valid primitive value in the metadata. If the path is referring to a non-primitive value, - // the result is always not matched. - bool present_match = 5; - - // If specified, a match occurs if and only if the target value is a list value and - // is matched to this field. - ListMatcher list_match = 6; - } -} - -// Specifies the way to match a list value. -message ListMatcher { - option (udpa.annotations.versioning).previous_message_type = "envoy.type.matcher.ListMatcher"; - - oneof match_pattern { - option (validate.required) = true; - - // If specified, at least one of the values in the list must match the value specified. - ValueMatcher one_of = 1; - } -} diff --git a/generated_api_shadow/envoy/type/matcher/value.proto b/generated_api_shadow/envoy/type/matcher/value.proto deleted file mode 100644 index aaecd14e8ecd4..0000000000000 --- a/generated_api_shadow/envoy/type/matcher/value.proto +++ /dev/null @@ -1,64 +0,0 @@ -syntax = "proto3"; - -package envoy.type.matcher; - -import "envoy/type/matcher/number.proto"; -import "envoy/type/matcher/string.proto"; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.type.matcher"; -option java_outer_classname = "ValueProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Value matcher] - -// Specifies the way to match a ProtobufWkt::Value. Primitive values and ListValue are supported. -// StructValue is not supported and is always not matched. -// [#next-free-field: 7] -message ValueMatcher { - // NullMatch is an empty message to specify a null value. 
- message NullMatch { - } - - // Specifies how to match a value. - oneof match_pattern { - option (validate.required) = true; - - // If specified, a match occurs if and only if the target value is a NullValue. - NullMatch null_match = 1; - - // If specified, a match occurs if and only if the target value is a double value and is - // matched to this field. - DoubleMatcher double_match = 2; - - // If specified, a match occurs if and only if the target value is a string value and is - // matched to this field. - StringMatcher string_match = 3; - - // If specified, a match occurs if and only if the target value is a bool value and is equal - // to this field. - bool bool_match = 4; - - // If specified, value match will be performed based on whether the path is referring to a - // valid primitive value in the metadata. If the path is referring to a non-primitive value, - // the result is always not matched. - bool present_match = 5; - - // If specified, a match occurs if and only if the target value is a list value and - // is matched to this field. - ListMatcher list_match = 6; - } -} - -// Specifies the way to match a list value. -message ListMatcher { - oneof match_pattern { - option (validate.required) = true; - - // If specified, at least one of the values in the list must match the value specified. - ValueMatcher one_of = 1; - } -} diff --git a/generated_api_shadow/envoy/type/metadata/v2/BUILD b/generated_api_shadow/envoy/type/metadata/v2/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/type/metadata/v2/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/type/metadata/v2/metadata.proto b/generated_api_shadow/envoy/type/metadata/v2/metadata.proto deleted file mode 100644 index 43a1a7ca92750..0000000000000 --- a/generated_api_shadow/envoy/type/metadata/v2/metadata.proto +++ /dev/null @@ -1,99 +0,0 @@ -syntax = "proto3"; - -package envoy.type.metadata.v2; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.type.metadata.v2"; -option java_outer_classname = "MetadataProto"; -option java_multiple_files = true; -option (udpa.annotations.file_migrate).move_to_package = "envoy.type.metadata.v3"; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Metadata] - -// MetadataKey provides a general interface using `key` and `path` to retrieve value from -// :ref:`Metadata `. -// -// For example, for the following Metadata: -// -// .. code-block:: yaml -// -// filter_metadata: -// envoy.xxx: -// prop: -// foo: bar -// xyz: -// hello: envoy -// -// The following MetadataKey will retrieve a string value "bar" from the Metadata. -// -// .. code-block:: yaml -// -// key: envoy.xxx -// path: -// - key: prop -// - key: foo -// -message MetadataKey { - // Specifies the segment in a path to retrieve value from Metadata. - // Currently it is only supported to specify the key, i.e. field name, as one segment of a path. - message PathSegment { - oneof segment { - option (validate.required) = true; - - // If specified, use the key to retrieve the value in a Struct. - string key = 1 [(validate.rules).string = {min_bytes: 1}]; - } - } - - // The key name of Metadata to retrieve the Struct from the metadata. 
- // Typically, it represents a builtin subsystem or custom extension. - string key = 1 [(validate.rules).string = {min_bytes: 1}]; - - // The path to retrieve the Value from the Struct. It can be a prefix or a full path, - // e.g. ``[prop, xyz]`` for a struct or ``[prop, foo]`` for a string in the example, - // which depends on the particular scenario. - // - // Note: Due to that only the key type segment is supported, the path can not specify a list - // unless the list is the last segment. - repeated PathSegment path = 2 [(validate.rules).repeated = {min_items: 1}]; -} - -// Describes what kind of metadata. -message MetadataKind { - // Represents dynamic metadata associated with the request. - message Request { - } - - // Represents metadata from :ref:`the route`. - message Route { - } - - // Represents metadata from :ref:`the upstream cluster`. - message Cluster { - } - - // Represents metadata from :ref:`the upstream - // host`. - message Host { - } - - oneof kind { - option (validate.required) = true; - - // Request kind of metadata. - Request request = 1; - - // Route kind of metadata. - Route route = 2; - - // Cluster kind of metadata. - Cluster cluster = 3; - - // Host kind of metadata. - Host host = 4; - } -} diff --git a/generated_api_shadow/envoy/type/metadata/v3/BUILD b/generated_api_shadow/envoy/type/metadata/v3/BUILD deleted file mode 100644 index aa64935f43d18..0000000000000 --- a/generated_api_shadow/envoy/type/metadata/v3/BUILD +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/type/metadata/v2:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/type/metadata/v3/metadata.proto b/generated_api_shadow/envoy/type/metadata/v3/metadata.proto deleted file mode 100644 index 5dd58b23c6231..0000000000000 --- a/generated_api_shadow/envoy/type/metadata/v3/metadata.proto +++ /dev/null @@ -1,114 +0,0 @@ -syntax = "proto3"; - -package envoy.type.metadata.v3; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.type.metadata.v3"; -option java_outer_classname = "MetadataProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Metadata] - -// MetadataKey provides a general interface using `key` and `path` to retrieve value from -// :ref:`Metadata `. -// -// For example, for the following Metadata: -// -// .. code-block:: yaml -// -// filter_metadata: -// envoy.xxx: -// prop: -// foo: bar -// xyz: -// hello: envoy -// -// The following MetadataKey will retrieve a string value "bar" from the Metadata. -// -// .. code-block:: yaml -// -// key: envoy.xxx -// path: -// - key: prop -// - key: foo -// -message MetadataKey { - option (udpa.annotations.versioning).previous_message_type = "envoy.type.metadata.v2.MetadataKey"; - - // Specifies the segment in a path to retrieve value from Metadata. - // Currently it is only supported to specify the key, i.e. field name, as one segment of a path. - message PathSegment { - option (udpa.annotations.versioning).previous_message_type = - "envoy.type.metadata.v2.MetadataKey.PathSegment"; - - oneof segment { - option (validate.required) = true; - - // If specified, use the key to retrieve the value in a Struct. 
- string key = 1 [(validate.rules).string = {min_len: 1}]; - } - } - - // The key name of Metadata to retrieve the Struct from the metadata. - // Typically, it represents a builtin subsystem or custom extension. - string key = 1 [(validate.rules).string = {min_len: 1}]; - - // The path to retrieve the Value from the Struct. It can be a prefix or a full path, - // e.g. ``[prop, xyz]`` for a struct or ``[prop, foo]`` for a string in the example, - // which depends on the particular scenario. - // - // Note: Due to that only the key type segment is supported, the path can not specify a list - // unless the list is the last segment. - repeated PathSegment path = 2 [(validate.rules).repeated = {min_items: 1}]; -} - -// Describes what kind of metadata. -message MetadataKind { - option (udpa.annotations.versioning).previous_message_type = - "envoy.type.metadata.v2.MetadataKind"; - - // Represents dynamic metadata associated with the request. - message Request { - option (udpa.annotations.versioning).previous_message_type = - "envoy.type.metadata.v2.MetadataKind.Request"; - } - - // Represents metadata from :ref:`the route`. - message Route { - option (udpa.annotations.versioning).previous_message_type = - "envoy.type.metadata.v2.MetadataKind.Route"; - } - - // Represents metadata from :ref:`the upstream cluster`. - message Cluster { - option (udpa.annotations.versioning).previous_message_type = - "envoy.type.metadata.v2.MetadataKind.Cluster"; - } - - // Represents metadata from :ref:`the upstream - // host`. - message Host { - option (udpa.annotations.versioning).previous_message_type = - "envoy.type.metadata.v2.MetadataKind.Host"; - } - - oneof kind { - option (validate.required) = true; - - // Request kind of metadata. - Request request = 1; - - // Route kind of metadata. - Route route = 2; - - // Cluster kind of metadata. - Cluster cluster = 3; - - // Host kind of metadata. 
- Host host = 4; - } -} diff --git a/generated_api_shadow/envoy/type/percent.proto b/generated_api_shadow/envoy/type/percent.proto deleted file mode 100644 index fc41a26662fe7..0000000000000 --- a/generated_api_shadow/envoy/type/percent.proto +++ /dev/null @@ -1,51 +0,0 @@ -syntax = "proto3"; - -package envoy.type; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.type"; -option java_outer_classname = "PercentProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Percent] - -// Identifies a percentage, in the range [0.0, 100.0]. -message Percent { - double value = 1 [(validate.rules).double = {lte: 100.0 gte: 0.0}]; -} - -// A fractional percentage is used in cases in which for performance reasons performing floating -// point to integer conversions during randomness calculations is undesirable. The message includes -// both a numerator and denominator that together determine the final fractional value. -// -// * **Example**: 1/100 = 1%. -// * **Example**: 3/10000 = 0.03%. -message FractionalPercent { - // Fraction percentages support several fixed denominator values. - enum DenominatorType { - // 100. - // - // **Example**: 1/100 = 1%. - HUNDRED = 0; - - // 10,000. - // - // **Example**: 1/10000 = 0.01%. - TEN_THOUSAND = 1; - - // 1,000,000. - // - // **Example**: 1/1000000 = 0.0001%. - MILLION = 2; - } - - // Specifies the numerator. Defaults to 0. - uint32 numerator = 1; - - // Specifies the denominator. If the denominator specified is less than the numerator, the final - // fractional percentage is capped at 1 (100%). 
- DenominatorType denominator = 2 [(validate.rules).enum = {defined_only: true}]; -} diff --git a/generated_api_shadow/envoy/type/range.proto b/generated_api_shadow/envoy/type/range.proto deleted file mode 100644 index 79aaa81975c38..0000000000000 --- a/generated_api_shadow/envoy/type/range.proto +++ /dev/null @@ -1,42 +0,0 @@ -syntax = "proto3"; - -package envoy.type; - -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.type"; -option java_outer_classname = "RangeProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Range] - -// Specifies the int64 start and end of the range using half-open interval semantics [start, -// end). -message Int64Range { - // start of the range (inclusive) - int64 start = 1; - - // end of the range (exclusive) - int64 end = 2; -} - -// Specifies the int32 start and end of the range using half-open interval semantics [start, -// end). -message Int32Range { - // start of the range (inclusive) - int32 start = 1; - - // end of the range (exclusive) - int32 end = 2; -} - -// Specifies the double start and end of the range using half-open interval semantics [start, -// end). 
-message DoubleRange { - // start of the range (inclusive) - double start = 1; - - // end of the range (exclusive) - double end = 2; -} diff --git a/generated_api_shadow/envoy/type/semantic_version.proto b/generated_api_shadow/envoy/type/semantic_version.proto deleted file mode 100644 index 80fe016bfa161..0000000000000 --- a/generated_api_shadow/envoy/type/semantic_version.proto +++ /dev/null @@ -1,23 +0,0 @@ -syntax = "proto3"; - -package envoy.type; - -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.type"; -option java_outer_classname = "SemanticVersionProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Semantic Version] - -// Envoy uses SemVer (https://semver.org/). Major/minor versions indicate -// expected behaviors and APIs, the patch version field is used only -// for security fixes and can be generally ignored. -message SemanticVersion { - uint32 major_number = 1; - - uint32 minor_number = 2; - - uint32 patch = 3; -} diff --git a/generated_api_shadow/envoy/type/token_bucket.proto b/generated_api_shadow/envoy/type/token_bucket.proto deleted file mode 100644 index 41b6d268d5f6f..0000000000000 --- a/generated_api_shadow/envoy/type/token_bucket.proto +++ /dev/null @@ -1,35 +0,0 @@ -syntax = "proto3"; - -package envoy.type; - -import "google/protobuf/duration.proto"; -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.type"; -option java_outer_classname = "TokenBucketProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Token bucket] - -// Configures a token bucket, typically used for rate limiting. -message TokenBucket { - // The maximum tokens that the bucket can hold. 
This is also the number of tokens that the bucket - // initially contains. - uint32 max_tokens = 1 [(validate.rules).uint32 = {gt: 0}]; - - // The number of tokens added to the bucket during each fill interval. If not specified, defaults - // to a single token. - google.protobuf.UInt32Value tokens_per_fill = 2 [(validate.rules).uint32 = {gt: 0}]; - - // The fill interval that tokens are added to the bucket. During each fill interval - // `tokens_per_fill` are added to the bucket. The bucket will never contain more than - // `max_tokens` tokens. - google.protobuf.Duration fill_interval = 3 [(validate.rules).duration = { - required: true - gt {} - }]; -} diff --git a/generated_api_shadow/envoy/type/tracing/v2/BUILD b/generated_api_shadow/envoy/type/tracing/v2/BUILD deleted file mode 100644 index aa64935f43d18..0000000000000 --- a/generated_api_shadow/envoy/type/tracing/v2/BUILD +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/type/metadata/v2:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/type/tracing/v2/custom_tag.proto b/generated_api_shadow/envoy/type/tracing/v2/custom_tag.proto deleted file mode 100644 index 7506ae8861254..0000000000000 --- a/generated_api_shadow/envoy/type/tracing/v2/custom_tag.proto +++ /dev/null @@ -1,86 +0,0 @@ -syntax = "proto3"; - -package envoy.type.tracing.v2; - -import "envoy/type/metadata/v2/metadata.proto"; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.type.tracing.v2"; -option java_outer_classname = "CustomTagProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -// [#protodoc-title: Custom Tag] - -// Describes custom tags for the 
active span. -// [#next-free-field: 6] -message CustomTag { - // Literal type custom tag with static value for the tag value. - message Literal { - // Static literal value to populate the tag value. - string value = 1 [(validate.rules).string = {min_bytes: 1}]; - } - - // Environment type custom tag with environment name and default value. - message Environment { - // Environment variable name to obtain the value to populate the tag value. - string name = 1 [(validate.rules).string = {min_bytes: 1}]; - - // When the environment variable is not found, - // the tag value will be populated with this default value if specified, - // otherwise no tag will be populated. - string default_value = 2; - } - - // Header type custom tag with header name and default value. - message Header { - // Header name to obtain the value to populate the tag value. - string name = 1 - [(validate.rules).string = {min_bytes: 1 well_known_regex: HTTP_HEADER_NAME strict: false}]; - - // When the header does not exist, - // the tag value will be populated with this default value if specified, - // otherwise no tag will be populated. - string default_value = 2; - } - - // Metadata type custom tag using - // :ref:`MetadataKey ` to retrieve the protobuf value - // from :ref:`Metadata `, and populate the tag value with - // `the canonical JSON `_ - // representation of it. - message Metadata { - // Specify what kind of metadata to obtain tag value from. - metadata.v2.MetadataKind kind = 1; - - // Metadata key to define the path to retrieve the tag value. - metadata.v2.MetadataKey metadata_key = 2; - - // When no valid metadata is found, - // the tag value would be populated with this default value if specified, - // otherwise no tag would be populated. - string default_value = 3; - } - - // Used to populate the tag name. - string tag = 1 [(validate.rules).string = {min_bytes: 1}]; - - // Used to specify what kind of custom tag. 
- oneof type { - option (validate.required) = true; - - // A literal custom tag. - Literal literal = 2; - - // An environment custom tag. - Environment environment = 3; - - // A request header custom tag. - Header request_header = 4; - - // A custom tag to obtain tag value from the metadata. - Metadata metadata = 5; - } -} diff --git a/generated_api_shadow/envoy/type/tracing/v3/BUILD b/generated_api_shadow/envoy/type/tracing/v3/BUILD deleted file mode 100644 index 38eb160d482bf..0000000000000 --- a/generated_api_shadow/envoy/type/tracing/v3/BUILD +++ /dev/null @@ -1,13 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/type/metadata/v3:pkg", - "//envoy/type/tracing/v2:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/type/tracing/v3/custom_tag.proto b/generated_api_shadow/envoy/type/tracing/v3/custom_tag.proto deleted file mode 100644 index ad99cafb22bf4..0000000000000 --- a/generated_api_shadow/envoy/type/tracing/v3/custom_tag.proto +++ /dev/null @@ -1,101 +0,0 @@ -syntax = "proto3"; - -package envoy.type.tracing.v3; - -import "envoy/type/metadata/v3/metadata.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.type.tracing.v3"; -option java_outer_classname = "CustomTagProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Custom Tag] - -// Describes custom tags for the active span. -// [#next-free-field: 6] -message CustomTag { - option (udpa.annotations.versioning).previous_message_type = "envoy.type.tracing.v2.CustomTag"; - - // Literal type custom tag with static value for the tag value. 
- message Literal { - option (udpa.annotations.versioning).previous_message_type = - "envoy.type.tracing.v2.CustomTag.Literal"; - - // Static literal value to populate the tag value. - string value = 1 [(validate.rules).string = {min_len: 1}]; - } - - // Environment type custom tag with environment name and default value. - message Environment { - option (udpa.annotations.versioning).previous_message_type = - "envoy.type.tracing.v2.CustomTag.Environment"; - - // Environment variable name to obtain the value to populate the tag value. - string name = 1 [(validate.rules).string = {min_len: 1}]; - - // When the environment variable is not found, - // the tag value will be populated with this default value if specified, - // otherwise no tag will be populated. - string default_value = 2; - } - - // Header type custom tag with header name and default value. - message Header { - option (udpa.annotations.versioning).previous_message_type = - "envoy.type.tracing.v2.CustomTag.Header"; - - // Header name to obtain the value to populate the tag value. - string name = 1 - [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}]; - - // When the header does not exist, - // the tag value will be populated with this default value if specified, - // otherwise no tag will be populated. - string default_value = 2; - } - - // Metadata type custom tag using - // :ref:`MetadataKey ` to retrieve the protobuf value - // from :ref:`Metadata `, and populate the tag value with - // `the canonical JSON `_ - // representation of it. - message Metadata { - option (udpa.annotations.versioning).previous_message_type = - "envoy.type.tracing.v2.CustomTag.Metadata"; - - // Specify what kind of metadata to obtain tag value from. - metadata.v3.MetadataKind kind = 1; - - // Metadata key to define the path to retrieve the tag value. 
- metadata.v3.MetadataKey metadata_key = 2; - - // When no valid metadata is found, - // the tag value would be populated with this default value if specified, - // otherwise no tag would be populated. - string default_value = 3; - } - - // Used to populate the tag name. - string tag = 1 [(validate.rules).string = {min_len: 1}]; - - // Used to specify what kind of custom tag. - oneof type { - option (validate.required) = true; - - // A literal custom tag. - Literal literal = 2; - - // An environment custom tag. - Environment environment = 3; - - // A request header custom tag. - Header request_header = 4; - - // A custom tag to obtain tag value from the metadata. - Metadata metadata = 5; - } -} diff --git a/generated_api_shadow/envoy/type/v3/BUILD b/generated_api_shadow/envoy/type/v3/BUILD deleted file mode 100644 index da3a8659d2a8b..0000000000000 --- a/generated_api_shadow/envoy/type/v3/BUILD +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
- -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = [ - "//envoy/type:pkg", - "@com_github_cncf_udpa//udpa/annotations:pkg", - ], -) diff --git a/generated_api_shadow/envoy/type/v3/hash_policy.proto b/generated_api_shadow/envoy/type/v3/hash_policy.proto deleted file mode 100644 index 96c39299698fc..0000000000000 --- a/generated_api_shadow/envoy/type/v3/hash_policy.proto +++ /dev/null @@ -1,31 +0,0 @@ -syntax = "proto3"; - -package envoy.type.v3; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.type.v3"; -option java_outer_classname = "HashPolicyProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Hash Policy] - -// Specifies the hash policy -message HashPolicy { - option (udpa.annotations.versioning).previous_message_type = "envoy.type.HashPolicy"; - - // The source IP will be used to compute the hash used by hash-based load balancing - // algorithms. 
- message SourceIp { - option (udpa.annotations.versioning).previous_message_type = "envoy.type.HashPolicy.SourceIp"; - } - - oneof policy_specifier { - option (validate.required) = true; - - SourceIp source_ip = 1; - } -} diff --git a/generated_api_shadow/envoy/type/v3/http.proto b/generated_api_shadow/envoy/type/v3/http.proto deleted file mode 100644 index fec15d11f871c..0000000000000 --- a/generated_api_shadow/envoy/type/v3/http.proto +++ /dev/null @@ -1,23 +0,0 @@ -syntax = "proto3"; - -package envoy.type.v3; - -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.type.v3"; -option java_outer_classname = "HttpProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: HTTP] - -enum CodecClientType { - HTTP1 = 0; - - HTTP2 = 1; - - // [#not-implemented-hide:] QUIC implementation is not production ready yet. Use this enum with - // caution to prevent accidental execution of QUIC code. I.e. `!= HTTP2` is no longer sufficient - // to distinguish HTTP1 and HTTP2 traffic. - HTTP3 = 2; -} diff --git a/generated_api_shadow/envoy/type/v3/http_status.proto b/generated_api_shadow/envoy/type/v3/http_status.proto deleted file mode 100644 index 8914b7a0264ae..0000000000000 --- a/generated_api_shadow/envoy/type/v3/http_status.proto +++ /dev/null @@ -1,142 +0,0 @@ -syntax = "proto3"; - -package envoy.type.v3; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.type.v3"; -option java_outer_classname = "HttpStatusProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: HTTP status codes] - -// HTTP response codes supported in Envoy. 
-// For more details: https://www.iana.org/assignments/http-status-codes/http-status-codes.xhtml -enum StatusCode { - // Empty - This code not part of the HTTP status code specification, but it is needed for proto - // `enum` type. - Empty = 0; - - Continue = 100; - - OK = 200; - - Created = 201; - - Accepted = 202; - - NonAuthoritativeInformation = 203; - - NoContent = 204; - - ResetContent = 205; - - PartialContent = 206; - - MultiStatus = 207; - - AlreadyReported = 208; - - IMUsed = 226; - - MultipleChoices = 300; - - MovedPermanently = 301; - - Found = 302; - - SeeOther = 303; - - NotModified = 304; - - UseProxy = 305; - - TemporaryRedirect = 307; - - PermanentRedirect = 308; - - BadRequest = 400; - - Unauthorized = 401; - - PaymentRequired = 402; - - Forbidden = 403; - - NotFound = 404; - - MethodNotAllowed = 405; - - NotAcceptable = 406; - - ProxyAuthenticationRequired = 407; - - RequestTimeout = 408; - - Conflict = 409; - - Gone = 410; - - LengthRequired = 411; - - PreconditionFailed = 412; - - PayloadTooLarge = 413; - - URITooLong = 414; - - UnsupportedMediaType = 415; - - RangeNotSatisfiable = 416; - - ExpectationFailed = 417; - - MisdirectedRequest = 421; - - UnprocessableEntity = 422; - - Locked = 423; - - FailedDependency = 424; - - UpgradeRequired = 426; - - PreconditionRequired = 428; - - TooManyRequests = 429; - - RequestHeaderFieldsTooLarge = 431; - - InternalServerError = 500; - - NotImplemented = 501; - - BadGateway = 502; - - ServiceUnavailable = 503; - - GatewayTimeout = 504; - - HTTPVersionNotSupported = 505; - - VariantAlsoNegotiates = 506; - - InsufficientStorage = 507; - - LoopDetected = 508; - - NotExtended = 510; - - NetworkAuthenticationRequired = 511; -} - -// HTTP status. -message HttpStatus { - option (udpa.annotations.versioning).previous_message_type = "envoy.type.HttpStatus"; - - // Supplies HTTP response code. 
- StatusCode code = 1 [(validate.rules).enum = {defined_only: true not_in: 0}]; -} diff --git a/generated_api_shadow/envoy/type/v3/percent.proto b/generated_api_shadow/envoy/type/v3/percent.proto deleted file mode 100644 index 3a89a3f44fd5f..0000000000000 --- a/generated_api_shadow/envoy/type/v3/percent.proto +++ /dev/null @@ -1,56 +0,0 @@ -syntax = "proto3"; - -package envoy.type.v3; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.type.v3"; -option java_outer_classname = "PercentProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Percent] - -// Identifies a percentage, in the range [0.0, 100.0]. -message Percent { - option (udpa.annotations.versioning).previous_message_type = "envoy.type.Percent"; - - double value = 1 [(validate.rules).double = {lte: 100.0 gte: 0.0}]; -} - -// A fractional percentage is used in cases in which for performance reasons performing floating -// point to integer conversions during randomness calculations is undesirable. The message includes -// both a numerator and denominator that together determine the final fractional value. -// -// * **Example**: 1/100 = 1%. -// * **Example**: 3/10000 = 0.03%. -message FractionalPercent { - option (udpa.annotations.versioning).previous_message_type = "envoy.type.FractionalPercent"; - - // Fraction percentages support several fixed denominator values. - enum DenominatorType { - // 100. - // - // **Example**: 1/100 = 1%. - HUNDRED = 0; - - // 10,000. - // - // **Example**: 1/10000 = 0.01%. - TEN_THOUSAND = 1; - - // 1,000,000. - // - // **Example**: 1/1000000 = 0.0001%. - MILLION = 2; - } - - // Specifies the numerator. Defaults to 0. - uint32 numerator = 1; - - // Specifies the denominator. 
If the denominator specified is less than the numerator, the final - // fractional percentage is capped at 1 (100%). - DenominatorType denominator = 2 [(validate.rules).enum = {defined_only: true}]; -} diff --git a/generated_api_shadow/envoy/type/v3/range.proto b/generated_api_shadow/envoy/type/v3/range.proto deleted file mode 100644 index de1d55b09a214..0000000000000 --- a/generated_api_shadow/envoy/type/v3/range.proto +++ /dev/null @@ -1,49 +0,0 @@ -syntax = "proto3"; - -package envoy.type.v3; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; - -option java_package = "io.envoyproxy.envoy.type.v3"; -option java_outer_classname = "RangeProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Range] - -// Specifies the int64 start and end of the range using half-open interval semantics [start, -// end). -message Int64Range { - option (udpa.annotations.versioning).previous_message_type = "envoy.type.Int64Range"; - - // start of the range (inclusive) - int64 start = 1; - - // end of the range (exclusive) - int64 end = 2; -} - -// Specifies the int32 start and end of the range using half-open interval semantics [start, -// end). -message Int32Range { - option (udpa.annotations.versioning).previous_message_type = "envoy.type.Int32Range"; - - // start of the range (inclusive) - int32 start = 1; - - // end of the range (exclusive) - int32 end = 2; -} - -// Specifies the double start and end of the range using half-open interval semantics [start, -// end). 
-message DoubleRange { - option (udpa.annotations.versioning).previous_message_type = "envoy.type.DoubleRange"; - - // start of the range (inclusive) - double start = 1; - - // end of the range (exclusive) - double end = 2; -} diff --git a/generated_api_shadow/envoy/type/v3/ratelimit_unit.proto b/generated_api_shadow/envoy/type/v3/ratelimit_unit.proto deleted file mode 100644 index a3fb27ff47ba0..0000000000000 --- a/generated_api_shadow/envoy/type/v3/ratelimit_unit.proto +++ /dev/null @@ -1,30 +0,0 @@ -syntax = "proto3"; - -package envoy.type.v3; - -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.type.v3"; -option java_outer_classname = "RatelimitUnitProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Ratelimit Time Unit] - -// Identifies the unit of of time for rate limit. -enum RateLimitUnit { - // The time unit is not known. - UNKNOWN = 0; - - // The time unit representing a second. - SECOND = 1; - - // The time unit representing a minute. - MINUTE = 2; - - // The time unit representing an hour. - HOUR = 3; - - // The time unit representing a day. - DAY = 4; -} diff --git a/generated_api_shadow/envoy/type/v3/semantic_version.proto b/generated_api_shadow/envoy/type/v3/semantic_version.proto deleted file mode 100644 index a4126336f03ae..0000000000000 --- a/generated_api_shadow/envoy/type/v3/semantic_version.proto +++ /dev/null @@ -1,26 +0,0 @@ -syntax = "proto3"; - -package envoy.type.v3; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; - -option java_package = "io.envoyproxy.envoy.type.v3"; -option java_outer_classname = "SemanticVersionProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Semantic Version] - -// Envoy uses SemVer (https://semver.org/). 
Major/minor versions indicate -// expected behaviors and APIs, the patch version field is used only -// for security fixes and can be generally ignored. -message SemanticVersion { - option (udpa.annotations.versioning).previous_message_type = "envoy.type.SemanticVersion"; - - uint32 major_number = 1; - - uint32 minor_number = 2; - - uint32 patch = 3; -} diff --git a/generated_api_shadow/envoy/type/v3/token_bucket.proto b/generated_api_shadow/envoy/type/v3/token_bucket.proto deleted file mode 100644 index a96d50fbd0abc..0000000000000 --- a/generated_api_shadow/envoy/type/v3/token_bucket.proto +++ /dev/null @@ -1,38 +0,0 @@ -syntax = "proto3"; - -package envoy.type.v3; - -import "google/protobuf/duration.proto"; -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.type.v3"; -option java_outer_classname = "TokenBucketProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Token bucket] - -// Configures a token bucket, typically used for rate limiting. -message TokenBucket { - option (udpa.annotations.versioning).previous_message_type = "envoy.type.TokenBucket"; - - // The maximum tokens that the bucket can hold. This is also the number of tokens that the bucket - // initially contains. - uint32 max_tokens = 1 [(validate.rules).uint32 = {gt: 0}]; - - // The number of tokens added to the bucket during each fill interval. If not specified, defaults - // to a single token. - google.protobuf.UInt32Value tokens_per_fill = 2 [(validate.rules).uint32 = {gt: 0}]; - - // The fill interval that tokens are added to the bucket. During each fill interval - // `tokens_per_fill` are added to the bucket. The bucket will never contain more than - // `max_tokens` tokens. 
- google.protobuf.Duration fill_interval = 3 [(validate.rules).duration = { - required: true - gt {} - }]; -} diff --git a/generated_api_shadow/envoy/watchdog/v3alpha/BUILD b/generated_api_shadow/envoy/watchdog/v3alpha/BUILD deleted file mode 100644 index ee92fb652582e..0000000000000 --- a/generated_api_shadow/envoy/watchdog/v3alpha/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. - -load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") - -licenses(["notice"]) # Apache 2 - -api_proto_package( - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/generated_api_shadow/envoy/watchdog/v3alpha/abort_action.proto b/generated_api_shadow/envoy/watchdog/v3alpha/abort_action.proto deleted file mode 100644 index d6f34aa892cdb..0000000000000 --- a/generated_api_shadow/envoy/watchdog/v3alpha/abort_action.proto +++ /dev/null @@ -1,27 +0,0 @@ -syntax = "proto3"; - -package envoy.watchdog.v3alpha; - -import "google/protobuf/duration.proto"; - -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.watchdog.v3alpha"; -option java_outer_classname = "AbortActionProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).work_in_progress = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Watchdog Action that kills a stuck thread to kill the process.] - -// A GuardDogAction that will terminate the process by killing the -// stuck thread. This would allow easier access to the call stack of the stuck -// thread since we would run signal handlers on that thread. By default -// this will be registered to run as the last watchdog action on KILL and -// MULTIKILL events if those are enabled. -message AbortActionConfig { - // How long to wait for the thread to respond to the thread kill function - // before killing the process from this action. This is a blocking action. 
- // By default this is 5 seconds. - google.protobuf.Duration wait_duration = 1; -} diff --git a/source/common/config/subscription_factory_impl.cc b/source/common/config/subscription_factory_impl.cc index 74f5fb439e3e1..ad28b13ce7e3a 100644 --- a/source/common/config/subscription_factory_impl.cc +++ b/source/common/config/subscription_factory_impl.cc @@ -44,7 +44,7 @@ SubscriptionPtr SubscriptionFactoryImpl::subscriptionFromConfigSource( Utility::checkTransportVersion(api_config_source); const auto transport_api_version = envoy::config::core::v3::ApiVersion::V3; switch (api_config_source.api_type()) { - case envoy::config::core::v3::ApiConfigSource::hidden_envoy_deprecated_UNSUPPORTED_REST_LEGACY: + case envoy::config::core::v3::ApiConfigSource::DEPRECATED_AND_UNAVAILABLE_DO_NOT_USE: throw EnvoyException( "REST_LEGACY no longer a supported ApiConfigSource. " "Please specify an explicit supported api_type in the following config:\n" + diff --git a/source/common/router/config_impl.cc b/source/common/router/config_impl.cc index c3dd32519f74e..68ed6e9096035 100644 --- a/source/common/router/config_impl.cc +++ b/source/common/router/config_impl.cc @@ -1269,7 +1269,6 @@ VirtualHostImpl::VirtualHostImpl( new PathRouteEntryImpl(*this, route, optional_http_filters, factory_context, validator)); break; } - case envoy::config::route::v3::RouteMatch::PathSpecifierCase::kHiddenEnvoyDeprecatedRegex: case envoy::config::route::v3::RouteMatch::PathSpecifierCase::kSafeRegex: { routes_.emplace_back( new RegexRouteEntryImpl(*this, route, optional_http_filters, factory_context, validator)); diff --git a/source/common/tcp_proxy/tcp_proxy.cc b/source/common/tcp_proxy/tcp_proxy.cc index 5cb4675975b97..b22fa5e956e5f 100644 --- a/source/common/tcp_proxy/tcp_proxy.cc +++ b/source/common/tcp_proxy/tcp_proxy.cc @@ -37,51 +37,8 @@ const std::string& PerConnectionCluster::key() { CONSTRUCT_ON_FIRST_USE(std::string, "envoy.tcp_proxy.cluster"); } -Config::RouteImpl::RouteImpl( - const Config& 
parent, - const envoy::extensions::filters::network::tcp_proxy::v3::TcpProxy::DeprecatedV1::TCPRoute& - config) - : parent_(parent) { - cluster_name_ = config.cluster(); - - source_ips_ = Network::Address::IpList(config.source_ip_list()); - destination_ips_ = Network::Address::IpList(config.destination_ip_list()); - - if (!config.source_ports().empty()) { - Network::Utility::parsePortRangeList(config.source_ports(), source_port_ranges_); - } - - if (!config.destination_ports().empty()) { - Network::Utility::parsePortRangeList(config.destination_ports(), destination_port_ranges_); - } -} - -bool Config::RouteImpl::matches(Network::Connection& connection) const { - if (!source_port_ranges_.empty() && - !Network::Utility::portInRangeList(*connection.connectionInfoProvider().remoteAddress(), - source_port_ranges_)) { - return false; - } - - if (!source_ips_.empty() && - !source_ips_.contains(*connection.connectionInfoProvider().remoteAddress())) { - return false; - } - - if (!destination_port_ranges_.empty() && - !Network::Utility::portInRangeList(*connection.connectionInfoProvider().localAddress(), - destination_port_ranges_)) { - return false; - } - - if (!destination_ips_.empty() && - !destination_ips_.contains(*connection.connectionInfoProvider().localAddress())) { - return false; - } - - // if we made it past all checks, the route matches - return true; -} +Config::SimpleRouteImpl::SimpleRouteImpl(const Config& parent, absl::string_view cluster_name) + : parent_(parent), cluster_name_(cluster_name) {} Config::WeightedClusterEntry::WeightedClusterEntry( const Config& parent, const envoy::extensions::filters::network::tcp_proxy::v3::TcpProxy:: @@ -139,10 +96,7 @@ Config::Config(const envoy::extensions::filters::network::tcp_proxy::v3::TcpProx }); if (!config.cluster().empty()) { - envoy::extensions::filters::network::tcp_proxy::v3::TcpProxy::DeprecatedV1::TCPRoute - default_route; - default_route.set_cluster(config.cluster()); - 
routes_.emplace_back(std::make_shared(*this, default_route)); + default_route_ = std::make_shared(*this, config.cluster()); } if (config.has_metadata_match()) { @@ -156,9 +110,8 @@ Config::Config(const envoy::extensions::filters::network::tcp_proxy::v3::TcpProx } } - // Weighted clusters will be enabled only if both the default cluster and - // deprecated v1 routes are absent. - if (routes_.empty() && config.has_weighted_clusters()) { + // Weighted clusters will be enabled only if the default cluster is absent. + if (default_route_ == nullptr && config.has_weighted_clusters()) { total_cluster_weight_ = 0; for (const envoy::extensions::filters::network::tcp_proxy::v3::TcpProxy::WeightedCluster:: ClusterWeight& cluster_desc : config.weighted_clusters().clusters()) { @@ -186,16 +139,11 @@ RouteConstSharedPtr Config::getRegularRouteFromEntries(Network::Connection& conn connection.streamInfo().filterState()->getDataReadOnly( PerConnectionCluster::key()); - envoy::extensions::filters::network::tcp_proxy::v3::TcpProxy::DeprecatedV1::TCPRoute - per_connection_route; - per_connection_route.set_cluster(per_connection_cluster.value()); - return std::make_shared(*this, per_connection_route); + return std::make_shared(*this, per_connection_cluster.value()); } - for (const RouteConstSharedPtr& route : routes_) { - if (route->matches(connection)) { - return route; - } + if (default_route_ != nullptr) { + return default_route_; } // no match, no more routes to try diff --git a/source/common/tcp_proxy/tcp_proxy.h b/source/common/tcp_proxy/tcp_proxy.h index b157db0e1b845..3b64e58f5add5 100644 --- a/source/common/tcp_proxy/tcp_proxy.h +++ b/source/common/tcp_proxy/tcp_proxy.h @@ -161,24 +161,17 @@ class Config { const Network::HashPolicy* hashPolicy() { return hash_policy_.get(); } private: - struct RouteImpl : public Route { - RouteImpl( - const Config& parent, - const envoy::extensions::filters::network::tcp_proxy::v3::TcpProxy::DeprecatedV1::TCPRoute& - config); + struct 
SimpleRouteImpl : public Route { + SimpleRouteImpl(const Config& parent, absl::string_view cluster_name); // Route - bool matches(Network::Connection& connection) const override; + bool matches(Network::Connection&) const override { return true; } const std::string& clusterName() const override { return cluster_name_; } const Router::MetadataMatchCriteria* metadataMatchCriteria() const override { return parent_.metadataMatchCriteria(); } const Config& parent_; - Network::Address::IpList source_ips_; - Network::PortRangeList source_port_ranges_; - Network::Address::IpList destination_ips_; - Network::PortRangeList destination_port_ranges_; std::string cluster_name_; }; @@ -208,7 +201,7 @@ class Config { }; using WeightedClusterEntryConstSharedPtr = std::shared_ptr; - std::vector routes_; + RouteConstSharedPtr default_route_; std::vector weighted_clusters_; uint64_t total_cluster_weight_; std::vector access_logs_; diff --git a/source/extensions/filters/http/jwt_authn/matcher.cc b/source/extensions/filters/http/jwt_authn/matcher.cc index 7eb1cfcab9d85..3439396a0fbed 100644 --- a/source/extensions/filters/http/jwt_authn/matcher.cc +++ b/source/extensions/filters/http/jwt_authn/matcher.cc @@ -159,7 +159,6 @@ MatcherConstPtr Matcher::create(const RequirementRule& rule) { return std::make_unique(rule); case RouteMatch::PathSpecifierCase::kPath: return std::make_unique(rule); - case RouteMatch::PathSpecifierCase::kHiddenEnvoyDeprecatedRegex: case RouteMatch::PathSpecifierCase::kSafeRegex: return std::make_unique(rule); case RouteMatch::PathSpecifierCase::kConnectMatcher: diff --git a/source/extensions/tracers/zipkin/span_buffer.cc b/source/extensions/tracers/zipkin/span_buffer.cc index 603a9129c0def..5c07f76abe397 100644 --- a/source/extensions/tracers/zipkin/span_buffer.cc +++ b/source/extensions/tracers/zipkin/span_buffer.cc @@ -48,7 +48,7 @@ SerializerPtr SpanBuffer::makeSerializer( const envoy::config::trace::v3::ZipkinConfig::CollectorEndpointVersion& version, const 
bool shared_span_context) { switch (version) { - case envoy::config::trace::v3::ZipkinConfig::hidden_envoy_deprecated_HTTP_JSON_V1: + case envoy::config::trace::v3::ZipkinConfig::DEPRECATED_AND_UNAVAILABLE_DO_NOT_USE: throw EnvoyException( "hidden_envoy_deprecated_HTTP_JSON_V1 has been deprecated. Please use a non-default " "envoy::config::trace::v3::ZipkinConfig::CollectorEndpointVersion value."); diff --git a/source/server/config_validation/server.cc b/source/server/config_validation/server.cc index 6a0039c7f189e..57eba26cabcbb 100644 --- a/source/server/config_validation/server.cc +++ b/source/server/config_validation/server.cc @@ -83,7 +83,9 @@ void ValidationInstance::initialize(const Options& options, messageValidationContext().staticValidationVisitor(), *api_); Config::Utility::createTagProducer(bootstrap_); - bootstrap_.mutable_node()->set_hidden_envoy_deprecated_build_version(VersionInfo::version()); + if (!bootstrap_.node().user_agent_build_version().has_version()) { + *bootstrap_.mutable_node()->mutable_user_agent_build_version() = VersionInfo::buildVersion(); + } local_info_ = std::make_unique( stats().symbolTable(), bootstrap_.node(), bootstrap_.node_context_params(), local_address, diff --git a/test/common/config/BUILD b/test/common/config/BUILD index 98e4105927875..8886ac4a40399 100644 --- a/test/common/config/BUILD +++ b/test/common/config/BUILD @@ -10,12 +10,6 @@ licenses(["notice"]) # Apache 2 envoy_package() -envoy_cc_test( - name = "api_shadow_test", - srcs = ["api_shadow_test.cc"], - deps = ["@envoy_api//envoy/config/cluster/v3:pkg_cc_proto"], -) - envoy_cc_test( name = "decoded_resource_impl_test", srcs = ["decoded_resource_impl_test.cc"], diff --git a/test/common/config/api_shadow_test.cc b/test/common/config/api_shadow_test.cc deleted file mode 100644 index 2f4936ad5f813..0000000000000 --- a/test/common/config/api_shadow_test.cc +++ /dev/null @@ -1,20 +0,0 @@ -#include "envoy/config/cluster/v3/cluster.pb.h" - -#include "gtest/gtest.h" - 
-namespace Envoy { -namespace Config { -namespace { - -// Validate that deprecated fields are accessible via the shadow protos. -TEST(ApiShadowTest, All) { - envoy::config::cluster::v3::Cluster cluster; - - cluster.mutable_hidden_envoy_deprecated_tls_context(); - cluster.set_lb_policy( - envoy::config::cluster::v3::Cluster::hidden_envoy_deprecated_ORIGINAL_DST_LB); -} - -} // namespace -} // namespace Config -} // namespace Envoy diff --git a/test/common/upstream/cluster_manager_impl_test.cc b/test/common/upstream/cluster_manager_impl_test.cc index 86f133ecef107..06b776d8d03fb 100644 --- a/test/common/upstream/cluster_manager_impl_test.cc +++ b/test/common/upstream/cluster_manager_impl_test.cc @@ -524,10 +524,7 @@ class ClusterManagerSubsetInitializationTest for (int i = first; i <= last; i++) { if (envoy::config::cluster::v3::Cluster::LbPolicy_IsValid(i)) { auto policy = static_cast(i); - if (policy != - envoy::config::cluster::v3::Cluster::hidden_envoy_deprecated_ORIGINAL_DST_LB) { - policies.push_back(policy); - } + policies.push_back(policy); } } return policies; diff --git a/test/extensions/tracers/zipkin/span_buffer_test.cc b/test/extensions/tracers/zipkin/span_buffer_test.cc index 92c8a7960b864..ed1d717edeeff 100644 --- a/test/extensions/tracers/zipkin/span_buffer_test.cc +++ b/test/extensions/tracers/zipkin/span_buffer_test.cc @@ -464,7 +464,7 @@ TEST(ZipkinSpanBufferTest, TestSerializeTimestampInTheFuture) { TEST(ZipkinSpanBufferTest, TestDeprecationOfHttpJsonV1) { EXPECT_THROW_WITH_MESSAGE( SpanBuffer buffer1( - envoy::config::trace::v3::ZipkinConfig::hidden_envoy_deprecated_HTTP_JSON_V1, false), + envoy::config::trace::v3::ZipkinConfig::DEPRECATED_AND_UNAVAILABLE_DO_NOT_USE, false), Envoy::EnvoyException, "hidden_envoy_deprecated_HTTP_JSON_V1 has been deprecated. 
Please use a non-default " "envoy::config::trace::v3::ZipkinConfig::CollectorEndpointVersion value."); diff --git a/test/integration/http_subset_lb_integration_test.cc b/test/integration/http_subset_lb_integration_test.cc index d32ec0b1a94f7..2158305e8b6f2 100644 --- a/test/integration/http_subset_lb_integration_test.cc +++ b/test/integration/http_subset_lb_integration_test.cc @@ -30,8 +30,7 @@ class HttpSubsetLbIntegrationTest auto policy = static_cast(i); - if (policy == envoy::config::cluster::v3::Cluster::hidden_envoy_deprecated_ORIGINAL_DST_LB || - policy == envoy::config::cluster::v3::Cluster::CLUSTER_PROVIDED || + if (policy == envoy::config::cluster::v3::Cluster::CLUSTER_PROVIDED || policy == envoy::config::cluster::v3::Cluster::LOAD_BALANCING_POLICY_CONFIG) { continue; } diff --git a/test/server/options_impl_test.cc b/test/server/options_impl_test.cc index 07c5659aece2a..758623e928eee 100644 --- a/test/server/options_impl_test.cc +++ b/test/server/options_impl_test.cc @@ -292,8 +292,8 @@ TEST_F(OptionsImplTest, OptionsAreInSyncWithProto) { // 5. hot restart version - print the hot restart version and exit. const uint32_t options_not_in_proto = 5; - // There are two deprecated options: "max_stats" and "max_obj_name_len". - const uint32_t deprecated_options = 2; + // There are no deprecated options currently, add here as needed. + const uint32_t deprecated_options = 0; EXPECT_EQ(options->count() - options_not_in_proto, command_line_options->GetDescriptor()->field_count() - deprecated_options); diff --git a/test/server/server_fuzz_test.cc b/test/server/server_fuzz_test.cc index 7795619951f21..11127df57c2d8 100644 --- a/test/server/server_fuzz_test.cc +++ b/test/server/server_fuzz_test.cc @@ -46,9 +46,6 @@ makeHermeticPathsAndPorts(Fuzz::PerTestEnvironment& test_env, // The header_prefix is a write-once then read-only singleton that persists across tests. We clear // this field so that fuzz tests don't fail over multiple iterations. 
output.clear_header_prefix(); - if (output.has_hidden_envoy_deprecated_runtime()) { - output.mutable_hidden_envoy_deprecated_runtime()->set_symlink_root(test_env.temporaryPath("")); - } for (auto& listener : *output.mutable_static_resources()->mutable_listeners()) { if (listener.has_address()) { makePortHermetic(test_env, *listener.mutable_address()); diff --git a/test/tools/router_check/router.cc b/test/tools/router_check/router.cc index 9a6535515fcab..eb5f1853e3736 100644 --- a/test/tools/router_check/router.cc +++ b/test/tools/router_check/router.cc @@ -30,8 +30,6 @@ const std::string toString(envoy::type::matcher::v3::StringMatcher::MatchPattern return "suffix"; case envoy::type::matcher::v3::StringMatcher::MatchPatternCase::kSafeRegex: return "safe_regex"; - case envoy::type::matcher::v3::StringMatcher::MatchPatternCase::kHiddenEnvoyDeprecatedRegex: - return "deprecated_regex"; case envoy::type::matcher::v3::StringMatcher::MatchPatternCase::kContains: return "contains"; case envoy::type::matcher::v3::StringMatcher::MatchPatternCase::MATCH_PATTERN_NOT_SET: @@ -45,10 +43,6 @@ const std::string toString(const envoy::config::route::v3::HeaderMatcher& header case envoy::config::route::v3::HeaderMatcher::HeaderMatchSpecifierCase::kExactMatch: return "exact_match"; break; - case envoy::config::route::v3::HeaderMatcher::HeaderMatchSpecifierCase:: - kHiddenEnvoyDeprecatedRegexMatch: - return "regex_match"; - break; case envoy::config::route::v3::HeaderMatcher::HeaderMatchSpecifierCase::kSafeRegexMatch: return "safe_regex_match"; break; diff --git a/test/tools/type_whisperer/api_type_db_test.cc b/test/tools/type_whisperer/api_type_db_test.cc index 9857a40311845..07ffbc0da33da 100644 --- a/test/tools/type_whisperer/api_type_db_test.cc +++ b/test/tools/type_whisperer/api_type_db_test.cc @@ -6,6 +6,8 @@ namespace Tools { namespace TypeWhisperer { namespace { +// TODO(htuch): remove the API type DB.
+ // Validate that ApiTypeDb::getLatestTypeInformation returns nullopt when no // type information exists. TEST(ApiTypeDb, GetLatestTypeInformationForTypeUnknown) { @@ -13,14 +15,6 @@ TEST(ApiTypeDb, GetLatestTypeInformationForTypeUnknown) { EXPECT_EQ(absl::nullopt, unknown_type_information); } -// Validate that ApiTypeDb::getLatestTypeInformation fetches the latest type -// information when an upgrade occurs. -TEST(ApiTypeDb, GetLatestTypeInformationForTypeKnownUpgraded) { - const auto known_type_information = ApiTypeDb::getLatestTypeInformation("envoy.type.Int64Range"); - EXPECT_EQ("envoy.type.v3.Int64Range", known_type_information->type_name_); - EXPECT_EQ("envoy/type/v3/range.proto", known_type_information->proto_path_); -} - // Validate that ApiTypeDb::getLatestTypeInformation is idempotent when no // upgrade occurs. TEST(ApiTypeDb, GetLatestTypeInformationForTypeKnownNoUpgrade) { diff --git a/tools/api_boost/README.md b/tools/api_boost/README.md deleted file mode 100644 index 6a67e445c40b6..0000000000000 --- a/tools/api_boost/README.md +++ /dev/null @@ -1,28 +0,0 @@ -# Envoy API upgrades - -This directory contains tooling to support the [Envoy API versioning -guidelines](api/API_VERSIONING.md). Envoy internally tracks the latest API -version for any given package. Since each package may have a different API -version, and we have have > 15k of API protos, we require machine assistance to -scale the upgrade process. - -We refer to the process of upgrading Envoy to the latest version of the API as -*API boosting*. This is a manual process, where a developer wanting to bump -major version at the API clock invokes: - -```console -/tools/api_boost/api_boost.py --build_api_booster --generate_compilation_database -``` - -followed by `fix_format`. The full process is still WiP, but we expect that -there will be some manual fixup required of test cases (e.g. YAML fragments) as -well. 
- -You will need to configure `LLVM_CONFIG` as per the [Clang Libtooling setup -guide](tools/clang_tools/README.md). - -## Status - -The API boosting tooling is still WiP. It is slated to land in the v3 release -(EOY 2019), at which point it should be considered ready for general consumption -by experienced developers who work on Envoy APIs. diff --git a/tools/api_boost/api_boost.py b/tools/api_boost/api_boost.py deleted file mode 100755 index abee6f6e0e93a..0000000000000 --- a/tools/api_boost/api_boost.py +++ /dev/null @@ -1,202 +0,0 @@ -#!/usr/bin/env python3 - -# Tool that assists in upgrading the Envoy source tree to the latest API. -# Internally, Envoy uses the latest vN or vNalpha for a given package. Envoy -# will perform a reflection based version upgrade on any older protos that are -# presented to it in configuration at ingestion time. -# -# Usage (from a clean tree): -# -# api_boost.py --generate_compilation_database --build_api_booster - -import argparse -import functools -import json -import os -import multiprocessing as mp -import pathlib -import re -import shlex -import subprocess as sp - -# Detect API #includes. -API_INCLUDE_REGEX = re.compile('#include "(envoy/.*)/[^/]+\.pb\.(validate\.)?h"') - -# Needed for CI to pass down bazel options. -BAZEL_BUILD_OPTIONS = shlex.split(os.environ.get('BAZEL_BUILD_OPTIONS', '')) - - -# Obtain the directory containing a path prefix, e.g. ./foo/bar.txt is ./foo, -# ./foo/ba is ./foo, ./foo/bar/ is ./foo/bar. -def prefix_directory(path_prefix): - return path_prefix if os.path.isdir(path_prefix) else os.path.dirname(path_prefix) - - -# Update a C++ file to the latest API. 
-def api_boost_file(llvm_include_path, debug_log, path): - print('Processing %s' % path) - if 'API_NO_BOOST_FILE' in pathlib.Path(path).read_text(): - if debug_log: - print('Not boosting %s due to API_NO_BOOST_FILE\n' % path) - return None - # Run the booster - try: - result = sp.run([ - './bazel-bin/external/envoy_dev/clang_tools/api_booster/api_booster', - '--extra-arg-before=-xc++', - '--extra-arg=-isystem%s' % llvm_include_path, '--extra-arg=-Wno-undefined-internal', - '--extra-arg=-Wno-old-style-cast', path - ], - capture_output=True, - check=True) - except sp.CalledProcessError as e: - print('api_booster failure for %s: %s %s' % (path, e, e.stderr.decode('utf-8'))) - raise - if debug_log: - print(result.stderr.decode('utf-8')) - - # Consume stdout containing the list of inferred API headers. - return sorted(set(result.stdout.decode('utf-8').splitlines())) - - -# Rewrite API includes to the inferred headers. Currently this is handled -# outside of the clang-ast-replacements. In theory we could either integrate -# with this or with clang-include-fixer, but it's pretty simply to handle as done -# below, we have more control over special casing as well, so ¯\_(ツ)_/¯. -def rewrite_includes(args): - path, api_includes = args - # Files with API_NO_BOOST_FILE will have None returned by api_boost_file. - if api_includes is None: - return - # We just dump the inferred API header includes at the start of the #includes - # in the file and remove all the present API header includes. This does not - # match Envoy style; we rely on later invocations of fix_format.sh to take - # care of this alignment. 
- output_lines = [] - include_lines = ['#include "%s"' % f for f in api_includes] - input_text = pathlib.Path(path).read_text() - for line in input_text.splitlines(): - if include_lines and line.startswith('#include'): - output_lines.extend(include_lines) - include_lines = None - # Exclude API includes, except for a special case related to v2alpha - # ext_authz; this is needed to include the service descriptor in the build - # and is a hack that will go away when we remove v2. - if re.match(API_INCLUDE_REGEX, line) and 'envoy/service/auth/v2alpha' not in line: - continue - output_lines.append(line) - # Rewrite file. - pathlib.Path(path).write_text('\n'.join(output_lines) + '\n') - - -# Update the Envoy source tree the latest API. -def api_boost_tree( - target_paths, - generate_compilation_database=False, - build_api_booster=False, - debug_log=False, - sequential=False): - dep_build_targets = ['//%s/...' % prefix_directory(prefix) for prefix in target_paths] - - # Optional setup of state. We need the compilation database and api_booster - # tool in place before we can start boosting. - if generate_compilation_database: - print('Building compilation database for %s' % dep_build_targets) - sp.run(['./tools/gen_compilation_database.py', '--include_headers'] + dep_build_targets, - check=True) - - if build_api_booster: - # Similar to gen_compilation_database.py, we only need the cc_library for - # setup. The long term fix for this is in - # https://github.com/bazelbuild/bazel/issues/9578. - # - # Figure out some cc_libraries that cover most of our external deps. This is - # the same logic as in gen_compilation_database.py. - query = 'kind(cc_library, {})'.format(' union '.join(dep_build_targets)) - dep_lib_build_targets = sp.check_output(['bazel', 'query', query]).decode().splitlines() - # We also need some misc. stuff such as test binaries for setup of benchmark - # dep. 
- query = 'attr("tags", "compilation_db_dep", {})'.format(' union '.join(dep_build_targets)) - dep_lib_build_targets.extend( - sp.check_output(['bazel', 'query', query]).decode().splitlines()) - extra_api_booster_args = [] - if debug_log: - extra_api_booster_args.append('--copt=-DENABLE_DEBUG_LOG') - - # Slightly easier to debug when we build api_booster on its own. - sp.run([ - 'bazel', - 'build', - '--strip=always', - '@envoy_dev//clang_tools/api_booster', - ] + BAZEL_BUILD_OPTIONS + extra_api_booster_args, - check=True) - sp.run([ - 'bazel', - 'build', - '--strip=always', - ] + BAZEL_BUILD_OPTIONS + dep_lib_build_targets, - check=True) - - # Figure out where the LLVM include path is. We need to provide this - # explicitly as the api_booster is built inside the Bazel cache and doesn't - # know about this path. - # TODO(htuch): this is fragile and depends on Clang version, should figure out - # a cleaner approach. - llvm_include_path = os.path.join( - sp.check_output([os.getenv('LLVM_CONFIG'), '--libdir']).decode().rstrip(), - 'clang/11.0.1/include') - - # Determine the files in the target dirs eligible for API boosting, based on - # known files in the compilation database. - file_paths = set([]) - for entry in json.loads(pathlib.Path('compile_commands.json').read_text()): - file_path = entry['file'] - if any(file_path.startswith(prefix) for prefix in target_paths): - file_paths.add(file_path) - # Ensure a determinstic ordering if we are going to process sequentially. - if sequential: - file_paths = sorted(file_paths) - - # The API boosting is file local, so this is trivially parallelizable, use - # multiprocessing pool with default worker pool sized to cpu_count(), since - # this is CPU bound. - try: - with mp.Pool(processes=1 if sequential else None) as p: - # We need multiple phases, to ensure that any dependency on files being modified - # in one thread on consumed transitive headers on the other thread isn't an - # issue. 
This also ensures that we complete all analysis error free before - # any mutation takes place. - # TODO(htuch): we should move to run-clang-tidy.py once the headers fixups - # are Clang-based. - api_includes = p.map( - functools.partial(api_boost_file, llvm_include_path, debug_log), file_paths) - # Apply Clang replacements before header fixups, since the replacements - # are all relative to the original file. - for prefix_dir in set(map(prefix_directory, target_paths)): - sp.run(['clang-apply-replacements', prefix_dir], check=True) - # Fixup headers. - p.map(rewrite_includes, zip(file_paths, api_includes)) - finally: - # Cleanup any stray **/*.clang-replacements.yaml. - for prefix in target_paths: - clang_replacements = pathlib.Path( - prefix_directory(prefix)).glob('**/*.clang-replacements.yaml') - for path in clang_replacements: - path.unlink() - - -if __name__ == '__main__': - parser = argparse.ArgumentParser(description='Update Envoy tree to the latest API') - parser.add_argument('--generate_compilation_database', action='store_true') - parser.add_argument('--build_api_booster', action='store_true') - parser.add_argument('--debug_log', action='store_true') - parser.add_argument('--sequential', action='store_true') - parser.add_argument('paths', nargs='*', default=['source', 'test', 'include']) - args = parser.parse_args() - api_boost_tree( - args.paths, - generate_compilation_database=args.generate_compilation_database, - build_api_booster=args.build_api_booster, - debug_log=args.debug_log, - sequential=args.sequential) diff --git a/tools/api_boost/api_boost_test.py b/tools/api_boost/api_boost_test.py deleted file mode 100755 index a6384a7bff1db..0000000000000 --- a/tools/api_boost/api_boost_test.py +++ /dev/null @@ -1,94 +0,0 @@ -#!/usr/bin/env python3 - -# Golden C++ source tests for API boosting. 
This is effectively a test for the -# combination of api_boost.py, the Clang libtooling-based -# tools/clang_tools/api_booster, as well as the type whisperer and API type -# database. - -import argparse -from collections import namedtuple -import logging -import os -import pathlib -import shutil -import subprocess -import sys -import tempfile - -import api_boost - -TestCase = namedtuple('TestCase', ['name', 'description']) - -# List of test in the form [(file_name, explanation)] -TESTS = list( - map( - lambda x: TestCase(*x), [ - ('deprecate', 'Deprecations'), - ('elaborated_type', 'ElaboratedTypeLoc type upgrades'), - ('using_decl', 'UsingDecl upgrades for named types'), - ('rename', 'Annotation-based renaming'), - ('decl_ref_expr', 'DeclRefExpr upgrades for named constants'), - ('no_boost_file', 'API_NO_BOOST_FILE annotations'), - ('validate', 'Validation proto header inference'), - ])) - -TESTDATA_PATH = 'tools/api_boost/testdata' - - -def diff(some_path, other_path): - result = subprocess.run(['diff', '-u', some_path, other_path], capture_output=True) - if result.returncode == 0: - return None - return result.stdout.decode('utf-8') + result.stderr.decode('utf-8') - - -if __name__ == '__main__': - parser = argparse.ArgumentParser(description='Golden C++ source tests for api_boost.py') - parser.add_argument('tests', nargs='*') - args = parser.parse_args() - - # Accumulated error messages. - logging.basicConfig(format='%(message)s') - messages = [] - - def should_run_test(test_name): - return len(args.tests) == 0 or test_name in args.tests - - # Run API booster against test artifacts in a directory relative to workspace. - # We use a temporary copy as the API booster does in-place rewriting. - with tempfile.TemporaryDirectory(dir=pathlib.Path.cwd()) as path: - # Setup temporary tree. 
- shutil.copy(os.path.join(TESTDATA_PATH, 'BUILD'), path) - for test in TESTS: - if should_run_test(test.name): - shutil.copy(os.path.join(TESTDATA_PATH, test.name + '.cc'), path) - else: - # Place an empty file to make Bazel happy. - pathlib.Path(path, test.name + '.cc').write_text('') - - # Run API booster. - relpath_to_testdata = str(pathlib.Path(path).relative_to(pathlib.Path.cwd())) - api_boost.api_boost_tree([ - os.path.join(relpath_to_testdata, test.name) - for test in TESTS - if should_run_test(test.name) - ], - generate_compilation_database=True, - build_api_booster=True, - debug_log=True, - sequential=True) - - # Validate output against golden files. - for test in TESTS: - if should_run_test(test.name): - delta = diff( - os.path.join(TESTDATA_PATH, test.name + '.cc.gold'), - os.path.join(path, test.name + '.cc')) - if delta is not None: - messages.append( - 'Non-empty diff for %s (%s):\n%s\n' % (test.name, test.description, delta)) - - if len(messages) > 0: - logging.error('FAILED:\n{}'.format('\n'.join(messages))) - sys.exit(1) - logging.warning('PASS') diff --git a/tools/api_boost/testdata/BUILD b/tools/api_boost/testdata/BUILD deleted file mode 100644 index f3e1298ff05b8..0000000000000 --- a/tools/api_boost/testdata/BUILD +++ /dev/null @@ -1,66 +0,0 @@ -load( - "//bazel:envoy_build_system.bzl", - "envoy_cc_library", - "envoy_package", -) - -licenses(["notice"]) # Apache 2 - -envoy_package() - -envoy_cc_library( - name = "decl_ref_expr", - srcs = ["decl_ref_expr.cc"], - deps = [ - "@envoy_api//envoy/api/v2:pkg_cc_proto", - "@envoy_api//envoy/api/v2/route:pkg_cc_proto", - "@envoy_api//envoy/config/overload/v2alpha:pkg_cc_proto", - ], -) - -envoy_cc_library( - name = "deprecate", - srcs = ["deprecate.cc"], - deps = [ - "@envoy_api//envoy/api/v2:pkg_cc_proto", - "@envoy_api//envoy/api/v2/route:pkg_cc_proto", - "@envoy_api//envoy/type/matcher:pkg_cc_proto", - ], -) - -envoy_cc_library( - name = "elaborated_type", - srcs = ["elaborated_type.cc"], - deps = [ 
- "@envoy_api//envoy/api/v2:pkg_cc_proto", - "@envoy_api//envoy/config/overload/v2alpha:pkg_cc_proto", - ], -) - -envoy_cc_library( - name = "rename", - srcs = ["rename.cc"], - deps = ["@envoy_api//envoy/api/v2/route:pkg_cc_proto"], -) - -envoy_cc_library( - name = "no_boost_file", - srcs = ["no_boost_file.cc"], - deps = ["@envoy_api//envoy/config/overload/v2alpha:pkg_cc_proto"], -) - -envoy_cc_library( - name = "using_decl", - srcs = ["using_decl.cc"], - deps = ["@envoy_api//envoy/config/overload/v2alpha:pkg_cc_proto"], -) - -envoy_cc_library( - name = "validate", - srcs = ["validate.cc"], - deps = [ - "//envoy/protobuf:message_validator_interface", - "//source/common/protobuf:utility_lib", - "@envoy_api//envoy/api/v2:pkg_cc_proto", - ], -) diff --git a/tools/api_boost/testdata/decl_ref_expr.cc b/tools/api_boost/testdata/decl_ref_expr.cc deleted file mode 100644 index 9b644d08abea0..0000000000000 --- a/tools/api_boost/testdata/decl_ref_expr.cc +++ /dev/null @@ -1,44 +0,0 @@ -#include "envoy/api/v2/cds.pb.h" -#include "envoy/api/v2/route/route.pb.h" -#include "envoy/config/overload/v2alpha/overload.pb.h" - -#define API_NO_BOOST(x) x -#define BAR(x) x -#define ASSERT(x) static_cast(x) - -using envoy::config::overload::v2alpha::Trigger; - -using envoy::api::v2::Cluster; -using MutableStringClusterAccessor = std::string* (Cluster::*)(); - -class ThresholdTriggerImpl { -public: - ThresholdTriggerImpl(const envoy::config::overload::v2alpha::Trigger& config) { - switch (config.trigger_oneof_case()) { - case envoy::config::overload::v2alpha::Trigger::kThreshold: - break; - default: - break; - } - switch (config.trigger_oneof_case()) { - case Trigger::kThreshold: - break; - default: - break; - } - API_NO_BOOST(envoy::api::v2::route::RouteAction) route_action; - route_action.host_rewrite(); - API_NO_BOOST(envoy::config::overload::v2alpha::Trigger) foo; - BAR(API_NO_BOOST(envoy::config::overload::v2alpha::Trigger)) bar; - BAR(envoy::config::overload::v2alpha::Trigger) baz; - 
envoy::config::overload::v2alpha::ThresholdTrigger::default_instance(); - ASSERT(envoy::config::overload::v2alpha::Trigger::kThreshold == Trigger::kThreshold); - ASSERT(Foo::kThreshold == Trigger::kThreshold); - envoy::api::v2::Cluster::LbPolicy_Name(0); - static_cast(envoy::api::v2::Cluster::MAGLEV); - MutableStringClusterAccessor foo2 = &envoy::api::v2::Cluster::mutable_name; - static_cast(foo2); - } - - using Foo = envoy::config::overload::v2alpha::Trigger; -}; diff --git a/tools/api_boost/testdata/decl_ref_expr.cc.gold b/tools/api_boost/testdata/decl_ref_expr.cc.gold deleted file mode 100644 index d7a337fc38b5b..0000000000000 --- a/tools/api_boost/testdata/decl_ref_expr.cc.gold +++ /dev/null @@ -1,45 +0,0 @@ -#include "envoy/api/v2/route/route_components.pb.h" -#include "envoy/config/cluster/v4alpha/cluster.pb.h" -#include "envoy/config/overload/v2alpha/overload.pb.h" -#include "envoy/config/overload/v3/overload.pb.h" - -#define API_NO_BOOST(x) x -#define BAR(x) x -#define ASSERT(x) static_cast(x) - -using envoy::config::overload::v3::Trigger; - -using envoy::config::cluster::v4alpha::Cluster; -using MutableStringClusterAccessor = std::string* (Cluster::*)(); - -class ThresholdTriggerImpl { -public: - ThresholdTriggerImpl(const envoy::config::overload::v3::Trigger& config) { - switch (config.trigger_oneof_case()) { - case envoy::config::overload::v3::Trigger::TriggerOneofCase::kThreshold: - break; - default: - break; - } - switch (config.trigger_oneof_case()) { - case Trigger::kThreshold: - break; - default: - break; - } - API_NO_BOOST(envoy::api::v2::route::RouteAction) route_action; - route_action.host_rewrite(); - API_NO_BOOST(envoy::config::overload::v2alpha::Trigger) foo; - BAR(API_NO_BOOST(envoy::config::overload::v2alpha::Trigger)) bar; - BAR(envoy::config::overload::v3::Trigger) baz; - envoy::config::overload::v3::ThresholdTrigger::default_instance(); - ASSERT(envoy::config::overload::v3::Trigger::TriggerOneofCase::kThreshold == Trigger::kThreshold); - 
ASSERT(Foo::kThreshold == Trigger::kThreshold); - envoy::config::cluster::v4alpha::Cluster::LbPolicy_Name(0); - static_cast(envoy::config::cluster::v4alpha::Cluster::MAGLEV); - MutableStringClusterAccessor foo2 = &envoy::config::cluster::v4alpha::Cluster::mutable_name; - static_cast(foo2); - } - - using Foo = envoy::config::overload::v3::Trigger; -}; diff --git a/tools/api_boost/testdata/deprecate.cc b/tools/api_boost/testdata/deprecate.cc deleted file mode 100644 index d0a3f58b41b32..0000000000000 --- a/tools/api_boost/testdata/deprecate.cc +++ /dev/null @@ -1,11 +0,0 @@ -#include "envoy/api/v2/cds.pb.h" -#include "envoy/api/v2/route/route.pb.h" -#include "envoy/type/matcher/string.pb.h" - -void test() { - envoy::api::v2::route::VirtualHost vhost; - vhost.per_filter_config(); - vhost.mutable_per_filter_config(); - static_cast(envoy::type::matcher::StringMatcher::kRegex); - static_cast(envoy::api::v2::Cluster::ORIGINAL_DST_LB); -} diff --git a/tools/api_boost/testdata/deprecate.cc.gold b/tools/api_boost/testdata/deprecate.cc.gold deleted file mode 100644 index 0158efa26d9a0..0000000000000 --- a/tools/api_boost/testdata/deprecate.cc.gold +++ /dev/null @@ -1,11 +0,0 @@ -#include "envoy/config/cluster/v4alpha/cluster.pb.h" -#include "envoy/config/route/v4alpha/route_components.pb.h" -#include "envoy/type/matcher/v4alpha/string.pb.h" - -void test() { - envoy::config::route::v4alpha::VirtualHost vhost; - vhost.hidden_envoy_deprecated_per_filter_config(); - vhost.mutable_hidden_envoy_deprecated_per_filter_config(); - static_cast(envoy::type::matcher::v4alpha::StringMatcher::MatchPatternCase::kHiddenEnvoyDeprecatedRegex); - static_cast(envoy::config::cluster::v4alpha::Cluster::hidden_envoy_deprecated_ORIGINAL_DST_LB); -} diff --git a/tools/api_boost/testdata/elaborated_type.cc b/tools/api_boost/testdata/elaborated_type.cc deleted file mode 100644 index 6a30d1e3330e2..0000000000000 --- a/tools/api_boost/testdata/elaborated_type.cc +++ /dev/null @@ -1,10 +0,0 @@ -#include 
"envoy/api/v2/cds.pb.h" -#include "envoy/config/overload/v2alpha/overload.pb.h" - -class ThresholdTriggerImpl { -public: - ThresholdTriggerImpl(const envoy::config::overload::v2alpha::ThresholdTrigger& /*config*/) {} - void someMethod(envoy::api::v2::Cluster_LbPolicy) {} - - const envoy::config::overload::v2alpha::Trigger::TriggerOneofCase case_{}; -}; diff --git a/tools/api_boost/testdata/elaborated_type.cc.gold b/tools/api_boost/testdata/elaborated_type.cc.gold deleted file mode 100644 index 442426177598e..0000000000000 --- a/tools/api_boost/testdata/elaborated_type.cc.gold +++ /dev/null @@ -1,10 +0,0 @@ -#include "envoy/config/cluster/v4alpha/cluster.pb.h" -#include "envoy/config/overload/v3/overload.pb.h" - -class ThresholdTriggerImpl { -public: - ThresholdTriggerImpl(const envoy::config::overload::v3::ThresholdTrigger& /*config*/) {} - void someMethod(envoy::config::cluster::v4alpha::Cluster::LbPolicy) {} - - const envoy::config::overload::v3::Trigger::TriggerOneofCase case_{}; -}; diff --git a/tools/api_boost/testdata/no_boost_file.cc b/tools/api_boost/testdata/no_boost_file.cc deleted file mode 100644 index 82d11a26410b0..0000000000000 --- a/tools/api_boost/testdata/no_boost_file.cc +++ /dev/null @@ -1,12 +0,0 @@ -#include "envoy/config/overload/v2alpha/overload.pb.h" - -// API_NO_BOOST_FILE - -using envoy::config::overload::v2alpha::ThresholdTrigger; -using SomePtrAlias = std::unique_ptr; - -class ThresholdTriggerImpl { -public: - ThresholdTriggerImpl(const ThresholdTrigger& /*config*/) {} - ThresholdTriggerImpl(SomePtrAlias /*config*/) {} -}; diff --git a/tools/api_boost/testdata/no_boost_file.cc.gold b/tools/api_boost/testdata/no_boost_file.cc.gold deleted file mode 100644 index 82d11a26410b0..0000000000000 --- a/tools/api_boost/testdata/no_boost_file.cc.gold +++ /dev/null @@ -1,12 +0,0 @@ -#include "envoy/config/overload/v2alpha/overload.pb.h" - -// API_NO_BOOST_FILE - -using envoy::config::overload::v2alpha::ThresholdTrigger; -using SomePtrAlias = 
std::unique_ptr; - -class ThresholdTriggerImpl { -public: - ThresholdTriggerImpl(const ThresholdTrigger& /*config*/) {} - ThresholdTriggerImpl(SomePtrAlias /*config*/) {} -}; diff --git a/tools/api_boost/testdata/rename.cc b/tools/api_boost/testdata/rename.cc deleted file mode 100644 index 96e56b5f0b040..0000000000000 --- a/tools/api_boost/testdata/rename.cc +++ /dev/null @@ -1,7 +0,0 @@ -#include "envoy/api/v2/route/route.pb.h" - -void test() { - envoy::api::v2::route::RouteAction route_action; - route_action.host_rewrite(); - route_action.set_host_rewrite("blah"); -} diff --git a/tools/api_boost/testdata/rename.cc.gold b/tools/api_boost/testdata/rename.cc.gold deleted file mode 100644 index 124a528b05fdc..0000000000000 --- a/tools/api_boost/testdata/rename.cc.gold +++ /dev/null @@ -1,7 +0,0 @@ -#include "envoy/config/route/v4alpha/route_components.pb.h" - -void test() { - envoy::config::route::v4alpha::RouteAction route_action; - route_action.host_rewrite_literal(); - route_action.set_host_rewrite_literal("blah"); -} diff --git a/tools/api_boost/testdata/using_decl.cc b/tools/api_boost/testdata/using_decl.cc deleted file mode 100644 index 88b3f2ef44035..0000000000000 --- a/tools/api_boost/testdata/using_decl.cc +++ /dev/null @@ -1,11 +0,0 @@ -#include "envoy/config/overload/v2alpha/overload.pb.h" - -using envoy::config::overload::v2alpha::ThresholdTrigger; -using ::envoy::config::overload::v2alpha::Trigger; -using SomePtrAlias = std::unique_ptr; - -class ThresholdTriggerImpl { -public: - ThresholdTriggerImpl(const ThresholdTrigger& /*config*/) {} - ThresholdTriggerImpl(SomePtrAlias /*config*/) {} -}; diff --git a/tools/api_boost/testdata/using_decl.cc.gold b/tools/api_boost/testdata/using_decl.cc.gold deleted file mode 100644 index 879485050a2d3..0000000000000 --- a/tools/api_boost/testdata/using_decl.cc.gold +++ /dev/null @@ -1,11 +0,0 @@ -#include "envoy/config/overload/v3/overload.pb.h" - -using envoy::config::overload::v3::ThresholdTrigger; -using 
envoy::config::overload::v3::Trigger; -using SomePtrAlias = std::unique_ptr; - -class ThresholdTriggerImpl { -public: - ThresholdTriggerImpl(const ThresholdTrigger& /*config*/) {} - ThresholdTriggerImpl(SomePtrAlias /*config*/) {} -}; diff --git a/tools/api_boost/testdata/validate.cc b/tools/api_boost/testdata/validate.cc deleted file mode 100644 index 97fbd6bac35d9..0000000000000 --- a/tools/api_boost/testdata/validate.cc +++ /dev/null @@ -1,10 +0,0 @@ -#include "envoy/api/v2/cds.pb.h" -#include "envoy/api/v2/cluster.pb.validate.h" -#include "envoy/protobuf/message_validator.h" - -#include "source/common/protobuf/utility.h" - -void foo(Envoy::ProtobufMessage::ValidationVisitor& validator) { - envoy::api::v2::Cluster msg; - Envoy::MessageUtil::downcastAndValidate(msg, validator); -} diff --git a/tools/api_boost/testdata/validate.cc.gold b/tools/api_boost/testdata/validate.cc.gold deleted file mode 100644 index 5b991b7294e5e..0000000000000 --- a/tools/api_boost/testdata/validate.cc.gold +++ /dev/null @@ -1,10 +0,0 @@ -#include "envoy/config/cluster/v4alpha/cluster.pb.h" -#include "envoy/config/cluster/v4alpha/cluster.pb.validate.h" -#include "envoy/protobuf/message_validator.h" - -#include "source/common/protobuf/utility.h" - -void foo(Envoy::ProtobufMessage::ValidationVisitor& validator) { - envoy::config::cluster::v4alpha::Cluster msg; - Envoy::MessageUtil::downcastAndValidate(msg, validator); -} diff --git a/tools/api_proto_breaking_change_detector/BUILD b/tools/api_proto_breaking_change_detector/BUILD index bce76c323d361..4ef316b7794a3 100644 --- a/tools/api_proto_breaking_change_detector/BUILD +++ b/tools/api_proto_breaking_change_detector/BUILD @@ -9,7 +9,7 @@ py_binary( ], data = [ "@com_github_bufbuild_buf//:buf", - "@envoy_api_canonical//:proto_breaking_change_detector_buf_config", + "@envoy_api//:proto_breaking_change_detector_buf_config", ], main = "detector.py", tags = ["manual"], diff --git a/tools/api_proto_breaking_change_detector/detector_test.py 
b/tools/api_proto_breaking_change_detector/detector_test.py index 1566bf2d500d8..d34bd1528d4d2 100644 --- a/tools/api_proto_breaking_change_detector/detector_test.py +++ b/tools/api_proto_breaking_change_detector/detector_test.py @@ -109,8 +109,7 @@ def setUpClass(cls): copytree(testdata_path, cls._temp_dir.name, dirs_exist_ok=True) # copy in buf config - bazel_buf_config_loc = Path.cwd().joinpath( - "external", "envoy_api_canonical", "buf.yaml") + bazel_buf_config_loc = Path.cwd().joinpath("external", "envoy_api", "buf.yaml") copyfile(bazel_buf_config_loc, cls._config_file_loc) # pull buf dependencies and initialize git repo with test data files diff --git a/tools/api_proto_plugin/utils.py b/tools/api_proto_plugin/utils.py index 91007f5f54043..440b252effb47 100644 --- a/tools/api_proto_plugin/utils.py +++ b/tools/api_proto_plugin/utils.py @@ -11,8 +11,8 @@ def proto_file_canonical_from_label(label): A string with the path, e.g. for @envoy_api//envoy/type/matcher:metadata.proto this would be envoy/type/matcher/matcher.proto. """ - assert (label.startswith('@envoy_api_canonical//')) - return label[len('@envoy_api_canonical//'):].replace(':', '/') + assert (label.startswith('@envoy_api//')) + return label[len('@envoy_api//'):].replace(':', '/') def bazel_bin_path_for_output_artifact(label, suffix, root=''): @@ -24,9 +24,9 @@ def bazel_bin_path_for_output_artifact(label, suffix, root=''): root: location of bazel-bin/, if not specified, PWD. Returns: - Path in bazel-bin/external/envoy_api_canonical for label output with given suffix. + Path in bazel-bin/external/envoy_api for label output with given suffix. 
""" proto_file_path = proto_file_canonical_from_label(label) return os.path.join( - root, 'bazel-bin/external/envoy_api_canonical', os.path.dirname(proto_file_path), 'pkg', + root, 'bazel-bin/external/envoy_api', os.path.dirname(proto_file_path), 'pkg', proto_file_path + suffix) diff --git a/tools/clang_tools/api_booster/BUILD b/tools/clang_tools/api_booster/BUILD deleted file mode 100644 index d6affe19640b6..0000000000000 --- a/tools/clang_tools/api_booster/BUILD +++ /dev/null @@ -1,37 +0,0 @@ -load( - "//clang_tools/support:clang_tools.bzl", - "clang_tools_cc_binary", - "clang_tools_cc_library", - "clang_tools_cc_test", -) - -licenses(["notice"]) # Apache 2 - -clang_tools_cc_binary( - name = "api_booster", - srcs = ["main.cc"], - deps = [ - ":proto_cxx_utils_lib", - "@clang_tools//:clang_astmatchers", - "@clang_tools//:clang_basic", - "@clang_tools//:clang_tooling", - "@envoy//tools/type_whisperer:api_type_db_lib", - ], -) - -clang_tools_cc_library( - name = "proto_cxx_utils_lib", - srcs = ["proto_cxx_utils.cc"], - hdrs = ["proto_cxx_utils.h"], - deps = [ - "@com_google_absl//absl/container:node_hash_map", - "@com_google_absl//absl/strings", - "@com_google_absl//absl/types:optional", - ], -) - -clang_tools_cc_test( - name = "proto_cxx_utils_test", - srcs = ["proto_cxx_utils_test.cc"], - deps = [":proto_cxx_utils_lib"], -) diff --git a/tools/clang_tools/api_booster/main.cc b/tools/clang_tools/api_booster/main.cc deleted file mode 100644 index 1972a58d560c6..0000000000000 --- a/tools/clang_tools/api_booster/main.cc +++ /dev/null @@ -1,598 +0,0 @@ -// Upgrade a single Envoy C++ file to the latest API version. -// -// Currently this tool is a WIP and only does inference of .pb[.validate].h -// #include locations. This already exercises some of the muscles we need, such -// as AST matching, rudimentary type inference and API type database lookup. -// -// NOLINT(namespace-envoy) - -#include -#include -#include -#include - -// Declares clang::SyntaxOnlyAction. 
-#include "clang/ASTMatchers/ASTMatchers.h" -#include "clang/ASTMatchers/ASTMatchFinder.h" -#include "clang/Frontend/FrontendActions.h" -#include "clang/Tooling/CommonOptionsParser.h" -#include "clang/Tooling/Core/Replacement.h" -#include "clang/Tooling/Refactoring.h" -#include "clang/Tooling/ReplacementsYaml.h" - -// Declares llvm::cl::extrahelp. -#include "llvm/Support/CommandLine.h" - -#include "proto_cxx_utils.h" - -#include "tools/type_whisperer/api_type_db.h" - -#include "absl/container/node_hash_map.h" -#include "absl/strings/str_cat.h" - -// Enable to see debug log messages. -#ifdef ENABLE_DEBUG_LOG -#define DEBUG_LOG(s) \ - do { \ - std::cerr << (s) << std::endl; \ - } while (0) -#else -#define DEBUG_LOG(s) -#endif - -using namespace Envoy::Tools::TypeWhisperer; - -namespace ApiBooster { - -class ApiBooster : public clang::ast_matchers::MatchFinder::MatchCallback, - public clang::tooling::SourceFileCallbacks { -public: - ApiBooster(std::map& replacements) - : replacements_(replacements) {} - - // AST match callback dispatcher. 
- void run(const clang::ast_matchers::MatchFinder::MatchResult& match_result) override { - clang::SourceManager& source_manager = match_result.Context->getSourceManager(); - DEBUG_LOG("AST match callback dispatcher"); - for (const auto it : match_result.Nodes.getMap()) { - const std::string match_text = getSourceText(it.second.getSourceRange(), source_manager); - const clang::SourceRange spelling_range = - getSpellingRange(it.second.getSourceRange(), source_manager); - const std::string spelling_text = getSourceText(spelling_range, source_manager); - DEBUG_LOG(absl::StrCat(" Result for ", it.first, " [", truncateForDebug(match_text), "]")); - if (match_text != spelling_text) { - DEBUG_LOG(absl::StrCat(" with spelling text [", truncateForDebug(spelling_text), "]")); - } - } - if (const auto* type_loc = match_result.Nodes.getNodeAs("type")) { - onTypeLocMatch(*type_loc, source_manager); - return; - } - if (const auto* using_decl = match_result.Nodes.getNodeAs("using_decl")) { - onUsingDeclMatch(*using_decl, source_manager); - return; - } - if (const auto* decl_ref_expr = - match_result.Nodes.getNodeAs("decl_ref_expr")) { - onDeclRefExprMatch(*decl_ref_expr, *match_result.Context, source_manager); - return; - } - if (const auto* call_expr = match_result.Nodes.getNodeAs("call_expr")) { - onCallExprMatch(*call_expr, *match_result.Context, source_manager); - return; - } - if (const auto* member_call_expr = - match_result.Nodes.getNodeAs("member_call_expr")) { - onMemberCallExprMatch(*member_call_expr, source_manager); - return; - } - if (const auto* tmpl = - match_result.Nodes.getNodeAs("tmpl")) { - onClassTemplateSpecializationDeclMatch(*tmpl, source_manager); - return; - } - } - - // Visitor callback for start of a compilation unit. - bool handleBeginSource(clang::CompilerInstance& CI) override { - source_api_proto_paths_.clear(); - return true; - } - - // Visitor callback for end of a compilation unit. 
- void handleEndSource() override { - // Dump known API header paths to stdout for api_boost.py to rewrite with - // (no rewriting support in this tool yet). - for (const std::string& proto_path : source_api_proto_paths_) { - std::cout << proto_path << std::endl; - } - } - -private: - static bool isEnvoyNamespace(absl::string_view s) { - return absl::StartsWith(s, "envoy::") || absl::StartsWith(s, "::envoy::"); - } - - static std::string truncateForDebug(const std::string& text) { - const uint32_t MaxExpansionChars = 250; - return text.size() > MaxExpansionChars ? text.substr(0, MaxExpansionChars) + "..." : text; - } - - // Match callback for TypeLoc. These are explicit mentions of the type in the - // source. If we have a match on type, we should track the corresponding .pb.h - // and attempt to upgrade. - void onTypeLocMatch(const clang::TypeLoc& type_loc, const clang::SourceManager& source_manager) { - absl::optional source_range; - const std::string type_name = - type_loc.getType().getCanonicalType().getUnqualifiedType().getAsString(); - // Remove qualifiers, e.g. const. - const clang::UnqualTypeLoc unqual_type_loc = type_loc.getUnqualifiedLoc(); - DEBUG_LOG(absl::StrCat("Type class ", type_loc.getType()->getTypeClassName())); - // Today we are only smart enough to rewrite ElaborateTypeLoc, which are - // full namespace prefixed types. We probably will need to support more, in - // particular if we want message-level type renaming. TODO(htuch): add more - // supported AST TypeLoc classes as needed. 
- if (unqual_type_loc.getTypeLocClass() == clang::TypeLoc::Elaborated && - isEnvoyNamespace(getSourceText( - getSpellingRange(unqual_type_loc.getSourceRange(), source_manager), source_manager))) { - source_range = absl::make_optional(unqual_type_loc.getSourceRange()); - tryBoostType(type_name, source_range, source_manager, type_loc.getType()->getTypeClassName(), - false); - } else { - // If we're not going to rewrite, we still deliver SourceLocation to - // tryBoostType to assist with determination of API_NO_BOOST(). - tryBoostType(type_name, unqual_type_loc.getBeginLoc(), -1, source_manager, - type_loc.getType()->getTypeClassName(), false); - } - } - - // Match callback for clang::UsingDecl. These are 'using' aliases for API type - // names. - void onUsingDeclMatch(const clang::UsingDecl& using_decl, - const clang::SourceManager& source_manager) { - // Not all using declaration are types, but we try the rewrite in case there - // is such an API type database match. - const clang::SourceRange source_range = clang::SourceRange( - using_decl.getQualifierLoc().getBeginLoc(), using_decl.getNameInfo().getEndLoc()); - const std::string type_name = getSourceText(source_range, source_manager); - tryBoostType(type_name, source_range, source_manager, "UsingDecl", true); - } - - // Match callback for clang::DeclRefExpr. These occur when enums constants, - // e.g. foo::bar::kBaz, appear in the source. - void onDeclRefExprMatch(const clang::DeclRefExpr& decl_ref_expr, const clang::ASTContext& context, - const clang::SourceManager& source_manager) { - // We don't need to consider non-namespace qualified DeclRefExprfor now (no - // renaming support yet). - if (!decl_ref_expr.hasQualifier()) { - return; - } - const std::string decl_name = decl_ref_expr.getNameInfo().getAsString(); - // There are generated methods to stringify/parse/validate enum values, - // these need special treatment as they look like types with special - // suffices. 
- for (const std::string& enum_generated_method_suffix : {"_Name", "_Parse", "_IsValid"}) { - if (absl::EndsWith(decl_name, enum_generated_method_suffix)) { - // Remove trailing suffix from reference for replacement range and type - // name purposes. - const clang::SourceLocation begin_loc = - source_manager.getSpellingLoc(decl_ref_expr.getBeginLoc()); - const std::string type_name_with_suffix = - getSourceText(decl_ref_expr.getSourceRange(), source_manager); - const std::string type_name = type_name_with_suffix.substr( - 0, type_name_with_suffix.size() - enum_generated_method_suffix.size()); - tryBoostType(type_name, begin_loc, type_name.size(), source_manager, - "DeclRefExpr suffixed " + enum_generated_method_suffix, false); - return; - } - } - // Remove trailing : from namespace qualifier. - const clang::SourceRange source_range = - clang::SourceRange(decl_ref_expr.getQualifierLoc().getBeginLoc(), - decl_ref_expr.getQualifierLoc().getEndLoc().getLocWithOffset(-1)); - // Only try to boost type if it's explicitly an Envoy qualified type. - const std::string source_type_name = getSourceText(source_range, source_manager); - const clang::QualType ast_type = - decl_ref_expr.getDecl()->getType().getCanonicalType().getUnqualifiedType(); - const std::string ast_type_name = ast_type.getAsString(); - if (isEnvoyNamespace(source_type_name)) { - // Generally we pull the type from the named entity's declaration type, - // since this allows us to map from things like envoy::type::HTTP2 to the - // underlying fully qualified envoy::type::CodecClientType::HTTP2 prior to - // API type database lookup. However, for the generated static methods or - // field accessors, we don't want to deal with lookup via the function - // type, so we use the source text directly. - const std::string type_name = ast_type.isPODType(context) ? 
ast_type_name : source_type_name; - tryBoostType(type_name, source_range, source_manager, "DeclRefExpr", true); - } - const auto latest_type_info = getTypeInformationFromCType(ast_type_name, true); - // In some cases we need to upgrade the name the DeclRefExpr points at. If - // this isn't a known API type, our work here is done. - if (!latest_type_info) { - return; - } - const clang::SourceRange decl_source_range = decl_ref_expr.getNameInfo().getSourceRange(); - // Deprecated enum constants need to be upgraded. - if (latest_type_info->enum_type_) { - const auto enum_value_rename = - ProtoCxxUtils::renameEnumValue(decl_name, latest_type_info->renames_); - if (enum_value_rename) { - const clang::SourceRange decl_source_range = decl_ref_expr.getNameInfo().getSourceRange(); - const clang::tooling::Replacement enum_value_replacement( - source_manager, source_manager.getSpellingLoc(decl_source_range.getBegin()), - sourceRangeLength(decl_source_range, source_manager), *enum_value_rename); - insertReplacement(enum_value_replacement); - } - return; - } - // We need to map from envoy::type::matcher::StringMatcher::kRegex to - // envoy::type::matcher::v3::StringMatcher::kHiddenEnvoyDeprecatedRegex. - const auto constant_rename = - ProtoCxxUtils::renameConstant(decl_name, latest_type_info->renames_); - if (constant_rename) { - const clang::tooling::Replacement constant_replacement( - source_manager, decl_source_range.getBegin(), - sourceRangeLength(decl_source_range, source_manager), *constant_rename); - insertReplacement(constant_replacement); - } - } - - // Match callback clang::CallExpr. We don't need to rewrite, but if it's something like - // loadFromYamlAndValidate, we might need to look at the argument type to - // figure out any corresponding .pb.validate.h we require. 
- void onCallExprMatch(const clang::CallExpr& call_expr, const clang::ASTContext& context, - const clang::SourceManager& source_manager) { - auto* direct_callee = call_expr.getDirectCallee(); - if (direct_callee != nullptr) { - const absl::node_hash_map ValidateNameToArg = { - {"loadFromYamlAndValidate", 1}, - {"loadFromFileAndValidate", 1}, - {"downcastAndValidate", -1}, - {"validate", 0}, - }; - const std::string& callee_name = direct_callee->getNameInfo().getName().getAsString(); - DEBUG_LOG(absl::StrCat("callee_name ", callee_name)); - const auto arg = ValidateNameToArg.find(callee_name); - // Sometimes we hit false positives because we aren't qualifying above. - // TODO(htuch): fix this. - if (arg != ValidateNameToArg.end() && - arg->second < static_cast(call_expr.getNumArgs())) { - const std::string type_name = arg->second >= 0 ? call_expr.getArg(arg->second) - ->getType() - .getCanonicalType() - .getUnqualifiedType() - .getAsString() - : call_expr.getCallReturnType(context) - .getNonReferenceType() - .getCanonicalType() - .getUnqualifiedType() - .getAsString(); - DEBUG_LOG(absl::StrCat("Validation header boosting ", type_name)); - tryBoostType(type_name, {}, source_manager, "validation invocation", true, true); - } - } - } - - // Match callback for clang::CxxMemberCallExpr. We rewrite things like - // ->mutable_foo() to ->mutable_foo_new_name() during renames. - void onMemberCallExprMatch(const clang::CXXMemberCallExpr& member_call_expr, - const clang::SourceManager& source_manager) { - const std::string type_name = - member_call_expr.getObjectType().getCanonicalType().getUnqualifiedType().getAsString(); - const auto latest_type_info = getTypeInformationFromCType(type_name, true); - // If this isn't a known API type, our work here is done. - if (!latest_type_info) { - return; - } - // Figure out if the referenced object was declared under API_NO_BOOST. This - // only works for simple cases, best effort. 
- const auto* object_expr = member_call_expr.getImplicitObjectArgument(); - if (object_expr != nullptr) { - const auto* decl = object_expr->getReferencedDeclOfCallee(); - if (decl != nullptr && - getSourceText(decl->getSourceRange(), source_manager).find("API_NO_BOOST") != - std::string::npos) { - DEBUG_LOG("Skipping method replacement due to API_NO_BOOST"); - return; - } - } - tryRenameMethod(*latest_type_info, member_call_expr.getExprLoc(), source_manager); - } - - bool tryRenameMethod(const TypeInformation& type_info, clang::SourceLocation method_loc, - const clang::SourceManager& source_manager) { - const clang::SourceRange source_range = {source_manager.getSpellingLoc(method_loc), - source_manager.getSpellingLoc(method_loc)}; - const std::string method_name = getSourceText(source_range, source_manager); - DEBUG_LOG(absl::StrCat("Checking for rename of ", method_name)); - const auto method_rename = ProtoCxxUtils::renameMethod(method_name, type_info.renames_); - if (method_rename) { - const clang::tooling::Replacement method_replacement( - source_manager, source_range.getBegin(), sourceRangeLength(source_range, source_manager), - *method_rename); - insertReplacement(method_replacement); - return true; - } - return false; - } - - // Match callback for clang::ClassTemplateSpecializationDecl. An additional - // place we need to look for .pb.validate.h reference is instantiation of - // FactoryBase. 
- void onClassTemplateSpecializationDeclMatch(const clang::ClassTemplateSpecializationDecl& tmpl, - const clang::SourceManager& source_manager) { - const std::string tmpl_type_name = tmpl.getSpecializedTemplate() - ->getInjectedClassNameSpecialization() - .getCanonicalType() - .getAsString(); - if (absl::EndsWith(tmpl_type_name, "FactoryBase")) { - const std::string type_name = tmpl.getTemplateArgs() - .get(0) - .getAsType() - .getCanonicalType() - .getUnqualifiedType() - .getAsString(); - tryBoostType(type_name, {}, source_manager, "FactoryBase template", true, true); - } - if (tmpl_type_name == "FactoryBase") { - const std::string type_name_0 = tmpl.getTemplateArgs() - .get(0) - .getAsType() - .getCanonicalType() - .getUnqualifiedType() - .getAsString(); - tryBoostType(type_name_0, {}, source_manager, "FactoryBase template", true, true); - const std::string type_name_1 = tmpl.getTemplateArgs() - .get(1) - .getAsType() - .getCanonicalType() - .getUnqualifiedType() - .getAsString(); - tryBoostType(type_name_1, {}, source_manager, "FactoryBase template", true, true); - } - } - - // Attempt to boost a given type and rewrite the given source range. 
- void tryBoostType(const std::string& type_name, absl::optional source_range, - const clang::SourceManager& source_manager, absl::string_view debug_description, - bool requires_enum_truncation, bool validation_required = false) { - if (source_range) { - tryBoostType(type_name, source_range->getBegin(), - sourceRangeLength(*source_range, source_manager), source_manager, - debug_description, requires_enum_truncation, validation_required); - } else { - tryBoostType(type_name, {}, -1, source_manager, debug_description, requires_enum_truncation, - validation_required); - } - } - - bool underApiNoBoost(clang::SourceLocation loc, const clang::SourceManager& source_manager) { - if (loc.isMacroID()) { - const auto macro_name = clang::Lexer::getImmediateMacroName(loc, source_manager, lexer_lopt_); - if (macro_name.str() == "API_NO_BOOST") { - return true; - } - } - return false; - } - - void tryBoostType(const std::string& type_name, clang::SourceLocation begin_loc, int length, - const clang::SourceManager& source_manager, absl::string_view debug_description, - bool requires_enum_truncation, bool validation_required = false) { - bool is_skip_macro = false; - if (underApiNoBoost(begin_loc, source_manager)) { - DEBUG_LOG("Skipping replacement due to API_NO_BOOST"); - is_skip_macro = true; - } - const auto type_info = getTypeInformationFromCType(type_name, !is_skip_macro); - // If this isn't a known API type, our work here is done. - if (!type_info) { - return; - } - DEBUG_LOG(absl::StrCat("Matched type '", type_name, "' (", debug_description, ") length ", - length, " at ", begin_loc.printToString(source_manager))); - // Track corresponding imports. - source_api_proto_paths_.insert(adjustProtoSuffix(type_info->proto_path_, ".pb.h")); - if (validation_required) { - source_api_proto_paths_.insert(adjustProtoSuffix(type_info->proto_path_, ".pb.validate.h")); - } - // Not all AST matchers know how to do replacements (yet?). 
- if (length == -1 || is_skip_macro) { - return; - } - const clang::SourceLocation spelling_begin = source_manager.getSpellingLoc(begin_loc); - // We need to look at the text we're replacing to decide whether we should - // use the qualified C++'ified proto name. - const bool qualified = - getSourceText(spelling_begin, length, source_manager).find("::") != std::string::npos; - std::string case_residual; - if (absl::EndsWith(type_name, "Case")) { - case_residual = type_name.substr(type_name.rfind(':') - 1); - } - // Add corresponding replacement. - const clang::tooling::Replacement type_replacement( - source_manager, source_manager.getSpellingLoc(begin_loc), length, - ProtoCxxUtils::protoToCxxType(type_info->type_name_, qualified, - type_info->enum_type_ && requires_enum_truncation) + - case_residual); - insertReplacement(type_replacement); - } - - void insertReplacement(const clang::tooling::Replacement& replacement) { - llvm::Error error = replacements_[std::string(replacement.getFilePath())].add(replacement); - if (error) { - std::cerr << " Replacement insertion error: " << llvm::toString(std::move(error)) - << std::endl; - } else { - std::cerr << " Replacement added: " << replacement.toString() << std::endl; - } - } - - // Modeled after getRangeSize() in Clang's Replacements.cpp. Turns out it's - // non-trivial to get the actual length of a SourceRange, as the end location - // point to the start of the last token. 
- int sourceRangeLength(clang::SourceRange source_range, - const clang::SourceManager& source_manager) { - const clang::SourceLocation spelling_begin = - source_manager.getSpellingLoc(source_range.getBegin()); - const clang::SourceLocation spelling_end = source_manager.getSpellingLoc(source_range.getEnd()); - std::pair start = source_manager.getDecomposedLoc(spelling_begin); - std::pair end = source_manager.getDecomposedLoc(spelling_end); - if (start.first != end.first) { - return -1; - } - end.second += clang::Lexer::MeasureTokenLength(spelling_end, source_manager, lexer_lopt_); - return end.second - start.second; - } - - std::string getSourceText(clang::SourceLocation begin_loc, int size, - const clang::SourceManager& source_manager) { - return std::string(clang::Lexer::getSourceText( - {clang::SourceRange(begin_loc, begin_loc.getLocWithOffset(size)), false}, source_manager, - lexer_lopt_, 0)); - } - - std::string getSourceText(clang::SourceRange source_range, - const clang::SourceManager& source_manager) { - return std::string(clang::Lexer::getSourceText( - clang::CharSourceRange::getTokenRange(source_range), source_manager, lexer_lopt_, 0)); - } - - void addNamedspaceQualifiedTypeReplacement() {} - - // Remove .proto from a path, apply specified suffix instead. - std::string adjustProtoSuffix(absl::string_view proto_path, absl::string_view suffix) { - return absl::StrCat(proto_path.substr(0, proto_path.size() - 6), suffix); - } - - // Obtain the latest type information for a given from C++ type, e.g. envoy:config::v2::Cluster, - // from the API type database. - absl::optional getTypeInformationFromCType(const std::string& c_type_name, - bool latest) { - // Ignore compound or non-API types. - // TODO(htuch): this is all super hacky and not really right, we should be - // removing qualifiers etc. to get to the underlying type name. 
- const std::string type_name = std::regex_replace(c_type_name, std::regex("^(class|enum) "), ""); - if (!isEnvoyNamespace(type_name) || absl::StrContains(type_name, " ")) { - return {}; - } - const std::string proto_type_name = ProtoCxxUtils::cxxToProtoType(type_name); - - // Use API type database to map from proto type to path. - auto result = latest ? ApiTypeDb::getLatestTypeInformation(proto_type_name) - : ApiTypeDb::getExistingTypeInformation(proto_type_name); - if (result) { - // Remove the .proto extension. - return result; - } else if (!absl::StartsWith(proto_type_name, "envoy.HotRestart") && - !absl::StartsWith(proto_type_name, "envoy.RouterCheckToolSchema") && - !absl::StartsWith(proto_type_name, "envoy.annotations") && - !absl::StartsWith(proto_type_name, "envoy.test") && - !absl::StartsWith(proto_type_name, "envoy.tracers.xray.daemon")) { - // Die hard if we don't have a useful proto type for something that looks - // like an API type(modulo a short allowlist). - std::cerr << "Unknown API type: " << proto_type_name << std::endl; - // TODO(htuch): maybe there is a nicer way to terminate AST traversal? - ::exit(1); - } - - return {}; - } - - static clang::SourceRange getSpellingRange(clang::SourceRange source_range, - const clang::SourceManager& source_manager) { - return {source_manager.getSpellingLoc(source_range.getBegin()), - source_manager.getSpellingLoc(source_range.getEnd())}; - } - - // Set of inferred .pb[.validate].h, updated as the AST matcher callbacks above fire. - std::set source_api_proto_paths_; - // Map from source file to replacements. - std::map& replacements_; - // Language options for interacting with Lexer. Currently empty. - clang::LangOptions lexer_lopt_; -}; // namespace ApiBooster - -} // namespace ApiBooster - -int main(int argc, const char** argv) { - // Apply a custom category to all command-line options so that they are the - // only ones displayed. 
- llvm::cl::OptionCategory api_booster_tool_category("api-booster options"); - - clang::tooling::CommonOptionsParser options_parser(argc, argv, api_booster_tool_category); - clang::tooling::RefactoringTool tool(options_parser.getCompilations(), - options_parser.getSourcePathList()); - - ApiBooster::ApiBooster api_booster(tool.getReplacements()); - clang::ast_matchers::MatchFinder finder; - - // Match on all mentions of types in the AST. - auto type_matcher = - clang::ast_matchers::typeLoc(clang::ast_matchers::isExpansionInMainFile()).bind("type"); - finder.addMatcher(type_matcher, &api_booster); - - // Match on all "using" declarations. - auto using_decl_matcher = - clang::ast_matchers::usingDecl(clang::ast_matchers::isExpansionInMainFile()) - .bind("using_decl"); - finder.addMatcher(using_decl_matcher, &api_booster); - - // Match on references to enum constants. - auto decl_ref_expr_matcher = - clang::ast_matchers::declRefExpr(clang::ast_matchers::isExpansionInMainFile()) - .bind("decl_ref_expr"); - finder.addMatcher(decl_ref_expr_matcher, &api_booster); - - // Match on all call expressions. We are interested in particular in calls - // where validation on protos is performed. - auto call_matcher = - clang::ast_matchers::callExpr(clang::ast_matchers::isExpansionInMainFile()).bind("call_expr"); - finder.addMatcher(call_matcher, &api_booster); - - // Match on all .foo() or ->foo() expressions. We are interested in these for renames - // and deprecations. - auto member_call_expr = - clang::ast_matchers::cxxMemberCallExpr(clang::ast_matchers::isExpansionInMainFile()) - .bind("member_call_expr"); - finder.addMatcher(member_call_expr, &api_booster); - - // Match on all template instantiations. We are interested in particular in - // instantiations of factories where validation on protos is performed. 
- auto tmpl_matcher = clang::ast_matchers::classTemplateSpecializationDecl( - clang::ast_matchers::matchesName(".*FactoryBase.*")) - .bind("tmpl"); - finder.addMatcher(tmpl_matcher, &api_booster); - - // Apply ApiBooster to AST matches. This will generate a set of replacements in - // tool.getReplacements(). - const int run_result = tool.run(newFrontendActionFactory(&finder, &api_booster).get()); - if (run_result != 0) { - std::cerr << "Exiting with non-zero result " << run_result << std::endl; - return run_result; - } - - // Serialize replacements to
.clang-replacements.yaml. - // These are suitable for consuming by clang-apply-replacements. - for (const auto& file_replacement : tool.getReplacements()) { - // Populate TranslationUnitReplacements from file replacements (this is what - // there exists llvm::yaml serialization support for). - clang::tooling::TranslationUnitReplacements tu_replacements; - tu_replacements.MainSourceFile = file_replacement.first; - for (const auto& r : file_replacement.second) { - tu_replacements.Replacements.push_back(r); - DEBUG_LOG(r.toString()); - } - // Serialize TranslationUnitReplacements to YAML. - std::string yaml_content; - llvm::raw_string_ostream yaml_content_stream(yaml_content); - llvm::yaml::Output yaml(yaml_content_stream); - yaml << tu_replacements; - // Write to
.clang-replacements.yaml. - std::ofstream serialized_replacement_file(tu_replacements.MainSourceFile + - ".clang-replacements.yaml"); - serialized_replacement_file << yaml_content_stream.str(); - } - - return 0; -} diff --git a/tools/clang_tools/api_booster/proto_cxx_utils.cc b/tools/clang_tools/api_booster/proto_cxx_utils.cc deleted file mode 100644 index 194bdc0e6bf7b..0000000000000 --- a/tools/clang_tools/api_booster/proto_cxx_utils.cc +++ /dev/null @@ -1,102 +0,0 @@ -#include "proto_cxx_utils.h" - -namespace ApiBooster { - -std::string ProtoCxxUtils::cxxToProtoType(const std::string& cxx_type_name) { - // Convert from C++ to a qualified proto type. This is fairly hacky stuff, - // we're essentially reversing the conventions that the protobuf C++ - // compiler is using, e.g. replacing _ and :: with . as needed, guessing - // that a Case suffix implies some enum switching. - const std::string rel_cxx_type_name = - absl::StartsWith(cxx_type_name, "::") ? cxx_type_name.substr(2) : cxx_type_name; - std::vector frags = absl::StrSplit(rel_cxx_type_name, "::"); - // TODO(htuch): if we add some more stricter checks on mangled name usage in - // check_format.py, we should be able to eliminate this. - for (std::string& frag : frags) { - if (!frag.empty() && isupper(frag[0])) { - frag = std::regex_replace(frag, std::regex("_"), "."); - } - } - if (absl::EndsWith(frags.back(), "Case")) { - frags.pop_back(); - } - return absl::StrJoin(frags, "."); -} - -std::string ProtoCxxUtils::protoToCxxType(const std::string& proto_type_name, bool qualified, - bool enum_type) { - std::vector frags = absl::StrSplit(proto_type_name, '.'); - // We drop the enum type name, it's not needed and confuses the mangling - // when enums are nested in messages. 
- if (enum_type) { - frags.pop_back(); - } - if (qualified) { - return absl::StrJoin(frags, "::"); - } else { - return frags.back(); - } -} - -absl::optional -ProtoCxxUtils::renameMethod(absl::string_view method_name, - const absl::node_hash_map renames) { - // Simple O(N * M) match, where M is constant (the set of prefixes/suffixes) so - // should be fine. - for (const auto& field_rename : renames) { - const std::vector GeneratedMethodPrefixes = { - "clear_", "set_", "has_", "mutable_", "set_allocated_", "release_", "add_", "", - }; - // Most of the generated methods are some prefix. - for (const std::string& prefix : GeneratedMethodPrefixes) { - if (method_name == prefix + field_rename.first) { - return prefix + field_rename.second; - } - } - // _size is the only suffix. - if (method_name == field_rename.first + "_size") { - return field_rename.second + "_size"; - } - } - return {}; -} - -absl::optional -ProtoCxxUtils::renameConstant(absl::string_view constant_name, - const absl::node_hash_map renames) { - if (constant_name.size() < 2 || constant_name[0] != 'k' || !isupper(constant_name[1])) { - return {}; - } - std::vector frags; - for (const char c : constant_name.substr(1)) { - if (isupper(c)) { - frags.emplace_back(1, tolower(c)); - } else { - frags.back().push_back(c); - } - } - const std::string field_name = absl::StrJoin(frags, "_"); - const auto it = renames.find(field_name); - if (it == renames.cend()) { - return {}; - } - std::vector new_frags = absl::StrSplit(it->second, '_'); - for (auto& frag_it : new_frags) { - if (!frag_it.empty()) { - frag_it[0] = toupper(frag_it[0]); - } - } - return "k" + absl::StrJoin(new_frags, ""); -} - -absl::optional -ProtoCxxUtils::renameEnumValue(absl::string_view enum_value_name, - const absl::node_hash_map renames) { - const auto it = renames.find(std::string(enum_value_name)); - if (it == renames.cend()) { - return {}; - } - return it->second; -} - -} // namespace ApiBooster diff --git 
a/tools/clang_tools/api_booster/proto_cxx_utils.h b/tools/clang_tools/api_booster/proto_cxx_utils.h deleted file mode 100644 index 10eff61a79104..0000000000000 --- a/tools/clang_tools/api_booster/proto_cxx_utils.h +++ /dev/null @@ -1,47 +0,0 @@ -#pragma once - -#include - -#include "absl/container/node_hash_map.h" -#include "absl/strings/str_join.h" -#include "absl/strings/str_split.h" -#include "absl/types/optional.h" - -namespace ApiBooster { - -// Protobuf C++ code generation hackery. This is where the utilities that map -// between C++ and protobuf types, enum constants and identifiers live. Most of -// this is heuristic and needs to match whatever the protobuf compiler does. -// TODO(htuch): investigate what can be done to make use of embedded proto -// descriptors in generated stubs to make these utils more robust. -class ProtoCxxUtils { -public: - // Convert from a C++ type, e.g. foo::bar::v2, to a protobuf type, e.g. - // foo.bar.v2. - static std::string cxxToProtoType(const std::string& cxx_type_name); - - // Given a method, e.g. mutable_foo, rele, and a map of renames in a give proto, - // determine if the method is covered by a generated C++ stub for a renamed - // field in proto, and if so, return the new method name. - static absl::optional - renameMethod(absl::string_view method_name, - const absl::node_hash_map renames); - - // Given a constant, e.g. kFooBar, determine if it needs upgrading. We need - // this for synthesized oneof cases. - static absl::optional - renameConstant(absl::string_view constant_name, - const absl::node_hash_map renames); - - // Given an enum value, e.g. FOO_BAR determine if it needs upgrading. - static absl::optional - renameEnumValue(absl::string_view enum_value_name, - const absl::node_hash_map renames); - - // Convert from a protobuf type, e.g. foo.bar.v2, to a C++ type, e.g. - // foo::bar::v2. 
- static std::string protoToCxxType(const std::string& proto_type_name, bool qualified, - bool enum_type); -}; - -} // namespace ApiBooster diff --git a/tools/clang_tools/api_booster/proto_cxx_utils_test.cc b/tools/clang_tools/api_booster/proto_cxx_utils_test.cc deleted file mode 100644 index 2a06413bd4d25..0000000000000 --- a/tools/clang_tools/api_booster/proto_cxx_utils_test.cc +++ /dev/null @@ -1,72 +0,0 @@ -#include "gtest/gtest.h" -#include "proto_cxx_utils.h" - -namespace ApiBooster { -namespace { - -// Validate C++ to proto type name conversion. -TEST(ProtoCxxUtils, CxxToProtoType) { - EXPECT_EQ("", ProtoCxxUtils::cxxToProtoType("")); - EXPECT_EQ("foo", ProtoCxxUtils::cxxToProtoType("foo")); - EXPECT_EQ("foo.bar", ProtoCxxUtils::cxxToProtoType("foo::bar")); - EXPECT_EQ("foo.bar", ProtoCxxUtils::cxxToProtoType("foo::bar::FooCase")); - EXPECT_EQ("foo.bar.Baz.Blah", ProtoCxxUtils::cxxToProtoType("foo::bar::Baz_Blah")); -} - -// Validate proto to C++ type name conversion. -TEST(ProtoCxxUtils, ProtoToCxxType) { - EXPECT_EQ("", ProtoCxxUtils::protoToCxxType("", false, false)); - EXPECT_EQ("", ProtoCxxUtils::protoToCxxType("", true, false)); - EXPECT_EQ("foo", ProtoCxxUtils::protoToCxxType("foo", false, false)); - EXPECT_EQ("foo", ProtoCxxUtils::protoToCxxType("foo", true, false)); - EXPECT_EQ("bar", ProtoCxxUtils::protoToCxxType("foo.bar", false, false)); - EXPECT_EQ("foo::bar", ProtoCxxUtils::protoToCxxType("foo.bar", true, false)); - EXPECT_EQ("foo::Bar", ProtoCxxUtils::protoToCxxType("foo.Bar", true, false)); - EXPECT_EQ("foo", ProtoCxxUtils::protoToCxxType("foo.Bar", true, true)); - EXPECT_EQ("foo::Bar::Baz", ProtoCxxUtils::protoToCxxType("foo.Bar.Baz", true, false)); - EXPECT_EQ("foo::Bar::Baz::Blah", ProtoCxxUtils::protoToCxxType("foo.Bar.Baz.Blah", true, false)); - EXPECT_EQ("foo::Bar::Baz", ProtoCxxUtils::protoToCxxType("foo.Bar.Baz.Blah", true, true)); -} - -// Validate proto field accessor upgrades. 
-TEST(ProtoCxxUtils, RenameMethod) { - const absl::node_hash_map renames = { - {"foo", "bar"}, - {"bar", "baz"}, - }; - EXPECT_EQ(absl::nullopt, ProtoCxxUtils::renameMethod("whatevs", renames)); - EXPECT_EQ("bar", ProtoCxxUtils::renameMethod("foo", renames)); - EXPECT_EQ("baz", ProtoCxxUtils::renameMethod("bar", renames)); - - EXPECT_EQ("clear_bar", ProtoCxxUtils::renameMethod("clear_foo", renames)); - EXPECT_EQ("set_bar", ProtoCxxUtils::renameMethod("set_foo", renames)); - EXPECT_EQ("has_bar", ProtoCxxUtils::renameMethod("has_foo", renames)); - EXPECT_EQ("mutable_bar", ProtoCxxUtils::renameMethod("mutable_foo", renames)); - EXPECT_EQ("set_allocated_bar", ProtoCxxUtils::renameMethod("set_allocated_foo", renames)); - EXPECT_EQ("release_bar", ProtoCxxUtils::renameMethod("release_foo", renames)); - EXPECT_EQ("add_bar", ProtoCxxUtils::renameMethod("add_foo", renames)); - EXPECT_EQ("bar_size", ProtoCxxUtils::renameMethod("foo_size", renames)); -} - -// Validate proto constant upgrades. -TEST(ProtoCxxUtils, RenameConstant) { - const absl::node_hash_map renames = { - {"foo_bar", "bar_foo"}, - {"foo_baz", "baz"}, - }; - EXPECT_EQ(absl::nullopt, ProtoCxxUtils::renameConstant("whatevs", renames)); - EXPECT_EQ("kBarFoo", ProtoCxxUtils::renameConstant("kFooBar", renames)); - EXPECT_EQ("kBaz", ProtoCxxUtils::renameConstant("kFooBaz", renames)); -} - -// Validate proto enum value upgrades. 
-TEST(ProtoCxxUtils, RenameEnumValue) { - const absl::node_hash_map renames = { - {"FOO_BAR", "BAR_FOO"}, - }; - EXPECT_EQ(absl::nullopt, ProtoCxxUtils::renameEnumValue("FOO_BAZ", renames)); - EXPECT_EQ("BAR_FOO", ProtoCxxUtils::renameEnumValue("FOO_BAR", renames)); -} - -} // namespace -} // namespace ApiBooster diff --git a/tools/code_format/check_format.py b/tools/code_format/check_format.py index fa88387510e19..d01897fc338a0 100755 --- a/tools/code_format/check_format.py +++ b/tools/code_format/check_format.py @@ -151,8 +151,6 @@ # Please DO NOT extend this allow list without consulting # @envoyproxy/dependency-shepherds. BUILD_URLS_ALLOWLIST = ( - "./generated_api_shadow/bazel/repository_locations.bzl", - "./generated_api_shadow/bazel/envoy_http_archive.bzl", "./bazel/repository_locations.bzl", "./bazel/external/cargo/crates.bzl", "./api/bazel/repository_locations.bzl", @@ -282,7 +280,6 @@ def __init__(self, args): self.operation_type = args.operation_type self.target_path = args.target_path self.api_prefix = args.api_prefix - self.api_shadow_root = args.api_shadow_prefix self.envoy_build_rule_check = not args.skip_envoy_build_rule_check self.namespace_check = args.namespace_check self.namespace_check_excluded_paths = args.namespace_check_excluded_paths + [ @@ -486,7 +483,7 @@ def allow_listed_for_build_urls(self, file_path): return file_path in BUILD_URLS_ALLOWLIST def is_api_file(self, file_path): - return file_path.startswith(self.api_prefix) or file_path.startswith(self.api_shadow_root) + return file_path.startswith(self.api_prefix) def is_build_file(self, file_path): basename = os.path.basename(file_path) @@ -869,7 +866,7 @@ def check_source_line(self, line, file_path, report_error): + "https://github.com/LuaJIT/LuaJIT/issues/450#issuecomment-433659873 for details.") if file_path.endswith(PROTO_SUFFIX): - exclude_path = ['v1', 'v2', 'generated_api_shadow'] + exclude_path = ['v1', 'v2'] result = PROTO_VALIDATION_STRING.search(line) if result is not None: 
if not any(x in file_path for x in exclude_path): @@ -926,8 +923,7 @@ def check_build_path(self, file_path): error_messages += self.execute_command( command, "envoy_build_fixer check failed", file_path) - if self.is_build_file(file_path) and (file_path.startswith(self.api_prefix + "envoy") or - file_path.startswith(self.api_shadow_root + "envoy")): + if self.is_build_file(file_path) and file_path.startswith(self.api_prefix + "envoy"): found = False for line in self.read_lines(file_path): if "api_proto_package(" in line: @@ -1053,21 +1049,6 @@ def check_owners(self, dir_name, owned_directories, error_messages): error_messages.append( "New directory %s appears to not have owners in CODEOWNERS" % dir_name) - def check_api_shadow_starlark_files(self, file_path, error_messages): - command = "diff -u " - command += file_path + " " - api_shadow_starlark_path = self.api_shadow_root + re.sub(r"\./api/", '', file_path) - command += api_shadow_starlark_path - - error_message = self.execute_command( - command, "invalid .bzl in generated_api_shadow", file_path) - if self.operation_type == "check": - error_messages += error_message - elif self.operation_type == "fix" and len(error_message) != 0: - shutil.copy(file_path, api_shadow_starlark_path) - - return error_messages - def check_format_visitor(self, arg, dir_name, names): """Run check_format in parallel for the given files. 
Args: @@ -1103,11 +1084,6 @@ def check_format_visitor(self, arg, dir_name, names): self.check_owners(str(top_level), owned_directories, error_messages) for file_name in names: - if dir_name.startswith("./api") and self.is_starlark_file(file_name): - result = pool.apply_async( - self.check_api_shadow_starlark_files, - args=(dir_name + "/" + file_name, error_messages)) - result_list.append(result) result = pool.apply_async( self.check_format_return_trace_on_error, args=(dir_name + "/" + file_name,)) result_list.append(result) @@ -1147,11 +1123,6 @@ def whitelisted_for_memcpy(self, file_path): default=multiprocessing.cpu_count(), help="number of worker processes to use; defaults to one per core.") parser.add_argument("--api-prefix", type=str, default="./api/", help="path of the API tree.") - parser.add_argument( - "--api-shadow-prefix", - type=str, - default="./generated_api_shadow/", - help="path of the shadow API tree.") parser.add_argument( "--skip_envoy_build_rule_check", action="store_true", diff --git a/tools/dependency/BUILD b/tools/dependency/BUILD index f8945ed0136cc..2909e36a8b8a9 100644 --- a/tools/dependency/BUILD +++ b/tools/dependency/BUILD @@ -11,8 +11,8 @@ py_library( srcs = ["exports.py"], data = [ "//bazel:repository_locations.bzl", - "@envoy_api_canonical//bazel:repository_locations.bzl", - "@envoy_api_canonical//bazel:repository_locations_utils.bzl", + "@envoy_api//bazel:repository_locations.bzl", + "@envoy_api//bazel:repository_locations_utils.bzl", ], ) diff --git a/tools/dependency/exports.py b/tools/dependency/exports.py index ed365b8a91d74..d20e7b4c79125 100644 --- a/tools/dependency/exports.py +++ b/tools/dependency/exports.py @@ -17,7 +17,7 @@ def load_module(name, path): # this is the relative path in a bazel build # to call this module outside of a bazel build set the `API_PATH` first, # for example, if running from the envoy repo root: `export API_PATH=api/` -api_path = os.getenv("API_PATH", "external/envoy_api_canonical") +api_path = 
os.getenv("API_PATH", "external/envoy_api") # Modules envoy_repository_locations = load_module( diff --git a/tools/dependency/validate.py b/tools/dependency/validate.py index 100c104cb26f3..8cf6e8f816d84 100755 --- a/tools/dependency/validate.py +++ b/tools/dependency/validate.py @@ -39,7 +39,7 @@ def load_module(name, path): IGNORE_DEPS = set([ 'envoy', 'envoy_api', - 'envoy_api_canonical', + 'envoy_api', 'platforms', 'bazel_tools', 'local_config_cc', diff --git a/tools/docs/generate_api_rst.py b/tools/docs/generate_api_rst.py index e5539332de1fb..670400140be18 100644 --- a/tools/docs/generate_api_rst.py +++ b/tools/docs/generate_api_rst.py @@ -31,7 +31,7 @@ def main(): # the contents of `proto_srcs` are the result of a bazel genquery, # containing bazel target rules, eg: # - # @envoy_api_canonical//envoy/watchdog/v3alpha:abort_action.proto + # @envoy_api//envoy/watchdog/v3alpha:abort_action.proto # # this transforms them to a list with a "canonical" form of: # diff --git a/tools/proto_format/proto_format.sh b/tools/proto_format/proto_format.sh index e80dab257f55b..d6ae826618efa 100755 --- a/tools/proto_format/proto_format.sh +++ b/tools/proto_format/proto_format.sh @@ -35,13 +35,13 @@ if [[ "$1" == "freeze" ]]; then fi # Invoke protoxform aspect. -bazel build "${BAZEL_BUILD_OPTIONS[@]}" --//tools/api_proto_plugin:default_type_db_target=@envoy_api_canonical//versioning:active_protos ${FREEZE_ARG} \ - @envoy_api_canonical//versioning:active_protos --aspects //tools/protoxform:protoxform.bzl%protoxform_aspect --output_groups=proto +bazel build "${BAZEL_BUILD_OPTIONS[@]}" --//tools/api_proto_plugin:default_type_db_target=@envoy_api//versioning:active_protos ${FREEZE_ARG} \ + @envoy_api//versioning:active_protos --aspects //tools/protoxform:protoxform.bzl%protoxform_aspect --output_groups=proto # Find all source protos. 
PROTO_TARGETS=() for proto_type in active frozen; do - protos=$(bazel query "labels(srcs, labels(deps, @envoy_api_canonical//versioning:${proto_type}_protos))") + protos=$(bazel query "labels(srcs, labels(deps, @envoy_api//versioning:${proto_type}_protos))") while read -r line; do PROTO_TARGETS+=("$line"); done \ <<< "$protos" done @@ -50,12 +50,11 @@ done TOOLS="$(dirname "$(dirname "$(realpath "$0")")")" # To satisfy dependency on api_proto_plugin. export PYTHONPATH="$TOOLS" -# Build protoprint and merge_active_shadow_tools for use in proto_sync.py. -bazel build "${BAZEL_BUILD_OPTIONS[@]}" //tools/protoxform:protoprint //tools/protoxform:merge_active_shadow +# Build protoprint for use in proto_sync.py. +bazel build "${BAZEL_BUILD_OPTIONS[@]}" //tools/protoxform:protoprint # Copy back the FileDescriptorProtos that protoxform emitted to the source tree. This involves -# pretty-printing to format with protoprint and potentially merging active/shadow versions of protos -# with merge_active_shadow. +# pretty-printing to format with protoprint. ./tools/proto_format/proto_sync.py "--mode=${PROTO_SYNC_CMD}" "${PROTO_TARGETS[@]}" --ci # Need to regenerate //versioning:active_protos before building type DB below if freezing. @@ -66,7 +65,3 @@ fi # Generate api/BUILD file based on updated type database. bazel build "${BAZEL_BUILD_OPTIONS[@]}" //tools/type_whisperer:api_build_file cp -f bazel-bin/tools/type_whisperer/BUILD.api_build_file api/BUILD - -# Misc. manual copies to keep generated_api_shadow/ in sync with api/. -cp -f ./api/bazel/*.bzl ./api/bazel/BUILD ./generated_api_shadow/bazel -cp -f ./api/BUILD ./generated_api_shadow/ diff --git a/tools/proto_format/proto_sync.py b/tools/proto_format/proto_sync.py index b3ea6686b6be9..8e8e1bc4a5991 100755 --- a/tools/proto_format/proto_sync.py +++ b/tools/proto_format/proto_sync.py @@ -1,10 +1,6 @@ #!/usr/bin/env python3 # 1. Take protoxform artifacts from Bazel cache and pretty-print with protoprint.py. -# 2. 
In the case where we are generating an Envoy internal shadow, it may be -# necessary to combine the current active proto, subject to hand editing, with -# shadow artifacts from the previous version; this is done via -# merge_active_shadow.py. # 3. Diff or copy resulting artifacts to the source tree. import argparse @@ -218,31 +214,9 @@ def proto_print(src, dst): ]) -def merge_active_shadow(active_src, shadow_src, dst): - """Merge active/shadow FileDescriptorProto to a destination file. - - Args: - active_src: source path for active FileDescriptorProto. - shadow_src: source path for active FileDescriptorProto. - dst: destination path for FileDescriptorProto. - """ - print('merge_active_shadow %s' % dst) - subprocess.check_output([ - 'bazel-bin/tools/protoxform/merge_active_shadow', - active_src, - shadow_src, - dst, - ]) - - def sync_proto_file(dst_srcs): """Pretty-print a proto descriptor from protoxform.py Bazel cache artifacts." - In the case where we are generating an Envoy internal shadow, it may be - necessary to combine the current active proto, subject to hand editing, with - shadow artifacts from the previous verion; this is done via - merge_active_shadow(). - Args: dst_srcs: destination/sources path tuple. """ @@ -256,19 +230,8 @@ def sync_proto_file(dst_srcs): # We should only see an active and next major version candidate from # previous version today. assert (len(srcs) == 2) - shadow_srcs = [ - s for s in srcs if s.endswith('.next_major_version_candidate.envoy_internal.proto') - ] active_src = [s for s in srcs if s.endswith('active_or_frozen.proto')][0] - # If we're building the shadow, we need to combine the next major version - # candidate shadow with the potentially hand edited active version. 
- if len(shadow_srcs) > 0: - assert (len(shadow_srcs) == 1) - with tempfile.NamedTemporaryFile() as f: - merge_active_shadow(active_src, shadow_srcs[0], f.name) - proto_print(f.name, dst) - else: - proto_print(active_src, dst) + proto_print(active_src, dst) src = active_src rel_dst_path = get_destination_path(src) return ['//%s:pkg' % str(rel_dst_path.parent)] @@ -477,7 +440,7 @@ def should_sync(path, api_proto_modified_files, py_tools_modified_files): return False -def sync(api_root, mode, is_ci, labels, shadow): +def sync(api_root, mode, is_ci, labels): api_proto_modified_files = git_modified_files('api', 'proto') py_tools_modified_files = git_modified_files('tools', 'py') with tempfile.TemporaryDirectory() as tmp: @@ -487,8 +450,7 @@ def sync(api_root, mode, is_ci, labels, shadow): paths.append(utils.bazel_bin_path_for_output_artifact(label, '.active_or_frozen.proto')) paths.append( utils.bazel_bin_path_for_output_artifact( - label, '.next_major_version_candidate.envoy_internal.proto' - if shadow else '.next_major_version_candidate.proto')) + label, '.next_major_version_candidate.proto')) dst_src_paths = defaultdict(list) for path in paths: if os.path.exists(path) and os.stat(path).st_size > 0: @@ -556,10 +518,8 @@ def sync(api_root, mode, is_ci, labels, shadow): parser = argparse.ArgumentParser() parser.add_argument('--mode', choices=['check', 'fix']) parser.add_argument('--api_root', default='./api') - parser.add_argument('--api_shadow_root', default='./generated_api_shadow') parser.add_argument('--ci', action="store_true", default=False) parser.add_argument('labels', nargs='*') args = parser.parse_args() - sync(args.api_root, args.mode, args.ci, args.labels, False) - sync(args.api_shadow_root, args.mode, args.ci, args.labels, True) + sync(args.api_root, args.mode, args.ci, args.labels) diff --git a/tools/protodoc/BUILD b/tools/protodoc/BUILD index 4eb0e6f5cf9ec..747a1a8c330ec 100644 --- a/tools/protodoc/BUILD +++ b/tools/protodoc/BUILD @@ -43,5 +43,5 @@ 
py_binary( protodoc_rule( name = "api_v3_protodoc", - deps = ["@envoy_api_canonical//:v3_protos"], + deps = ["@envoy_api//:v3_protos"], ) diff --git a/tools/protoxform/BUILD b/tools/protoxform/BUILD index 5473b9d31d24c..df7802802314f 100644 --- a/tools/protoxform/BUILD +++ b/tools/protoxform/BUILD @@ -1,38 +1,10 @@ -load("@rules_python//python:defs.bzl", "py_binary", "py_test") +load("@rules_python//python:defs.bzl", "py_binary") licenses(["notice"]) # Apache 2 -py_binary( - name = "merge_active_shadow", - srcs = [ - "merge_active_shadow.py", - "utils.py", - ], - deps = [ - "//tools/api_proto_plugin", - "//tools/type_whisperer:api_type_db_proto_py_proto", - "@com_envoyproxy_protoc_gen_validate//validate:validate_py", - "@com_github_cncf_udpa//udpa/annotations:pkg_py_proto", - "@com_google_googleapis//google/api:annotations_py_proto", - "@com_google_protobuf//:protobuf_python", - "@envoy_api_canonical//envoy/annotations:pkg_py_proto", - ], -) - -py_test( - name = "merge_active_shadow_test", - srcs = ["merge_active_shadow_test.py"], - deps = [ - ":merge_active_shadow", - "//tools/api_proto_plugin", - "@com_google_protobuf//:protobuf_python", - ], -) - py_binary( name = "protoxform", srcs = [ - "migrate.py", "options.py", "protoxform.py", "utils.py", @@ -44,7 +16,7 @@ py_binary( "@com_envoyproxy_protoc_gen_validate//validate:validate_py", "@com_github_cncf_udpa//udpa/annotations:pkg_py_proto", "@com_google_googleapis//google/api:annotations_py_proto", - "@envoy_api_canonical//envoy/annotations:pkg_py_proto", + "@envoy_api//envoy/annotations:pkg_py_proto", ], ) @@ -69,6 +41,6 @@ py_binary( "@com_github_cncf_udpa//udpa/annotations:pkg_py_proto", "@com_google_googleapis//google/api:annotations_py_proto", "@com_google_protobuf//:protobuf_python", - "@envoy_api_canonical//envoy/annotations:pkg_py_proto", + "@envoy_api//envoy/annotations:pkg_py_proto", ], ) diff --git a/tools/protoxform/merge_active_shadow.py b/tools/protoxform/merge_active_shadow.py deleted file mode 
100644 index fec1be4922647..0000000000000 --- a/tools/protoxform/merge_active_shadow.py +++ /dev/null @@ -1,239 +0,0 @@ -# Merge active and previous version's generated next major version candidate -# shadow. This involve simultaneously traversing both FileDescriptorProtos and: -# 1. Recovering hidden_envoy_deprecated_* fields and enum values in active proto. -# 2. Recovering deprecated (sub)message types. -# 3. Misc. fixups for oneof metadata and reserved ranges/names. - -from collections import defaultdict -import copy -import pathlib -import sys - -from tools.api_proto_plugin import type_context as api_type_context -from tools.protoxform import utils - -from google.protobuf import descriptor_pb2, text_format -from envoy.annotations import deprecation_pb2 - -PROTO_PACKAGES = ( - "google.api.annotations", "validate.validate", "envoy.annotations.deprecation", - "envoy.annotations.resource", "udpa.annotations.migrate", "udpa.annotations.security", - "udpa.annotations.status", "udpa.annotations.sensitive", "udpa.annotations.versioning") - - -# Set reserved_range in target_proto to reflect previous_reserved_range skipping -# skip_reserved_numbers. -def adjust_reserved_range(target_proto, previous_reserved_range, skip_reserved_numbers): - del target_proto.reserved_range[:] - for rr in previous_reserved_range: - # We can only handle singleton ranges today. 
- assert ((rr.start == rr.end) or (rr.end == rr.start + 1)) - if rr.start not in skip_reserved_numbers: - target_proto.reserved_range.add().MergeFrom(rr) - - -# Add dependencies for envoy.annotations.disallowed_by_default -def add_deprecation_dependencies(target_proto_dependencies, proto_field, is_enum): - if is_enum: - if proto_field.options.HasExtension(deprecation_pb2.disallowed_by_default_enum) and \ - "envoy/annotations/deprecation.proto" not in target_proto_dependencies: - target_proto_dependencies.append("envoy/annotations/deprecation.proto") - else: - if proto_field.options.HasExtension(deprecation_pb2.disallowed_by_default) and \ - "envoy/annotations/deprecation.proto" not in target_proto_dependencies: - target_proto_dependencies.append("envoy/annotations/deprecation.proto") - if proto_field.type_name == ".google.protobuf.Struct" and \ - "google/protobuf/struct.proto" not in target_proto_dependencies: - target_proto_dependencies.append("google/protobuf/struct.proto") - - -# Merge active/shadow EnumDescriptorProtos to a fresh target EnumDescriptorProto. -def merge_active_shadow_enum(active_proto, shadow_proto, target_proto, target_proto_dependencies): - target_proto.MergeFrom(active_proto) - if not shadow_proto: - return - shadow_values = {v.name: v for v in shadow_proto.value} - skip_reserved_numbers = [] - # For every reserved name, check to see if it's in the shadow, and if so, - # reintroduce in target_proto. - del target_proto.reserved_name[:] - for n in active_proto.reserved_name: - hidden_n = 'hidden_envoy_deprecated_' + n - if hidden_n in shadow_values: - v = shadow_values[hidden_n] - add_deprecation_dependencies(target_proto_dependencies, v, True) - skip_reserved_numbers.append(v.number) - target_proto.value.add().MergeFrom(v) - else: - target_proto.reserved_name.append(n) - adjust_reserved_range(target_proto, active_proto.reserved_range, skip_reserved_numbers) - # Special fixup for deprecation of default enum values. 
- for tv in target_proto.value: - if tv.name == 'DEPRECATED_AND_UNAVAILABLE_DO_NOT_USE': - for sv in shadow_proto.value: - if sv.number == tv.number: - assert (sv.number == 0) - tv.CopyFrom(sv) - - -# Adjust source code info comments path to reflect insertions of oneof fields -# inside the middle of an existing collection of fields. -def adjust_source_code_info(type_context, field_index, field_adjustment): - - def has_path_prefix(s, t): - return len(s) <= len(t) and all(p[0] == p[1] for p in zip(s, t)) - - for loc in type_context.source_code_info.proto.location: - if has_path_prefix(type_context.path + [2], loc.path): - path_field_index = len(type_context.path) + 1 - if path_field_index < len(loc.path) and loc.path[path_field_index] >= field_index: - loc.path[path_field_index] += field_adjustment - - -# Merge active/shadow DescriptorProtos to a fresh target DescriptorProto. -def merge_active_shadow_message( - type_context, active_proto, shadow_proto, target_proto, target_proto_dependencies): - target_proto.MergeFrom(active_proto) - if not shadow_proto: - return - shadow_fields = {f.name: f for f in shadow_proto.field} - skip_reserved_numbers = [] - # For every reserved name, check to see if it's in the shadow, and if so, - # reintroduce in target_proto. We track both the normal fields we need to add - # back in (extra_simple_fields) and those that belong to oneofs - # (extra_oneof_fields). The latter require special treatment, as we can't just - # append them to the end of the message, they need to be reordered. 
- extra_simple_fields = [] - extra_oneof_fields = defaultdict(list) # oneof index -> list of fields - del target_proto.reserved_name[:] - for n in active_proto.reserved_name: - hidden_n = 'hidden_envoy_deprecated_' + n - if hidden_n in shadow_fields: - f = shadow_fields[hidden_n] - add_deprecation_dependencies(target_proto_dependencies, f, False) - skip_reserved_numbers.append(f.number) - missing_field = copy.deepcopy(f) - # oneof fields from the shadow need to have their index set to the - # corresponding index in active/target_proto. - if missing_field.HasField('oneof_index'): - oneof_name = shadow_proto.oneof_decl[missing_field.oneof_index].name - missing_oneof_index = None - for oneof_index, oneof_decl in enumerate(target_proto.oneof_decl): - if oneof_decl.name == oneof_name: - missing_oneof_index = oneof_index - if missing_oneof_index is None: - missing_oneof_index = len(target_proto.oneof_decl) - target_proto.oneof_decl.add().MergeFrom( - shadow_proto.oneof_decl[missing_field.oneof_index]) - missing_field.oneof_index = missing_oneof_index - extra_oneof_fields[missing_oneof_index].append(missing_field) - else: - extra_simple_fields.append(missing_field) - else: - target_proto.reserved_name.append(n) - # Copy existing fields, as we need to nuke them. - existing_fields = copy.deepcopy(target_proto.field) - del target_proto.field[:] - # Rebuild fields, taking into account extra_oneof_fields. protoprint.py - # expects that oneof fields are consecutive, so need to sort for this. - current_oneof_index = None - - def append_extra_oneof_fields(current_oneof_index, last_oneof_field_index): - # Add fields from extra_oneof_fields for current_oneof_index. - for oneof_f in extra_oneof_fields[current_oneof_index]: - target_proto.field.add().MergeFrom(oneof_f) - field_adjustment = len(extra_oneof_fields[current_oneof_index]) - # Fixup the comments in source code info. 
Note that this is really - # inefficient, O(N^2) in the worst case, but since we have relatively few - # deprecated fields, is the easiest to implement method. - if last_oneof_field_index is not None: - adjust_source_code_info(type_context, last_oneof_field_index, field_adjustment) - del extra_oneof_fields[current_oneof_index] - return field_adjustment - - field_index = 0 - for f in existing_fields: - if current_oneof_index is not None: - field_oneof_index = f.oneof_index if f.HasField('oneof_index') else None - # Are we exiting the oneof? If so, add the respective extra_one_fields. - if field_oneof_index != current_oneof_index: - field_index += append_extra_oneof_fields(current_oneof_index, field_index) - current_oneof_index = field_oneof_index - elif f.HasField('oneof_index'): - current_oneof_index = f.oneof_index - target_proto.field.add().MergeFrom(f) - field_index += 1 - if current_oneof_index is not None: - # No need to adjust source code info here, since there are no comments for - # trailing deprecated fields, so just set field index to None. - append_extra_oneof_fields(current_oneof_index, None) - # Non-oneof fields are easy to treat, we just append them to the existing - # fields. They don't get any comments, but that's fine in the generated - # shadows. - for f in extra_simple_fields: - target_proto.field.add().MergeFrom(f) - for oneof_index in sorted(extra_oneof_fields.keys()): - for f in extra_oneof_fields[oneof_index]: - target_proto.field.add().MergeFrom(f) - # Same is true for oneofs that are exclusively from the shadow. 
- adjust_reserved_range(target_proto, active_proto.reserved_range, skip_reserved_numbers) - # Visit nested message types - del target_proto.nested_type[:] - shadow_msgs = {msg.name: msg for msg in shadow_proto.nested_type} - for index, msg in enumerate(active_proto.nested_type): - merge_active_shadow_message( - type_context.extend_nested_message(index, msg.name, msg.options.deprecated), msg, - shadow_msgs.get(msg.name), target_proto.nested_type.add(), target_proto_dependencies) - # Visit nested enum types - del target_proto.enum_type[:] - shadow_enums = {msg.name: msg for msg in shadow_proto.enum_type} - for enum in active_proto.enum_type: - merge_active_shadow_enum( - enum, shadow_enums.get(enum.name), target_proto.enum_type.add(), - target_proto_dependencies) - # Ensure target has any deprecated sub-message types in case they are needed. - active_msg_names = set([msg.name for msg in active_proto.nested_type]) - for msg in shadow_proto.nested_type: - if msg.name not in active_msg_names: - target_proto.nested_type.add().MergeFrom(msg) - - -# Merge active/shadow FileDescriptorProtos, returning the resulting FileDescriptorProto. 
-def merge_active_shadow_file(active_file_proto, shadow_file_proto): - target_file_proto = copy.deepcopy(active_file_proto) - source_code_info = api_type_context.SourceCodeInfo( - target_file_proto.name, target_file_proto.source_code_info) - package_type_context = api_type_context.TypeContext(source_code_info, target_file_proto.package) - # Visit message types - del target_file_proto.message_type[:] - shadow_msgs = {msg.name: msg for msg in shadow_file_proto.message_type} - for index, msg in enumerate(active_file_proto.message_type): - merge_active_shadow_message( - package_type_context.extend_message(index, msg.name, msg.options.deprecated), msg, - shadow_msgs.get(msg.name), target_file_proto.message_type.add(), - target_file_proto.dependency) - # Visit enum types - del target_file_proto.enum_type[:] - shadow_enums = {msg.name: msg for msg in shadow_file_proto.enum_type} - for enum in active_file_proto.enum_type: - merge_active_shadow_enum( - enum, shadow_enums.get(enum.name), target_file_proto.enum_type.add(), - target_file_proto.dependency) - # Ensure target has any deprecated message types in case they are needed. 
- active_msg_names = set([msg.name for msg in active_file_proto.message_type]) - for msg in shadow_file_proto.message_type: - if msg.name not in active_msg_names: - target_file_proto.message_type.add().MergeFrom(msg) - return target_file_proto - - -if __name__ == '__main__': - active_src, shadow_src, dst = sys.argv[1:] - - utils.load_protos(PROTO_PACKAGES) - - active_proto = descriptor_pb2.FileDescriptorProto() - text_format.Merge(pathlib.Path(active_src).read_text(), active_proto) - shadow_proto = descriptor_pb2.FileDescriptorProto() - text_format.Merge(pathlib.Path(shadow_src).read_text(), shadow_proto) - pathlib.Path(dst).write_text(str(merge_active_shadow_file(active_proto, shadow_proto))) diff --git a/tools/protoxform/merge_active_shadow_test.py b/tools/protoxform/merge_active_shadow_test.py deleted file mode 100644 index c15af85918828..0000000000000 --- a/tools/protoxform/merge_active_shadow_test.py +++ /dev/null @@ -1,590 +0,0 @@ -import unittest - -import merge_active_shadow - -from tools.api_proto_plugin import type_context as api_type_context -from tools.protoxform import utils - -from google.protobuf import descriptor_pb2 -from google.protobuf import text_format - - -class MergeActiveShadowTest(unittest.TestCase): - # Dummy type context for tests that don't care about this. - def fake_type_context(self): - fake_source_code_info = descriptor_pb2.SourceCodeInfo() - source_code_info = api_type_context.SourceCodeInfo('fake', fake_source_code_info) - return api_type_context.TypeContext(source_code_info, 'fake_package') - - # Poor man's text proto equivalence. Tensorflow has better tools for this, - # i.e. assertProto2Equal. 
- def assert_text_proto_eq(self, lhs, rhs): - self.assertMultiLineEqual(lhs.strip(), rhs.strip()) - - def testadjust_reserved_range(self): - """adjust_reserved_range removes specified skip_reserved_numbers.""" - desc_pb_text = """ -reserved_range { - start: 41 - end: 41 -} -reserved_range { - start: 42 - end: 42 -} -reserved_range { - start: 43 - end: 44 -} -reserved_range { - start: 50 - end: 51 -} - """ - desc = descriptor_pb2.DescriptorProto() - text_format.Merge(desc_pb_text, desc) - target = descriptor_pb2.DescriptorProto() - merge_active_shadow.adjust_reserved_range(target, desc.reserved_range, [42, 43]) - target_pb_text = """ -reserved_range { - start: 41 - end: 41 -} -reserved_range { - start: 50 - end: 51 -} - """ - self.assert_text_proto_eq(target_pb_text, str(target)) - - def testmerge_active_shadow_enum(self): - """merge_active_shadow_enum recovers shadow values.""" - active_pb_text = """ -value { - number: 1 - name: "foo" -} -value { - number: 0 - name: "DEPRECATED_AND_UNAVAILABLE_DO_NOT_USE" -} -value { - number: 3 - name: "bar" -} -reserved_name: "baz" -reserved_range { - start: 2 - end: 3 -} - """ - active_proto = descriptor_pb2.EnumDescriptorProto() - text_format.Merge(active_pb_text, active_proto) - shadow_pb_text = """ -value { - number: 1 - name: "foo" -} -value { - number: 0 - name: "wow" -} -value { - number: 3 - name: "bar" -} -value { - number: 2 - name: "hidden_envoy_deprecated_baz" -} -value { - number: 4 - name: "hidden_envoy_deprecated_huh" -} - """ - shadow_proto = descriptor_pb2.EnumDescriptorProto() - text_format.Merge(shadow_pb_text, shadow_proto) - target_proto = descriptor_pb2.EnumDescriptorProto() - target_proto_dependencies = [] - merge_active_shadow.merge_active_shadow_enum( - active_proto, shadow_proto, target_proto, target_proto_dependencies) - target_pb_text = """ -value { - name: "foo" - number: 1 -} -value { - name: "wow" - number: 0 -} -value { - name: "bar" - number: 3 -} -value { - name: "hidden_envoy_deprecated_baz" - 
number: 2 -} - """ - self.assert_text_proto_eq(target_pb_text, str(target_proto)) - - def testmerge_active_shadow_message_comments(self): - """merge_active_shadow_message preserves comment field correspondence.""" - active_pb_text = """ -field { - number: 9 - name: "oneof_1_0" - oneof_index: 0 -} -field { - number: 1 - name: "simple_field_0" -} -field { - number: 0 - name: "oneof_2_0" - oneof_index: 2 -} -field { - number: 8 - name: "oneof_2_1" - oneof_index: 2 -} -field { - number: 3 - name: "oneof_0_0" - oneof_index: 1 -} -field { - number: 4 - name: "newbie" -} -field { - number: 7 - name: "oneof_3_0" - oneof_index: 3 -} -reserved_name: "missing_oneof_field_0" -reserved_name: "missing_oneof_field_1" -reserved_name: "missing_oneof_field_2" -oneof_decl { - name: "oneof_0" -} -oneof_decl { - name: "oneof_1" -} -oneof_decl { - name: "oneof_2" -} -oneof_decl { - name: "oneof_3" -} - """ - active_proto = descriptor_pb2.DescriptorProto() - text_format.Merge(active_pb_text, active_proto) - active_source_code_info_text = """ -location { - path: [4, 1, 2, 4] - leading_comments: "field_4" -} -location { - path: [4, 1, 2, 5] - leading_comments: "field_5" -} -location { - path: [4, 1, 2, 3] - leading_comments: "field_3" -} -location { - path: [4, 1, 2, 0] - leading_comments: "field_0" -} -location { - path: [4, 1, 2, 1] - leading_comments: "field_1" -} -location { - path: [4, 0, 2, 2] - leading_comments: "ignore_0" -} -location { - path: [4, 1, 2, 6] - leading_comments: "field_6" -} -location { - path: [4, 1, 2, 2] - leading_comments: "field_2" -} -location { - path: [3] - leading_comments: "ignore_1" -} -""" - active_source_code_info = descriptor_pb2.SourceCodeInfo() - text_format.Merge(active_source_code_info_text, active_source_code_info) - shadow_pb_text = """ -field { - number: 10 - name: "hidden_envoy_deprecated_missing_oneof_field_0" - oneof_index: 0 -} -field { - number: 11 - name: "hidden_envoy_deprecated_missing_oneof_field_1" - oneof_index: 3 -} -field { - number: 
11 - name: "hidden_envoy_deprecated_missing_oneof_field_2" - oneof_index: 2 -} -oneof_decl { - name: "oneof_0" -} -oneof_decl { - name: "oneof_1" -} -oneof_decl { - name: "oneof_2" -} -oneof_decl { - name: "some_removed_oneof" -} -oneof_decl { - name: "oneof_3" -} -""" - shadow_proto = descriptor_pb2.DescriptorProto() - text_format.Merge(shadow_pb_text, shadow_proto) - target_proto = descriptor_pb2.DescriptorProto() - source_code_info = api_type_context.SourceCodeInfo('fake', active_source_code_info) - fake_type_context = api_type_context.TypeContext(source_code_info, 'fake_package') - target_proto_dependencies = [] - merge_active_shadow.merge_active_shadow_message( - fake_type_context.extend_message(1, "foo", False), active_proto, shadow_proto, - target_proto, target_proto_dependencies) - target_pb_text = """ -field { - name: "oneof_1_0" - number: 9 - oneof_index: 0 -} -field { - name: "hidden_envoy_deprecated_missing_oneof_field_0" - number: 10 - oneof_index: 0 -} -field { - name: "simple_field_0" - number: 1 -} -field { - name: "oneof_2_0" - number: 0 - oneof_index: 2 -} -field { - name: "oneof_2_1" - number: 8 - oneof_index: 2 -} -field { - name: "hidden_envoy_deprecated_missing_oneof_field_2" - number: 11 - oneof_index: 2 -} -field { - name: "oneof_0_0" - number: 3 - oneof_index: 1 -} -field { - name: "newbie" - number: 4 -} -field { - name: "oneof_3_0" - number: 7 - oneof_index: 3 -} -field { - name: "hidden_envoy_deprecated_missing_oneof_field_1" - number: 11 - oneof_index: 4 -} -oneof_decl { - name: "oneof_0" -} -oneof_decl { - name: "oneof_1" -} -oneof_decl { - name: "oneof_2" -} -oneof_decl { - name: "oneof_3" -} -oneof_decl { - name: "some_removed_oneof" -} - """ - target_source_code_info_text = """ -location { - path: 4 - path: 1 - path: 2 - path: 6 - leading_comments: "field_4" -} -location { - path: 4 - path: 1 - path: 2 - path: 7 - leading_comments: "field_5" -} -location { - path: 4 - path: 1 - path: 2 - path: 4 - leading_comments: "field_3" -} 
-location { - path: 4 - path: 1 - path: 2 - path: 0 - leading_comments: "field_0" -} -location { - path: 4 - path: 1 - path: 2 - path: 2 - leading_comments: "field_1" -} -location { - path: 4 - path: 0 - path: 2 - path: 2 - leading_comments: "ignore_0" -} -location { - path: 4 - path: 1 - path: 2 - path: 8 - leading_comments: "field_6" -} -location { - path: 4 - path: 1 - path: 2 - path: 3 - leading_comments: "field_2" -} -location { - path: 3 - leading_comments: "ignore_1" -} -""" - self.maxDiff = None - self.assert_text_proto_eq(target_pb_text, str(target_proto)) - self.assert_text_proto_eq( - target_source_code_info_text, str(fake_type_context.source_code_info.proto)) - - def testmerge_active_shadow_message(self): - """merge_active_shadow_message recovers shadow fields with oneofs.""" - active_pb_text = """ -field { - number: 1 - name: "foo" -} -field { - number: 0 - name: "bar" - oneof_index: 2 -} -field { - number: 3 - name: "baz" -} -field { - number: 4 - name: "newbie" -} -reserved_name: "wow" -reserved_range { - start: 2 - end: 3 -} -oneof_decl { - name: "ign" -} -oneof_decl { - name: "ign2" -} -oneof_decl { - name: "some_oneof" -} - """ - active_proto = descriptor_pb2.DescriptorProto() - text_format.Merge(active_pb_text, active_proto) - shadow_pb_text = """ -field { - number: 1 - name: "foo" -} -field { - number: 0 - name: "bar" -} -field { - number: 3 - name: "baz" -} -field { - number: 5 - name: "hidden_envoy_deprecated_wow" - options { - deprecated: true - [validate.rules] { - string { - max_bytes: 1024 - } - } - [envoy.annotations.disallowed_by_default]: true - } - oneof_index: 0 -} -oneof_decl { - name: "some_oneof" -} - """ - shadow_proto = descriptor_pb2.DescriptorProto() - text_format.Merge(shadow_pb_text, shadow_proto) - target_proto = descriptor_pb2.DescriptorProto() - target_proto_dependencies = [] - merge_active_shadow.merge_active_shadow_message( - self.fake_type_context(), active_proto, shadow_proto, target_proto, - target_proto_dependencies) 
- target_pb_text = """ -field { - name: "foo" - number: 1 -} -field { - name: "bar" - number: 0 - oneof_index: 2 -} -field { - name: "hidden_envoy_deprecated_wow" - number: 5 - options { - deprecated: true - [validate.rules] { - string { - max_bytes: 1024 - } - } - [envoy.annotations.disallowed_by_default]: true - } - oneof_index: 2 -} -field { - name: "baz" - number: 3 -} -field { - name: "newbie" - number: 4 -} -oneof_decl { - name: "ign" -} -oneof_decl { - name: "ign2" -} -oneof_decl { - name: "some_oneof" -} -reserved_range { - start: 2 - end: 3 -} - """ - self.assert_text_proto_eq(target_pb_text, str(target_proto)) - self.assertEqual(target_proto_dependencies[0], 'envoy/annotations/deprecation.proto') - - def testmerge_active_shadow_message_no_shadow_message(self): - """merge_active_shadow_message doesn't require a shadow message for new nested active messages.""" - active_proto = descriptor_pb2.DescriptorProto() - shadow_proto = descriptor_pb2.DescriptorProto() - active_proto.nested_type.add().name = 'foo' - target_proto = descriptor_pb2.DescriptorProto() - target_proto_dependencies = [] - merge_active_shadow.merge_active_shadow_message( - self.fake_type_context(), active_proto, shadow_proto, target_proto, - target_proto_dependencies) - self.assertEqual(target_proto.nested_type[0].name, 'foo') - - def testmerge_active_shadow_message_no_shadow_enum(self): - """merge_active_shadow_message doesn't require a shadow enum for new nested active enums.""" - active_proto = descriptor_pb2.DescriptorProto() - shadow_proto = descriptor_pb2.DescriptorProto() - active_proto.enum_type.add().name = 'foo' - target_proto = descriptor_pb2.DescriptorProto() - target_proto_dependencies = [] - merge_active_shadow.merge_active_shadow_message( - self.fake_type_context(), active_proto, shadow_proto, target_proto, - target_proto_dependencies) - self.assertEqual(target_proto.enum_type[0].name, 'foo') - - def testmerge_active_shadow_message_missing(self): - 
"""merge_active_shadow_message recovers missing messages from shadow.""" - active_proto = descriptor_pb2.DescriptorProto() - shadow_proto = descriptor_pb2.DescriptorProto() - shadow_proto.nested_type.add().name = 'foo' - target_proto = descriptor_pb2.DescriptorProto() - target_proto_dependencies = [] - merge_active_shadow.merge_active_shadow_message( - self.fake_type_context(), active_proto, shadow_proto, target_proto, - target_proto_dependencies) - self.assertEqual(target_proto.nested_type[0].name, 'foo') - - def testmerge_active_shadow_file_missing(self): - """merge_active_shadow_file recovers missing messages from shadow.""" - active_proto = descriptor_pb2.FileDescriptorProto() - shadow_proto = descriptor_pb2.FileDescriptorProto() - shadow_proto.message_type.add().name = 'foo' - target_proto = descriptor_pb2.DescriptorProto() - target_proto = merge_active_shadow.merge_active_shadow_file(active_proto, shadow_proto) - self.assertEqual(target_proto.message_type[0].name, 'foo') - - def testmerge_active_shadow_file_no_shadow_message(self): - """merge_active_shadow_file doesn't require a shadow message for new active messages.""" - active_proto = descriptor_pb2.FileDescriptorProto() - shadow_proto = descriptor_pb2.FileDescriptorProto() - active_proto.message_type.add().name = 'foo' - target_proto = descriptor_pb2.DescriptorProto() - target_proto = merge_active_shadow.merge_active_shadow_file(active_proto, shadow_proto) - self.assertEqual(target_proto.message_type[0].name, 'foo') - - def testmerge_active_shadow_file_no_shadow_enum(self): - """merge_active_shadow_file doesn't require a shadow enum for new active enums.""" - active_proto = descriptor_pb2.FileDescriptorProto() - shadow_proto = descriptor_pb2.FileDescriptorProto() - active_proto.enum_type.add().name = 'foo' - target_proto = descriptor_pb2.DescriptorProto() - target_proto = merge_active_shadow.merge_active_shadow_file(active_proto, shadow_proto) - self.assertEqual(target_proto.enum_type[0].name, 'foo') - - 
-# TODO(htuch): add some test for recursion. - -if __name__ == '__main__': - utils.load_protos(merge_active_shadow.PROTO_PACKAGES) - unittest.main() diff --git a/tools/protoxform/migrate.py b/tools/protoxform/migrate.py deleted file mode 100644 index 0ab0808abc502..0000000000000 --- a/tools/protoxform/migrate.py +++ /dev/null @@ -1,276 +0,0 @@ -# API upgrade business logic. - -import copy -import re - -from tools.api_proto_plugin import traverse -from tools.api_proto_plugin import visitor -from tools.protoxform import options -from tools.protoxform import utils - -from envoy_api_canonical.envoy.annotations import resource_pb2 -from udpa.annotations import migrate_pb2 -from udpa.annotations import status_pb2 -from google.api import annotations_pb2 - -ENVOY_API_TYPE_REGEX_STR = 'envoy_api_(msg|enum_value|field|enum)_([\w\.]+)' -ENVOY_COMMENT_WITH_TYPE_REGEX = re.compile( - '<%s>|:ref:`%s`' % (ENVOY_API_TYPE_REGEX_STR, ENVOY_API_TYPE_REGEX_STR)) - - -class UpgradeVisitor(visitor.Visitor): - """Visitor to generate an upgraded proto from a FileDescriptor proto. - - See visitor.Visitor for visitor method docs comments. - """ - - def __init__(self, n, typedb, envoy_internal_shadow, package_version_status): - self._base_version = n - self._typedb = typedb - self._envoy_internal_shadow = envoy_internal_shadow - self._package_version_status = package_version_status - - def _upgraded_comment(self, c): - - def upgrade_type(match): - # We're upgrading a type within a RST anchor reference here. These are - # stylized and match the output format of tools/protodoc. We need to do - # some special handling of field/enum values, and also the normalization - # that was performed in v2 for envoy.api.v2 types. 
- label_ref_type, label_normalized_type_name, section_ref_type, section_normalized_type_name = match.groups( - ) - if label_ref_type is not None: - ref_type = label_ref_type - normalized_type_name = label_normalized_type_name - else: - ref_type = section_ref_type - normalized_type_name = section_normalized_type_name - if ref_type == 'field' or ref_type == 'enum_value': - normalized_type_name, residual = normalized_type_name.rsplit('.', 1) - else: - residual = '' - type_name = 'envoy.' + normalized_type_name - api_v2_type_name = 'envoy.api.v2.' + normalized_type_name - if type_name in self._typedb.types: - type_desc = self._typedb.types[type_name] - else: - # We need to deal with envoy.api.* normalization in the v2 API. We won't - # need this in v3+, so rather than churn docs, we just have this workaround. - type_desc = self._typedb.types[api_v2_type_name] - repl_type = type_desc.next_version_type_name[ - len('envoy.'):] if type_desc.next_version_type_name else normalized_type_name - # TODO(htuch): this should really either go through the type database or - # via the descriptor pool and annotations, but there are only two of these - # we need for the initial v2 -> v3 docs cut, so hard coding for now. - # Tracked at https://github.com/envoyproxy/envoy/issues/9734. - if repl_type == 'config.route.v3.RouteAction': - if residual == 'host_rewrite': - residual = 'host_rewrite_literal' - elif residual == 'auto_host_rewrite_header': - residual = 'auto_host_rewrite' - new_ref = 'envoy_api_%s_%s%s' % ( - ref_type, repl_type, '.' + residual if residual else '') - if label_ref_type is not None: - return '<%s>' % new_ref - else: - return ':ref:`%s`' % new_ref - - return re.sub(ENVOY_COMMENT_WITH_TYPE_REGEX, upgrade_type, c) - - def _upgraded_post_method(self, m): - return re.sub(r'^/v%d/' % self._base_version, '/v%d/' % (self._base_version + 1), m) - - # Upgraded type using canonical type naming, e.g. foo.bar. 
- def _upgraded_type_canonical(self, t): - if not t.startswith('envoy'): - return t - type_desc = self._typedb.types[t] - if type_desc.next_version_type_name: - return type_desc.next_version_type_name - return t - - # Upgraded type using internal type naming, e.g. .foo.bar. - def _upgraded_type(self, t): - if not t.startswith('.envoy'): - return t - return '.' + self._upgraded_type_canonical(t[1:]) - - def _deprecate(self, proto, field_or_value): - """Deprecate a field or value in a message/enum proto. - - Args: - proto: DescriptorProto or EnumDescriptorProto message. - field_or_value: field or value inside proto. - """ - if self._envoy_internal_shadow: - field_or_value.name = 'hidden_envoy_deprecated_' + field_or_value.name - else: - reserved = proto.reserved_range.add() - reserved.start = field_or_value.number - reserved.end = field_or_value.number + 1 - proto.reserved_name.append(field_or_value.name) - options.add_hide_option(field_or_value.options) - - def _rename(self, proto, migrate_annotation): - """Rename a field/enum/service/message - - Args: - proto: DescriptorProto or corresponding proto message - migrate_annotation: udpa.annotations.MigrateAnnotation message - """ - if migrate_annotation.rename: - proto.name = migrate_annotation.rename - migrate_annotation.rename = "" - - def _oneof_promotion(self, msg_proto, field_proto, migrate_annotation): - """Promote a field to a oneof. - - Args: - msg_proto: DescriptorProto for message containing field. - field_proto: FieldDescriptorProto for field. 
- migrate_annotation: udpa.annotations.FieldMigrateAnnotation message - """ - if migrate_annotation.oneof_promotion: - oneof_index = -1 - for n, oneof_decl in enumerate(msg_proto.oneof_decl): - if oneof_decl.name == migrate_annotation.oneof_promotion: - oneof_index = n - if oneof_index == -1: - oneof_index = len(msg_proto.oneof_decl) - oneof_decl = msg_proto.oneof_decl.add() - oneof_decl.name = migrate_annotation.oneof_promotion - field_proto.oneof_index = oneof_index - migrate_annotation.oneof_promotion = "" - - def visit_service(self, service_proto, type_context): - upgraded_proto = copy.deepcopy(service_proto) - for m in upgraded_proto.method: - if m.options.HasExtension(annotations_pb2.http): - http_options = m.options.Extensions[annotations_pb2.http] - # TODO(htuch): figure out a more systematic approach using the type DB - # to service upgrade. - http_options.post = self._upgraded_post_method(http_options.post) - m.input_type = self._upgraded_type(m.input_type) - m.output_type = self._upgraded_type(m.output_type) - if service_proto.options.HasExtension(resource_pb2.resource): - upgraded_proto.options.Extensions[ - resource_pb2.resource].type = self._upgraded_type_canonical( - service_proto.options.Extensions[resource_pb2.resource].type) - return upgraded_proto - - def visit_message(self, msg_proto, type_context, nested_msgs, nested_enums): - upgraded_proto = copy.deepcopy(msg_proto) - if upgraded_proto.options.deprecated and not self._envoy_internal_shadow: - options.add_hide_option(upgraded_proto.options) - options.set_versioning_annotation(upgraded_proto.options, type_context.name) - # Mark deprecated fields as ready for deletion by protoxform. 
- for f in upgraded_proto.field: - if f.options.deprecated: - self._deprecate(upgraded_proto, f) - if self._envoy_internal_shadow: - # When shadowing, we use the upgraded version of types (which should - # themselves also be shadowed), to allow us to avoid unnecessary - # references to the previous version (and complexities around - # upgrading during API boosting). - f.type_name = self._upgraded_type(f.type_name) - else: - # Make sure the type name is erased so it isn't picked up by protoxform - # when computing deps. - f.type_name = "" - else: - f.type_name = self._upgraded_type(f.type_name) - if f.options.HasExtension(migrate_pb2.field_migrate): - field_migrate = f.options.Extensions[migrate_pb2.field_migrate] - self._rename(f, field_migrate) - self._oneof_promotion(upgraded_proto, f, field_migrate) - # Upgrade nested messages. - del upgraded_proto.nested_type[:] - upgraded_proto.nested_type.extend(nested_msgs) - # Upgrade enums. - del upgraded_proto.enum_type[:] - upgraded_proto.enum_type.extend(nested_enums) - return upgraded_proto - - def visit_enum(self, enum_proto, type_context): - upgraded_proto = copy.deepcopy(enum_proto) - if upgraded_proto.options.deprecated and not self._envoy_internal_shadow: - options.add_hide_option(upgraded_proto.options) - for v in upgraded_proto.value: - if v.options.deprecated: - # We need special handling for the zero field, as proto3 needs some value - # here. - if v.number == 0 and not self._envoy_internal_shadow: - v.name = 'DEPRECATED_AND_UNAVAILABLE_DO_NOT_USE' - else: - # Mark deprecated enum values as ready for deletion by protoxform. - self._deprecate(upgraded_proto, v) - elif v.options.HasExtension(migrate_pb2.enum_value_migrate): - self._rename(v, v.options.Extensions[migrate_pb2.enum_value_migrate]) - return upgraded_proto - - def visit_file(self, file_proto, type_context, services, msgs, enums): - upgraded_proto = copy.deepcopy(file_proto) - # Upgrade imports. 
- upgraded_proto.dependency[:] = [ - dependency for dependency in upgraded_proto.dependency - if dependency not in ("udpa/annotations/migrate.proto") - ] - # Upgrade package. - upgraded_proto.package = self._typedb.next_version_protos[ - upgraded_proto.name].qualified_package - upgraded_proto.name = self._typedb.next_version_protos[upgraded_proto.name].proto_path - upgraded_proto.options.ClearExtension(migrate_pb2.file_migrate) - upgraded_proto.options.Extensions[ - status_pb2.file_status].package_version_status = self._package_version_status - # Upgrade comments. - for location in upgraded_proto.source_code_info.location: - location.leading_comments = self._upgraded_comment(location.leading_comments) - location.trailing_comments = self._upgraded_comment(location.trailing_comments) - for n, c in enumerate(location.leading_detached_comments): - location.leading_detached_comments[n] = self._upgraded_comment(c) - # Upgrade services. - del upgraded_proto.service[:] - upgraded_proto.service.extend(services) - # Upgrade messages. - del upgraded_proto.message_type[:] - upgraded_proto.message_type.extend(msgs) - # Upgrade enums. - del upgraded_proto.enum_type[:] - upgraded_proto.enum_type.extend(enums) - - return upgraded_proto - - -def version_upgrade_xform(n, envoy_internal_shadow, file_proto, params): - """Transform a FileDescriptorProto from vN[alpha\d] to v(N+1). - - Args: - n: version N to upgrade from. - envoy_internal_shadow: generate a shadow for Envoy internal use containing deprecated fields. - file_proto: vN[alpha\d] FileDescriptorProto message. - params: plugin parameters. - - Returns: - v(N+1) FileDescriptorProto message. - """ - # Load type database. - if params['type_db_path']: - utils.load_type_db(params['type_db_path']) - typedb = utils.get_type_db() - # If this isn't a proto in an upgraded package, return None. 
- if file_proto.name not in typedb.next_version_protos or not typedb.next_version_protos[ - file_proto.name]: - return None - # Otherwise, this .proto needs upgrading, do it. - freeze = 'extra_args' in params and params['extra_args'] == 'freeze' - existing_pkg_version_status = file_proto.options.Extensions[ - status_pb2.file_status].package_version_status - # Normally, we are generating the NEXT_MAJOR_VERSION_CANDIDATE. However, if - # freezing and previously this was the active major version, the migrated - # version is now the ACTIVE version. - if freeze and existing_pkg_version_status == status_pb2.ACTIVE: - package_version_status = status_pb2.ACTIVE - else: - package_version_status = status_pb2.NEXT_MAJOR_VERSION_CANDIDATE - return traverse.traverse_file( - file_proto, UpgradeVisitor(n, typedb, envoy_internal_shadow, package_version_status)) diff --git a/tools/protoxform/protoxform.bzl b/tools/protoxform/protoxform.bzl index abdbac95b3963..0d3c32b6e9b2e 100644 --- a/tools/protoxform/protoxform.bzl +++ b/tools/protoxform/protoxform.bzl @@ -6,11 +6,7 @@ def _protoxform_impl(target, ctx): ctx, "proto", "protoxform", - [ - ".active_or_frozen.proto", - ".next_major_version_candidate.proto", - ".next_major_version_candidate.envoy_internal.proto", - ], + [".active_or_frozen.proto"], ) # Bazel aspect (https://docs.bazel.build/versions/master/starlark/aspects.html) diff --git a/tools/protoxform/protoxform.py b/tools/protoxform/protoxform.py index 9e370dcc0c824..54eb431ae38e1 100755 --- a/tools/protoxform/protoxform.py +++ b/tools/protoxform/protoxform.py @@ -8,15 +8,14 @@ import functools from tools.api_proto_plugin import plugin, visitor -from tools.protoxform import migrate, utils +from tools.protoxform import utils from udpa.annotations import status_pb2 PROTO_PACKAGES = ( - "google.api.annotations", "validate.validate", - "envoy_api_canonical.envoy.annotations.deprecation", - "envoy_api_canonical.envoy.annotations.resource", "udpa.annotations.migrate", - 
"udpa.annotations.security", "udpa.annotations.status", "udpa.annotations.sensitive") + "google.api.annotations", "validate.validate", "envoy_api.envoy.annotations.deprecation", + "envoy_api.envoy.annotations.resource", "udpa.annotations.migrate", "udpa.annotations.security", + "udpa.annotations.status", "udpa.annotations.sensitive", "udpa.annotations.versioning") class ProtoXformError(Exception): @@ -56,8 +55,6 @@ def visit_file(self, file_proto, type_context, services, msgs, enums): if existing_pkg_version_status == status_pb2.UNKNOWN and not pkg_version_status_exempt: raise ProtoXformError('package_version_status must be set in %s' % file_proto.name) # Only update package_version_status for .active_or_frozen.proto, - # migrate.version_upgrade_xform has taken care of next major version - # candidates. if self._active_or_frozen and not pkg_version_status_exempt: # Freeze if this is an active package with a next major version. Preserve # frozen status otherwise. @@ -81,16 +78,6 @@ def main(): '.active_or_frozen.proto', functools.partial(ProtoFormatVisitor, True), want_params=True), - plugin.OutputDescriptor( - '.next_major_version_candidate.proto', - functools.partial(ProtoFormatVisitor, False), - functools.partial(migrate.version_upgrade_xform, 2, False), - want_params=True), - plugin.OutputDescriptor( - '.next_major_version_candidate.envoy_internal.proto', - functools.partial(ProtoFormatVisitor, False), - functools.partial(migrate.version_upgrade_xform, 2, True), - want_params=True) ]) diff --git a/tools/protoxform/protoxform_test.sh b/tools/protoxform/protoxform_test.sh index 69cbc859aa520..0e6dad6960718 100755 --- a/tools/protoxform/protoxform_test.sh +++ b/tools/protoxform/protoxform_test.sh @@ -19,14 +19,3 @@ bazel build "${BAZEL_BUILD_OPTIONS[@]}" --//tools/api_proto_plugin:default_type_ //tools/testdata/protoxform:fix_protos --aspects //tools/protoxform:protoxform.bzl%protoxform_aspect --output_groups=proto bazel build "${BAZEL_BUILD_OPTIONS[@]}" 
//tools/protoxform:protoprint ./tools/protoxform/protoxform_test_helper.py fix "${PROTO_TARGETS[@]}" - -# protoxform freeze test cases -PROTO_TARGETS=() -protos=$(bazel query "labels(srcs, labels(deps, //tools/testdata/protoxform:freeze_protos))") -while read -r line; do PROTO_TARGETS+=("$line"); done \ - <<< "$protos" -bazel build "${BAZEL_BUILD_OPTIONS[@]}" --//tools/api_proto_plugin:default_type_db_target=//tools/testdata/protoxform:freeze_protos \ - --//tools/api_proto_plugin:extra_args=freeze \ - //tools/testdata/protoxform:freeze_protos --aspects //tools/protoxform:protoxform.bzl%protoxform_aspect --output_groups=proto -bazel build "${BAZEL_BUILD_OPTIONS[@]}" //tools/protoxform:protoprint -./tools/protoxform/protoxform_test_helper.py freeze "${PROTO_TARGETS[@]}" diff --git a/tools/protoxform/protoxform_test_helper.py b/tools/protoxform/protoxform_test_helper.py index b36134ce35208..c5356ed24a3f3 100755 --- a/tools/protoxform/protoxform_test_helper.py +++ b/tools/protoxform/protoxform_test_helper.py @@ -134,8 +134,6 @@ def run(cmd, path, filename, version): for target in sys.argv[2:]: path, filename = path_and_filename(target) messages += run(cmd, path, filename, 'active_or_frozen') - messages += run(cmd, path, filename, 'next_major_version_candidate') - messages += run(cmd, path, filename, 'next_major_version_candidate.envoy_internal') if len(messages) == 0: logging.warning("PASS") diff --git a/tools/testdata/protoxform/BUILD b/tools/testdata/protoxform/BUILD index 6769f453f6ff7..b9e228605acd6 100644 --- a/tools/testdata/protoxform/BUILD +++ b/tools/testdata/protoxform/BUILD @@ -9,14 +9,3 @@ proto_library( "//tools/testdata/protoxform/envoy/v2:fix_protos", ], ) - -proto_library( - name = "freeze_protos", - visibility = ["//visibility:public"], - deps = [ - "//tools/testdata/protoxform/envoy/active_non_terminal/v2:freeze_protos", - "//tools/testdata/protoxform/envoy/active_terminal/v2:freeze_protos", - 
"//tools/testdata/protoxform/envoy/frozen/v2:freeze_protos", - "//tools/testdata/protoxform/envoy/frozen/v3:freeze_protos", - ], -) diff --git a/tools/testdata/protoxform/envoy/active_non_terminal/v2/BUILD b/tools/testdata/protoxform/envoy/active_non_terminal/v2/BUILD deleted file mode 100644 index 3031a25f6bb62..0000000000000 --- a/tools/testdata/protoxform/envoy/active_non_terminal/v2/BUILD +++ /dev/null @@ -1,13 +0,0 @@ -load("@rules_proto//proto:defs.bzl", "proto_library") - -licenses(["notice"]) # Apache 2 - -proto_library( - name = "freeze_protos", - srcs = ["active_non_terminal.proto"], - visibility = ["//visibility:public"], - deps = [ - "@com_github_cncf_udpa//udpa/annotations:pkg", - "@envoy_api//envoy/annotations:pkg", - ], -) diff --git a/tools/testdata/protoxform/envoy/active_non_terminal/v2/active_non_terminal.proto b/tools/testdata/protoxform/envoy/active_non_terminal/v2/active_non_terminal.proto deleted file mode 100644 index 241084d62a6ed..0000000000000 --- a/tools/testdata/protoxform/envoy/active_non_terminal/v2/active_non_terminal.proto +++ /dev/null @@ -1,13 +0,0 @@ -syntax = "proto3"; - -package envoy.active_non_terminal.v2; - -import "envoy/annotations/deprecation.proto"; -import "udpa/annotations/status.proto"; - -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -message ActiveNonTerminal { - int32 foo = 1 [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.7"]; - int32 bar = 2; -} diff --git a/tools/testdata/protoxform/envoy/active_non_terminal/v2/active_non_terminal.proto.active_or_frozen.gold b/tools/testdata/protoxform/envoy/active_non_terminal/v2/active_non_terminal.proto.active_or_frozen.gold deleted file mode 100644 index 50b6993f398fa..0000000000000 --- a/tools/testdata/protoxform/envoy/active_non_terminal/v2/active_non_terminal.proto.active_or_frozen.gold +++ /dev/null @@ -1,17 +0,0 @@ -syntax = "proto3"; - -package envoy.active_non_terminal.v2; - -import 
"envoy/annotations/deprecation.proto"; -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.active_non_terminal.v2"; -option java_outer_classname = "ActiveNonTerminalProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -message ActiveNonTerminal { - int32 foo = 1 [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.7"]; - - int32 bar = 2; -} diff --git a/tools/testdata/protoxform/envoy/active_non_terminal/v2/active_non_terminal.proto.next_major_version_candidate.envoy_internal.gold b/tools/testdata/protoxform/envoy/active_non_terminal/v2/active_non_terminal.proto.next_major_version_candidate.envoy_internal.gold deleted file mode 100644 index 34e9f4d7e6eaa..0000000000000 --- a/tools/testdata/protoxform/envoy/active_non_terminal/v2/active_non_terminal.proto.next_major_version_candidate.envoy_internal.gold +++ /dev/null @@ -1,22 +0,0 @@ -syntax = "proto3"; - -package envoy.active_non_terminal.v3; - -import "envoy/annotations/deprecation.proto"; -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; - -option java_package = "io.envoyproxy.envoy.active_non_terminal.v3"; -option java_outer_classname = "ActiveNonTerminalProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -message ActiveNonTerminal { - option (udpa.annotations.versioning).previous_message_type = - "envoy.active_non_terminal.v2.ActiveNonTerminal"; - - int32 hidden_envoy_deprecated_foo = 1 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.7"]; - - int32 bar = 2; -} diff --git a/tools/testdata/protoxform/envoy/active_non_terminal/v2/active_non_terminal.proto.next_major_version_candidate.gold b/tools/testdata/protoxform/envoy/active_non_terminal/v2/active_non_terminal.proto.next_major_version_candidate.gold deleted file mode 100644 index 5d369aefd96df..0000000000000 --- 
a/tools/testdata/protoxform/envoy/active_non_terminal/v2/active_non_terminal.proto.next_major_version_candidate.gold +++ /dev/null @@ -1,22 +0,0 @@ -syntax = "proto3"; - -package envoy.active_non_terminal.v3; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; - -option java_package = "io.envoyproxy.envoy.active_non_terminal.v3"; -option java_outer_classname = "ActiveNonTerminalProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -message ActiveNonTerminal { - option (udpa.annotations.versioning).previous_message_type = - "envoy.active_non_terminal.v2.ActiveNonTerminal"; - - reserved 1; - - reserved "foo"; - - int32 bar = 2; -} diff --git a/tools/testdata/protoxform/envoy/active_terminal/v2/BUILD b/tools/testdata/protoxform/envoy/active_terminal/v2/BUILD deleted file mode 100644 index db6244be9a36f..0000000000000 --- a/tools/testdata/protoxform/envoy/active_terminal/v2/BUILD +++ /dev/null @@ -1,10 +0,0 @@ -load("@rules_proto//proto:defs.bzl", "proto_library") - -licenses(["notice"]) # Apache 2 - -proto_library( - name = "freeze_protos", - srcs = ["active_terminal.proto"], - visibility = ["//visibility:public"], - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/tools/testdata/protoxform/envoy/active_terminal/v2/active_terminal.proto b/tools/testdata/protoxform/envoy/active_terminal/v2/active_terminal.proto deleted file mode 100644 index 1c5bdaca36832..0000000000000 --- a/tools/testdata/protoxform/envoy/active_terminal/v2/active_terminal.proto +++ /dev/null @@ -1,11 +0,0 @@ -syntax = "proto3"; - -package envoy.active_terminal.v2; - -import "udpa/annotations/status.proto"; - -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -message ActiveTerminal { - int32 foo = 1; -} diff --git a/tools/testdata/protoxform/envoy/active_terminal/v2/active_terminal.proto.active_or_frozen.gold 
b/tools/testdata/protoxform/envoy/active_terminal/v2/active_terminal.proto.active_or_frozen.gold deleted file mode 100644 index 5e49be1e63b4e..0000000000000 --- a/tools/testdata/protoxform/envoy/active_terminal/v2/active_terminal.proto.active_or_frozen.gold +++ /dev/null @@ -1,14 +0,0 @@ -syntax = "proto3"; - -package envoy.active_terminal.v2; - -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.active_terminal.v2"; -option java_outer_classname = "ActiveTerminalProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -message ActiveTerminal { - int32 foo = 1; -} diff --git a/tools/testdata/protoxform/envoy/active_terminal/v2/active_terminal.proto.next_major_version_candidate.envoy_internal.gold b/tools/testdata/protoxform/envoy/active_terminal/v2/active_terminal.proto.next_major_version_candidate.envoy_internal.gold deleted file mode 100644 index e69de29bb2d1d..0000000000000 diff --git a/tools/testdata/protoxform/envoy/active_terminal/v2/active_terminal.proto.next_major_version_candidate.gold b/tools/testdata/protoxform/envoy/active_terminal/v2/active_terminal.proto.next_major_version_candidate.gold deleted file mode 100644 index e69de29bb2d1d..0000000000000 diff --git a/tools/testdata/protoxform/envoy/frozen/v2/BUILD b/tools/testdata/protoxform/envoy/frozen/v2/BUILD deleted file mode 100644 index a8556bddea134..0000000000000 --- a/tools/testdata/protoxform/envoy/frozen/v2/BUILD +++ /dev/null @@ -1,16 +0,0 @@ -load("@rules_proto//proto:defs.bzl", "proto_library") - -licenses(["notice"]) # Apache 2 - -proto_library( - name = "freeze_protos", - srcs = [ - "frozen.proto", - "frozen_versioned_deprecation.proto", - ], - visibility = ["//visibility:public"], - deps = [ - "@com_github_cncf_udpa//udpa/annotations:pkg", - "@envoy_api//envoy/annotations:pkg", - ], -) diff --git a/tools/testdata/protoxform/envoy/frozen/v2/frozen.proto 
b/tools/testdata/protoxform/envoy/frozen/v2/frozen.proto deleted file mode 100644 index defe7ff3eac40..0000000000000 --- a/tools/testdata/protoxform/envoy/frozen/v2/frozen.proto +++ /dev/null @@ -1,12 +0,0 @@ -syntax = "proto3"; - -package envoy.frozen.v2; - -import "udpa/annotations/status.proto"; - -option (udpa.annotations.file_status).package_version_status = FROZEN; - -message Frozen { - int32 foo = 1; - int32 bar = 2 [deprecated = true]; -} diff --git a/tools/testdata/protoxform/envoy/frozen/v2/frozen.proto.active_or_frozen.gold b/tools/testdata/protoxform/envoy/frozen/v2/frozen.proto.active_or_frozen.gold deleted file mode 100644 index 5086376ee4354..0000000000000 --- a/tools/testdata/protoxform/envoy/frozen/v2/frozen.proto.active_or_frozen.gold +++ /dev/null @@ -1,16 +0,0 @@ -syntax = "proto3"; - -package envoy.frozen.v2; - -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.frozen.v2"; -option java_outer_classname = "FrozenProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -message Frozen { - int32 foo = 1; - - int32 bar = 2 [deprecated = true]; -} diff --git a/tools/testdata/protoxform/envoy/frozen/v2/frozen.proto.next_major_version_candidate.envoy_internal.gold b/tools/testdata/protoxform/envoy/frozen/v2/frozen.proto.next_major_version_candidate.envoy_internal.gold deleted file mode 100644 index f67c7f33a3783..0000000000000 --- a/tools/testdata/protoxform/envoy/frozen/v2/frozen.proto.next_major_version_candidate.envoy_internal.gold +++ /dev/null @@ -1,21 +0,0 @@ -syntax = "proto3"; - -package envoy.frozen.v3; - -import "envoy/annotations/deprecation.proto"; -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; - -option java_package = "io.envoyproxy.envoy.frozen.v3"; -option java_outer_classname = "FrozenProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = 
NEXT_MAJOR_VERSION_CANDIDATE; - -message Frozen { - option (udpa.annotations.versioning).previous_message_type = "envoy.frozen.v2.Frozen"; - - int32 foo = 1; - - int32 hidden_envoy_deprecated_bar = 2 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.7"]; -} diff --git a/tools/testdata/protoxform/envoy/frozen/v2/frozen.proto.next_major_version_candidate.gold b/tools/testdata/protoxform/envoy/frozen/v2/frozen.proto.next_major_version_candidate.gold deleted file mode 100644 index 7c10c1313b27c..0000000000000 --- a/tools/testdata/protoxform/envoy/frozen/v2/frozen.proto.next_major_version_candidate.gold +++ /dev/null @@ -1,21 +0,0 @@ -syntax = "proto3"; - -package envoy.frozen.v3; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; - -option java_package = "io.envoyproxy.envoy.frozen.v3"; -option java_outer_classname = "FrozenProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -message Frozen { - option (udpa.annotations.versioning).previous_message_type = "envoy.frozen.v2.Frozen"; - - reserved 2; - - reserved "bar"; - - int32 foo = 1; -} diff --git a/tools/testdata/protoxform/envoy/frozen/v2/frozen_versioned_deprecation.proto b/tools/testdata/protoxform/envoy/frozen/v2/frozen_versioned_deprecation.proto deleted file mode 100644 index bf5e1efa73014..0000000000000 --- a/tools/testdata/protoxform/envoy/frozen/v2/frozen_versioned_deprecation.proto +++ /dev/null @@ -1,14 +0,0 @@ -syntax = "proto3"; - -package envoy.frozen.v2; - -import "envoy/annotations/deprecation.proto"; -import "udpa/annotations/status.proto"; - -option (udpa.annotations.file_status).package_version_status = FROZEN; - -message FrozenVersionedDeprecation { - int32 foo = 1; - int32 bar = 2 [deprecated = true]; - int32 baz = 3 [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.5"]; -} diff --git 
a/tools/testdata/protoxform/envoy/frozen/v2/frozen_versioned_deprecation.proto.active_or_frozen.gold b/tools/testdata/protoxform/envoy/frozen/v2/frozen_versioned_deprecation.proto.active_or_frozen.gold deleted file mode 100644 index 521f4c3eda9a9..0000000000000 --- a/tools/testdata/protoxform/envoy/frozen/v2/frozen_versioned_deprecation.proto.active_or_frozen.gold +++ /dev/null @@ -1,19 +0,0 @@ -syntax = "proto3"; - -package envoy.frozen.v2; - -import "envoy/annotations/deprecation.proto"; -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.frozen.v2"; -option java_outer_classname = "FrozenVersionedDeprecationProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = FROZEN; - -message FrozenVersionedDeprecation { - int32 foo = 1; - - int32 bar = 2 [deprecated = true]; - - int32 baz = 3 [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.5"]; -} diff --git a/tools/testdata/protoxform/envoy/frozen/v2/frozen_versioned_deprecation.proto.next_major_version_candidate.envoy_internal.gold b/tools/testdata/protoxform/envoy/frozen/v2/frozen_versioned_deprecation.proto.next_major_version_candidate.envoy_internal.gold deleted file mode 100644 index e82dc9bb6cee9..0000000000000 --- a/tools/testdata/protoxform/envoy/frozen/v2/frozen_versioned_deprecation.proto.next_major_version_candidate.envoy_internal.gold +++ /dev/null @@ -1,25 +0,0 @@ -syntax = "proto3"; - -package envoy.frozen.v3; - -import "envoy/annotations/deprecation.proto"; -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; - -option java_package = "io.envoyproxy.envoy.frozen.v3"; -option java_outer_classname = "FrozenVersionedDeprecationProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -message FrozenVersionedDeprecation { - option (udpa.annotations.versioning).previous_message_type = - 
"envoy.frozen.v2.FrozenVersionedDeprecation"; - - int32 foo = 1; - - int32 hidden_envoy_deprecated_bar = 2 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.7"]; - - int32 hidden_envoy_deprecated_baz = 3 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.5"]; -} diff --git a/tools/testdata/protoxform/envoy/frozen/v2/frozen_versioned_deprecation.proto.next_major_version_candidate.gold b/tools/testdata/protoxform/envoy/frozen/v2/frozen_versioned_deprecation.proto.next_major_version_candidate.gold deleted file mode 100644 index fa99251cf2793..0000000000000 --- a/tools/testdata/protoxform/envoy/frozen/v2/frozen_versioned_deprecation.proto.next_major_version_candidate.gold +++ /dev/null @@ -1,22 +0,0 @@ -syntax = "proto3"; - -package envoy.frozen.v3; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; - -option java_package = "io.envoyproxy.envoy.frozen.v3"; -option java_outer_classname = "FrozenVersionedDeprecationProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -message FrozenVersionedDeprecation { - option (udpa.annotations.versioning).previous_message_type = - "envoy.frozen.v2.FrozenVersionedDeprecation"; - - reserved 2, 3; - - reserved "bar", "baz"; - - int32 foo = 1; -} diff --git a/tools/testdata/protoxform/envoy/frozen/v3/BUILD b/tools/testdata/protoxform/envoy/frozen/v3/BUILD deleted file mode 100644 index 39fb4eabe7238..0000000000000 --- a/tools/testdata/protoxform/envoy/frozen/v3/BUILD +++ /dev/null @@ -1,13 +0,0 @@ -load("@rules_proto//proto:defs.bzl", "proto_library") - -licenses(["notice"]) # Apache 2 - -proto_library( - name = "freeze_protos", - srcs = [ - "frozen.proto", - "frozen_versioned_deprecation.proto", - ], - visibility = ["//visibility:public"], - deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], -) diff --git a/tools/testdata/protoxform/envoy/frozen/v3/frozen.proto 
b/tools/testdata/protoxform/envoy/frozen/v3/frozen.proto deleted file mode 100644 index 0e09acf92fe69..0000000000000 --- a/tools/testdata/protoxform/envoy/frozen/v3/frozen.proto +++ /dev/null @@ -1,12 +0,0 @@ -syntax = "proto3"; - -package envoy.frozen.v3; - -import "udpa/annotations/status.proto"; - -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -message Frozen { - int32 foo = 1; - reserved 2; -} diff --git a/tools/testdata/protoxform/envoy/frozen/v3/frozen.proto.active_or_frozen.gold b/tools/testdata/protoxform/envoy/frozen/v3/frozen.proto.active_or_frozen.gold deleted file mode 100644 index 23740e54e11f3..0000000000000 --- a/tools/testdata/protoxform/envoy/frozen/v3/frozen.proto.active_or_frozen.gold +++ /dev/null @@ -1,16 +0,0 @@ -syntax = "proto3"; - -package envoy.frozen.v3; - -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.frozen.v3"; -option java_outer_classname = "FrozenProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -message Frozen { - reserved 2; - - int32 foo = 1; -} diff --git a/tools/testdata/protoxform/envoy/frozen/v3/frozen.proto.next_major_version_candidate.envoy_internal.gold b/tools/testdata/protoxform/envoy/frozen/v3/frozen.proto.next_major_version_candidate.envoy_internal.gold deleted file mode 100644 index e69de29bb2d1d..0000000000000 diff --git a/tools/testdata/protoxform/envoy/frozen/v3/frozen.proto.next_major_version_candidate.gold b/tools/testdata/protoxform/envoy/frozen/v3/frozen.proto.next_major_version_candidate.gold deleted file mode 100644 index e69de29bb2d1d..0000000000000 diff --git a/tools/testdata/protoxform/envoy/frozen/v3/frozen_versioned_deprecation.proto b/tools/testdata/protoxform/envoy/frozen/v3/frozen_versioned_deprecation.proto deleted file mode 100644 index e95fc725c6b7a..0000000000000 --- a/tools/testdata/protoxform/envoy/frozen/v3/frozen_versioned_deprecation.proto +++ /dev/null @@ 
-1,12 +0,0 @@ -syntax = "proto3"; - -package envoy.frozen.v3; - -import "udpa/annotations/status.proto"; - -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -message FrozenVersionedDeprecation { - int32 foo = 1; - reserved 2, 3; -} diff --git a/tools/testdata/protoxform/envoy/frozen/v3/frozen_versioned_deprecation.proto.active_or_frozen.gold b/tools/testdata/protoxform/envoy/frozen/v3/frozen_versioned_deprecation.proto.active_or_frozen.gold deleted file mode 100644 index d280f1f632be2..0000000000000 --- a/tools/testdata/protoxform/envoy/frozen/v3/frozen_versioned_deprecation.proto.active_or_frozen.gold +++ /dev/null @@ -1,16 +0,0 @@ -syntax = "proto3"; - -package envoy.frozen.v3; - -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.frozen.v3"; -option java_outer_classname = "FrozenVersionedDeprecationProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -message FrozenVersionedDeprecation { - reserved 2, 3; - - int32 foo = 1; -} diff --git a/tools/testdata/protoxform/envoy/frozen/v3/frozen_versioned_deprecation.proto.next_major_version_candidate.envoy_internal.gold b/tools/testdata/protoxform/envoy/frozen/v3/frozen_versioned_deprecation.proto.next_major_version_candidate.envoy_internal.gold deleted file mode 100644 index e69de29bb2d1d..0000000000000 diff --git a/tools/testdata/protoxform/envoy/frozen/v3/frozen_versioned_deprecation.proto.next_major_version_candidate.gold b/tools/testdata/protoxform/envoy/frozen/v3/frozen_versioned_deprecation.proto.next_major_version_candidate.gold deleted file mode 100644 index e69de29bb2d1d..0000000000000 diff --git a/tools/testdata/protoxform/envoy/v2/BUILD b/tools/testdata/protoxform/envoy/v2/BUILD index 18cca27da4c6c..1a3bcb6e90b38 100644 --- a/tools/testdata/protoxform/envoy/v2/BUILD +++ b/tools/testdata/protoxform/envoy/v2/BUILD @@ -19,18 +19,3 @@ proto_library( "@envoy_api//envoy/api/v2:pkg", ], ) - 
-proto_library( - name = "freeze_protos", - srcs = [ - "active_non_terminal.proto", - "active_terminal.proto", - "frozen.proto", - ], - visibility = ["//visibility:public"], - deps = [ - "@com_github_cncf_udpa//udpa/annotations:pkg", - "@envoy_api//envoy/annotations:pkg", - "@envoy_api//envoy/api/v2:pkg", - ], -) diff --git a/tools/testdata/protoxform/envoy/v2/discovery_service.proto.next_major_version_candidate.envoy_internal.gold b/tools/testdata/protoxform/envoy/v2/discovery_service.proto.next_major_version_candidate.envoy_internal.gold deleted file mode 100644 index cd6b36941d926..0000000000000 --- a/tools/testdata/protoxform/envoy/v2/discovery_service.proto.next_major_version_candidate.envoy_internal.gold +++ /dev/null @@ -1,39 +0,0 @@ -syntax = "proto3"; - -package envoy.v3; - -import "envoy/api/v2/discovery.proto"; - -import "google/api/annotations.proto"; - -import "envoy/annotations/resource.proto"; -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; - -option java_package = "io.envoyproxy.envoy.v3"; -option java_outer_classname = "DiscoveryServiceProto"; -option java_multiple_files = true; -option java_generic_services = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -service SomeDiscoveryService { - option (envoy.annotations.resource).type = "envoy.v3.SomeResource"; - - rpc StreamSomething(stream api.v2.DiscoveryRequest) returns (stream api.v2.DiscoveryResponse) { - } - - rpc DeltaSomething(stream api.v2.DeltaDiscoveryRequest) - returns (stream api.v2.DeltaDiscoveryResponse) { - } - - rpc FetchSomething(api.v2.DiscoveryRequest) returns (api.v2.DiscoveryResponse) { - option (google.api.http).post = "/v3/discovery:some"; - option (google.api.http).body = "*"; - } -} - -message SomeResource { - option (udpa.annotations.versioning).previous_message_type = "envoy.v2.SomeResource"; - - string bar = 1; -} diff --git 
a/tools/testdata/protoxform/envoy/v2/discovery_service.proto.next_major_version_candidate.gold b/tools/testdata/protoxform/envoy/v2/discovery_service.proto.next_major_version_candidate.gold deleted file mode 100644 index cd6b36941d926..0000000000000 --- a/tools/testdata/protoxform/envoy/v2/discovery_service.proto.next_major_version_candidate.gold +++ /dev/null @@ -1,39 +0,0 @@ -syntax = "proto3"; - -package envoy.v3; - -import "envoy/api/v2/discovery.proto"; - -import "google/api/annotations.proto"; - -import "envoy/annotations/resource.proto"; -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; - -option java_package = "io.envoyproxy.envoy.v3"; -option java_outer_classname = "DiscoveryServiceProto"; -option java_multiple_files = true; -option java_generic_services = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -service SomeDiscoveryService { - option (envoy.annotations.resource).type = "envoy.v3.SomeResource"; - - rpc StreamSomething(stream api.v2.DiscoveryRequest) returns (stream api.v2.DiscoveryResponse) { - } - - rpc DeltaSomething(stream api.v2.DeltaDiscoveryRequest) - returns (stream api.v2.DeltaDiscoveryResponse) { - } - - rpc FetchSomething(api.v2.DiscoveryRequest) returns (api.v2.DiscoveryResponse) { - option (google.api.http).post = "/v3/discovery:some"; - option (google.api.http).body = "*"; - } -} - -message SomeResource { - option (udpa.annotations.versioning).previous_message_type = "envoy.v2.SomeResource"; - - string bar = 1; -} diff --git a/tools/testdata/protoxform/envoy/v2/fully_qualified_names.proto.next_major_version_candidate.envoy_internal.gold b/tools/testdata/protoxform/envoy/v2/fully_qualified_names.proto.next_major_version_candidate.envoy_internal.gold deleted file mode 100644 index fe6bb1585b87d..0000000000000 --- a/tools/testdata/protoxform/envoy/v2/fully_qualified_names.proto.next_major_version_candidate.envoy_internal.gold +++ /dev/null @@ -1,39 
+0,0 @@ -syntax = "proto3"; - -package envoy.external.v3; - -import "envoy/api/v2/core/base.proto"; - -import "google/protobuf/any.proto"; - -import "tools/testdata/protoxform/external/package_type.proto"; -import "tools/testdata/protoxform/external/root_type.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; - -option java_package = "io.envoyproxy.envoy.external.v3"; -option java_outer_classname = "FullyQualifiedNamesProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// Verifies normalization of fully-qualified type names. -// [#next-free-field: 8] -message UsesFullyQualifiedTypeNames { - option (udpa.annotations.versioning).previous_message_type = - "envoy.v2.UsesFullyQualifiedTypeNames"; - - api.v2.core.Locality another_envoy_type = 1; - - api.v2.core.Locality another_envoy_type_fqn = 2; - - google.protobuf.Any google_protobuf_any = 3; - - google.protobuf.Any google_protobuf_any_fqn = 4; - - .external.PackageLevelType external_package_level_type = 5; - - .external.PackageLevelType external_package_level_type_fqn = 6; - - .RootLevelType external_root_level_type_fqn = 7; -} diff --git a/tools/testdata/protoxform/envoy/v2/fully_qualified_names.proto.next_major_version_candidate.gold b/tools/testdata/protoxform/envoy/v2/fully_qualified_names.proto.next_major_version_candidate.gold deleted file mode 100644 index fe6bb1585b87d..0000000000000 --- a/tools/testdata/protoxform/envoy/v2/fully_qualified_names.proto.next_major_version_candidate.gold +++ /dev/null @@ -1,39 +0,0 @@ -syntax = "proto3"; - -package envoy.external.v3; - -import "envoy/api/v2/core/base.proto"; - -import "google/protobuf/any.proto"; - -import "tools/testdata/protoxform/external/package_type.proto"; -import "tools/testdata/protoxform/external/root_type.proto"; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; - -option 
java_package = "io.envoyproxy.envoy.external.v3"; -option java_outer_classname = "FullyQualifiedNamesProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -// Verifies normalization of fully-qualified type names. -// [#next-free-field: 8] -message UsesFullyQualifiedTypeNames { - option (udpa.annotations.versioning).previous_message_type = - "envoy.v2.UsesFullyQualifiedTypeNames"; - - api.v2.core.Locality another_envoy_type = 1; - - api.v2.core.Locality another_envoy_type_fqn = 2; - - google.protobuf.Any google_protobuf_any = 3; - - google.protobuf.Any google_protobuf_any_fqn = 4; - - .external.PackageLevelType external_package_level_type = 5; - - .external.PackageLevelType external_package_level_type_fqn = 6; - - .RootLevelType external_root_level_type_fqn = 7; -} diff --git a/tools/testdata/protoxform/envoy/v2/oneof.proto.next_major_version_candidate.envoy_internal.gold b/tools/testdata/protoxform/envoy/v2/oneof.proto.next_major_version_candidate.envoy_internal.gold deleted file mode 100644 index acd2fee1a9621..0000000000000 --- a/tools/testdata/protoxform/envoy/v2/oneof.proto.next_major_version_candidate.envoy_internal.gold +++ /dev/null @@ -1,25 +0,0 @@ -syntax = "proto3"; - -package envoy.v3; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; - -option java_package = "io.envoyproxy.envoy.v3"; -option java_outer_classname = "OneofProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -message OneofExample { - option (udpa.annotations.versioning).previous_message_type = "envoy.v2.OneofExample"; - - oneof baz_specifier { - string foo = 1; - } - - oneof bar_specifier { - string bar = 2; - - string blah = 3; - } -} diff --git a/tools/testdata/protoxform/envoy/v2/oneof.proto.next_major_version_candidate.gold 
b/tools/testdata/protoxform/envoy/v2/oneof.proto.next_major_version_candidate.gold deleted file mode 100644 index acd2fee1a9621..0000000000000 --- a/tools/testdata/protoxform/envoy/v2/oneof.proto.next_major_version_candidate.gold +++ /dev/null @@ -1,25 +0,0 @@ -syntax = "proto3"; - -package envoy.v3; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; - -option java_package = "io.envoyproxy.envoy.v3"; -option java_outer_classname = "OneofProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -message OneofExample { - option (udpa.annotations.versioning).previous_message_type = "envoy.v2.OneofExample"; - - oneof baz_specifier { - string foo = 1; - } - - oneof bar_specifier { - string bar = 2; - - string blah = 3; - } -} diff --git a/tools/testdata/protoxform/envoy/v2/package_move.proto.next_major_version_candidate.envoy_internal.gold b/tools/testdata/protoxform/envoy/v2/package_move.proto.next_major_version_candidate.envoy_internal.gold deleted file mode 100644 index e7aaa8085a3e5..0000000000000 --- a/tools/testdata/protoxform/envoy/v2/package_move.proto.next_major_version_candidate.envoy_internal.gold +++ /dev/null @@ -1,25 +0,0 @@ -syntax = "proto3"; - -package envoy.foo.v3; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; - -option java_package = "io.envoyproxy.envoy.foo.v3"; -option java_outer_classname = "PackageMoveProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -message Package { - option (udpa.annotations.versioning).previous_message_type = "envoy.v2.Package"; - - message Entry { - option (udpa.annotations.versioning).previous_message_type = "envoy.v2.Package.Entry"; - - string key = 1; - - string value = 2; - } - - repeated Entry entries = 1; -} diff --git 
a/tools/testdata/protoxform/envoy/v2/package_move.proto.next_major_version_candidate.gold b/tools/testdata/protoxform/envoy/v2/package_move.proto.next_major_version_candidate.gold deleted file mode 100644 index e7aaa8085a3e5..0000000000000 --- a/tools/testdata/protoxform/envoy/v2/package_move.proto.next_major_version_candidate.gold +++ /dev/null @@ -1,25 +0,0 @@ -syntax = "proto3"; - -package envoy.foo.v3; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; - -option java_package = "io.envoyproxy.envoy.foo.v3"; -option java_outer_classname = "PackageMoveProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -message Package { - option (udpa.annotations.versioning).previous_message_type = "envoy.v2.Package"; - - message Entry { - option (udpa.annotations.versioning).previous_message_type = "envoy.v2.Package.Entry"; - - string key = 1; - - string value = 2; - } - - repeated Entry entries = 1; -} diff --git a/tools/testdata/protoxform/envoy/v2/sample.proto.next_major_version_candidate.envoy_internal.gold b/tools/testdata/protoxform/envoy/v2/sample.proto.next_major_version_candidate.envoy_internal.gold deleted file mode 100644 index 46cf693236ef9..0000000000000 --- a/tools/testdata/protoxform/envoy/v2/sample.proto.next_major_version_candidate.envoy_internal.gold +++ /dev/null @@ -1,63 +0,0 @@ -syntax = "proto3"; - -package envoy.v3; - -import "envoy/annotations/deprecation.proto"; -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; - -option java_package = "io.envoyproxy.envoy.v3"; -option java_outer_classname = "SampleProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -enum SomeEnum { - hidden_envoy_deprecated_DEFAULT = 0 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version_enum) = "3.7"]; - FOO = 1; - 
hidden_envoy_deprecated_BAR = 2 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version_enum) = "3.7"]; - WOW = 3; - hidden_envoy_deprecated_OLD = 4 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version_enum) = "3.6"]; - hidden_envoy_deprecated_DEP = 5 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version_enum) = "3.7"]; - hidden_envoy_deprecated_VERY_OLD = 6 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version_enum) = "3.5"]; -} - -// [#next-free-field: 7] -message Sample { - option (udpa.annotations.versioning).previous_message_type = "envoy.v2.Sample"; - - enum DeprecateEnum { - option deprecated = true; - - FIRST = 0; - SECOND = 1; - } - - message Entry { - option (udpa.annotations.versioning).previous_message_type = "envoy.v2.Sample.Entry"; - - string key = 1; - - string value = 2; - } - - repeated Entry entries = 1; - - string hidden_envoy_deprecated_will_deprecated = 2 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.7"]; - - string renamed_component = 3; - - string hidden_envoy_deprecated_old_deprecated = 4 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.6"]; - - string hidden_envoy_deprecated_new_deprecated = 5 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.7"]; - - string hidden_envoy_deprecated_very_old_deprecated = 6 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.5"]; -} diff --git a/tools/testdata/protoxform/envoy/v2/sample.proto.next_major_version_candidate.gold b/tools/testdata/protoxform/envoy/v2/sample.proto.next_major_version_candidate.gold deleted file mode 100644 index 0c07d7a04cd06..0000000000000 --- a/tools/testdata/protoxform/envoy/v2/sample.proto.next_major_version_candidate.gold +++ /dev/null @@ -1,42 +0,0 @@ -syntax = "proto3"; - -package envoy.v3; - -import "udpa/annotations/status.proto"; -import "udpa/annotations/versioning.proto"; - -option java_package = 
"io.envoyproxy.envoy.v3"; -option java_outer_classname = "SampleProto"; -option java_multiple_files = true; -option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; - -enum SomeEnum { - reserved 2, 4, 5, 6; - - reserved "BAR", "OLD", "DEP", "VERY_OLD"; - - DEPRECATED_AND_UNAVAILABLE_DO_NOT_USE = 0 [deprecated = true]; - FOO = 1; - WOW = 3; -} - -// [#next-free-field: 7] -message Sample { - option (udpa.annotations.versioning).previous_message_type = "envoy.v2.Sample"; - - message Entry { - option (udpa.annotations.versioning).previous_message_type = "envoy.v2.Sample.Entry"; - - string key = 1; - - string value = 2; - } - - reserved 2, 4, 5, 6; - - reserved "will_deprecated", "old_deprecated", "new_deprecated", "very_old_deprecated"; - - repeated Entry entries = 1; - - string renamed_component = 3; -} diff --git a/tools/type_whisperer/BUILD b/tools/type_whisperer/BUILD index e7d9f455e7fbe..37eead8dc9066 100644 --- a/tools/type_whisperer/BUILD +++ b/tools/type_whisperer/BUILD @@ -56,7 +56,7 @@ py_binary( label_flag( name = "api_type_db_target", - build_setting_default = "@envoy_api_canonical//versioning:active_protos", + build_setting_default = "@envoy_api//versioning:active_protos", visibility = ["//visibility:public"], ) @@ -68,14 +68,14 @@ type_database( file_descriptor_set_text( name = "all_protos_pb_text", - deps = ["@envoy_api_canonical//:all_protos"], + deps = ["@envoy_api//:all_protos"], ) file_descriptor_set_text( name = "all_protos_with_ext_pb_text", with_external_deps = True, deps = [ - "@envoy_api_canonical//:all_protos", + "@envoy_api//:all_protos", ], ) diff --git a/tools/type_whisperer/file_descriptor_set_text.bzl b/tools/type_whisperer/file_descriptor_set_text.bzl index 18a5c2e720503..1ae43200956a3 100644 --- a/tools/type_whisperer/file_descriptor_set_text.bzl +++ b/tools/type_whisperer/file_descriptor_set_text.bzl @@ -29,7 +29,7 @@ file_descriptor_set_text = rule( doc = "List of all proto_library deps to be 
included.", ), "proto_repositories": attr.string_list( - default = ["envoy_api_canonical"], + default = ["envoy_api"], allow_empty = False, ), "with_external_deps": attr.bool( diff --git a/tools/type_whisperer/proto_cc_source.bzl b/tools/type_whisperer/proto_cc_source.bzl index 9c4522a502c6a..de6a18c5a5da6 100644 --- a/tools/type_whisperer/proto_cc_source.bzl +++ b/tools/type_whisperer/proto_cc_source.bzl @@ -24,7 +24,7 @@ proto_cc_source = rule( doc = "List of all text protos to be included.", ), "proto_repositories": attr.string_list( - default = ["envoy_api_canonical"], + default = ["envoy_api"], allow_empty = False, ), "_proto_cc_source_gen": attr.label( diff --git a/tools/type_whisperer/type_database.bzl b/tools/type_whisperer/type_database.bzl index 76c75c440ef40..bc0e63a1c2427 100644 --- a/tools/type_whisperer/type_database.bzl +++ b/tools/type_whisperer/type_database.bzl @@ -28,7 +28,7 @@ type_database = rule( doc = "List of all proto_library target to be included.", ), "proto_repositories": attr.string_list( - default = ["envoy_api_canonical"], + default = ["envoy_api"], allow_empty = False, ), "_type_db_gen": attr.label( diff --git a/tools/type_whisperer/typedb_gen.py b/tools/type_whisperer/typedb_gen.py index b89e3efdcbde1..5d418c3355912 100644 --- a/tools/type_whisperer/typedb_gen.py +++ b/tools/type_whisperer/typedb_gen.py @@ -10,6 +10,8 @@ from tools.type_whisperer.api_type_db_pb2 import TypeDb from tools.type_whisperer.types_pb2 import Types, TypeDescription +# TODO(htuch): cleanup this file, remove type upgrade, simplify. + # Regexes governing v3upgrades. TODO(htuch): The regex approach will have # to be rethought as we go beyond v3, this is WiP. 
TYPE_UPGRADE_REGEXES = [ @@ -174,16 +176,6 @@ def next_version_upgrade(type_name, type_map, next_version_upgrade_memo, visited type_desc = type_db.types[t] type_desc.qualified_package = type_map[t].qualified_package type_desc.proto_path = type_map[t].proto_path - if type_desc.qualified_package in next_versions_pkgs: - type_desc.next_version_type_name = upgraded_type(t, type_map[t]) - assert (type_desc.next_version_type_name != t) - next_proto_info[type_map[t].proto_path] = ( - type_map[type_desc.next_version_type_name].proto_path, - type_map[type_desc.next_version_type_name].qualified_package) - for proto_path, (next_proto_path, next_package) in sorted(next_proto_info.items()): - if not next_package.endswith('.v4alpha'): - type_db.next_version_protos[proto_path].proto_path = next_proto_path - type_db.next_version_protos[proto_path].qualified_package = next_package # Write out proto text. with open(out_path, 'w') as f: From 367244de85f022586849bfb2d5944ea936021b77 Mon Sep 17 00:00:00 2001 From: Takeshi Yoneda Date: Tue, 14 Sep 2021 04:09:26 +0900 Subject: [PATCH 028/121] stats: introduce CustomStatNamespaces. 
(#17357) Signed-off-by: Takeshi Yoneda Signed-off-by: gayang --- envoy/api/BUILD | 1 + envoy/api/api.h | 6 + envoy/stats/BUILD | 5 + envoy/stats/custom_stat_namespaces.h | 51 ++++++++ source/common/api/BUILD | 1 + source/common/api/api_impl.h | 4 + source/common/stats/BUILD | 12 ++ .../stats/custom_stat_namespaces_impl.cc | 31 +++++ .../stats/custom_stat_namespaces_impl.h | 25 ++++ .../extensions/access_loggers/wasm/config.cc | 2 + source/extensions/bootstrap/wasm/config.cc | 3 +- source/extensions/common/wasm/BUILD | 1 + source/extensions/common/wasm/context.cc | 13 +- source/extensions/common/wasm/stats_handler.h | 4 + source/extensions/common/wasm/wasm.cc | 7 +- source/extensions/common/wasm/wasm.h | 2 + source/extensions/filters/http/wasm/config.cc | 2 + .../extensions/filters/network/wasm/config.cc | 2 + source/extensions/stat_sinks/wasm/config.cc | 2 + source/server/admin/BUILD | 1 + source/server/admin/prometheus_stats.cc | 95 +++++++-------- source/server/admin/prometheus_stats.h | 30 ++--- source/server/admin/stats_handler.cc | 2 +- test/common/stats/BUILD | 8 ++ .../stats/custom_stat_namespaces_impl_test.cc | 34 ++++++ .../access_loggers/wasm/config_test.cc | 3 + test/extensions/bootstrap/wasm/config_test.cc | 3 + test/extensions/bootstrap/wasm/wasm_test.cc | 32 +++-- .../filters/http/common/fuzz/uber_filter.h | 3 + .../http/common/fuzz/uber_per_filter.cc | 2 + .../filters/http/wasm/config_test.cc | 2 + .../filters/network/wasm/config_test.cc | 2 + .../stats_sinks/wasm/config_test.cc | 3 + test/mocks/api/mocks.h | 1 + test/server/admin/prometheus_stats_test.cc | 113 +++++++++++------- 35 files changed, 378 insertions(+), 130 deletions(-) create mode 100644 envoy/stats/custom_stat_namespaces.h create mode 100644 source/common/stats/custom_stat_namespaces_impl.cc create mode 100644 source/common/stats/custom_stat_namespaces_impl.h create mode 100644 test/common/stats/custom_stat_namespaces_impl_test.cc diff --git a/envoy/api/BUILD b/envoy/api/BUILD index 
904e5fff75f8a..cbdc13440690c 100644 --- a/envoy/api/BUILD +++ b/envoy/api/BUILD @@ -17,6 +17,7 @@ envoy_cc_library( "//envoy/event:scaled_range_timer_manager_interface", "//envoy/filesystem:filesystem_interface", "//envoy/server:process_context_interface", + "//envoy/stats:custom_stat_namespaces_interface", "//envoy/thread:thread_interface", "@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto", ], diff --git a/envoy/api/api.h b/envoy/api/api.h index c83198beed8be..e567ac51c8e6d 100644 --- a/envoy/api/api.h +++ b/envoy/api/api.h @@ -10,6 +10,7 @@ #include "envoy/event/scaled_range_timer_manager.h" #include "envoy/filesystem/filesystem.h" #include "envoy/server/process_context.h" +#include "envoy/stats/custom_stat_namespaces.h" #include "envoy/stats/store.h" #include "envoy/thread/thread.h" @@ -89,6 +90,11 @@ class Api { * @return the bootstrap Envoy started with. */ virtual const envoy::config::bootstrap::v3::Bootstrap& bootstrap() const PURE; + + /** + * @return a reference to the Stats::CustomStatNamespaces. + */ + virtual Stats::CustomStatNamespaces& customStatNamespaces() PURE; }; using ApiPtr = std::unique_ptr; diff --git a/envoy/stats/BUILD b/envoy/stats/BUILD index 0516c76b1552b..ae9b06419d568 100644 --- a/envoy/stats/BUILD +++ b/envoy/stats/BUILD @@ -78,3 +78,8 @@ envoy_cc_library( hdrs = ["primitive_stats_macros.h"], deps = [":primitive_stats_interface"], ) + +envoy_cc_library( + name = "custom_stat_namespaces_interface", + hdrs = ["custom_stat_namespaces.h"], +) diff --git a/envoy/stats/custom_stat_namespaces.h b/envoy/stats/custom_stat_namespaces.h new file mode 100644 index 0000000000000..a966fb950a49e --- /dev/null +++ b/envoy/stats/custom_stat_namespaces.h @@ -0,0 +1,51 @@ +#pragma once + +#include "envoy/common/pure.h" + +#include "absl/container/flat_hash_set.h" + +namespace Envoy { +namespace Stats { + +/** + * CustomStatNamespaces manages custom stat namespaces. 
Custom stat namespaces are registered + * by extensions that create user-defined metrics, and these metrics are all prefixed + * by the namespace. For example, Wasm extension registers "wasmcustom" as a custom stat namespace, + * and all the metrics created by user Wasm programs are prefixed by "wasmcustom." internally. + * This is mainly for distinguishing these "custom metrics" defined outside Envoy codebase from + * the native metrics defined by Envoy codebase, and this way stat sinks are able to determine + * how to expose these two kinds of metrics. + * Note that the implementation will not be thread-safe so users of this class must be in the main + * thread. + */ +class CustomStatNamespaces { +public: + virtual ~CustomStatNamespaces() = default; + + /** + * @param name is the name to check. + * @return true if the given name is registered as a custom stat namespace, false otherwise. + */ + virtual bool registered(const absl::string_view name) const PURE; + + /** + * Used to register a custom namespace by extensions. + * @param name is the name to register. + */ + virtual void registerStatNamespace(const absl::string_view name) PURE; + + /** + * Strips the registered custom stat namespace from the given stat name's prefix if it lives in a + * registered custom stat namespace, and the stripped string is returned. Otherwise return + * nullopt. + * @param stat_name is the view to modify. If it is not in any custom registered namespaces, it + * will never be modified. + * @return the stripped string if stat_name has a registered custom stat namespace. Otherwise, + * return nullopt. 
+ */ + virtual absl::optional + stripRegisteredPrefix(const absl::string_view stat_name) const PURE; +}; + +} // namespace Stats +} // namespace Envoy diff --git a/source/common/api/BUILD b/source/common/api/BUILD index 950ea63a165cc..07443785ab89f 100644 --- a/source/common/api/BUILD +++ b/source/common/api/BUILD @@ -18,6 +18,7 @@ envoy_cc_library( "//source/common/common:thread_lib", "//source/common/event:dispatcher_lib", "//source/common/network:socket_lib", + "//source/common/stats:custom_stat_namespaces_lib", "@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto", ], ) diff --git a/source/common/api/api_impl.h b/source/common/api/api_impl.h index 9a9e1e3fad096..7c904aa9a24b2 100644 --- a/source/common/api/api_impl.h +++ b/source/common/api/api_impl.h @@ -10,6 +10,8 @@ #include "envoy/network/socket.h" #include "envoy/thread/thread.h" +#include "source/common/stats/custom_stat_namespaces_impl.h" + namespace Envoy { namespace Api { @@ -36,6 +38,7 @@ class Impl : public Api { TimeSource& timeSource() override { return time_system_; } Stats::Scope& rootScope() override { return store_; } Random::RandomGenerator& randomGenerator() override { return random_generator_; } + Stats::CustomStatNamespaces& customStatNamespaces() override { return custom_stat_namespaces_; } const envoy::config::bootstrap::v3::Bootstrap& bootstrap() const override { return bootstrap_; } ProcessContextOptRef processContext() override { return process_context_; } @@ -46,6 +49,7 @@ class Impl : public Api { Filesystem::Instance& file_system_; Random::RandomGenerator& random_generator_; const envoy::config::bootstrap::v3::Bootstrap& bootstrap_; + Stats::CustomStatNamespacesImpl custom_stat_namespaces_; ProcessContextOptRef process_context_; const Buffer::WatermarkFactorySharedPtr watermark_factory_; }; diff --git a/source/common/stats/BUILD b/source/common/stats/BUILD index f71156c712426..7e3ef609fba5b 100644 --- a/source/common/stats/BUILD +++ b/source/common/stats/BUILD @@ -24,6 +24,18 @@ 
envoy_cc_library( ], ) +envoy_cc_library( + name = "custom_stat_namespaces_lib", + srcs = ["custom_stat_namespaces_impl.cc"], + hdrs = ["custom_stat_namespaces_impl.h"], + deps = [ + "//envoy/stats:custom_stat_namespaces_interface", + "//source/common/common:assert_lib", + "//source/common/common:macros", + "//source/common/common:thread_lib", + ], +) + envoy_cc_library( name = "histogram_lib", srcs = ["histogram_impl.cc"], diff --git a/source/common/stats/custom_stat_namespaces_impl.cc b/source/common/stats/custom_stat_namespaces_impl.cc new file mode 100644 index 0000000000000..f59e52ef0078e --- /dev/null +++ b/source/common/stats/custom_stat_namespaces_impl.cc @@ -0,0 +1,31 @@ +#include "source/common/stats/custom_stat_namespaces_impl.h" + +#include "source/common/common/assert.h" +#include "source/common/common/thread.h" + +namespace Envoy { +namespace Stats { + +bool CustomStatNamespacesImpl::registered(const absl::string_view name) const { + ASSERT(Thread::MainThread::isMainThread()); + return namespaces_.find(name) != namespaces_.end(); +} + +void CustomStatNamespacesImpl::registerStatNamespace(const absl::string_view name) { + ASSERT(Thread::MainThread::isMainThread()); + namespaces_.insert(std::string(name)); +}; + +absl::optional +CustomStatNamespacesImpl::stripRegisteredPrefix(const absl::string_view stat_name) const { + ASSERT(Thread::MainThread::isMainThread()); + const auto pos = stat_name.find_first_of('.'); + if (pos != std::string::npos && registered(stat_name.substr(0, pos))) { + // Trim the custom namespace. 
+ return stat_name.substr(pos + 1); + } + return absl::nullopt; +}; + +} // namespace Stats +} // namespace Envoy diff --git a/source/common/stats/custom_stat_namespaces_impl.h b/source/common/stats/custom_stat_namespaces_impl.h new file mode 100644 index 0000000000000..0f90b28d5ba43 --- /dev/null +++ b/source/common/stats/custom_stat_namespaces_impl.h @@ -0,0 +1,25 @@ +#pragma once + +#include "envoy/stats/custom_stat_namespaces.h" + +#include "absl/container/flat_hash_set.h" + +namespace Envoy { +namespace Stats { + +class CustomStatNamespacesImpl : public CustomStatNamespaces { +public: + ~CustomStatNamespacesImpl() override = default; + + // CustomStatNamespaces + bool registered(const absl::string_view name) const override; + void registerStatNamespace(const absl::string_view name) override; + absl::optional + stripRegisteredPrefix(const absl::string_view stat_name) const override; + +private: + absl::flat_hash_set namespaces_; +}; + +} // namespace Stats +} // namespace Envoy diff --git a/source/extensions/access_loggers/wasm/config.cc b/source/extensions/access_loggers/wasm/config.cc index d200351dc227d..721b0c062f46a 100644 --- a/source/extensions/access_loggers/wasm/config.cc +++ b/source/extensions/access_loggers/wasm/config.cc @@ -48,6 +48,8 @@ AccessLog::InstanceSharedPtr WasmAccessLogFactory::createAccessLogInstance( fmt::format("Unable to create Wasm access log {}", plugin->name_)); } + context.api().customStatNamespaces().registerStatNamespace( + Extensions::Common::Wasm::CustomStatNamespace); return access_log; } diff --git a/source/extensions/bootstrap/wasm/config.cc b/source/extensions/bootstrap/wasm/config.cc index f2771f596814c..e87ddecebd34f 100644 --- a/source/extensions/bootstrap/wasm/config.cc +++ b/source/extensions/bootstrap/wasm/config.cc @@ -65,7 +65,8 @@ WasmFactory::createBootstrapExtension(const Protobuf::Message& config, auto typed_config = MessageUtil::downcastAndValidate( config, 
context.messageValidationContext().staticValidationVisitor()); - + context.api().customStatNamespaces().registerStatNamespace( + Extensions::Common::Wasm::CustomStatNamespace); return std::make_unique(typed_config, context); } diff --git a/source/extensions/common/wasm/BUILD b/source/extensions/common/wasm/BUILD index 6612ca0b49160..49cf9725d091e 100644 --- a/source/extensions/common/wasm/BUILD +++ b/source/extensions/common/wasm/BUILD @@ -33,6 +33,7 @@ envoy_cc_library( "//envoy/http:codes_interface", "//envoy/http:filter_interface", "//envoy/server:lifecycle_notifier_interface", + "//envoy/stats:custom_stat_namespaces_interface", "//envoy/thread_local:thread_local_object", "//envoy/upstream:cluster_manager_interface", "//source/common/config:datasource_lib", diff --git a/source/extensions/common/wasm/context.cc b/source/extensions/common/wasm/context.cc index 41cd1b7a84d81..07447110f2106 100644 --- a/source/extensions/common/wasm/context.cc +++ b/source/extensions/common/wasm/context.cc @@ -1237,23 +1237,30 @@ WasmResult Context::defineMetric(uint32_t metric_type, std::string_view name, // TODO: Consider rethinking the scoping policy as it does not help in this case. Stats::StatNameManagedStorage storage(toAbslStringView(name), wasm()->scope_->symbolTable()); Stats::StatName stat_name = storage.statName(); + // We prefix the given name with custom_stat_name_ so that these user-defined + // custom metrics can be distinguished from native Envoy metrics. 
if (type == MetricType::Counter) { auto id = wasm()->nextCounterMetricId(); - auto c = &wasm()->scope_->counterFromStatName(stat_name); + Stats::Counter* c = &Stats::Utility::counterFromElements( + *wasm()->scope_, {wasm()->custom_stat_namespace_, stat_name}); wasm()->counters_.emplace(id, c); *metric_id_ptr = id; return WasmResult::Ok; } if (type == MetricType::Gauge) { auto id = wasm()->nextGaugeMetricId(); - auto g = &wasm()->scope_->gaugeFromStatName(stat_name, Stats::Gauge::ImportMode::Accumulate); + Stats::Gauge* g = &Stats::Utility::gaugeFromStatNames( + *wasm()->scope_, {wasm()->custom_stat_namespace_, stat_name}, + Stats::Gauge::ImportMode::Accumulate); wasm()->gauges_.emplace(id, g); *metric_id_ptr = id; return WasmResult::Ok; } // (type == MetricType::Histogram) { auto id = wasm()->nextHistogramMetricId(); - auto h = &wasm()->scope_->histogramFromStatName(stat_name, Stats::Histogram::Unit::Unspecified); + Stats::Histogram* h = &Stats::Utility::histogramFromStatNames( + *wasm()->scope_, {wasm()->custom_stat_namespace_, stat_name}, + Stats::Histogram::Unit::Unspecified); wasm()->histograms_.emplace(id, h); *metric_id_ptr = id; return WasmResult::Ok; diff --git a/source/extensions/common/wasm/stats_handler.h b/source/extensions/common/wasm/stats_handler.h index f196f071e17a1..ad03f3c118209 100644 --- a/source/extensions/common/wasm/stats_handler.h +++ b/source/extensions/common/wasm/stats_handler.h @@ -15,6 +15,10 @@ namespace Extensions { namespace Common { namespace Wasm { +// The custom stat namespace which prepends all the user-defined metrics. +// Note that the prefix is removed from the final output of /stats endpoints. 
+constexpr absl::string_view CustomStatNamespace = "wasmcustom"; + #define CREATE_WASM_STATS(COUNTER, GAUGE) \ COUNTER(remote_load_cache_hits) \ COUNTER(remote_load_cache_negative_hits) \ diff --git a/source/extensions/common/wasm/wasm.cc b/source/extensions/common/wasm/wasm.cc index 290890f83dd4b..5afbb8a898014 100644 --- a/source/extensions/common/wasm/wasm.cc +++ b/source/extensions/common/wasm/wasm.cc @@ -78,7 +78,9 @@ Wasm::Wasm(WasmConfig& config, absl::string_view vm_key, const Stats::ScopeShare createWasmVm(config.config().vm_config().runtime()), config.config().vm_config().vm_id(), MessageUtil::anyToBytes(config.config().vm_config().configuration()), toStdStringView(vm_key), config.environmentVariables(), config.allowedCapabilities()), - scope_(scope), cluster_manager_(cluster_manager), dispatcher_(dispatcher), + scope_(scope), stat_name_pool_(scope_->symbolTable()), + custom_stat_namespace_(stat_name_pool_.add(CustomStatNamespace)), + cluster_manager_(cluster_manager), dispatcher_(dispatcher), time_source_(dispatcher.timeSource()), lifecycle_stats_handler_(LifecycleStatsHandler( scope, config.config().vm_config().runtime())) { lifecycle_stats_handler_.onEvent(WasmEvent::VmCreated); @@ -92,7 +94,8 @@ Wasm::Wasm(WasmHandleSharedPtr base_wasm_handle, Event::Dispatcher& dispatcher) "envoy.wasm.runtime.", toAbslStringView(base_wasm_handle->wasm()->wasm_vm()->runtime()))); }), - scope_(getWasm(base_wasm_handle)->scope_), + scope_(getWasm(base_wasm_handle)->scope_), stat_name_pool_(scope_->symbolTable()), + custom_stat_namespace_(stat_name_pool_.add(CustomStatNamespace)), cluster_manager_(getWasm(base_wasm_handle)->clusterManager()), dispatcher_(dispatcher), time_source_(dispatcher.timeSource()), lifecycle_stats_handler_(getWasm(base_wasm_handle)->lifecycle_stats_handler_) { diff --git a/source/extensions/common/wasm/wasm.h b/source/extensions/common/wasm/wasm.h index 82e6c29a5f418..3ef156ca77f72 100644 --- a/source/extensions/common/wasm/wasm.h +++ 
b/source/extensions/common/wasm/wasm.h @@ -98,6 +98,8 @@ class Wasm : public WasmBase, Logger::Loggable { proxy_wasm::WasmCallVoid<2> on_stats_update_; Stats::ScopeSharedPtr scope_; + Stats::StatNamePool stat_name_pool_; + const Stats::StatName custom_stat_namespace_; Upstream::ClusterManager& cluster_manager_; Event::Dispatcher& dispatcher_; Event::PostCb server_shutdown_post_cb_; diff --git a/source/extensions/filters/http/wasm/config.cc b/source/extensions/filters/http/wasm/config.cc index 06a02611e6cc7..e50184df677a9 100644 --- a/source/extensions/filters/http/wasm/config.cc +++ b/source/extensions/filters/http/wasm/config.cc @@ -16,6 +16,8 @@ namespace Wasm { Http::FilterFactoryCb WasmFilterConfig::createFilterFactoryFromProtoTyped( const envoy::extensions::filters::http::wasm::v3::Wasm& proto_config, const std::string&, Server::Configuration::FactoryContext& context) { + context.api().customStatNamespaces().registerStatNamespace( + Extensions::Common::Wasm::CustomStatNamespace); auto filter_config = std::make_shared(proto_config, context); return [filter_config](Http::FilterChainFactoryCallbacks& callbacks) -> void { auto filter = filter_config->createFilter(); diff --git a/source/extensions/filters/network/wasm/config.cc b/source/extensions/filters/network/wasm/config.cc index 925e538cb67eb..43045f7a53a16 100644 --- a/source/extensions/filters/network/wasm/config.cc +++ b/source/extensions/filters/network/wasm/config.cc @@ -16,6 +16,8 @@ namespace Wasm { Network::FilterFactoryCb WasmFilterConfig::createFilterFactoryFromProtoTyped( const envoy::extensions::filters::network::wasm::v3::Wasm& proto_config, Server::Configuration::FactoryContext& context) { + context.api().customStatNamespaces().registerStatNamespace( + Extensions::Common::Wasm::CustomStatNamespace); auto filter_config = std::make_shared(proto_config, context); return [filter_config](Network::FilterManager& filter_manager) -> void { auto filter = filter_config->createFilter(); diff --git 
a/source/extensions/stat_sinks/wasm/config.cc b/source/extensions/stat_sinks/wasm/config.cc index 1eec57ece7efa..96c9814505683 100644 --- a/source/extensions/stat_sinks/wasm/config.cc +++ b/source/extensions/stat_sinks/wasm/config.cc @@ -48,6 +48,8 @@ WasmSinkFactory::createStatsSink(const Protobuf::Message& proto_config, fmt::format("Unable to create Wasm Stat Sink {}", plugin->name_)); } + context.api().customStatNamespaces().registerStatNamespace( + Extensions::Common::Wasm::CustomStatNamespace); return wasm_sink; } diff --git a/source/server/admin/BUILD b/source/server/admin/BUILD index 19463acd7a35b..365a5b8109a9a 100644 --- a/source/server/admin/BUILD +++ b/source/server/admin/BUILD @@ -117,6 +117,7 @@ envoy_cc_library( hdrs = ["prometheus_stats.h"], deps = [ ":utils_lib", + "//envoy/stats:custom_stat_namespaces_interface", "//source/common/buffer:buffer_lib", "//source/common/stats:histogram_lib", ], diff --git a/source/server/admin/prometheus_stats.cc b/source/server/admin/prometheus_stats.cc index b0b01dc9759ac..8d343189fb250 100644 --- a/source/server/admin/prometheus_stats.cc +++ b/source/server/admin/prometheus_stats.cc @@ -11,10 +11,9 @@ namespace Server { namespace { +// TODO(mathetake) replace with re2 for speed and safety, +// and change the signature of sanitizeName so it accepts string_view. const std::regex& promRegex() { CONSTRUCT_ON_FIRST_USE(std::regex, "[^a-zA-Z0-9_]"); } -const std::regex& namespaceRegex() { - CONSTRUCT_ON_FIRST_USE(std::regex, "^[a-zA-Z_][a-zA-Z0-9]*$"); -} /** * Take a string and sanitize it according to Prometheus conventions. 
@@ -67,7 +66,7 @@ uint64_t outputStatType( const std::vector>& metrics, const std::function& generate_output, - absl::string_view type) { + absl::string_view type, const Stats::CustomStatNamespaces& custom_namespaces) { /* * From @@ -112,10 +111,16 @@ uint64_t outputStatType( groups[metric->tagExtractedStatName()].push_back(metric.get()); } + auto result = groups.size(); for (auto& group : groups) { - const std::string prefixed_tag_extracted_name = - PrometheusStatsFormatter::metricName(global_symbol_table.toString(group.first)); - response.add(fmt::format("# TYPE {0} {1}\n", prefixed_tag_extracted_name, type)); + const absl::optional prefixed_tag_extracted_name = + PrometheusStatsFormatter::metricName(global_symbol_table.toString(group.first), + custom_namespaces); + if (!prefixed_tag_extracted_name.has_value()) { + --result; + continue; + } + response.add(fmt::format("# TYPE {0} {1}\n", prefixed_tag_extracted_name.value(), type)); // Sort before producing the final output to satisfy the "preferred" ordering from the // prometheus spec: metrics will be sorted by their tags' textual representation, which will @@ -123,11 +128,11 @@ uint64_t outputStatType( std::sort(group.second.begin(), group.second.end(), MetricLessThan()); for (const auto& metric : group.second) { - response.add(generate_output(*metric, prefixed_tag_extracted_name)); + response.add(generate_output(*metric, prefixed_tag_extracted_name.value())); } response.add("\n"); } - return groups.size(); + return result; } /* @@ -176,10 +181,6 @@ std::string generateHistogramOutput(const Stats::ParentHistogram& histogram, return output; }; -absl::flat_hash_set& prometheusNamespaces() { - MUTABLE_CONSTRUCT_ON_FIRST_USE(absl::flat_hash_set); -} - } // namespace std::string PrometheusStatsFormatter::formattedTags(const std::vector& tags) { @@ -191,20 +192,30 @@ std::string PrometheusStatsFormatter::formattedTags(const std::vector +PrometheusStatsFormatter::metricName(const std::string& extracted_name, + const 
Stats::CustomStatNamespaces& custom_namespaces) { + const absl::optional custom_namespace_stripped = + custom_namespaces.stripRegisteredPrefix(extracted_name); + if (custom_namespace_stripped.has_value()) { + // This case the name has a custom namespace, and it is a custom metric. + const std::string sanitized_name = sanitizeName(std::string(custom_namespace_stripped.value())); + // We expose these metrics without modifying (e.g. without "envoy_"), + // so we have to check the "user-defined" stat name complies with the Prometheus naming + // convention. Specifically the name must start with the "[a-zA-Z_]" pattern. + // All the characters in sanitized_name are already in "[a-zA-Z0-9_]" pattern + // thanks to sanitizeName above, so the only thing we have to do is check + // if it does not start with digits. + if (sanitized_name.empty() || absl::ascii_isdigit(sanitized_name.front())) { + return absl::nullopt; + } return sanitized_name; } - // Add namespacing prefix to avoid conflicts, as per best practice: - // https://prometheus.io/docs/practices/naming/#metric-names - // Also, naming conventions on https://prometheus.io/docs/concepts/data_model/ - return absl::StrCat("envoy_", sanitized_name); + // If it does not have a custom namespace, add namespacing prefix to avoid conflicts, as per best + // practice: https://prometheus.io/docs/practices/naming/#metric-names Also, naming conventions on + // https://prometheus.io/docs/concepts/data_model/ + return absl::StrCat("envoy_", sanitizeName(extracted_name)); } // TODO(efimki): Add support of text readouts stats. 
@@ -212,38 +223,24 @@ uint64_t PrometheusStatsFormatter::statsAsPrometheus( const std::vector& counters, const std::vector& gauges, const std::vector& histograms, Buffer::Instance& response, - const bool used_only, const absl::optional& regex) { + const bool used_only, const absl::optional& regex, + const Stats::CustomStatNamespaces& custom_namespaces) { uint64_t metric_name_count = 0; - metric_name_count += outputStatType( - response, used_only, regex, counters, generateNumericOutput, "counter"); + metric_name_count += outputStatType(response, used_only, regex, counters, + generateNumericOutput, + "counter", custom_namespaces); - metric_name_count += outputStatType(response, used_only, regex, gauges, - generateNumericOutput, "gauge"); + metric_name_count += + outputStatType(response, used_only, regex, gauges, + generateNumericOutput, "gauge", custom_namespaces); - metric_name_count += outputStatType( - response, used_only, regex, histograms, generateHistogramOutput, "histogram"); + metric_name_count += outputStatType(response, used_only, regex, + histograms, generateHistogramOutput, + "histogram", custom_namespaces); return metric_name_count; } -bool PrometheusStatsFormatter::registerPrometheusNamespace(absl::string_view prometheus_namespace) { - if (std::regex_match(prometheus_namespace.begin(), prometheus_namespace.end(), - namespaceRegex())) { - return prometheusNamespaces().insert(std::string(prometheus_namespace)).second; - } - return false; -} - -bool PrometheusStatsFormatter::unregisterPrometheusNamespace( - absl::string_view prometheus_namespace) { - auto it = prometheusNamespaces().find(prometheus_namespace); - if (it == prometheusNamespaces().end()) { - return false; - } - prometheusNamespaces().erase(it); - return true; -} - } // namespace Server } // namespace Envoy diff --git a/source/server/admin/prometheus_stats.h b/source/server/admin/prometheus_stats.h index 6e45db166db5e..ea1d53a0ccb4f 100644 --- a/source/server/admin/prometheus_stats.h +++ 
b/source/server/admin/prometheus_stats.h @@ -4,6 +4,7 @@ #include #include "envoy/buffer/buffer.h" +#include "envoy/stats/custom_stat_namespaces.h" #include "envoy/stats/histogram.h" #include "envoy/stats/stats.h" @@ -25,7 +26,8 @@ class PrometheusStatsFormatter { const std::vector& gauges, const std::vector& histograms, Buffer::Instance& response, const bool used_only, - const absl::optional& regex); + const absl::optional& regex, + const Stats::CustomStatNamespaces& custom_namespaces); /** * Format the given tags, returning a string as a comma-separated list * of ="" pairs. @@ -33,26 +35,14 @@ class PrometheusStatsFormatter { static std::string formattedTags(const std::vector& tags); /** - * Format the given metric name, prefixed with "envoy_". + * Format the given metric name, and prefixed with "envoy_" if it does not have a custom + * stat namespace. If it has a custom stat namespace AND the name without the custom namespace + * has a valid prometheus namespace, the trimmed name is returned. + * Otherwise, return nullopt. */ - static std::string metricName(const std::string& extracted_name); - - /** - * Register a prometheus namespace, stats starting with the namespace will not be - * automatically prefixed with envoy namespace. - * This method must be called from the main thread. - * @returns bool if a new namespace is registered, false if the namespace is already - * registered or the namespace is invalid. - */ - static bool registerPrometheusNamespace(absl::string_view prometheus_namespace); - - /** - * Unregister a prometheus namespace registered by `registerPrometheusNamespace` - * This method must be called from the main thread. - * @returns bool if the Prometheus namespace is unregistered. false if the namespace - * wasn't registered. 
- */ - static bool unregisterPrometheusNamespace(absl::string_view prometheus_namespace); + static absl::optional + metricName(const std::string& extracted_name, + const Stats::CustomStatNamespaces& custom_namespace_factory); }; } // namespace Server diff --git a/source/server/admin/stats_handler.cc b/source/server/admin/stats_handler.cc index d8426f10abc57..0a3c4a5ea0d01 100644 --- a/source/server/admin/stats_handler.cc +++ b/source/server/admin/stats_handler.cc @@ -138,7 +138,7 @@ Http::Code StatsHandler::handlerPrometheusStats(absl::string_view path_and_query } PrometheusStatsFormatter::statsAsPrometheus(server_.stats().counters(), server_.stats().gauges(), server_.stats().histograms(), response, used_only, - regex); + regex, server_.api().customStatNamespaces()); return Http::Code::OK; } diff --git a/test/common/stats/BUILD b/test/common/stats/BUILD index cc3e4b21f360d..3427be0dcce23 100644 --- a/test/common/stats/BUILD +++ b/test/common/stats/BUILD @@ -22,6 +22,14 @@ envoy_cc_test( ], ) +envoy_cc_test( + name = "custom_stat_namespaces_impl_test", + srcs = ["custom_stat_namespaces_impl_test.cc"], + deps = [ + "//source/common/stats:custom_stat_namespaces_lib", + ], +) + envoy_cc_test( name = "isolated_store_impl_test", srcs = ["isolated_store_impl_test.cc"], diff --git a/test/common/stats/custom_stat_namespaces_impl_test.cc b/test/common/stats/custom_stat_namespaces_impl_test.cc new file mode 100644 index 0000000000000..0bc09bf4121b6 --- /dev/null +++ b/test/common/stats/custom_stat_namespaces_impl_test.cc @@ -0,0 +1,34 @@ +#include "source/common/stats/custom_stat_namespaces_impl.h" + +#include "gtest/gtest.h" + +namespace Envoy { +namespace Stats { + +TEST(CustomStatNamespacesImpl, Registration) { + CustomStatNamespacesImpl namespaces; + const std::string name = "foo"; + EXPECT_FALSE(namespaces.registered(name)); + namespaces.registerStatNamespace(name); + EXPECT_TRUE(namespaces.registered(name)); + EXPECT_FALSE(namespaces.registered("bar")); +} + 
+TEST(CustomStatNamespacesImpl, StripRegisteredPrefix) { + CustomStatNamespacesImpl namespaces; + namespaces.registerStatNamespace("foo"); + // namespace is not registered. + EXPECT_FALSE(namespaces.stripRegisteredPrefix("bar.my.value").has_value()); + EXPECT_FALSE(namespaces.stripRegisteredPrefix("foobar.my.value").has_value()); + // "." is not present in the stat name - we skip these cases. + EXPECT_FALSE(namespaces.stripRegisteredPrefix("foo").has_value()); + EXPECT_FALSE(namespaces.stripRegisteredPrefix("bar").has_value()); + // Should be stripped. + const absl::optional actual = + namespaces.stripRegisteredPrefix("foo.my.extension.metric"); + EXPECT_TRUE(actual.has_value()); + EXPECT_EQ(actual.value(), "my.extension.metric"); +} + +} // namespace Stats +} // namespace Envoy diff --git a/test/extensions/access_loggers/wasm/config_test.cc b/test/extensions/access_loggers/wasm/config_test.cc index 690834d707a84..f403bddf3ee73 100644 --- a/test/extensions/access_loggers/wasm/config_test.cc +++ b/test/extensions/access_loggers/wasm/config_test.cc @@ -92,6 +92,9 @@ TEST_P(WasmAccessLogConfigTest, CreateWasmFromWASM) { factory->createAccessLogInstance(config, std::move(filter), context); EXPECT_NE(nullptr, instance); EXPECT_NE(nullptr, dynamic_cast(instance.get())); + // Check if the custom stat namespace is registered during the initialization. 
+ EXPECT_TRUE(api->customStatNamespaces().registered("wasmcustom")); + Http::TestRequestHeaderMapImpl request_header; Http::TestResponseHeaderMapImpl response_header; Http::TestResponseTrailerMapImpl response_trailer; diff --git a/test/extensions/bootstrap/wasm/config_test.cc b/test/extensions/bootstrap/wasm/config_test.cc index 2995eedc00ebe..bacb5dc3d2cfe 100644 --- a/test/extensions/bootstrap/wasm/config_test.cc +++ b/test/extensions/bootstrap/wasm/config_test.cc @@ -84,6 +84,9 @@ TEST_P(WasmFactoryTest, CreateWasmFromWasm) { initializeWithConfig(config_); EXPECT_NE(extension_, nullptr); + + // Check if the custom stat namespace is registered during the initialization. + EXPECT_TRUE(api_->customStatNamespaces().registered("wasmcustom")); } TEST_P(WasmFactoryTest, CreateWasmFromWasmPerThread) { diff --git a/test/extensions/bootstrap/wasm/wasm_test.cc b/test/extensions/bootstrap/wasm/wasm_test.cc index f073c62893227..9286aec3d8b79 100644 --- a/test/extensions/bootstrap/wasm/wasm_test.cc +++ b/test/extensions/bootstrap/wasm/wasm_test.cc @@ -285,8 +285,10 @@ TEST_P(WasmNullTest, Stats) { EXPECT_CALL(*context, log_(spdlog::level::err, Eq("get histogram = Unsupported"))); EXPECT_TRUE(wasm_->configure(context, plugin_)); - EXPECT_EQ(scope_->counterFromString("test_counter").value(), 5); - EXPECT_EQ(scope_->gaugeFromString("test_gauge", Stats::Gauge::ImportMode::Accumulate).value(), 2); + EXPECT_EQ(scope_->counterFromString("wasmcustom.test_counter").value(), 5); + EXPECT_EQ(scope_->gaugeFromString("wasmcustom.test_gauge", Stats::Gauge::ImportMode::Accumulate) + .value(), + 2); } TEST_P(WasmNullTest, StatsHigherLevel) { @@ -312,11 +314,12 @@ TEST_P(WasmNullTest, StatsHigherLevel) { wasm_->setTimerPeriod(1, std::chrono::milliseconds(10)); wasm_->tickHandler(1); - EXPECT_EQ(scope_->counterFromString("counter_tag.test_tag.test_counter").value(), 5); - EXPECT_EQ( - scope_->gaugeFromString("gauge_int_tag.9.test_gauge", Stats::Gauge::ImportMode::Accumulate) - .value(), - 2); 
+ EXPECT_EQ(scope_->counterFromString("wasmcustom.counter_tag.test_tag.test_counter").value(), 5); + EXPECT_EQ(scope_ + ->gaugeFromString("wasmcustom.gauge_int_tag.9.test_gauge", + Stats::Gauge::ImportMode::Accumulate) + .value(), + 2); } TEST_P(WasmNullTest, StatsHighLevel) { @@ -346,13 +349,16 @@ TEST_P(WasmNullTest, StatsHighLevel) { // EXPECT_CALL(*context, log_(spdlog::level::err, Eq("stack_h = 3"))); context->onLog(); EXPECT_EQ( - scope_->counterFromString("string_tag.test_tag.int_tag.7.bool_tag.true.test_counter").value(), + scope_ + ->counterFromString("wasmcustom.string_tag.test_tag.int_tag.7.bool_tag.true.test_counter") + .value(), 5); - EXPECT_EQ(scope_ - ->gaugeFromString("string_tag1.test_tag1.string_tag2.test_tag2.test_gauge", - Stats::Gauge::ImportMode::Accumulate) - .value(), - 2); + EXPECT_EQ( + scope_ + ->gaugeFromString("wasmcustom.string_tag1.test_tag1.string_tag2.test_tag2.test_gauge", + Stats::Gauge::ImportMode::Accumulate) + .value(), + 2); } } // namespace Wasm diff --git a/test/extensions/filters/http/common/fuzz/uber_filter.h b/test/extensions/filters/http/common/fuzz/uber_filter.h index 3c9c0958438d2..30c0870e06aa4 100644 --- a/test/extensions/filters/http/common/fuzz/uber_filter.h +++ b/test/extensions/filters/http/common/fuzz/uber_filter.h @@ -1,5 +1,7 @@ #pragma once +#include "source/common/stats/custom_stat_namespaces_impl.h" + #include "test/extensions/filters/http/common/fuzz/http_filter_fuzzer.h" #include "test/fuzz/utility.h" #include "test/mocks/buffer/mocks.h" @@ -45,6 +47,7 @@ class UberFilterFuzzer : public HttpFilterFuzzer { envoy::config::core::v3::Metadata listener_metadata_; NiceMock stream_info_; TestScopedRuntime scoped_runtime_; + Stats::CustomStatNamespacesImpl custom_stat_namespaces_; // Filter constructed from the config. 
Http::StreamDecoderFilterSharedPtr decoder_filter_; diff --git a/test/extensions/filters/http/common/fuzz/uber_per_filter.cc b/test/extensions/filters/http/common/fuzz/uber_per_filter.cc index 366bcacb5b245..5a0265132bb28 100644 --- a/test/extensions/filters/http/common/fuzz/uber_per_filter.cc +++ b/test/extensions/filters/http/common/fuzz/uber_per_filter.cc @@ -136,6 +136,8 @@ void UberFilterFuzzer::perFilterSetup() { // Prepare expectations for WASM filter. ON_CALL(factory_context_, listenerMetadata()) .WillByDefault(testing::ReturnRef(listener_metadata_)); + ON_CALL(factory_context_.api_, customStatNamespaces()) + .WillByDefault(testing::ReturnRef(custom_stat_namespaces_)); } } // namespace HttpFilters diff --git a/test/extensions/filters/http/wasm/config_test.cc b/test/extensions/filters/http/wasm/config_test.cc index 5f6312fa02e20..52d3a737c98ff 100644 --- a/test/extensions/filters/http/wasm/config_test.cc +++ b/test/extensions/filters/http/wasm/config_test.cc @@ -151,6 +151,8 @@ TEST_P(WasmFilterConfigTest, YamlLoadFromFileWasm) { // Check if the context still holds a valid Wasm even after the factory is destroyed. EXPECT_TRUE(context); EXPECT_TRUE(context->wasm()); + // Check if the custom stat namespace is registered during the initialization. + EXPECT_TRUE(api_->customStatNamespaces().registered("wasmcustom")); } TEST_P(WasmFilterConfigTest, YamlLoadFromFileWasmFailOpenOk) { diff --git a/test/extensions/filters/network/wasm/config_test.cc b/test/extensions/filters/network/wasm/config_test.cc index 66132102bd929..e77a38efad53e 100644 --- a/test/extensions/filters/network/wasm/config_test.cc +++ b/test/extensions/filters/network/wasm/config_test.cc @@ -101,6 +101,8 @@ TEST_P(WasmNetworkFilterConfigTest, YamlLoadFromFileWasm) { // Check if the context still holds a valid Wasm even after the factory is destroyed. EXPECT_TRUE(context); EXPECT_TRUE(context->wasm()); + // Check if the custom stat namespace is registered during the initialization. 
+ EXPECT_TRUE(api_->customStatNamespaces().registered("wasmcustom")); } TEST_P(WasmNetworkFilterConfigTest, YamlLoadInlineWasm) { diff --git a/test/extensions/stats_sinks/wasm/config_test.cc b/test/extensions/stats_sinks/wasm/config_test.cc index 64a7d4333832d..342b25218a880 100644 --- a/test/extensions/stats_sinks/wasm/config_test.cc +++ b/test/extensions/stats_sinks/wasm/config_test.cc @@ -90,6 +90,9 @@ TEST_P(WasmStatSinkConfigTest, CreateWasmFromWASM) { initializeWithConfig(config_); EXPECT_NE(sink_, nullptr); + // Check if the custom stat namespace is registered during the initialization. + EXPECT_TRUE(api_->customStatNamespaces().registered("wasmcustom")); + NiceMock snapshot; sink_->flush(snapshot); NiceMock histogram; diff --git a/test/mocks/api/mocks.h b/test/mocks/api/mocks.h index ef6f02c999cbd..d2d54c13ff3f7 100644 --- a/test/mocks/api/mocks.h +++ b/test/mocks/api/mocks.h @@ -50,6 +50,7 @@ class MockApi : public Api { MOCK_METHOD(Random::RandomGenerator&, randomGenerator, ()); MOCK_METHOD(const envoy::config::bootstrap::v3::Bootstrap&, bootstrap, (), (const)); MOCK_METHOD(ProcessContextOptRef, processContext, ()); + MOCK_METHOD(Stats::CustomStatNamespaces&, customStatNamespaces, ()); testing::NiceMock file_system_; Event::GlobalTimeSystem time_system_; diff --git a/test/server/admin/prometheus_stats_test.cc b/test/server/admin/prometheus_stats_test.cc index 7824218e28bb7..92c40e7d386ca 100644 --- a/test/server/admin/prometheus_stats_test.cc +++ b/test/server/admin/prometheus_stats_test.cc @@ -1,5 +1,6 @@ #include +#include "source/common/stats/custom_stat_namespaces_impl.h" #include "source/server/admin/prometheus_stats.h" #include "test/mocks/stats/mocks.h" @@ -100,40 +101,47 @@ class PrometheusStatsFormatterTest : public testing::Test { }; TEST_F(PrometheusStatsFormatterTest, MetricName) { + Stats::CustomStatNamespacesImpl custom_namespaces; std::string raw = "vulture.eats-liver"; std::string expected = "envoy_vulture_eats_liver"; - auto actual = 
PrometheusStatsFormatter::metricName(raw); - EXPECT_EQ(expected, actual); + auto actual = PrometheusStatsFormatter::metricName(raw, custom_namespaces); + EXPECT_TRUE(actual.has_value()); + EXPECT_EQ(expected, actual.value()); } TEST_F(PrometheusStatsFormatterTest, SanitizeMetricName) { + Stats::CustomStatNamespacesImpl custom_namespaces; std::string raw = "An.artist.plays-violin@019street"; std::string expected = "envoy_An_artist_plays_violin_019street"; - auto actual = PrometheusStatsFormatter::metricName(raw); - EXPECT_EQ(expected, actual); + auto actual = PrometheusStatsFormatter::metricName(raw, custom_namespaces); + EXPECT_EQ(expected, actual.value()); } TEST_F(PrometheusStatsFormatterTest, SanitizeMetricNameDigitFirst) { + Stats::CustomStatNamespacesImpl custom_namespaces; std::string raw = "3.artists.play-violin@019street"; std::string expected = "envoy_3_artists_play_violin_019street"; - auto actual = PrometheusStatsFormatter::metricName(raw); - EXPECT_EQ(expected, actual); + auto actual = PrometheusStatsFormatter::metricName(raw, custom_namespaces); + EXPECT_TRUE(actual.has_value()); + EXPECT_EQ(expected, actual.value()); } -TEST_F(PrometheusStatsFormatterTest, NamespaceRegistry) { - std::string raw = "vulture.eats-liver"; +TEST_F(PrometheusStatsFormatterTest, CustomNamespace) { + Stats::CustomStatNamespacesImpl custom_namespaces; + custom_namespaces.registerStatNamespace("promstattest"); + std::string raw = "promstattest.vulture.eats-liver"; std::string expected = "vulture_eats_liver"; + auto actual = PrometheusStatsFormatter::metricName(raw, custom_namespaces); + EXPECT_TRUE(actual.has_value()); + EXPECT_EQ(expected, actual.value()); +} - EXPECT_FALSE(PrometheusStatsFormatter::registerPrometheusNamespace("3vulture")); - EXPECT_FALSE(PrometheusStatsFormatter::registerPrometheusNamespace(".vulture")); - - EXPECT_FALSE(PrometheusStatsFormatter::unregisterPrometheusNamespace("vulture")); - 
EXPECT_TRUE(PrometheusStatsFormatter::registerPrometheusNamespace("vulture")); - EXPECT_FALSE(PrometheusStatsFormatter::registerPrometheusNamespace("vulture")); - EXPECT_EQ(expected, PrometheusStatsFormatter::metricName(raw)); - EXPECT_TRUE(PrometheusStatsFormatter::unregisterPrometheusNamespace("vulture")); - - EXPECT_EQ("envoy_" + expected, PrometheusStatsFormatter::metricName(raw)); +TEST_F(PrometheusStatsFormatterTest, CustomNamespaceWithInvalidPromnamespace) { + Stats::CustomStatNamespacesImpl custom_namespaces; + custom_namespaces.registerStatNamespace("promstattest"); + std::string raw = "promstattest.1234abcd.eats-liver"; + auto actual = PrometheusStatsFormatter::metricName(raw, custom_namespaces); + EXPECT_FALSE(actual.has_value()); } TEST_F(PrometheusStatsFormatterTest, FormattedTags) { @@ -148,6 +156,7 @@ TEST_F(PrometheusStatsFormatterTest, FormattedTags) { } TEST_F(PrometheusStatsFormatterTest, MetricNameCollison) { + Stats::CustomStatNamespacesImpl custom_namespaces; // Create two counters and two gauges with each pair having the same name, // but having different tag names and values. @@ -163,12 +172,13 @@ TEST_F(PrometheusStatsFormatterTest, MetricNameCollison) { {{makeStat("another_tag_name_4"), makeStat("another_tag_4-value")}}); Buffer::OwnedImpl response; - auto size = PrometheusStatsFormatter::statsAsPrometheus(counters_, gauges_, histograms_, response, - false, absl::nullopt); + const uint64_t size = PrometheusStatsFormatter::statsAsPrometheus( + counters_, gauges_, histograms_, response, false, absl::nullopt, custom_namespaces); EXPECT_EQ(2UL, size); } TEST_F(PrometheusStatsFormatterTest, UniqueMetricName) { + Stats::CustomStatNamespacesImpl custom_namespaces; // Create two counters and two gauges, all with unique names. 
// statsAsPrometheus() should return four implying it found @@ -184,12 +194,13 @@ TEST_F(PrometheusStatsFormatterTest, UniqueMetricName) { {{makeStat("another_tag_name_4"), makeStat("another_tag_4-value")}}); Buffer::OwnedImpl response; - auto size = PrometheusStatsFormatter::statsAsPrometheus(counters_, gauges_, histograms_, response, - false, absl::nullopt); + const uint64_t size = PrometheusStatsFormatter::statsAsPrometheus( + counters_, gauges_, histograms_, response, false, absl::nullopt, custom_namespaces); EXPECT_EQ(4UL, size); } TEST_F(PrometheusStatsFormatterTest, HistogramWithNoValuesAndNoTags) { + Stats::CustomStatNamespacesImpl custom_namespaces; HistogramWrapper h1_cumulative; h1_cumulative.setHistogramValues(std::vector(0)); Stats::HistogramStatisticsImpl h1_cumulative_statistics(h1_cumulative.getHistogram()); @@ -200,8 +211,8 @@ TEST_F(PrometheusStatsFormatterTest, HistogramWithNoValuesAndNoTags) { addHistogram(histogram); Buffer::OwnedImpl response; - auto size = PrometheusStatsFormatter::statsAsPrometheus(counters_, gauges_, histograms_, response, - false, absl::nullopt); + const uint64_t size = PrometheusStatsFormatter::statsAsPrometheus( + counters_, gauges_, histograms_, response, false, absl::nullopt, custom_namespaces); EXPECT_EQ(1UL, size); const std::string expected_output = R"EOF(# TYPE envoy_histogram1 histogram @@ -234,6 +245,7 @@ envoy_histogram1_count{} 0 } TEST_F(PrometheusStatsFormatterTest, HistogramWithNonDefaultBuckets) { + Stats::CustomStatNamespacesImpl custom_namespaces; HistogramWrapper h1_cumulative; h1_cumulative.setHistogramValues(std::vector(0)); Stats::ConstSupportedBuckets buckets{10, 20}; @@ -245,8 +257,8 @@ TEST_F(PrometheusStatsFormatterTest, HistogramWithNonDefaultBuckets) { addHistogram(histogram); Buffer::OwnedImpl response; - auto size = PrometheusStatsFormatter::statsAsPrometheus(counters_, gauges_, histograms_, response, - false, absl::nullopt); + const uint64_t size = PrometheusStatsFormatter::statsAsPrometheus( 
+ counters_, gauges_, histograms_, response, false, absl::nullopt, custom_namespaces); EXPECT_EQ(1UL, size); const std::string expected_output = R"EOF(# TYPE envoy_histogram1 histogram @@ -262,6 +274,7 @@ envoy_histogram1_count{} 0 } TEST_F(PrometheusStatsFormatterTest, HistogramWithHighCounts) { + Stats::CustomStatNamespacesImpl custom_namespaces; HistogramWrapper h1_cumulative; // Force large counts to prove that the +Inf bucket doesn't overflow to scientific notation. @@ -279,8 +292,8 @@ TEST_F(PrometheusStatsFormatterTest, HistogramWithHighCounts) { addHistogram(histogram); Buffer::OwnedImpl response; - auto size = PrometheusStatsFormatter::statsAsPrometheus(counters_, gauges_, histograms_, response, - false, absl::nullopt); + const uint64_t size = PrometheusStatsFormatter::statsAsPrometheus( + counters_, gauges_, histograms_, response, false, absl::nullopt, custom_namespaces); EXPECT_EQ(1UL, size); const std::string expected_output = R"EOF(# TYPE envoy_histogram1 histogram @@ -313,14 +326,21 @@ envoy_histogram1_count{} 101100000 } TEST_F(PrometheusStatsFormatterTest, OutputWithAllMetricTypes) { + Stats::CustomStatNamespacesImpl custom_namespaces; + custom_namespaces.registerStatNamespace("promtest"); + addCounter("cluster.test_1.upstream_cx_total", {{makeStat("a.tag-name"), makeStat("a.tag-value")}}); addCounter("cluster.test_2.upstream_cx_total", {{makeStat("another_tag_name"), makeStat("another_tag-value")}}); + addCounter("promtest.myapp.test.foo", {{makeStat("tag_name"), makeStat("tag-value")}}); addGauge("cluster.test_3.upstream_cx_total", {{makeStat("another_tag_name_3"), makeStat("another_tag_3-value")}}); addGauge("cluster.test_4.upstream_cx_total", {{makeStat("another_tag_name_4"), makeStat("another_tag_4-value")}}); + addGauge("promtest.MYAPP.test.bar", {{makeStat("tag_name"), makeStat("tag-value")}}); + // Metric with invalid prometheus namespace in the custom metric must be excluded in the output. 
+ addGauge("promtest.1234abcd.test.bar", {{makeStat("tag_name"), makeStat("tag-value")}}); const std::vector h1_values = {50, 20, 30, 70, 100, 5000, 200}; HistogramWrapper h1_cumulative; @@ -335,9 +355,9 @@ TEST_F(PrometheusStatsFormatterTest, OutputWithAllMetricTypes) { EXPECT_CALL(*histogram1, cumulativeStatistics()).WillOnce(ReturnRef(h1_cumulative_statistics)); Buffer::OwnedImpl response; - auto size = PrometheusStatsFormatter::statsAsPrometheus(counters_, gauges_, histograms_, response, - false, absl::nullopt); - EXPECT_EQ(5UL, size); + const uint64_t size = PrometheusStatsFormatter::statsAsPrometheus( + counters_, gauges_, histograms_, response, false, absl::nullopt, custom_namespaces); + EXPECT_EQ(7UL, size); const std::string expected_output = R"EOF(# TYPE envoy_cluster_test_1_upstream_cx_total counter envoy_cluster_test_1_upstream_cx_total{a_tag_name="a.tag-value"} 0 @@ -345,12 +365,18 @@ envoy_cluster_test_1_upstream_cx_total{a_tag_name="a.tag-value"} 0 # TYPE envoy_cluster_test_2_upstream_cx_total counter envoy_cluster_test_2_upstream_cx_total{another_tag_name="another_tag-value"} 0 +# TYPE myapp_test_foo counter +myapp_test_foo{tag_name="tag-value"} 0 + # TYPE envoy_cluster_test_3_upstream_cx_total gauge envoy_cluster_test_3_upstream_cx_total{another_tag_name_3="another_tag_3-value"} 0 # TYPE envoy_cluster_test_4_upstream_cx_total gauge envoy_cluster_test_4_upstream_cx_total{another_tag_name_4="another_tag_4-value"} 0 +# TYPE MYAPP_test_bar gauge +MYAPP_test_bar{tag_name="tag-value"} 0 + # TYPE envoy_cluster_test_1_upstream_rq_time histogram envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="0.5"} 0 envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="1"} 0 @@ -385,6 +411,7 @@ envoy_cluster_test_1_upstream_rq_time_count{key1="value1",key2="value2"} 7 // should be sorted by their tags; the format specifies that it is preferred that metrics // are always grouped in the same order, and sorting is an easy 
way to ensure this. TEST_F(PrometheusStatsFormatterTest, OutputSortedByMetricName) { + Stats::CustomStatNamespacesImpl custom_namespaces; const std::vector h1_values = {50, 20, 30, 70, 100, 5000, 200}; HistogramWrapper h1_cumulative; h1_cumulative.setHistogramValues(h1_values); @@ -410,8 +437,8 @@ TEST_F(PrometheusStatsFormatterTest, OutputSortedByMetricName) { } Buffer::OwnedImpl response; - auto size = PrometheusStatsFormatter::statsAsPrometheus(counters_, gauges_, histograms_, response, - false, absl::nullopt); + const uint64_t size = PrometheusStatsFormatter::statsAsPrometheus( + counters_, gauges_, histograms_, response, false, absl::nullopt, custom_namespaces); EXPECT_EQ(6UL, size); const std::string expected_output = R"EOF(# TYPE envoy_cluster_upstream_cx_connect_fail counter @@ -576,6 +603,7 @@ envoy_cluster_upstream_rq_time_count{cluster="ccc"} 7 } TEST_F(PrometheusStatsFormatterTest, OutputWithUsedOnly) { + Stats::CustomStatNamespacesImpl custom_namespaces; addCounter("cluster.test_1.upstream_cx_total", {{makeStat("a.tag-name"), makeStat("a.tag-value")}}); addCounter("cluster.test_2.upstream_cx_total", @@ -598,8 +626,8 @@ TEST_F(PrometheusStatsFormatterTest, OutputWithUsedOnly) { EXPECT_CALL(*histogram1, cumulativeStatistics()).WillOnce(ReturnRef(h1_cumulative_statistics)); Buffer::OwnedImpl response; - auto size = PrometheusStatsFormatter::statsAsPrometheus(counters_, gauges_, histograms_, response, - true, absl::nullopt); + const uint64_t size = PrometheusStatsFormatter::statsAsPrometheus( + counters_, gauges_, histograms_, response, true, absl::nullopt, custom_namespaces); EXPECT_EQ(1UL, size); const std::string expected_output = R"EOF(# TYPE envoy_cluster_test_1_upstream_rq_time histogram @@ -632,6 +660,7 @@ envoy_cluster_test_1_upstream_rq_time_count{key1="value1",key2="value2"} 7 } TEST_F(PrometheusStatsFormatterTest, OutputWithUsedOnlyHistogram) { + Stats::CustomStatNamespacesImpl custom_namespaces; const std::vector h1_values = {}; 
HistogramWrapper h1_cumulative; h1_cumulative.setHistogramValues(h1_values); @@ -649,8 +678,8 @@ TEST_F(PrometheusStatsFormatterTest, OutputWithUsedOnlyHistogram) { EXPECT_CALL(*histogram1, cumulativeStatistics()).Times(0); Buffer::OwnedImpl response; - auto size = PrometheusStatsFormatter::statsAsPrometheus(counters_, gauges_, histograms_, - response, used_only, absl::nullopt); + const uint64_t size = PrometheusStatsFormatter::statsAsPrometheus( + counters_, gauges_, histograms_, response, used_only, absl::nullopt, custom_namespaces); EXPECT_EQ(0UL, size); } @@ -659,13 +688,14 @@ TEST_F(PrometheusStatsFormatterTest, OutputWithUsedOnlyHistogram) { EXPECT_CALL(*histogram1, cumulativeStatistics()).WillOnce(ReturnRef(h1_cumulative_statistics)); Buffer::OwnedImpl response; - auto size = PrometheusStatsFormatter::statsAsPrometheus(counters_, gauges_, histograms_, - response, used_only, absl::nullopt); + const uint64_t size = PrometheusStatsFormatter::statsAsPrometheus( + counters_, gauges_, histograms_, response, used_only, absl::nullopt, custom_namespaces); EXPECT_EQ(1UL, size); } } TEST_F(PrometheusStatsFormatterTest, OutputWithRegexp) { + Stats::CustomStatNamespacesImpl custom_namespaces; addCounter("cluster.test_1.upstream_cx_total", {{makeStat("a.tag-name"), makeStat("a.tag-value")}}); addCounter("cluster.test_2.upstream_cx_total", @@ -687,9 +717,10 @@ TEST_F(PrometheusStatsFormatterTest, OutputWithRegexp) { addHistogram(histogram1); Buffer::OwnedImpl response; - auto size = PrometheusStatsFormatter::statsAsPrometheus( + const uint64_t size = PrometheusStatsFormatter::statsAsPrometheus( counters_, gauges_, histograms_, response, false, - absl::optional{std::regex("cluster.test_1.upstream_cx_total")}); + absl::optional{std::regex("cluster.test_1.upstream_cx_total")}, + custom_namespaces); EXPECT_EQ(1UL, size); const std::string expected_output = From 799f04d94f9b6b0dfe1797bd5b55547ba3b872e2 Mon Sep 17 00:00:00 2001 From: alyssawilk Date: Mon, 13 Sep 2021 16:40:30 
-0400 Subject: [PATCH 029/121] dns_cache: tracking ttl but not yet using it (#17951) Signed-off-by: Alyssa Wilk Signed-off-by: gayang --- .../dynamic_forward_proxy/dns_cache_impl.cc | 124 ++++++++++++------ .../dynamic_forward_proxy/dns_cache_impl.h | 14 +- test/common/http/http2/codec_impl_test.cc | 2 +- .../dns_cache_impl_test.cc | 30 ++++- .../local_ratelimit/local_ratelimit_test.cc | 32 ++--- .../proxy_filter_integration_test.cc | 4 +- .../network/thrift_proxy/router_test.cc | 4 +- test/mocks/event/mocks.cc | 1 + test/mocks/event/mocks.h | 7 +- 9 files changed, 149 insertions(+), 69 deletions(-) diff --git a/source/extensions/common/dynamic_forward_proxy/dns_cache_impl.cc b/source/extensions/common/dynamic_forward_proxy/dns_cache_impl.cc index 604004c565cdf..64b091090c36b 100644 --- a/source/extensions/common/dynamic_forward_proxy/dns_cache_impl.cc +++ b/source/extensions/common/dynamic_forward_proxy/dns_cache_impl.cc @@ -194,25 +194,28 @@ void DnsCacheImpl::startCacheLoad(const std::string& host, uint16_t default_port return; } - const auto host_attributes = Http::Utility::parseAuthority(host); + primary_host = createHost(host, default_port); + startResolve(host, *primary_host); +} +DnsCacheImpl::PrimaryHostInfo* DnsCacheImpl::createHost(const std::string& host, + uint16_t default_port) { + const auto host_attributes = Http::Utility::parseAuthority(host); // TODO(mattklein123): Right now, the same host with different ports will become two // independent primary hosts with independent DNS resolutions. I'm not sure how much this will // matter, but we could consider collapsing these down and sharing the underlying DNS resolution. { absl::WriterMutexLock writer_lock{&primary_hosts_lock_}; - primary_host = primary_hosts_ - // try_emplace() is used here for direct argument forwarding. 
- .try_emplace(host, std::make_unique( - *this, std::string(host_attributes.host_), - host_attributes.port_.value_or(default_port), - host_attributes.is_ip_address_, - [this, host]() { onReResolve(host); }, - [this, host]() { onResolveTimeout(host); })) - .first->second.get(); + return primary_hosts_ + // try_emplace() is used here for direct argument forwarding. + .try_emplace(host, + std::make_unique( + *this, std::string(host_attributes.host_), + host_attributes.port_.value_or(default_port), + host_attributes.is_ip_address_, [this, host]() { onReResolve(host); }, + [this, host]() { onResolveTimeout(host); })) + .first->second.get(); } - - startResolve(host, *primary_host); } DnsCacheImpl::PrimaryHostInfo& DnsCacheImpl::getPrimaryHost(const std::string& host) { @@ -288,13 +291,15 @@ void DnsCacheImpl::startResolve(const std::string& host, PrimaryHostInfo& host_i void DnsCacheImpl::finishResolve(const std::string& host, Network::DnsResolver::ResolutionStatus status, - std::list&& response, bool from_cache) { + std::list&& response, + absl::optional resolution_time) { ASSERT(main_thread_dispatcher_.isThreadSafe()); ENVOY_LOG_EVENT(debug, "dns_cache_finish_resolve", "main thread resolve complete for host '{}': {}", host, accumulateToString(response, [](const auto& dns_response) { return dns_response.address_->asString(); })); + const bool from_cache = resolution_time.has_value(); // Functions like this one that modify primary_hosts_ are only called in the main thread so we // know it is safe to use the PrimaryHostInfo pointers outside of the lock. 
@@ -305,9 +310,19 @@ void DnsCacheImpl::finishResolve(const std::string& host, return primary_host_it->second.get(); }(); - const bool first_resolve = !primary_host_info->host_info_->firstResolveComplete(); - primary_host_info->timeout_timer_->disableTimer(); - primary_host_info->active_query_ = nullptr; + bool first_resolve = false; + + if (!from_cache) { + first_resolve = !primary_host_info->host_info_->firstResolveComplete(); + primary_host_info->timeout_timer_->disableTimer(); + primary_host_info->active_query_ = nullptr; + + if (status == Network::DnsResolver::ResolutionStatus::Failure) { + stats_.dns_query_failure_.inc(); + } else { + stats_.dns_query_success_.inc(); + } + } // If the DNS resolver successfully resolved with an empty response list, the dns cache does not // update. This ensures that a potentially previously resolved address does not stabilize back to @@ -317,12 +332,6 @@ void DnsCacheImpl::finishResolve(const std::string& host, primary_host_info->port_) : nullptr; - if (status == Network::DnsResolver::ResolutionStatus::Failure) { - stats_.dns_query_failure_.inc(); - } else { - stats_.dns_query_success_.inc(); - } - // Only the change the address if: // 1) The new address is valid && // 2a) The host doesn't yet have an address || @@ -333,11 +342,6 @@ void DnsCacheImpl::finishResolve(const std::string& host, bool address_changed = false; auto current_address = primary_host_info->host_info_->address(); if (new_address != nullptr && (current_address == nullptr || *current_address != *new_address)) { - if (!from_cache) { - addCacheEntry(host, new_address); - } - // TODO(alyssawilk) don't immediately push cached entries to threads. - // Only serve stale entries if a configured resolve timeout has fired. 
ENVOY_LOG(debug, "host '{}' address has changed", host); primary_host_info->host_info_->setAddress(new_address); runAddUpdateCallbacks(host, primary_host_info->host_info_); @@ -345,14 +349,30 @@ void DnsCacheImpl::finishResolve(const std::string& host, stats_.host_address_changed_.inc(); } - if (first_resolve || address_changed) { + if (!resolution_time.has_value()) { + resolution_time = main_thread_dispatcher_.timeSource().monotonicTime(); + } + if (new_address) { + // Update the cache entry and staleness any time the ttl changes. + if (!from_cache) { + addCacheEntry(host, new_address, response.front().ttl_); + } + primary_host_info->host_info_->updateStale(resolution_time.value(), response.front().ttl_); + } + + if (first_resolve) { primary_host_info->host_info_->setFirstResolveComplete(); + } + if (first_resolve || address_changed) { + // TODO(alyssawilk) only notify threads of stale results after a resolution + // timeout. notifyThreads(host, primary_host_info->host_info_); } // Kick off the refresh timer. // TODO(mattklein123): Consider jitter here. It may not be necessary since the initial host // is populated dynamically. + // TODO(alyssawilk) also consider TTL here. if (status == Network::DnsResolver::ResolutionStatus::Success) { failure_backoff_strategy_->reset(); primary_host_info->refresh_timer_->enableTimer(refresh_interval_); @@ -429,12 +449,16 @@ DnsCacheImpl::PrimaryHostInfo::~PrimaryHostInfo() { } void DnsCacheImpl::addCacheEntry(const std::string& host, - const Network::Address::InstanceConstSharedPtr& address) { + const Network::Address::InstanceConstSharedPtr& address, + const std::chrono::seconds ttl) { if (!key_value_store_) { return; } - // TODO(alyssawilk) cache data should include TTL, or some other indicator. 
- const std::string value = absl::StrCat(address->asString()); + MonotonicTime now = main_thread_dispatcher_.timeSource().monotonicTime(); + uint64_t seconds_since_epoch = + std::chrono::duration_cast(now.time_since_epoch()).count(); + const std::string value = + absl::StrCat(address->asString(), "|", ttl.count(), "|", seconds_since_epoch); key_value_store_->addOrUpdate(host, value); } @@ -455,18 +479,42 @@ void DnsCacheImpl::loadCacheEntries( key_value_store_ = factory.createStore(config.key_value_config(), validation_visitor_, main_thread_dispatcher_, file_system_); KeyValueStore::ConstIterateCb load = [this](const std::string& key, const std::string& value) { - auto address = Network::Utility::parseInternetAddressAndPortNoThrow(value); - if (address == nullptr) { + Network::Address::InstanceConstSharedPtr address; + const auto parts = StringUtil::splitToken(value, "|"); + std::chrono::seconds ttl(0); + absl::optional resolution_time; + if (parts.size() == 3) { + address = Network::Utility::parseInternetAddressAndPortNoThrow(std::string(parts[0])); + if (address == nullptr) { + ENVOY_LOG(warn, "{} is not a valid address", parts[0]); + } + uint64_t ttl_int; + if (absl::SimpleAtoi(parts[1], &ttl_int) && ttl_int != 0) { + ttl = std::chrono::seconds(ttl_int); + } else { + ENVOY_LOG(warn, "{} is not a valid ttl", parts[1]); + } + uint64_t epoch_int; + if (absl::SimpleAtoi(parts[2], &epoch_int)) { + MonotonicTime now = main_thread_dispatcher_.timeSource().monotonicTime(); + const std::chrono::seconds seconds_since_epoch = + std::chrono::duration_cast(now.time_since_epoch()); + resolution_time = main_thread_dispatcher_.timeSource().monotonicTime() - + (seconds_since_epoch - std::chrono::seconds(epoch_int)); + } + } else { + ENVOY_LOG(warn, "Incorrect number of tokens in the cache line"); + } + if (address == nullptr || ttl == std::chrono::seconds(0) || !resolution_time.has_value()) { ENVOY_LOG(warn, "Unable to parse cache line '{}'", value); return 
KeyValueStore::Iterate::Break; } stats_.cache_load_.inc(); std::list response; - // TODO(alyssawilk) change finishResolve to actually use the TTL rather than - // putting 0 here, return the remaining TTL or indicate the result is stale. - response.emplace_back(Network::DnsResponse(address, std::chrono::seconds(0) /* ttl */)); - startCacheLoad(key, address->ip()->port()); - finishResolve(key, Network::DnsResolver::ResolutionStatus::Success, std::move(response), true); + createHost(key, address->ip()->port()); + response.emplace_back(Network::DnsResponse(address, ttl)); + finishResolve(key, Network::DnsResolver::ResolutionStatus::Success, std::move(response), + resolution_time); return KeyValueStore::Iterate::Continue; }; key_value_store_->iterate(load); diff --git a/source/extensions/common/dynamic_forward_proxy/dns_cache_impl.h b/source/extensions/common/dynamic_forward_proxy/dns_cache_impl.h index 28614a0181736..4180313e09262 100644 --- a/source/extensions/common/dynamic_forward_proxy/dns_cache_impl.h +++ b/source/extensions/common/dynamic_forward_proxy/dns_cache_impl.h @@ -100,7 +100,8 @@ class DnsCacheImpl : public DnsCache, Logger::Loggable last_used_time_; + std::atomic stale_at_time_; bool first_resolve_complete_ ABSL_GUARDED_BY(resolve_lock_){false}; }; @@ -177,7 +182,8 @@ class DnsCacheImpl : public DnsCache, Logger::Loggable&& response, bool from_cache = false); + std::list&& response, + absl::optional resolution_time = {}); void runAddUpdateCallbacks(const std::string& host, const DnsHostInfoSharedPtr& host_info); void runRemoveCallbacks(const std::string& host); void notifyThreads(const std::string& host, const DnsHostInfoImplSharedPtr& resolved_info); @@ -186,10 +192,12 @@ class DnsCacheImpl : public DnsCache, Logger::LoggablenewStream(response_decoder_); - client_connection_.dispatcher_.time_system_.advanceTimeAsyncImpl(std::chrono::seconds(2)); + client_connection_.dispatcher_.globalTimeSystem().advanceTimeAsyncImpl(std::chrono::seconds(2)); 
EXPECT_CALL(*timeout_timer, enableTimer(_, _)).Times(0); EXPECT_CALL(request_decoder_, decodeHeaders_(_, true)); EXPECT_TRUE(request_encoder2->encodeHeaders(request_headers, true).ok()); diff --git a/test/extensions/common/dynamic_forward_proxy/dns_cache_impl_test.cc b/test/extensions/common/dynamic_forward_proxy/dns_cache_impl_test.cc index 5b823d55118f2..022a9b5e7ec6e 100644 --- a/test/extensions/common/dynamic_forward_proxy/dns_cache_impl_test.cc +++ b/test/extensions/common/dynamic_forward_proxy/dns_cache_impl_test.cc @@ -1025,6 +1025,9 @@ TEST(UtilityTest, PrepareDnsRefreshStrategy) { } TEST_F(DnsCacheImplTest, ResolveSuccessWithCaching) { + auto* time_source = new NiceMock(); + dispatcher_.time_system_.reset(time_source); + // Configure the cache. MockKeyValueStoreFactory factory; EXPECT_CALL(factory, createEmptyConfigProto()).WillRepeatedly(Invoke([]() { @@ -1064,14 +1067,14 @@ TEST_F(DnsCacheImplTest, ResolveSuccessWithCaching) { EXPECT_CALL(*timeout_timer, disableTimer()); // Make sure the store gets the first insert. - EXPECT_CALL(*store, addOrUpdate("foo.com", "10.0.0.1:80")); EXPECT_CALL(update_callbacks_, onDnsHostAddOrUpdate("foo.com", DnsHostInfoEquals("10.0.0.1:80", "foo.com", false))); + EXPECT_CALL(*store, addOrUpdate("foo.com", "10.0.0.1:80|30|0")); EXPECT_CALL(callbacks, onLoadDnsCacheComplete(DnsHostInfoEquals("10.0.0.1:80", "foo.com", false))); EXPECT_CALL(*resolve_timer, enableTimer(std::chrono::milliseconds(60000), _)); resolve_cb(Network::DnsResolver::ResolutionStatus::Success, - TestUtility::makeDnsResponse({"10.0.0.1"})); + TestUtility::makeDnsResponse({"10.0.0.1"}, std::chrono::seconds(30))); checkStats(1 /* attempt */, 1 /* success */, 0 /* failure */, 1 /* address changed */, 1 /* added */, 0 /* removed */, 1 /* num hosts */); @@ -1087,9 +1090,10 @@ TEST_F(DnsCacheImplTest, ResolveSuccessWithCaching) { // Address does not change. 
EXPECT_CALL(*timeout_timer, disableTimer()); + EXPECT_CALL(*store, addOrUpdate("foo.com", "10.0.0.1:80|30|0")); EXPECT_CALL(*resolve_timer, enableTimer(std::chrono::milliseconds(60000), _)); resolve_cb(Network::DnsResolver::ResolutionStatus::Success, - TestUtility::makeDnsResponse({"10.0.0.1"})); + TestUtility::makeDnsResponse({"10.0.0.1"}, std::chrono::seconds(30))); checkStats(2 /* attempt */, 2 /* success */, 0 /* failure */, 1 /* address changed */, 1 /* added */, 0 /* removed */, 1 /* num hosts */); @@ -1105,15 +1109,31 @@ TEST_F(DnsCacheImplTest, ResolveSuccessWithCaching) { EXPECT_CALL(*timeout_timer, disableTimer()); // Make sure the store gets the updated address. - EXPECT_CALL(*store, addOrUpdate("foo.com", "10.0.0.2:80")); EXPECT_CALL(update_callbacks_, onDnsHostAddOrUpdate("foo.com", DnsHostInfoEquals("10.0.0.2:80", "foo.com", false))); + EXPECT_CALL(*store, addOrUpdate("foo.com", "10.0.0.2:80|30|0")); EXPECT_CALL(*resolve_timer, enableTimer(std::chrono::milliseconds(60000), _)); resolve_cb(Network::DnsResolver::ResolutionStatus::Success, - TestUtility::makeDnsResponse({"10.0.0.2"})); + TestUtility::makeDnsResponse({"10.0.0.2"}, std::chrono::seconds(30))); checkStats(3 /* attempt */, 3 /* success */, 0 /* failure */, 2 /* address changed */, 1 /* added */, 0 /* removed */, 1 /* num hosts */); + + // Now do one more resolve, where the address does not change but the time + // does. + + // Re-resolve timer. + EXPECT_CALL(*timeout_timer, enableTimer(std::chrono::milliseconds(5000), nullptr)); + EXPECT_CALL(*resolver_, resolve("foo.com", _, _)) + .WillOnce(DoAll(SaveArg<2>(&resolve_cb), Return(&resolver_->active_query_))); + resolve_timer->invokeCallback(); + + // Address does not change. 
+ EXPECT_CALL(*timeout_timer, disableTimer()); + EXPECT_CALL(*store, addOrUpdate("foo.com", "10.0.0.2:80|40|0")); + EXPECT_CALL(*resolve_timer, enableTimer(std::chrono::milliseconds(60000), _)); + resolve_cb(Network::DnsResolver::ResolutionStatus::Success, + TestUtility::makeDnsResponse({"10.0.0.2"}, std::chrono::seconds(40))); } } // namespace diff --git a/test/extensions/filters/common/local_ratelimit/local_ratelimit_test.cc b/test/extensions/filters/common/local_ratelimit/local_ratelimit_test.cc index c857bbe74a1ca..3965930cfb336 100644 --- a/test/extensions/filters/common/local_ratelimit/local_ratelimit_test.cc +++ b/test/extensions/filters/common/local_ratelimit/local_ratelimit_test.cc @@ -247,8 +247,8 @@ TEST_F(LocalRateLimiterDescriptorImplTest, CasEdgeCasesDescriptor) { synchronizer().enable(); // Start a thread and start the fill callback. This will wait pre-CAS. - dispatcher_.time_system_.advanceTimeAndRun(std::chrono::milliseconds(100), dispatcher_, - Envoy::Event::Dispatcher::RunType::NonBlock); + dispatcher_.globalTimeSystem().advanceTimeAndRun(std::chrono::milliseconds(100), dispatcher_, + Envoy::Event::Dispatcher::RunType::NonBlock); synchronizer().waitOn("on_fill_timer_pre_cas"); std::thread t1([&] { EXPECT_CALL(*fill_timer_, enableTimer(std::chrono::milliseconds(50), nullptr)); @@ -296,8 +296,8 @@ TEST_F(LocalRateLimiterDescriptorImplTest, TokenBucketDescriptor2) { EXPECT_TRUE(rate_limiter_->requestAllowed(descriptor_)); EXPECT_FALSE(rate_limiter_->requestAllowed(descriptor_)); EXPECT_FALSE(rate_limiter_->requestAllowed(descriptor_)); - dispatcher_.time_system_.advanceTimeAndRun(std::chrono::milliseconds(100), dispatcher_, - Envoy::Event::Dispatcher::RunType::NonBlock); + dispatcher_.globalTimeSystem().advanceTimeAndRun(std::chrono::milliseconds(100), dispatcher_, + Envoy::Event::Dispatcher::RunType::NonBlock); } // Verify token bucket functionality with a single token. 
@@ -311,8 +311,8 @@ TEST_F(LocalRateLimiterDescriptorImplTest, TokenBucketDescriptor) { EXPECT_FALSE(rate_limiter_->requestAllowed(descriptor_)); // 0 -> 1 tokens - dispatcher_.time_system_.advanceTimeAndRun(std::chrono::milliseconds(100), dispatcher_, - Envoy::Event::Dispatcher::RunType::NonBlock); + dispatcher_.globalTimeSystem().advanceTimeAndRun(std::chrono::milliseconds(100), dispatcher_, + Envoy::Event::Dispatcher::RunType::NonBlock); EXPECT_CALL(*fill_timer_, enableTimer(std::chrono::milliseconds(50), nullptr)); fill_timer_->invokeCallback(); @@ -321,14 +321,14 @@ TEST_F(LocalRateLimiterDescriptorImplTest, TokenBucketDescriptor) { EXPECT_FALSE(rate_limiter_->requestAllowed(descriptor_)); // 0 -> 1 tokens - dispatcher_.time_system_.advanceTimeAndRun(std::chrono::milliseconds(100), dispatcher_, - Envoy::Event::Dispatcher::RunType::NonBlock); + dispatcher_.globalTimeSystem().advanceTimeAndRun(std::chrono::milliseconds(100), dispatcher_, + Envoy::Event::Dispatcher::RunType::NonBlock); EXPECT_CALL(*fill_timer_, enableTimer(std::chrono::milliseconds(50), nullptr)); fill_timer_->invokeCallback(); // 1 -> 1 tokens - dispatcher_.time_system_.advanceTimeAndRun(std::chrono::milliseconds(100), dispatcher_, - Envoy::Event::Dispatcher::RunType::NonBlock); + dispatcher_.globalTimeSystem().advanceTimeAndRun(std::chrono::milliseconds(100), dispatcher_, + Envoy::Event::Dispatcher::RunType::NonBlock); EXPECT_CALL(*fill_timer_, enableTimer(std::chrono::milliseconds(50), nullptr)); fill_timer_->invokeCallback(); @@ -349,8 +349,8 @@ TEST_F(LocalRateLimiterDescriptorImplTest, TokenBucketMultipleTokensPerFillDescr EXPECT_FALSE(rate_limiter_->requestAllowed(descriptor_)); // 0 -> 2 tokens - dispatcher_.time_system_.advanceTimeAndRun(std::chrono::milliseconds(100), dispatcher_, - Envoy::Event::Dispatcher::RunType::NonBlock); + dispatcher_.globalTimeSystem().advanceTimeAndRun(std::chrono::milliseconds(100), dispatcher_, + Envoy::Event::Dispatcher::RunType::NonBlock); 
EXPECT_CALL(*fill_timer_, enableTimer(std::chrono::milliseconds(50), nullptr)); fill_timer_->invokeCallback(); @@ -358,8 +358,8 @@ TEST_F(LocalRateLimiterDescriptorImplTest, TokenBucketMultipleTokensPerFillDescr EXPECT_TRUE(rate_limiter_->requestAllowed(descriptor_)); // 1 -> 2 tokens - dispatcher_.time_system_.advanceTimeAndRun(std::chrono::milliseconds(100), dispatcher_, - Envoy::Event::Dispatcher::RunType::NonBlock); + dispatcher_.globalTimeSystem().advanceTimeAndRun(std::chrono::milliseconds(100), dispatcher_, + Envoy::Event::Dispatcher::RunType::NonBlock); EXPECT_CALL(*fill_timer_, enableTimer(std::chrono::milliseconds(50), nullptr)); fill_timer_->invokeCallback(); @@ -383,8 +383,8 @@ TEST_F(LocalRateLimiterDescriptorImplTest, TokenBucketDifferentDescriptorDiffere EXPECT_FALSE(rate_limiter_->requestAllowed(descriptor_)); // 0 -> 1 tokens for descriptor2_ - dispatcher_.time_system_.advanceTimeAndRun(std::chrono::milliseconds(50), dispatcher_, - Envoy::Event::Dispatcher::RunType::NonBlock); + dispatcher_.globalTimeSystem().advanceTimeAndRun(std::chrono::milliseconds(50), dispatcher_, + Envoy::Event::Dispatcher::RunType::NonBlock); EXPECT_CALL(*fill_timer_, enableTimer(std::chrono::milliseconds(50), nullptr)); fill_timer_->invokeCallback(); diff --git a/test/extensions/filters/http/dynamic_forward_proxy/proxy_filter_integration_test.cc b/test/extensions/filters/http/dynamic_forward_proxy/proxy_filter_integration_test.cc index a621e3c1ad15f..4172b76b30e10 100644 --- a/test/extensions/filters/http/dynamic_forward_proxy/proxy_filter_integration_test.cc +++ b/test/extensions/filters/http/dynamic_forward_proxy/proxy_filter_integration_test.cc @@ -122,7 +122,8 @@ name: envoy.clusters.dynamic_forward_proxy if (write_cache_file_) { std::string host = fmt::format("localhost:{}", fake_upstreams_[0]->localAddress()->ip()->port()); - std::string value = fake_upstreams_[0]->localAddress()->asString(); + std::string value = + 
absl::StrCat(fake_upstreams_[0]->localAddress()->asString(), "|1000000|0"); TestEnvironment::writeStringToFileForTest( "dns_cache.txt", absl::StrCat(host.length(), "\n", host, value.length(), "\n", value)); } @@ -359,7 +360,6 @@ TEST_P(ProxyFilterIntegrationTest, UseCacheFile) { sendRequestAndWaitForResponse(request_headers, 1024, default_response_headers_, 1024); checkSimpleRequestSuccess(1024, 1024, response.get()); EXPECT_EQ(1, test_server_->counter("dns_cache.foo.cache_load")->value()); - EXPECT_EQ(1, test_server_->counter("dns_cache.foo.dns_query_attempt")->value()); EXPECT_EQ(1, test_server_->counter("dns_cache.foo.host_added")->value()); } #endif diff --git a/test/extensions/filters/network/thrift_proxy/router_test.cc b/test/extensions/filters/network/thrift_proxy/router_test.cc index 3bcbb4bff7d52..22bcf7ca0910e 100644 --- a/test/extensions/filters/network/thrift_proxy/router_test.cc +++ b/test/extensions/filters/network/thrift_proxy/router_test.cc @@ -1122,7 +1122,7 @@ TEST_F(ThriftRouterTest, PoolTimeoutUpstreamTimeMeasurement) { startRequest(MessageType::Call); - dispatcher_.time_system_.advanceTimeWait(std::chrono::milliseconds(500)); + dispatcher_.globalTimeSystem().advanceTimeWait(std::chrono::milliseconds(500)); EXPECT_CALL(cluster_scope, histogram("thrift.upstream_rq_time", Stats::Histogram::Unit::Milliseconds)) .Times(0); @@ -1219,7 +1219,7 @@ TEST_P(ThriftRouterFieldTypeTest, CallWithUpstreamRqTime) { sendTrivialStruct(field_type); completeRequest(); - dispatcher_.time_system_.advanceTimeWait(std::chrono::milliseconds(500)); + dispatcher_.globalTimeSystem().advanceTimeWait(std::chrono::milliseconds(500)); EXPECT_CALL(cluster_scope, histogram("thrift.upstream_rq_time", Stats::Histogram::Unit::Milliseconds)); EXPECT_CALL(cluster_scope, diff --git a/test/mocks/event/mocks.cc b/test/mocks/event/mocks.cc index f70fc602d5480..9a8f04cc23d1c 100644 --- a/test/mocks/event/mocks.cc +++ b/test/mocks/event/mocks.cc @@ -19,6 +19,7 @@ namespace Event { 
MockDispatcher::MockDispatcher() : MockDispatcher("test_thread") {} MockDispatcher::MockDispatcher(const std::string& name) : name_(name) { + time_system_ = std::make_unique(); ON_CALL(*this, initializeStats(_, _)).WillByDefault(Return()); ON_CALL(*this, clearDeferredDeleteList()).WillByDefault(Invoke([this]() -> void { to_delete_.clear(); diff --git a/test/mocks/event/mocks.h b/test/mocks/event/mocks.h index 26b1559e5ff95..fe7bb325436a2 100644 --- a/test/mocks/event/mocks.h +++ b/test/mocks/event/mocks.h @@ -37,7 +37,10 @@ class MockDispatcher : public Dispatcher { // Dispatcher const std::string& name() override { return name_; } - TimeSource& timeSource() override { return time_system_; } + TimeSource& timeSource() override { return *time_system_; } + GlobalTimeSystem& globalTimeSystem() { + return *(dynamic_cast(time_system_.get())); + } Network::ServerConnectionPtr createServerConnection(Network::ConnectionSocketPtr&& socket, Network::TransportSocketPtr&& transport_socket, @@ -162,7 +165,7 @@ class MockDispatcher : public Dispatcher { MOCK_METHOD(void, updateApproximateMonotonicTime, ()); MOCK_METHOD(void, shutdown, ()); - GlobalTimeSystem time_system_; + std::unique_ptr time_system_; std::list to_delete_; testing::NiceMock buffer_factory_; bool allow_null_callback_{}; From 6ff09a8eb2485b24d8c014e9d0ed6fa1ccf9ed8f Mon Sep 17 00:00:00 2001 From: phlax Date: Tue, 14 Sep 2021 01:53:28 +0100 Subject: [PATCH 030/121] bazel: Add @envoy_repo target with VERSION AND PATH (#17293) Signed-off-by: Ryan Northey Signed-off-by: gayang --- bazel/repositories.bzl | 51 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 51 insertions(+) diff --git a/bazel/repositories.bzl b/bazel/repositories.bzl index ae04a8918212c..cb8967cd6faee 100644 --- a/bazel/repositories.bzl +++ b/bazel/repositories.bzl @@ -52,6 +52,54 @@ _default_envoy_build_config = repository_rule( }, ) +def _envoy_repo_impl(repository_ctx): + """This provides information about the Envoy repository + + You 
can access the current version and path to the repository in .bzl/BUILD + files as follows: + + ```starlark + load("@envoy_repo//:version.bzl", "VERSION") + ``` + + `VERSION` can be used to derive version-specific rules and can be passed + to the rules. + + The `VERSION` and also the local `PATH` to the repo can be accessed in + python libraries/binaries. By adding `@envoy_repo` to `deps` they become + importable through the `envoy_repo` namespace. + + As the `PATH` is local to the machine, it is generally only useful for + jobs that will run locally. + + This can be useful for example, for tooling that needs to check the + repository, or to run bazel queries that cannot be run within the + constraints of a `genquery`. + + """ + repo_path = repository_ctx.path(repository_ctx.attr.envoy_root).dirname + version = repository_ctx.read(repo_path.get_child("VERSION")).strip() + repository_ctx.file("version.bzl", "VERSION = '%s'" % version) + repository_ctx.file("__init__.py", "PATH = '%s'\nVERSION = '%s'" % (repo_path, version)) + repository_ctx.file("WORKSPACE", "") + repository_ctx.file("BUILD", """ +load("@rules_python//python:defs.bzl", "py_library") + +py_library(name = "envoy_repo", srcs = ["__init__.py"], visibility = ["//visibility:public"]) + +""") + +_envoy_repo = repository_rule( + implementation = _envoy_repo_impl, + attrs = { + "envoy_root": attr.label(default = "@envoy//:BUILD"), + }, +) + +def envoy_repo(): + if "envoy_repo" not in native.existing_rules().keys(): + _envoy_repo(name = "envoy_repo") + # Python dependencies. def _python_deps(): # TODO(htuch): convert these to pip3_import. @@ -100,6 +148,9 @@ def _rust_deps(): external_http_archive("rules_rust") def envoy_dependencies(skip_targets = []): + # Add a binding for repository variables. + envoy_repo() + # Setup Envoy developer tools. 
envoy_dev_binding() From 1dcd77932a4f8e5694d43438ff3b783b00605469 Mon Sep 17 00:00:00 2001 From: Takeshi Yoneda Date: Tue, 14 Sep 2021 10:08:54 +0900 Subject: [PATCH 031/121] access_logger: correctly throw exception for grpc logger (#17908) Signed-off-by: Takeshi Yoneda Signed-off-by: gayang --- envoy/upstream/cluster_manager.h | 6 ++ .../common/grpc/async_client_manager_impl.cc | 11 +-- .../common/upstream/cluster_manager_impl.cc | 10 +++ source/common/upstream/cluster_manager_impl.h | 2 + .../common/grpc_access_logger.h | 10 ++- .../access_loggers/grpc/http_config.cc | 4 + .../access_loggers/grpc/tcp_config.cc | 4 + .../grpc/async_client_manager_impl_test.cc | 38 ++------- .../upstream/cluster_manager_impl_test.cc | 45 ++++++++++ .../common/grpc_access_logger_test.cc | 2 +- test/extensions/access_loggers/grpc/BUILD | 14 +++- .../grpc/grpc_access_log_impl_test.cc | 2 +- .../access_loggers/grpc/http_config_test.cc | 38 ++++++--- .../access_loggers/grpc/tcp_config_test.cc | 83 +++++++++++++++++++ .../grpc_access_log_impl_test.cc | 2 +- test/mocks/upstream/cluster_manager.h | 1 + 16 files changed, 215 insertions(+), 57 deletions(-) create mode 100644 test/extensions/access_loggers/grpc/tcp_config_test.cc diff --git a/envoy/upstream/cluster_manager.h b/envoy/upstream/cluster_manager.h index 20ca0acca4df0..16f79117b674f 100644 --- a/envoy/upstream/cluster_manager.h +++ b/envoy/upstream/cluster_manager.h @@ -321,6 +321,12 @@ class ClusterManager { * Drain all connection pool connections owned by all clusters in the cluster manager. */ virtual void drainConnections() PURE; + + /** + * Check if the cluster is active and statically configured, and if not, throw an exception. + * @param cluster the cluster to check. 
+ */ + virtual void checkActiveStaticCluster(const std::string& cluster) PURE; }; using ClusterManagerPtr = std::unique_ptr; diff --git a/source/common/grpc/async_client_manager_impl.cc b/source/common/grpc/async_client_manager_impl.cc index 92c368ebb6f41..ff712fca14bf0 100644 --- a/source/common/grpc/async_client_manager_impl.cc +++ b/source/common/grpc/async_client_manager_impl.cc @@ -45,16 +45,7 @@ AsyncClientFactoryImpl::AsyncClientFactoryImpl(Upstream::ClusterManager& cm, if (skip_cluster_check) { return; } - - const std::string& cluster_name = config.envoy_grpc().cluster_name(); - auto all_clusters = cm_.clusters(); - const auto& it = all_clusters.active_clusters_.find(cluster_name); - if (it == all_clusters.active_clusters_.end()) { - throw EnvoyException(fmt::format("Unknown gRPC client cluster '{}'", cluster_name)); - } - if (it->second.get().info()->addedViaApi()) { - throw EnvoyException(fmt::format("gRPC client cluster '{}' is not static", cluster_name)); - } + cm_.checkActiveStaticCluster(config.envoy_grpc().cluster_name()); } AsyncClientManagerImpl::AsyncClientManagerImpl(Upstream::ClusterManager& cm, diff --git a/source/common/upstream/cluster_manager_impl.cc b/source/common/upstream/cluster_manager_impl.cc index 6e1f4e1b73587..d8d7507fd7948 100644 --- a/source/common/upstream/cluster_manager_impl.cc +++ b/source/common/upstream/cluster_manager_impl.cc @@ -971,6 +971,16 @@ void ClusterManagerImpl::drainConnections() { }); } +void ClusterManagerImpl::checkActiveStaticCluster(const std::string& cluster) { + const auto& it = active_clusters_.find(cluster); + if (it == active_clusters_.end()) { + throw EnvoyException(fmt::format("Unknown gRPC client cluster '{}'", cluster)); + } + if (it->second->added_via_api_) { + throw EnvoyException(fmt::format("gRPC client cluster '{}' is not static", cluster)); + } +} + void ClusterManagerImpl::postThreadLocalRemoveHosts(const Cluster& cluster, const HostVector& hosts_removed) { tls_.runOnAllThreads([name = 
cluster.info()->name(), diff --git a/source/common/upstream/cluster_manager_impl.h b/source/common/upstream/cluster_manager_impl.h index 1540defd97712..ce5480b1ea920 100644 --- a/source/common/upstream/cluster_manager_impl.h +++ b/source/common/upstream/cluster_manager_impl.h @@ -319,6 +319,8 @@ class ClusterManagerImpl : public ClusterManager, Logger::Loggablesecond; } + // We pass skip_cluster_check=true to factoryForGrpcService in order to avoid throwing + // exceptions in worker threads. Call sites of this getOrCreateLogger must check the cluster + // availability via ClusterManager::checkActiveStaticCluster beforehand, and throw exceptions in + // the main thread if necessary. + auto client = async_client_manager_.factoryForGrpcService(config.grpc_service(), scope_, true) + ->createUncachedRawAsyncClient(); const auto logger = createLogger( - config, - async_client_manager_.factoryForGrpcService(config.grpc_service(), scope_, false) - ->createUncachedRawAsyncClient(), + config, std::move(client), std::chrono::milliseconds(PROTOBUF_GET_MS_OR_DEFAULT(config, buffer_flush_interval, 1000)), PROTOBUF_GET_WRAPPED_OR_DEFAULT(config, buffer_size_bytes, 16384), cache.dispatcher_, scope); diff --git a/source/extensions/access_loggers/grpc/http_config.cc b/source/extensions/access_loggers/grpc/http_config.cc index 4d333f6d91fdc..6692a73f1d6d6 100644 --- a/source/extensions/access_loggers/grpc/http_config.cc +++ b/source/extensions/access_loggers/grpc/http_config.cc @@ -27,6 +27,10 @@ AccessLog::InstanceSharedPtr HttpGrpcAccessLogFactory::createAccessLogInstance( const envoy::extensions::access_loggers::grpc::v3::HttpGrpcAccessLogConfig&>( config, context.messageValidationVisitor()); + const auto service_config = proto_config.common_config().grpc_service(); + if (service_config.has_envoy_grpc()) { + context.clusterManager().checkActiveStaticCluster(service_config.envoy_grpc().cluster_name()); + } return std::make_shared(std::move(filter), proto_config, 
context.threadLocal(), GrpcCommon::getGrpcAccessLoggerCacheSingleton(context), context.scope()); diff --git a/source/extensions/access_loggers/grpc/tcp_config.cc b/source/extensions/access_loggers/grpc/tcp_config.cc index 185a76e934d8a..e259a2c5f779e 100644 --- a/source/extensions/access_loggers/grpc/tcp_config.cc +++ b/source/extensions/access_loggers/grpc/tcp_config.cc @@ -27,6 +27,10 @@ AccessLog::InstanceSharedPtr TcpGrpcAccessLogFactory::createAccessLogInstance( const envoy::extensions::access_loggers::grpc::v3::TcpGrpcAccessLogConfig&>( config, context.messageValidationVisitor()); + const auto service_config = proto_config.common_config().grpc_service(); + if (service_config.has_envoy_grpc()) { + context.clusterManager().checkActiveStaticCluster(service_config.envoy_grpc().cluster_name()); + } return std::make_shared(std::move(filter), proto_config, context.threadLocal(), GrpcCommon::getGrpcAccessLoggerCacheSingleton(context), context.scope()); diff --git a/test/common/grpc/async_client_manager_impl_test.cc b/test/common/grpc/async_client_manager_impl_test.cc index fc8a365f98f9c..c0fe6baddae8d 100644 --- a/test/common/grpc/async_client_manager_impl_test.cc +++ b/test/common/grpc/async_client_manager_impl_test.cc @@ -39,14 +39,7 @@ class AsyncClientManagerImplTest : public testing::Test { TEST_F(AsyncClientManagerImplTest, EnvoyGrpcOk) { envoy::config::core::v3::GrpcService grpc_service; grpc_service.mutable_envoy_grpc()->set_cluster_name("foo"); - - Upstream::ClusterManager::ClusterInfoMaps cluster_maps; - Upstream::MockClusterMockPrioritySet cluster; - cluster_maps.active_clusters_.emplace("foo", cluster); - EXPECT_CALL(cm_, clusters()).WillOnce(Return(cluster_maps)); - EXPECT_CALL(cluster, info()); - EXPECT_CALL(*cluster.info_, addedViaApi()); - + EXPECT_CALL(cm_, checkActiveStaticCluster("foo")).WillOnce(Return()); async_client_manager_.factoryForGrpcService(grpc_service, scope_, false); } @@ -89,30 +82,15 @@ TEST_F(AsyncClientManagerImplTest, 
EnableRawAsyncClientCache) { EXPECT_NE(foo_client1.get(), bar_client.get()); } -TEST_F(AsyncClientManagerImplTest, EnvoyGrpcUnknown) { - envoy::config::core::v3::GrpcService grpc_service; - grpc_service.mutable_envoy_grpc()->set_cluster_name("foo"); - - EXPECT_CALL(cm_, clusters()); - EXPECT_THROW_WITH_MESSAGE( - async_client_manager_.factoryForGrpcService(grpc_service, scope_, false), EnvoyException, - "Unknown gRPC client cluster 'foo'"); -} - -TEST_F(AsyncClientManagerImplTest, EnvoyGrpcDynamicCluster) { +TEST_F(AsyncClientManagerImplTest, EnvoyGrpcInvalid) { envoy::config::core::v3::GrpcService grpc_service; grpc_service.mutable_envoy_grpc()->set_cluster_name("foo"); - - Upstream::ClusterManager::ClusterInfoMap cluster_map; - Upstream::MockClusterMockPrioritySet cluster; - cluster_map.emplace("foo", cluster); - EXPECT_CALL(cm_, clusters()) - .WillOnce(Return(Upstream::ClusterManager::ClusterInfoMaps{cluster_map, {}})); - EXPECT_CALL(cluster, info()); - EXPECT_CALL(*cluster.info_, addedViaApi()).WillOnce(Return(true)); + EXPECT_CALL(cm_, checkActiveStaticCluster("foo")).WillOnce(Invoke([](const std::string&) { + throw EnvoyException("fake exception"); + })); EXPECT_THROW_WITH_MESSAGE( async_client_manager_.factoryForGrpcService(grpc_service, scope_, false), EnvoyException, - "gRPC client cluster 'foo' is not static"); + "fake exception"); } TEST_F(AsyncClientManagerImplTest, GoogleGrpc) { @@ -187,11 +165,11 @@ TEST_F(AsyncClientManagerImplTest, GoogleGrpcIllegalCharsInValue) { #endif } -TEST_F(AsyncClientManagerImplTest, EnvoyGrpcUnknownOk) { +TEST_F(AsyncClientManagerImplTest, EnvoyGrpcUnknownSkipClusterCheck) { envoy::config::core::v3::GrpcService grpc_service; grpc_service.mutable_envoy_grpc()->set_cluster_name("foo"); - EXPECT_CALL(cm_, clusters()).Times(0); + EXPECT_CALL(cm_, checkActiveStaticCluster(_)).Times(0); ASSERT_NO_THROW(async_client_manager_.factoryForGrpcService(grpc_service, scope_, true)); } diff --git 
a/test/common/upstream/cluster_manager_impl_test.cc b/test/common/upstream/cluster_manager_impl_test.cc index 06b776d8d03fb..bfadeb85b292b 100644 --- a/test/common/upstream/cluster_manager_impl_test.cc +++ b/test/common/upstream/cluster_manager_impl_test.cc @@ -4957,6 +4957,51 @@ TEST_F(ClusterManagerImplTest, ConnectionPoolPerDownstreamConnection) { Http::Protocol::Http11, &lb_context))); } +TEST_F(ClusterManagerImplTest, CheckActiveStaticCluster) { + const std::string yaml = R"EOF( + static_resources: + clusters: + - name: good + connect_timeout: 0.250s + lb_policy: ROUND_ROBIN + type: STATIC + load_assignment: + cluster_name: good + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 11001 + )EOF"; + create(parseBootstrapFromV3Yaml(yaml)); + const std::string added_via_api_yaml = R"EOF( + name: added_via_api + connect_timeout: 0.250s + type: STATIC + lb_policy: ROUND_ROBIN + load_assignment: + cluster_name: added_via_api + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 11001 + )EOF"; + EXPECT_TRUE( + cluster_manager_->addOrUpdateCluster(parseClusterFromV3Yaml(added_via_api_yaml), "v1")); + + EXPECT_EQ(2, cluster_manager_->clusters().active_clusters_.size()); + EXPECT_NO_THROW(cluster_manager_->checkActiveStaticCluster("good")); + EXPECT_THROW_WITH_MESSAGE(cluster_manager_->checkActiveStaticCluster("nonexist"), EnvoyException, + "Unknown gRPC client cluster 'nonexist'"); + EXPECT_THROW_WITH_MESSAGE(cluster_manager_->checkActiveStaticCluster("added_via_api"), + EnvoyException, "gRPC client cluster 'added_via_api' is not static"); +} + class PreconnectTest : public ClusterManagerImplTest { public: void initialize(float ratio) { diff --git a/test/extensions/access_loggers/common/grpc_access_logger_test.cc b/test/extensions/access_loggers/common/grpc_access_logger_test.cc index f2e125df17e06..168a749052163 100644 --- 
a/test/extensions/access_loggers/common/grpc_access_logger_test.cc +++ b/test/extensions/access_loggers/common/grpc_access_logger_test.cc @@ -336,7 +336,7 @@ class GrpcAccessLoggerCacheTest : public testing::Test { void expectClientCreation() { factory_ = new Grpc::MockAsyncClientFactory; async_client_ = new Grpc::MockAsyncClient; - EXPECT_CALL(async_client_manager_, factoryForGrpcService(_, _, false)) + EXPECT_CALL(async_client_manager_, factoryForGrpcService(_, _, true)) .WillOnce(Invoke([this](const envoy::config::core::v3::GrpcService&, Stats::Scope&, bool) { EXPECT_CALL(*factory_, createUncachedRawAsyncClient()).WillOnce(Invoke([this] { return Grpc::RawAsyncClientPtr{async_client_}; diff --git a/test/extensions/access_loggers/grpc/BUILD b/test/extensions/access_loggers/grpc/BUILD index 31616c4a41874..0484f14e7ddff 100644 --- a/test/extensions/access_loggers/grpc/BUILD +++ b/test/extensions/access_loggers/grpc/BUILD @@ -92,10 +92,22 @@ envoy_extension_cc_test( ], ) +envoy_extension_cc_test( + name = "tcp_config_test", + srcs = ["tcp_config_test.cc"], + extension_names = ["envoy.access_loggers.tcp_grpc"], + deps = [ + "//source/extensions/access_loggers/grpc:tcp_config", + "//test/mocks/server:factory_context_mocks", + "@envoy_api//envoy/config/core/v3:pkg_cc_proto", + "@envoy_api//envoy/extensions/access_loggers/grpc/v3:pkg_cc_proto", + ], +) + envoy_extension_cc_test( name = "tcp_grpc_access_log_integration_test", srcs = ["tcp_grpc_access_log_integration_test.cc"], - extension_names = ["envoy.access_loggers.http_grpc"], + extension_names = ["envoy.access_loggers.tcp_grpc"], deps = [ "//source/common/buffer:zero_copy_input_stream_lib", "//source/common/grpc:codec_lib", diff --git a/test/extensions/access_loggers/grpc/grpc_access_log_impl_test.cc b/test/extensions/access_loggers/grpc/grpc_access_log_impl_test.cc index 3e5e4f58f9008..737bbf3982f6f 100644 --- a/test/extensions/access_loggers/grpc/grpc_access_log_impl_test.cc +++ 
b/test/extensions/access_loggers/grpc/grpc_access_log_impl_test.cc @@ -128,7 +128,7 @@ class GrpcAccessLoggerCacheImplTest : public testing::Test { : async_client_(new Grpc::MockAsyncClient), factory_(new Grpc::MockAsyncClientFactory), logger_cache_(async_client_manager_, scope_, tls_, local_info_), grpc_access_logger_impl_test_helper_(local_info_, async_client_) { - EXPECT_CALL(async_client_manager_, factoryForGrpcService(_, _, false)) + EXPECT_CALL(async_client_manager_, factoryForGrpcService(_, _, true)) .WillOnce(Invoke([this](const envoy::config::core::v3::GrpcService&, Stats::Scope&, bool) { EXPECT_CALL(*factory_, createUncachedRawAsyncClient()).WillOnce(Invoke([this] { return Grpc::RawAsyncClientPtr{async_client_}; diff --git a/test/extensions/access_loggers/grpc/http_config_test.cc b/test/extensions/access_loggers/grpc/http_config_test.cc index c4d7db133a78a..933a4a69d967e 100644 --- a/test/extensions/access_loggers/grpc/http_config_test.cc +++ b/test/extensions/access_loggers/grpc/http_config_test.cc @@ -30,17 +30,37 @@ class HttpGrpcAccessLogConfigTest : public testing::Test { message_ = factory_->createEmptyConfigProto(); ASSERT_NE(nullptr, message_); + } - EXPECT_CALL(context_.cluster_manager_.async_client_manager_, factoryForGrpcService(_, _, _)) - .WillOnce(Invoke([](const envoy::config::core::v3::GrpcService&, Stats::Scope&, bool) { - return std::make_unique>(); + void run(const std::string cluster_name) { + const auto good_cluster = "good_cluster"; + EXPECT_CALL(context_.cluster_manager_, checkActiveStaticCluster(cluster_name)) + .WillOnce(Invoke([good_cluster](const std::string& cluster_name) { + if (cluster_name != good_cluster) { + throw EnvoyException("fake"); + } })); auto* common_config = http_grpc_access_log_.mutable_common_config(); common_config->set_log_name("foo"); - common_config->mutable_grpc_service()->mutable_envoy_grpc()->set_cluster_name("bar"); + 
common_config->mutable_grpc_service()->mutable_envoy_grpc()->set_cluster_name(cluster_name); common_config->set_transport_api_version(envoy::config::core::v3::ApiVersion::V3); TestUtility::jsonConvert(http_grpc_access_log_, *message_); + + if (cluster_name == good_cluster) { + EXPECT_CALL(context_.cluster_manager_.async_client_manager_, factoryForGrpcService(_, _, _)) + .WillOnce(Invoke([](const envoy::config::core::v3::GrpcService&, Stats::Scope&, bool) { + return std::make_unique>(); + })); + AccessLog::InstanceSharedPtr instance = + factory_->createAccessLogInstance(*message_, std::move(filter_), context_); + EXPECT_NE(nullptr, instance); + EXPECT_NE(nullptr, dynamic_cast(instance.get())); + } else { + EXPECT_THROW_WITH_MESSAGE( + factory_->createAccessLogInstance(*message_, std::move(filter_), context_), + EnvoyException, "fake"); + } } AccessLog::FilterPtr filter_; @@ -51,12 +71,10 @@ class HttpGrpcAccessLogConfigTest : public testing::Test { }; // Normal OK configuration. -TEST_F(HttpGrpcAccessLogConfigTest, Ok) { - AccessLog::InstanceSharedPtr instance = - factory_->createAccessLogInstance(*message_, std::move(filter_), context_); - EXPECT_NE(nullptr, instance); - EXPECT_NE(nullptr, dynamic_cast(instance.get())); -} +TEST_F(HttpGrpcAccessLogConfigTest, Ok) { run("good_cluster"); } + +// Wrong configuration with invalid clusters. 
+TEST_F(HttpGrpcAccessLogConfigTest, InvalidCluster) { run("invalid"); } } // namespace } // namespace HttpGrpc diff --git a/test/extensions/access_loggers/grpc/tcp_config_test.cc b/test/extensions/access_loggers/grpc/tcp_config_test.cc new file mode 100644 index 0000000000000..9889c337de476 --- /dev/null +++ b/test/extensions/access_loggers/grpc/tcp_config_test.cc @@ -0,0 +1,83 @@ +#include "envoy/config/core/v3/grpc_service.pb.h" +#include "envoy/extensions/access_loggers/grpc/v3/als.pb.h" +#include "envoy/registry/registry.h" +#include "envoy/server/access_log_config.h" +#include "envoy/stats/scope.h" + +#include "source/extensions/access_loggers/grpc/tcp_grpc_access_log_impl.h" + +#include "test/mocks/server/factory_context.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +using testing::_; +using testing::Invoke; + +namespace Envoy { +namespace Extensions { +namespace AccessLoggers { +namespace TcpGrpc { +namespace { + +class TcpGrpcAccessLogConfigTest : public testing::Test { +public: + void SetUp() override { + factory_ = + Registry::FactoryRegistry::getFactory( + "envoy.access_loggers.tcp_grpc"); + ASSERT_NE(nullptr, factory_); + + message_ = factory_->createEmptyConfigProto(); + ASSERT_NE(nullptr, message_); + } + + void run(const std::string cluster_name) { + const auto good_cluster = "good_cluster"; + EXPECT_CALL(context_.cluster_manager_, checkActiveStaticCluster(cluster_name)) + .WillOnce(Invoke([good_cluster](const std::string& cluster_name) { + if (cluster_name != good_cluster) { + throw EnvoyException("fake"); + } + })); + + auto* common_config = tcp_grpc_access_log_.mutable_common_config(); + common_config->set_log_name("foo"); + common_config->mutable_grpc_service()->mutable_envoy_grpc()->set_cluster_name(cluster_name); + common_config->set_transport_api_version(envoy::config::core::v3::ApiVersion::V3); + TestUtility::jsonConvert(tcp_grpc_access_log_, *message_); + + if (cluster_name == good_cluster) { + 
EXPECT_CALL(context_.cluster_manager_.async_client_manager_, factoryForGrpcService(_, _, _)) + .WillOnce(Invoke([](const envoy::config::core::v3::GrpcService&, Stats::Scope&, bool) { + return std::make_unique>(); + })); + AccessLog::InstanceSharedPtr instance = + factory_->createAccessLogInstance(*message_, std::move(filter_), context_); + EXPECT_NE(nullptr, instance); + EXPECT_NE(nullptr, dynamic_cast(instance.get())); + } else { + EXPECT_THROW_WITH_MESSAGE( + factory_->createAccessLogInstance(*message_, std::move(filter_), context_), + EnvoyException, "fake"); + } + } + + AccessLog::FilterPtr filter_; + NiceMock context_; + envoy::extensions::access_loggers::grpc::v3::TcpGrpcAccessLogConfig tcp_grpc_access_log_; + ProtobufTypes::MessagePtr message_; + Server::Configuration::AccessLogInstanceFactory* factory_{}; +}; + +// Normal OK configuration. +TEST_F(TcpGrpcAccessLogConfigTest, Ok) { run("good_cluster"); } + +// Wrong configuration with invalid clusters. +TEST_F(TcpGrpcAccessLogConfigTest, InvalidCluster) { run("invalid"); } + +} // namespace +} // namespace TcpGrpc +} // namespace AccessLoggers +} // namespace Extensions +} // namespace Envoy diff --git a/test/extensions/access_loggers/open_telemetry/grpc_access_log_impl_test.cc b/test/extensions/access_loggers/open_telemetry/grpc_access_log_impl_test.cc index 850ae1dfa4cdf..5736c94515d96 100644 --- a/test/extensions/access_loggers/open_telemetry/grpc_access_log_impl_test.cc +++ b/test/extensions/access_loggers/open_telemetry/grpc_access_log_impl_test.cc @@ -151,7 +151,7 @@ class GrpcAccessLoggerCacheImplTest : public testing::Test { : async_client_(new Grpc::MockAsyncClient), factory_(new Grpc::MockAsyncClientFactory), logger_cache_(async_client_manager_, scope_, tls_, local_info_), grpc_access_logger_impl_test_helper_(local_info_, async_client_) { - EXPECT_CALL(async_client_manager_, factoryForGrpcService(_, _, false)) + EXPECT_CALL(async_client_manager_, factoryForGrpcService(_, _, true)) 
.WillOnce(Invoke([this](const envoy::config::core::v3::GrpcService&, Stats::Scope&, bool) { EXPECT_CALL(*factory_, createUncachedRawAsyncClient()).WillOnce(Invoke([this] { return Grpc::RawAsyncClientPtr{async_client_}; diff --git a/test/mocks/upstream/cluster_manager.h b/test/mocks/upstream/cluster_manager.h index f8b43ddb76557..08f4c1c563283 100644 --- a/test/mocks/upstream/cluster_manager.h +++ b/test/mocks/upstream/cluster_manager.h @@ -70,6 +70,7 @@ class MockClusterManager : public ClusterManager { } MOCK_METHOD(void, drainConnections, (const std::string& cluster)); MOCK_METHOD(void, drainConnections, ()); + MOCK_METHOD(void, checkActiveStaticCluster, (const std::string& cluster)); NiceMock thread_local_cluster_; envoy::config::core::v3::BindConfig bind_config_; From 852f309bfc74d3397bdd2e7c26800a331021708b Mon Sep 17 00:00:00 2001 From: phlax Date: Tue, 14 Sep 2021 07:43:32 +0100 Subject: [PATCH 032/121] bazel: Switch `py_script` -> `rules_python.entry_point` (#18046) Signed-off-by: Ryan Northey Signed-off-by: gayang --- tools/base/BUILD | 4 --- tools/base/base_command.py | 13 --------- tools/base/envoy_python.bzl | 55 ------------------------------------- tools/distribution/BUILD | 39 +++++++++++++------------- 4 files changed, 19 insertions(+), 92 deletions(-) delete mode 100644 tools/base/base_command.py diff --git a/tools/base/BUILD b/tools/base/BUILD index 8de9977da9dcd..c1d243c119ed7 100644 --- a/tools/base/BUILD +++ b/tools/base/BUILD @@ -6,10 +6,6 @@ licenses(["notice"]) # Apache 2 envoy_package() -exports_files([ - "base_command.py", -]) - envoy_py_library( "tools.base.aio", deps = [ diff --git a/tools/base/base_command.py b/tools/base/base_command.py deleted file mode 100644 index 41cd5675da16f..0000000000000 --- a/tools/base/base_command.py +++ /dev/null @@ -1,13 +0,0 @@ -#!/usr/bin/env python3 - -import sys - -from __UPSTREAM_PACKAGE__ import main as upstream_main - - -def main(*args: str) -> int: - return upstream_main(*args) - - -if __name__ == 
"__main__": - sys.exit(main(*sys.argv[1:])) diff --git a/tools/base/envoy_python.bzl b/tools/base/envoy_python.bzl index 3af2b50323834..550ff901768de 100644 --- a/tools/base/envoy_python.bzl +++ b/tools/base/envoy_python.bzl @@ -71,58 +71,3 @@ def envoy_py_binary( if test: envoy_py_test(name, package, visibility, envoy_prefix = envoy_prefix) - -def envoy_py_script( - name, - entry_point, - deps = [], - data = [], - visibility = ["//visibility:public"], - envoy_prefix = "@envoy"): - """This generates a `py_binary` from an entry_point in a python package - - Currently, the actual entrypoint callable is hard-coded to `main`. - - For example, if you wish to make use of a `console_script` in an upstream - package that resolves as `envoy.code_format.python.command.main` from a - package named `envoy.code_format.python`, you can use this macro as - follows: - - ```skylark - - envoy_py_script( - name = "tools.code_format.python", - entry_point = "envoy.code_format.python.command", - deps = [requirement("envoy.code_format.python")], - ``` - - You will then be able to use the console script from bazel. 
- - Separate args to be passed to the console_script with `--`, eg: - - ```console - - $ bazel run //tools/code_format:python -- -h - ``` - - """ - py_file = "%s.py" % name.split(".")[-1] - output = "$(@D)/%s" % py_file - template_rule = "%s//tools/base:base_command.py" % envoy_prefix - template = "$(location %s)" % template_rule - - native.genrule( - name = "py_script_%s" % py_file, - cmd = "sed s/__UPSTREAM_PACKAGE__/%s/ %s > \"%s\"" % (entry_point, template, output), - tools = [template_rule], - outs = [py_file], - ) - - envoy_py_binary( - name = name, - deps = deps, - data = data, - visibility = visibility, - envoy_prefix = envoy_prefix, - test = False, - ) diff --git a/tools/distribution/BUILD b/tools/distribution/BUILD index 6b60dda875708..e00e257bdc4ea 100644 --- a/tools/distribution/BUILD +++ b/tools/distribution/BUILD @@ -1,6 +1,5 @@ load("//bazel:envoy_build_system.bzl", "envoy_package") -load("//tools/base:envoy_python.bzl", "envoy_py_script") -load("@base_pip3//:requirements.bzl", "requirement") +load("@base_pip3//:requirements.bzl", "entry_point") licenses(["notice"]) # Apache 2 @@ -10,26 +9,26 @@ exports_files([ "distrotest.sh", ]) -envoy_py_script( - name = "tools.distribution.release", - entry_point = "envoy.distribution.release", - deps = [ - requirement("envoy.distribution.release"), - ], +alias( + name = "release", + actual = entry_point( + pkg = "envoy.distribution.release", + script = "envoy.distribution.release", + ), ) -envoy_py_script( - name = "tools.distribution.sign", - entry_point = "envoy.gpg.sign", - deps = [ - requirement("envoy.gpg.sign"), - ], +alias( + name = "sign", + actual = entry_point( + pkg = "envoy.gpg.sign", + script = "envoy.gpg.sign", + ), ) -envoy_py_script( - name = "tools.distribution.verify", - entry_point = "envoy.distribution.verify", - deps = [ - requirement("envoy.distribution.verify"), - ], +alias( + name = "verify", + actual = entry_point( + pkg = "envoy.distribution.verify", + script = 
"envoy.distribution.verify", + ), ) From 6e49776c3d4e6ee380b242ba15a84b3cf4a8a896 Mon Sep 17 00:00:00 2001 From: code Date: Tue, 14 Sep 2021 16:25:51 +0800 Subject: [PATCH 033/121] fix print deps (#18084) Signed-off-by: wbpcode Signed-off-by: gayang --- tools/print_dependencies.py | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/tools/print_dependencies.py b/tools/print_dependencies.py index c8c4eec0f284c..966ee0a15cc54 100755 --- a/tools/print_dependencies.py +++ b/tools/print_dependencies.py @@ -2,14 +2,15 @@ # Quick-and-dirty python to fetch dependency information -import imp +import importlib import json import re import subprocess import sys -API_DEPS = imp.load_source('api', 'api/bazel/repository_locations.bzl') -DEPS = imp.load_source('deps', 'bazel/repository_locations.bzl') +API_DEPS = importlib.machinery.SourceFileLoader('api', + 'api/bazel/repository_locations.bzl').load_module() +DEPS = importlib.machinery.SourceFileLoader('deps', 'bazel/repository_locations.bzl').load_module() def print_deps(deps): @@ -19,14 +20,14 @@ def print_deps(deps): if __name__ == '__main__': deps = [] - DEPS.REPOSITORY_LOCATIONS.update(API_DEPS.REPOSITORY_LOCATIONS) + DEPS.REPOSITORY_LOCATIONS_SPEC.update(API_DEPS.REPOSITORY_LOCATIONS_SPEC) - for key, loc in DEPS.REPOSITORY_LOCATIONS.items(): + for key, loc in DEPS.REPOSITORY_LOCATIONS_SPEC.items(): deps.append({ 'identifier': key, - 'file-sha256': loc.get('sha256'), - 'file-url': loc.get('urls')[0], - 'file-prefix': loc.get('strip_prefix', ''), + 'description': loc.get('project_desc'), + 'project': loc.get('project_url'), + 'version': loc.get("version"), }) deps = sorted(deps, key=lambda k: k['identifier']) From 6392f8e2389115c53774329c98c49b1babed48ee Mon Sep 17 00:00:00 2001 From: Kevin Baichoo Date: Tue, 14 Sep 2021 01:32:58 -0700 Subject: [PATCH 034/121] Escape underscores, and group together exponents using curly braces. 
(#18102) Signed-off-by: Kevin Baichoo Signed-off-by: gayang --- .../operations/overload_manager/overload_manager.rst | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/root/configuration/operations/overload_manager/overload_manager.rst b/docs/root/configuration/operations/overload_manager/overload_manager.rst index 12ae610921c09..7dacd28b323b0 100644 --- a/docs/root/configuration/operations/overload_manager/overload_manager.rst +++ b/docs/root/configuration/operations/overload_manager/overload_manager.rst @@ -199,9 +199,9 @@ threshold for tracking and a single overload action entry that resets streams: ... We will only track streams using >= -:math:`2^minimum_account_to_track_power_of_two` worth of allocated memory in +:math:`2^{minimum\_account\_to\_track\_power\_of\_two}` worth of allocated memory in buffers. In this case, by setting the `minimum_account_to_track_power_of_two` -to `20` we will track streams using >= 1MiB since :math:`2^20` is 1MiB. Streams +to `20` we will track streams using >= 1MiB since :math:`2^{20}` is 1MiB. Streams using >= 1MiB will be classified into 8 power of two sized buckets. Currently, the number of buckets is hardcoded to 8. For this example, the buckets are as follows: @@ -240,7 +240,7 @@ of streams that end up getting reset and to prevent the worker thread from locking up and triggering the Watchdog system. Given that there are only 8 buckets, we partition the space with a gradation of -:math:`gradation = (saturation_threshold - scaling_threshold)/8`. Hence at 85% +:math:`gradation = (saturation\_threshold - scaling\_threshold)/8`. Hence at 85% heap usage we reset streams in the last bucket e.g. those using `>= 128MiB`. At :math:`85% + 1 * gradation` heap usage we reset streams in the last two buckets e.g. 
those using `>= 64MiB`, prioritizing the streams in the last bucket since From 3897121f9cf996728915e8941825b1a394d1401e Mon Sep 17 00:00:00 2001 From: danzh Date: Tue, 14 Sep 2021 08:32:17 -0400 Subject: [PATCH 035/121] quiche: handle connection close during Http::Http3::ActiveClient creation (#18056) Commit Message: Quic connection might get closed due to write error during connect(). This will cause the client gets disconnected during creation while assuming it's connecting. This PR fixes it by explicitly checking connection state and fail client creation and checking for early detaching in various place during initialize(). Additional Message: Use getSystemErrorCode() which returns the actual errno in convertToQuicWriteResult() instead of getErrorCode() which returns the corresponding Envoy enum. Risk Level: low Testing: added new conn_pool_grid unit tests Signed-off-by: Dan Zhang Signed-off-by: gayang --- envoy/api/io_error.h | 2 + envoy/common/platform.h | 2 + source/common/http/codec_client.cc | 3 ++ source/common/http/http3/conn_pool.cc | 13 ++++- source/common/network/io_socket_error_impl.cc | 2 + .../common/quic/envoy_quic_packet_writer.cc | 2 +- .../quic_filter_manager_connection_impl.h | 4 ++ test/common/http/BUILD | 1 + test/common/http/conn_pool_grid_test.cc | 50 +++++++++++++++++++ test/common/quic/envoy_quic_writer_test.cc | 6 +-- 10 files changed, 80 insertions(+), 5 deletions(-) diff --git a/envoy/api/io_error.h b/envoy/api/io_error.h index f5de759194d18..049d14053b24f 100644 --- a/envoy/api/io_error.h +++ b/envoy/api/io_error.h @@ -35,6 +35,8 @@ class IoError { BadFd, // An existing connection was forcibly closed by the remote host. ConnectionReset, + // Network is unreachable due to network settings. + NetworkUnreachable, // Other error codes cannot be mapped to any one above in getErrorCode(). 
UnknownError }; diff --git a/envoy/common/platform.h b/envoy/common/platform.h index e610caccb7ee1..96ed53f8562a2 100644 --- a/envoy/common/platform.h +++ b/envoy/common/platform.h @@ -150,6 +150,7 @@ struct msghdr { #define SOCKET_ERROR_ADDR_IN_USE WSAEADDRINUSE #define SOCKET_ERROR_BADF WSAEBADF #define SOCKET_ERROR_CONNRESET WSAECONNRESET +#define SOCKET_ERROR_NETUNREACH WSAENETUNREACH #define HANDLE_ERROR_PERM ERROR_ACCESS_DENIED #define HANDLE_ERROR_INVALID ERROR_INVALID_HANDLE @@ -259,6 +260,7 @@ typedef int signal_t; // NOLINT(modernize-use-using) #define SOCKET_ERROR_ADDR_IN_USE EADDRINUSE #define SOCKET_ERROR_BADF EBADF #define SOCKET_ERROR_CONNRESET ECONNRESET +#define SOCKET_ERROR_NETUNREACH ENETUNREACH // Mapping POSIX file errors to common error names #define HANDLE_ERROR_PERM EACCES diff --git a/source/common/http/codec_client.cc b/source/common/http/codec_client.cc index 29aa601384a44..e2d242f543265 100644 --- a/source/common/http/codec_client.cc +++ b/source/common/http/codec_client.cc @@ -117,6 +117,9 @@ void CodecClient::onEvent(Network::ConnectionEvent event) { StreamInfo::ResponseFlag::UpstreamProtocolError); } } + } else { + ENVOY_CONN_LOG(warn, "Connection is closed by {} during connecting.", *connection_, + (event == Network::ConnectionEvent::RemoteClose ? "peer" : "self")); } while (!active_requests_.empty()) { // Fake resetting all active streams so that reset() callbacks get invoked. 
diff --git a/source/common/http/http3/conn_pool.cc b/source/common/http/http3/conn_pool.cc index 49ed2e1a7b3c4..9bba3fa0221c7 100644 --- a/source/common/http/http3/conn_pool.cc +++ b/source/common/http/http3/conn_pool.cc @@ -65,10 +65,15 @@ allocateConnPool(Event::Dispatcher& dispatcher, Random::RandomGenerator& random_ host, priority, dispatcher, options, transport_socket_options, random_generator, state, [&quic_stat_names, &scope](HttpConnPoolImplBase* pool) -> ::Envoy::ConnectionPool::ActiveClientPtr { + ENVOY_LOG_TO_LOGGER(Envoy::Logger::Registry::getLog(Envoy::Logger::Id::pool), debug, + "Creating Http/3 client"); // If there's no ssl context, the secrets are not loaded. Fast-fail by returning null. auto factory = &pool->host()->transportSocketFactory(); ASSERT(dynamic_cast(factory) != nullptr); if (static_cast(factory)->sslCtx() == nullptr) { + ENVOY_LOG_TO_LOGGER(Envoy::Logger::Registry::getLog(Envoy::Logger::Id::pool), warn, + "Failed to create Http/3 client. Transport socket " + "factory is not configured correctly."); return nullptr; } Http3ConnPoolImpl* h3_pool = reinterpret_cast(pool); @@ -82,7 +87,13 @@ allocateConnPool(Event::Dispatcher& dispatcher, Random::RandomGenerator& random_ data.connection_ = Quic::createQuicNetworkConnection(h3_pool->quicInfo(), pool->dispatcher(), host_address, source_address, quic_stat_names, scope); - return std::make_unique(*pool, data); + // Store a handle to connection as it will be moved during client construction. 
+ Network::Connection& connection = *data.connection_; + auto client = std::make_unique(*pool, data); + if (connection.state() == Network::Connection::State::Closed) { + return nullptr; + } + return client; }, [](Upstream::Host::CreateConnectionData& data, HttpConnPoolImplBase* pool) { CodecClientPtr codec{new CodecClientProd(CodecType::HTTP3, std::move(data.connection_), diff --git a/source/common/network/io_socket_error_impl.cc b/source/common/network/io_socket_error_impl.cc index 8d664be8e1f47..a3e955f4d68a2 100644 --- a/source/common/network/io_socket_error_impl.cc +++ b/source/common/network/io_socket_error_impl.cc @@ -58,6 +58,8 @@ Api::IoError::IoErrorCode IoSocketError::errorCodeFromErrno(int sys_errno) { return IoErrorCode::BadFd; case SOCKET_ERROR_CONNRESET: return IoErrorCode::ConnectionReset; + case SOCKET_ERROR_NETUNREACH: + return IoErrorCode::NetworkUnreachable; default: ENVOY_LOG_MISC(debug, "Unknown error code {} details {}", sys_errno, errorDetails(sys_errno)); return IoErrorCode::UnknownError; diff --git a/source/common/quic/envoy_quic_packet_writer.cc b/source/common/quic/envoy_quic_packet_writer.cc index 6a3d358bae017..e2f53bd2df5bc 100644 --- a/source/common/quic/envoy_quic_packet_writer.cc +++ b/source/common/quic/envoy_quic_packet_writer.cc @@ -16,7 +16,7 @@ quic::WriteResult convertToQuicWriteResult(Api::IoCallUint64Result& result) { quic::WriteStatus status = result.err_->getErrorCode() == Api::IoError::IoErrorCode::Again ? 
quic::WRITE_STATUS_BLOCKED : quic::WRITE_STATUS_ERROR; - return {status, static_cast(result.err_->getErrorCode())}; + return {status, static_cast(result.err_->getSystemErrorCode())}; } } // namespace diff --git a/source/common/quic/quic_filter_manager_connection_impl.h b/source/common/quic/quic_filter_manager_connection_impl.h index 6fabaa5ded3ce..f2e112297f828 100644 --- a/source/common/quic/quic_filter_manager_connection_impl.h +++ b/source/common/quic/quic_filter_manager_connection_impl.h @@ -82,6 +82,10 @@ class QuicFilterManagerConnectionImpl : public Network::ConnectionImplBase, void setConnectionStats(const Network::Connection::ConnectionStats& stats) override { // TODO(danzh): populate stats. Network::ConnectionImplBase::setConnectionStats(stats); + if (network_connection_ == nullptr) { + ENVOY_CONN_LOG(error, "Quic connection has been detached.", *this); + return; + } network_connection_->setConnectionStats(stats); } Ssl::ConnectionInfoConstSharedPtr ssl() const override; diff --git a/test/common/http/BUILD b/test/common/http/BUILD index edd7e75664edf..93b3cc90f28b4 100644 --- a/test/common/http/BUILD +++ b/test/common/http/BUILD @@ -446,6 +446,7 @@ envoy_cc_test( "//test/mocks/runtime:runtime_mocks", "//test/mocks/stats:stats_mocks", "//test/mocks/server:transport_socket_factory_context_mocks", + "//test/test_common:threadsafe_singleton_injector_lib", "//source/common/quic:quic_factory_lib", "//source/common/quic:quic_transport_socket_factory_lib", "//source/common/quic:client_connection_factory_lib", diff --git a/test/common/http/conn_pool_grid_test.cc b/test/common/http/conn_pool_grid_test.cc index 873a813be4501..0e03bc9cda652 100644 --- a/test/common/http/conn_pool_grid_test.cc +++ b/test/common/http/conn_pool_grid_test.cc @@ -14,6 +14,7 @@ #include "test/mocks/ssl/mocks.h" #include "test/mocks/upstream/cluster_info.h" #include "test/test_common/simulated_time_system.h" +#include "test/test_common/threadsafe_singleton_injector.h" #include 
"test/test_common/utility.h" #include "gmock/gmock.h" @@ -680,6 +681,55 @@ TEST_F(ConnectivityGridTest, RealGrid) { auto optional_it3 = ConnectivityGridForTest::forceCreateNextPool(grid); ASSERT_FALSE(optional_it3.has_value()); } + +TEST_F(ConnectivityGridTest, ConnectionCloseDuringCreation) { + EXPECT_CALL(*cluster_, connectTimeout()).WillRepeatedly(Return(std::chrono::seconds(10))); + + testing::InSequence s; + dispatcher_.allow_null_callback_ = true; + // Set the cluster up to have a quic transport socket. + Envoy::Ssl::ClientContextConfigPtr config(new NiceMock()); + NiceMock factory_context; + Ssl::ClientContextSharedPtr ssl_context(new Ssl::MockClientContext()); + EXPECT_CALL(factory_context.context_manager_, createSslClientContext(_, _, _)) + .WillOnce(Return(ssl_context)); + auto factory = + std::make_unique(std::move(config), factory_context); + factory->initialize(); + ASSERT_FALSE(factory->usesProxyProtocolOptions()); + auto& matcher = + static_cast(*cluster_->transport_socket_matcher_); + EXPECT_CALL(matcher, resolve(_)) + .WillRepeatedly( + Return(Upstream::TransportSocketMatcher::MatchData(*factory, matcher.stats_, "test"))); + + ConnectivityGrid grid(dispatcher_, random_, + Upstream::makeTestHost(cluster_, "tcp://127.0.0.1:9000", simTime()), + Upstream::ResourcePriority::Default, socket_options_, + transport_socket_options_, state_, simTime(), alternate_protocols_, + std::chrono::milliseconds(300), options_, quic_stat_names_, store_); + + // Create the HTTP/3 pool. 
+ auto optional_it1 = ConnectivityGridForTest::forceCreateNextPool(grid); + ASSERT_TRUE(optional_it1.has_value()); + EXPECT_EQ("HTTP/3", (**optional_it1)->protocolDescription()); + + Api::MockOsSysCalls os_sys_calls; + TestThreadsafeSingletonInjector os_calls(&os_sys_calls); + EXPECT_CALL(os_sys_calls, socket(_, _, _)).WillOnce(Return(Api::SysCallSocketResult{1, 0})); +#if defined(__APPLE__) || defined(WIN32) + EXPECT_CALL(os_sys_calls, setsocketblocking(1, false)) + .WillOnce(Return(Api::SysCallIntResult{1, 0})); +#endif + EXPECT_CALL(os_sys_calls, bind(_, _, _)).WillOnce(Return(Api::SysCallIntResult{1, 0})); + EXPECT_CALL(os_sys_calls, setsockopt_(_, _, _, _, _)).WillRepeatedly(Return(0)); + EXPECT_CALL(os_sys_calls, sendmsg(_, _, _)).WillOnce(Return(Api::SysCallSizeResult{-1, 101})); + + EXPECT_CALL(os_sys_calls, close(1)).WillOnce(Return(Api::SysCallIntResult{0, 0})); + ConnectionPool::Cancellable* cancel = (**optional_it1)->newStream(decoder_, callbacks_); + EXPECT_EQ(nullptr, cancel); +} + #endif } // namespace diff --git a/test/common/quic/envoy_quic_writer_test.cc b/test/common/quic/envoy_quic_writer_test.cc index 3908fb82ba568..f6a21ffce8920 100644 --- a/test/common/quic/envoy_quic_writer_test.cc +++ b/test/common/quic/envoy_quic_writer_test.cc @@ -90,7 +90,7 @@ TEST_F(EnvoyQuicWriterTest, SendBlocked) { quic::WriteResult result = envoy_quic_writer_.WritePacket(str.data(), str.length(), self_address_, peer_address_, nullptr); EXPECT_EQ(quic::WRITE_STATUS_BLOCKED, result.status); - EXPECT_EQ(static_cast(Api::IoError::IoErrorCode::Again), result.error_code); + EXPECT_EQ(SOCKET_ERROR_AGAIN, result.error_code); EXPECT_TRUE(envoy_quic_writer_.IsWriteBlocked()); // Writing while blocked is not allowed. 
#ifdef NDEBUG @@ -117,7 +117,7 @@ TEST_F(EnvoyQuicWriterTest, SendFailure) { quic::WriteResult result = envoy_quic_writer_.WritePacket(str.data(), str.length(), self_address_, peer_address_, nullptr); EXPECT_EQ(quic::WRITE_STATUS_ERROR, result.status); - EXPECT_EQ(static_cast(Api::IoError::IoErrorCode::NoSupport), result.error_code); + EXPECT_EQ(SOCKET_ERROR_NOT_SUP, result.error_code); EXPECT_FALSE(envoy_quic_writer_.IsWriteBlocked()); } @@ -133,7 +133,7 @@ TEST_F(EnvoyQuicWriterTest, SendFailureMessageTooBig) { // Currently MessageSize should be propagated through error_code. This test // would fail if QUICHE changes to propagate through status in the future. EXPECT_EQ(quic::WRITE_STATUS_ERROR, result.status); - EXPECT_EQ(static_cast(Api::IoError::IoErrorCode::MessageTooBig), result.error_code); + EXPECT_EQ(SOCKET_ERROR_MSG_SIZE, result.error_code); EXPECT_FALSE(envoy_quic_writer_.IsWriteBlocked()); } From 2e8f0a4fe997ad20fe4ae64bdc2043cf494a6900 Mon Sep 17 00:00:00 2001 From: Kenjiro Nakayama Date: Tue, 14 Sep 2021 21:55:00 +0900 Subject: [PATCH 036/121] Fix invalid link in CDN-loop (#18109) This docs https://www.envoyproxy.io/docs/envoy/latest/configuration/http/http_filters/cdn_loop_filter#configuration has an 404 link. It seems to need :ref: tag but also the link should point to extensions.filters.http.cdn_loop.v3alpha.CdnLoopConfig. This patch fixes it. 
Risk Level: low Testing: n/a Docs Changes: yes Release Notes: n/a Platform Specific Features: n/a Signed-off-by: Kenjiro Nakayama Signed-off-by: gayang --- docs/root/configuration/http/http_filters/cdn_loop_filter.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/root/configuration/http/http_filters/cdn_loop_filter.rst b/docs/root/configuration/http/http_filters/cdn_loop_filter.rst index 5b81d1be25226..e3016f972d62c 100644 --- a/docs/root/configuration/http/http_filters/cdn_loop_filter.rst +++ b/docs/root/configuration/http/http_filters/cdn_loop_filter.rst @@ -26,7 +26,7 @@ Configuration The filter is configured with the name *envoy.filters.http.cdn_loop*. -The `filter config `_ has two fields. +The :ref:`filter config ` has two fields. * The *cdn_id* field sets the identifier that the filter will look for within and append to the CDN-Loop header. RFC 8586 calls this field the "cdn-id"; "cdn-id" can either be a pseudonym or a From 9b01b3bd6af941ea516fc6bb4342d2ecd0568397 Mon Sep 17 00:00:00 2001 From: alyssawilk Date: Tue, 14 Sep 2021 11:04:56 -0400 Subject: [PATCH 037/121] http3: adding connect support (#17877) This includes validation for upgrade connects per Ryan's offline advice. n.b. this should be a no-op for HTTP (where there is no mechanism to send both) and HTTP/2 (where nghttp2 validates) so not currently calling out in release notes. 
Risk Level: low Testing: new integration tests Docs Changes: inline Release Notes: n/a (quic alpha) co-author: @DavidSchinazi Signed-off-by: Alyssa Wilk Signed-off-by: gayang --- api/envoy/config/core/v3/protocol.proto | 9 + .../intro/arch_overview/http/upgrades.rst | 9 +- docs/root/version_history/current.rst | 1 + source/common/http/header_utility.cc | 12 ++ .../common/quic/envoy_quic_server_stream.cc | 8 +- source/common/runtime/runtime_features.cc | 1 + source/common/tcp_proxy/upstream.cc | 10 +- source/common/tcp_proxy/upstream.h | 3 +- .../upstreams/tcp/generic/config.cc | 13 +- test/common/http/header_utility_test.cc | 28 +++ test/common/http/http1/codec_impl_test.cc | 6 +- test/common/http/http2/codec_impl_test.cc | 1 + test/common/router/router_test.cc | 3 + test/config/utility.cc | 5 +- test/config/utility.h | 3 +- .../upstreams/tcp/generic/config_test.cc | 25 +++ test/integration/BUILD | 2 + test/integration/fake_upstream.h | 1 + test/integration/http_integration.cc | 1 + test/integration/http_integration.h | 17 +- test/integration/protocol_integration_test.cc | 25 +-- .../tcp_tunneling_integration_test.cc | 161 ++++++++++-------- 22 files changed, 230 insertions(+), 114 deletions(-) diff --git a/api/envoy/config/core/v3/protocol.proto b/api/envoy/config/core/v3/protocol.proto index 8f2347eb55179..1b96b88be7cd9 100644 --- a/api/envoy/config/core/v3/protocol.proto +++ b/api/envoy/config/core/v3/protocol.proto @@ -473,6 +473,7 @@ message GrpcProtocolOptions { } // A message which allows using HTTP/3. +// [#next-free-field: 6] message Http3ProtocolOptions { QuicProtocolOptions quic_protocol_options = 1; @@ -483,6 +484,14 @@ message Http3ProtocolOptions { // If set, this overrides any HCM :ref:`stream_error_on_invalid_http_messaging // `. 
google.protobuf.BoolValue override_stream_error_on_invalid_http_message = 2; + + // Allows proxying Websocket and other upgrades over HTTP/3 CONNECT using + // the header mechanisms from the `HTTP/2 extended connect RFC + // `_ + // and settings `proposed for HTTP/3 + // `_ + // [#alpha:] as HTTP/3 CONNECT is not yet an RFC. + bool allow_extended_connect = 5; } // A message to control transformations to the :scheme header diff --git a/docs/root/intro/arch_overview/http/upgrades.rst b/docs/root/intro/arch_overview/http/upgrades.rst index cefa72e67b807..f0c92d42a531b 100644 --- a/docs/root/intro/arch_overview/http/upgrades.rst +++ b/docs/root/intro/arch_overview/http/upgrades.rst @@ -52,10 +52,13 @@ a deployment of the form: In this case, if a client is for example using WebSocket, we want the Websocket to arrive at the upstream server functionally intact, which means it needs to traverse the HTTP/2+ hop. -This is accomplished via `Extended CONNECT (RFC8441) `_ support, +This is accomplished for HTTP/2 via `Extended CONNECT (RFC8441) `_ support, turned on by setting :ref:`allow_connect ` -true at the second layer Envoy. The -WebSocket request will be transformed into an HTTP/2+ CONNECT stream, with :protocol header +true at the second layer Envoy. For HTTP/3 there is parallel support configured by the alpha option +:ref:`allow_extended_connect ` as +there is no formal RFC yet. + +The WebSocket request will be transformed into an HTTP/2+ CONNECT stream, with :protocol header indicating the original upgrade, traverse the HTTP/2+ hop, and be downgraded back into an HTTP/1 WebSocket Upgrade. This same Upgrade-CONNECT-Upgrade transformation will be performed on any HTTP/2+ hop, with the documented flaw that the HTTP/1.1 method is always assumed to be GET. 
diff --git a/docs/root/version_history/current.rst b/docs/root/version_history/current.rst index d340597c8366a..2089506696c3d 100644 --- a/docs/root/version_history/current.rst +++ b/docs/root/version_history/current.rst @@ -107,6 +107,7 @@ New Features * http: added :ref:`x-envoy-upstream-stream-duration-ms ` that allows configuring the max stream duration via a request header. * http: added support for :ref:`max_requests_per_connection ` for both upstream and downstream connections. * http: sanitizing the referer header as documented :ref:`here `. This feature can be temporarily turned off by setting runtime guard ``envoy.reloadable_features.sanitize_http_header_referer`` to false. +* http: validating outgoing HTTP/2 CONNECT requests to ensure that if ``:path`` is set that ``:protocol`` is present. This behavior can be temporarily turned off by setting runtime guard ``envoy.reloadable_features.validate_connect`` to false. * jwt_authn: added support for :ref:`Jwt Cache ` and its size can be specified by :ref:`jwt_cache_size `. * jwt_authn: added support for extracting JWTs from request cookies using :ref:`from_cookies `. * listener: new listener metric ``downstream_cx_transport_socket_connect_timeout`` to track transport socket timeouts. diff --git a/source/common/http/header_utility.cc b/source/common/http/header_utility.cc index 06badafc13e66..211aeed6aa1f0 100644 --- a/source/common/http/header_utility.cc +++ b/source/common/http/header_utility.cc @@ -339,6 +339,18 @@ Http::Status HeaderUtility::checkRequiredRequestHeaders(const Http::RequestHeade return absl::InvalidArgumentError( absl::StrCat("missing required header: ", Envoy::Http::Headers::get().Host.get())); } + if (Runtime::runtimeFeatureEnabled("envoy.reloadable_features.validate_connect")) { + if (headers.Path() && !headers.Protocol()) { + // Path and Protocol header should only be present for CONNECT for upgrade style CONNECT. 
+ return absl::InvalidArgumentError( + absl::StrCat("missing required header: ", Envoy::Http::Headers::get().Protocol.get())); + } + if (!headers.Path() && headers.Protocol()) { + // Path and Protocol headers should only be present together, for extended (upgrade-style) CONNECT. + return absl::InvalidArgumentError( + absl::StrCat("missing required header: ", Envoy::Http::Headers::get().Path.get())); + } + } } else { if (!headers.Path()) { // :path header must be present for non-CONNECT requests. diff --git a/source/common/quic/envoy_quic_server_stream.cc b/source/common/quic/envoy_quic_server_stream.cc index f5249c30375c1..b256cc9f3aa23 100644 --- a/source/common/quic/envoy_quic_server_stream.cc +++ b/source/common/quic/envoy_quic_server_stream.cc @@ -46,6 +46,8 @@ EnvoyQuicServerStream::EnvoyQuicServerStream( headers_with_underscores_action_(headers_with_underscores_action) { ASSERT(static_cast(GetReceiveWindow().value()) > 8 * 1024, "Send buffer limit should be larger than 8KB."); + // TODO(alyssawilk, danzh) if http3_options_.allow_extended_connect() is true, + // send the correct SETTINGS.
} void EnvoyQuicServerStream::encode100ContinueHeaders(const Http::ResponseHeaderMap& headers) { @@ -167,7 +169,9 @@ void EnvoyQuicServerStream::OnInitialHeadersComplete(bool fin, size_t frame_len, onStreamError(close_connection_upon_invalid_header_, rst); return; } - if (Http::HeaderUtility::requestHeadersValid(*headers) != absl::nullopt) { + if (Http::HeaderUtility::requestHeadersValid(*headers) != absl::nullopt || + Http::HeaderUtility::checkRequiredRequestHeaders(*headers) != Http::okStatus() || + (headers->Protocol() && !http3_options_.allow_extended_connect())) { details_ = Http3ResponseCodeDetailValues::invalid_http_header; onStreamError(absl::nullopt); return; @@ -392,7 +396,7 @@ void EnvoyQuicServerStream::onStreamError(absl::optional should_close_conn !http3_options_.override_stream_error_on_invalid_http_message().value(); } if (close_connection_upon_invalid_header) { - stream_delegate()->OnStreamError(quic::QUIC_HTTP_FRAME_ERROR, "Invalid headers"); + stream_delegate()->OnStreamError(quic::QUIC_HTTP_FRAME_ERROR, std::string(details_)); } else { Reset(rst); } diff --git a/source/common/runtime/runtime_features.cc b/source/common/runtime/runtime_features.cc index 50cba15839226..0abb465135964 100644 --- a/source/common/runtime/runtime_features.cc +++ b/source/common/runtime/runtime_features.cc @@ -95,6 +95,7 @@ constexpr const char* runtime_features[] = { "envoy.reloadable_features.unquote_log_string_values", "envoy.reloadable_features.upstream_host_weight_change_causes_rebuild", "envoy.reloadable_features.use_observable_cluster_name", + "envoy.reloadable_features.validate_connect", "envoy.reloadable_features.vhds_heartbeats", "envoy.reloadable_features.wasm_cluster_name_envoy_grpc", "envoy.reloadable_features.upstream_http2_flood_checks", diff --git a/source/common/tcp_proxy/upstream.cc b/source/common/tcp_proxy/upstream.cc index 8a66a0ad8161b..8982729206000 100644 --- a/source/common/tcp_proxy/upstream.cc +++ b/source/common/tcp_proxy/upstream.cc @@ 
-196,8 +196,14 @@ HttpConnPool::HttpConnPool(Upstream::ThreadLocalCluster& thread_local_cluster, Tcp::ConnectionPool::UpstreamCallbacks& upstream_callbacks, Http::CodecType type) : config_(config), type_(type), upstream_callbacks_(upstream_callbacks) { - conn_pool_data_ = thread_local_cluster.httpConnPool(Upstream::ResourcePriority::Default, - absl::nullopt, context); + absl::optional protocol; + if (type_ == Http::CodecType::HTTP3) { + protocol = Http::Protocol::Http3; + } else if (type_ == Http::CodecType::HTTP2) { + protocol = Http::Protocol::Http2; + } + conn_pool_data_ = + thread_local_cluster.httpConnPool(Upstream::ResourcePriority::Default, protocol, context); } HttpConnPool::~HttpConnPool() { diff --git a/source/common/tcp_proxy/upstream.h b/source/common/tcp_proxy/upstream.h index 3f2bfd9c67785..017257b5bcaa0 100644 --- a/source/common/tcp_proxy/upstream.h +++ b/source/common/tcp_proxy/upstream.h @@ -53,8 +53,7 @@ class HttpConnPool : public GenericConnPool, public Http::ConnectionPool::Callba Tcp::ConnectionPool::UpstreamCallbacks& upstream_callbacks, Http::CodecType type); ~HttpConnPool() override; - // HTTP/3 upstreams are not supported at the moment. 
- bool valid() const { return conn_pool_data_.has_value() && type_ <= Http::CodecType::HTTP2; } + bool valid() const { return conn_pool_data_.has_value(); } // GenericConnPool void newStream(GenericConnectionPoolCallbacks& callbacks) override; diff --git a/source/extensions/upstreams/tcp/generic/config.cc b/source/extensions/upstreams/tcp/generic/config.cc index 77625e5fd0404..491f0569185d9 100644 --- a/source/extensions/upstreams/tcp/generic/config.cc +++ b/source/extensions/upstreams/tcp/generic/config.cc @@ -16,10 +16,15 @@ TcpProxy::GenericConnPoolPtr GenericConnPoolFactory::createGenericConnPool( const absl::optional& config, Upstream::LoadBalancerContext* context, Envoy::Tcp::ConnectionPool::UpstreamCallbacks& upstream_callbacks) const { if (config.has_value()) { - auto pool_type = - ((thread_local_cluster.info()->features() & Upstream::ClusterInfo::Features::HTTP2) != 0) - ? Http::CodecType::HTTP2 - : Http::CodecType::HTTP1; + Http::CodecType pool_type; + if ((thread_local_cluster.info()->features() & Upstream::ClusterInfo::Features::HTTP2) != 0) { + pool_type = Http::CodecType::HTTP2; + } else if ((thread_local_cluster.info()->features() & Upstream::ClusterInfo::Features::HTTP3) != + 0) { + pool_type = Http::CodecType::HTTP3; + } else { + pool_type = Http::CodecType::HTTP1; + } auto ret = std::make_unique( thread_local_cluster, context, config.value(), upstream_callbacks, pool_type); return (ret->valid() ? std::move(ret) : nullptr); diff --git a/test/common/http/header_utility_test.cc b/test/common/http/header_utility_test.cc index 7e9ed68e23df8..5951dced3199d 100644 --- a/test/common/http/header_utility_test.cc +++ b/test/common/http/header_utility_test.cc @@ -886,6 +886,34 @@ TEST(ValidateHeaders, HeaderNameWithUnderscores) { rejected)); } +TEST(ValidateHeaders, Connect) { + { + // Basic connect. 
+ TestRequestHeaderMapImpl headers{{":method", "CONNECT"}, {":authority", "foo.com:80"}}; + EXPECT_EQ(Http::okStatus(), HeaderUtility::checkRequiredRequestHeaders(headers)); + } + { + // Extended connect. + TestRequestHeaderMapImpl headers{{":method", "CONNECT"}, + {":authority", "foo.com:80"}, + {":path", "/"}, + {":protocol", "websocket"}}; + EXPECT_EQ(Http::okStatus(), HeaderUtility::checkRequiredRequestHeaders(headers)); + } + { + // Missing path. + TestRequestHeaderMapImpl headers{ + {":method", "CONNECT"}, {":authority", "foo.com:80"}, {":protocol", "websocket"}}; + EXPECT_NE(Http::okStatus(), HeaderUtility::checkRequiredRequestHeaders(headers)); + } + { + // Missing protocol. + TestRequestHeaderMapImpl headers{ + {":method", "CONNECT"}, {":authority", "foo.com:80"}, {":path", "/"}}; + EXPECT_NE(Http::okStatus(), HeaderUtility::checkRequiredRequestHeaders(headers)); + } +} + TEST(ValidateHeaders, ContentLength) { bool should_close_connection; EXPECT_EQ(HeaderUtility::HeaderValidationResult::ACCEPT, diff --git a/test/common/http/http1/codec_impl_test.cc b/test/common/http/http1/codec_impl_test.cc index 433acb4473a11..d70c3be17a8b2 100644 --- a/test/common/http/http1/codec_impl_test.cc +++ b/test/common/http/http1/codec_impl_test.cc @@ -2757,7 +2757,7 @@ TEST_F(Http1ClientConnectionImplTest, ConnectResponse) { NiceMock response_decoder; Http::RequestEncoder& request_encoder = codec_->newStream(response_decoder); - TestRequestHeaderMapImpl headers{{":method", "CONNECT"}, {":path", "/"}, {":authority", "host"}}; + TestRequestHeaderMapImpl headers{{":method", "CONNECT"}, {":authority", "host"}}; EXPECT_TRUE(request_encoder.encodeHeaders(headers, true).ok()); // Send response headers @@ -2788,7 +2788,7 @@ TEST_F(Http1ClientConnectionImplTest, ConnectResponseWithEarlyData) { NiceMock response_decoder; Http::RequestEncoder& request_encoder = codec_->newStream(response_decoder); - TestRequestHeaderMapImpl headers{{":method", "CONNECT"}, {":path", "/"}, {":authority", 
"host"}}; + TestRequestHeaderMapImpl headers{{":method", "CONNECT"}, {":authority", "host"}}; EXPECT_TRUE(request_encoder.encodeHeaders(headers, true).ok()); // Send response headers and payload @@ -2807,7 +2807,7 @@ TEST_F(Http1ClientConnectionImplTest, ConnectRejected) { NiceMock response_decoder; Http::RequestEncoder& request_encoder = codec_->newStream(response_decoder); - TestRequestHeaderMapImpl headers{{":method", "CONNECT"}, {":path", "/"}, {":authority", "host"}}; + TestRequestHeaderMapImpl headers{{":method", "CONNECT"}, {":authority", "host"}}; EXPECT_TRUE(request_encoder.encodeHeaders(headers, true).ok()); EXPECT_CALL(response_decoder, decodeHeaders_(_, false)); diff --git a/test/common/http/http2/codec_impl_test.cc b/test/common/http/http2/codec_impl_test.cc index 4e97573c91e1e..51c460a4d2f36 100644 --- a/test/common/http/http2/codec_impl_test.cc +++ b/test/common/http/http2/codec_impl_test.cc @@ -3037,6 +3037,7 @@ TEST_P(Http2CodecImplTest, ConnectTest) { TestRequestHeaderMapImpl request_headers; HttpTestUtility::addDefaultHeaders(request_headers); request_headers.setReferenceKey(Headers::get().Method, Http::Headers::get().MethodValues.Connect); + request_headers.setReferenceKey(Headers::get().Protocol, "bytestream"); TestRequestHeaderMapImpl expected_headers; HttpTestUtility::addDefaultHeaders(expected_headers); expected_headers.setReferenceKey(Headers::get().Method, diff --git a/test/common/router/router_test.cc b/test/common/router/router_test.cc index 5963d3063a277..3583a8159f2a9 100644 --- a/test/common/router/router_test.cc +++ b/test/common/router/router_test.cc @@ -5968,6 +5968,7 @@ TEST_F(RouterTest, ConnectPauseAndResume) { Http::TestRequestHeaderMapImpl headers; HttpTestUtility::addDefaultHeaders(headers); headers.setMethod("CONNECT"); + headers.removePath(); router_.decodeHeaders(headers, false); // Make sure any early data does not go upstream. 
@@ -6040,6 +6041,7 @@ TEST_F(RouterTest, ConnectPauseNoResume) { Http::TestRequestHeaderMapImpl headers; HttpTestUtility::addDefaultHeaders(headers); headers.setMethod("CONNECT"); + headers.removePath(); router_.decodeHeaders(headers, false); // Make sure any early data does not go upstream. @@ -6070,6 +6072,7 @@ TEST_F(RouterTest, ConnectExplicitTcpUpstream) { Http::TestRequestHeaderMapImpl headers; HttpTestUtility::addDefaultHeaders(headers); headers.setMethod("CONNECT"); + headers.removePath(); router_.decodeHeaders(headers, false); router_.onDestroy(); diff --git a/test/config/utility.cc b/test/config/utility.cc index 0928efd680b86..4cbe9cdf3a4f9 100644 --- a/test/config/utility.cc +++ b/test/config/utility.cc @@ -643,7 +643,7 @@ void ConfigHelper::addClusterFilterMetadata(absl::string_view metadata_yaml, void ConfigHelper::setConnectConfig( envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager& hcm, - bool terminate_connect, bool allow_post) { + bool terminate_connect, bool allow_post, bool http3) { auto* route_config = hcm.mutable_route_config(); ASSERT_EQ(1, route_config->virtual_hosts_size()); auto* route = route_config->mutable_virtual_hosts(0)->mutable_routes(0); @@ -671,6 +671,9 @@ void ConfigHelper::setConnectConfig( hcm.add_upgrade_configs()->set_upgrade_type("CONNECT"); hcm.mutable_http2_protocol_options()->set_allow_connect(true); + if (http3) { + hcm.mutable_http3_protocol_options()->set_allow_extended_connect(true); + } } void ConfigHelper::applyConfigModifiers() { diff --git a/test/config/utility.h b/test/config/utility.h index f421c95ba8c10..501d08f0e969a 100644 --- a/test/config/utility.h +++ b/test/config/utility.h @@ -313,7 +313,8 @@ class ConfigHelper { // Given an HCM with the default config, set the matcher to be a connect matcher and enable // CONNECT requests. 
- static void setConnectConfig(HttpConnectionManager& hcm, bool terminate_connect, bool allow_post); + static void setConnectConfig(HttpConnectionManager& hcm, bool terminate_connect, bool allow_post, + bool http3 = false); void setLocalReply( const envoy::extensions::filters::network::http_connection_manager::v3::LocalReplyConfig& diff --git a/test/extensions/upstreams/tcp/generic/config_test.cc b/test/extensions/upstreams/tcp/generic/config_test.cc index 2e8a638356db5..dd222bedcc0cc 100644 --- a/test/extensions/upstreams/tcp/generic/config_test.cc +++ b/test/extensions/upstreams/tcp/generic/config_test.cc @@ -7,6 +7,7 @@ #include "gtest/gtest.h" using testing::_; +using testing::AnyNumber; using testing::NiceMock; using testing::Return; @@ -31,6 +32,30 @@ TEST_F(TcpConnPoolTest, TestNoConnPool) { factory_.createGenericConnPool(thread_local_cluster_, config, nullptr, callbacks_)); } +TEST_F(TcpConnPoolTest, Http2Config) { + auto info = std::make_shared(); + EXPECT_CALL(*info, features()).WillOnce(Return(Upstream::ClusterInfo::Features::HTTP2)); + EXPECT_CALL(thread_local_cluster_, info).WillOnce(Return(info)); + envoy::extensions::filters::network::tcp_proxy::v3::TcpProxy_TunnelingConfig config; + config.set_hostname("host"); + EXPECT_CALL(thread_local_cluster_, httpConnPool(_, _, _)).WillOnce(Return(absl::nullopt)); + EXPECT_EQ(nullptr, + factory_.createGenericConnPool(thread_local_cluster_, config, nullptr, callbacks_)); +} + +TEST_F(TcpConnPoolTest, Http3Config) { + auto info = std::make_shared(); + EXPECT_CALL(*info, features()) + .Times(AnyNumber()) + .WillRepeatedly(Return(Upstream::ClusterInfo::Features::HTTP3)); + EXPECT_CALL(thread_local_cluster_, info).Times(AnyNumber()).WillRepeatedly(Return(info)); + envoy::extensions::filters::network::tcp_proxy::v3::TcpProxy_TunnelingConfig config; + config.set_hostname("host"); + EXPECT_CALL(thread_local_cluster_, httpConnPool(_, _, _)).WillOnce(Return(absl::nullopt)); + EXPECT_EQ(nullptr, + 
factory_.createGenericConnPool(thread_local_cluster_, config, nullptr, callbacks_)); +} + } // namespace Generic } // namespace Tcp } // namespace Upstreams diff --git a/test/integration/BUILD b/test/integration/BUILD index 247216e7340d5..08e2cf881426d 100644 --- a/test/integration/BUILD +++ b/test/integration/BUILD @@ -1330,12 +1330,14 @@ envoy_cc_test( envoy_cc_test( name = "tcp_tunneling_integration_test", + size = "large", srcs = [ "tcp_tunneling_integration_test.cc", ], data = [ "//test/config/integration/certs", ], + shard_count = 3, deps = [ ":http_integration_lib", ":http_protocol_integration_lib", diff --git a/test/integration/fake_upstream.h b/test/integration/fake_upstream.h index a64ba2dab4b15..54a5f56f531e5 100644 --- a/test/integration/fake_upstream.h +++ b/test/integration/fake_upstream.h @@ -570,6 +570,7 @@ struct FakeUpstreamConfig { // Legacy options which are always set. http2_options_.set_allow_connect(true); http2_options_.set_allow_metadata(true); + http3_options_.set_allow_extended_connect(true); } Event::TestTimeSystem& time_system_; diff --git a/test/integration/http_integration.cc b/test/integration/http_integration.cc index 4403d39b23171..dc63faba92e39 100644 --- a/test/integration/http_integration.cc +++ b/test/integration/http_integration.cc @@ -257,6 +257,7 @@ IntegrationCodecClientPtr HttpIntegrationTest::makeRawHttpConnection( } else { cluster->http3_options_ = ConfigHelper::http2ToHttp3ProtocolOptions( http2_options.value(), quic::kStreamReceiveWindowLimit); + cluster->http3_options_.set_allow_extended_connect(true); #endif } cluster->http2_options_ = http2_options.value(); diff --git a/test/integration/http_integration.h b/test/integration/http_integration.h index a872292747a60..68b61321a82bf 100644 --- a/test/integration/http_integration.h +++ b/test/integration/http_integration.h @@ -89,13 +89,24 @@ using IntegrationCodecClientPtr = std::unique_ptr; */ class HttpIntegrationTest : public BaseIntegrationTest { public: + 
HttpIntegrationTest(Http::CodecType downstream_protocol, Network::Address::IpVersion version) + : HttpIntegrationTest( + downstream_protocol, version, + ConfigHelper::httpProxyConfig(/*downstream_use_quic=*/downstream_protocol == + Http::CodecType::HTTP3)) {} HttpIntegrationTest(Http::CodecType downstream_protocol, Network::Address::IpVersion version, - const std::string& config = ConfigHelper::httpProxyConfig()); + const std::string& config); HttpIntegrationTest(Http::CodecType downstream_protocol, const InstanceConstSharedPtrFn& upstream_address_fn, - Network::Address::IpVersion version, - const std::string& config = ConfigHelper::httpProxyConfig()); + Network::Address::IpVersion version) + : HttpIntegrationTest( + downstream_protocol, upstream_address_fn, version, + ConfigHelper::httpProxyConfig(/*downstream_use_quic=*/downstream_protocol == + Http::CodecType::HTTP3)) {} + HttpIntegrationTest(Http::CodecType downstream_protocol, + const InstanceConstSharedPtrFn& upstream_address_fn, + Network::Address::IpVersion version, const std::string& config); ~HttpIntegrationTest() override; void initialize() override; diff --git a/test/integration/protocol_integration_test.cc b/test/integration/protocol_integration_test.cc index e6349bd3b8f88..133dc3f87f0fb 100644 --- a/test/integration/protocol_integration_test.cc +++ b/test/integration/protocol_integration_test.cc @@ -581,8 +581,6 @@ TEST_P(DownstreamProtocolIntegrationTest, DownstreamRequestWithFaultyFilter) { } TEST_P(DownstreamProtocolIntegrationTest, FaultyFilterWithConnect) { - // TODO(danzh) re-enable after adding http3 option "allow_connect". - EXCLUDE_DOWNSTREAM_HTTP3; if (upstreamProtocol() == Http::CodecType::HTTP3) { // For QUIC, even through the headers are not sent upstream, the stream will // be created. Use the autonomous upstream and allow incomplete streams. @@ -592,15 +590,10 @@ TEST_P(DownstreamProtocolIntegrationTest, FaultyFilterWithConnect) { // Faulty filter that removed host in a CONNECT request. 
config_helper_.addConfigModifier( [&](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager& - hcm) -> void { ConfigHelper::setConnectConfig(hcm, false, false); }); - config_helper_.addConfigModifier([&](envoy::config::bootstrap::v3::Bootstrap& bootstrap) -> void { - // Clone the whole listener. - auto static_resources = bootstrap.mutable_static_resources(); - auto* old_listener = static_resources->mutable_listeners(0); - auto* cloned_listener = static_resources->add_listeners(); - cloned_listener->CopyFrom(*old_listener); - old_listener->set_name("http_forward"); - }); + hcm) -> void { + ConfigHelper::setConnectConfig(hcm, false, false, + downstreamProtocol() == Http::CodecType::HTTP3); + }); useAccessLog("%RESPONSE_CODE_DETAILS%"); config_helper_.prependFilter("{ name: invalid-header-filter, typed_config: { \"@type\": " "type.googleapis.com/google.protobuf.Empty } }"); @@ -611,9 +604,7 @@ TEST_P(DownstreamProtocolIntegrationTest, FaultyFilterWithConnect) { auto headers = Http::TestRequestHeaderMapImpl{ {":method", "CONNECT"}, {":scheme", "http"}, {":authority", "www.host.com:80"}}; - auto response = (downstream_protocol_ == Http::CodecType::HTTP1) - ? std::move((codec_client_->startRequest(headers)).second) - : codec_client_->makeHeaderOnlyRequest(headers); + auto response = std::move((codec_client_->startRequest(headers)).second); ASSERT_TRUE(response->waitForEndStream()); EXPECT_TRUE(response->complete()); @@ -2666,8 +2657,6 @@ TEST_P(DownstreamProtocolIntegrationTest, ConnectIsBlocked) { // Make sure that with override_stream_error_on_invalid_http_message true, CONNECT // results in stream teardown not connection teardown. TEST_P(DownstreamProtocolIntegrationTest, ConnectStreamRejection) { - // TODO(danzh) add "allow_connect" to http3 options. 
- EXCLUDE_DOWNSTREAM_HTTP3; if (downstreamProtocol() == Http::CodecType::HTTP1) { return; } @@ -2684,8 +2673,8 @@ TEST_P(DownstreamProtocolIntegrationTest, ConnectStreamRejection) { initialize(); codec_client_ = makeHttpConnection(lookupPort("http")); - auto response = codec_client_->makeHeaderOnlyRequest(Http::TestRequestHeaderMapImpl{ - {":method", "CONNECT"}, {":path", "/"}, {":authority", "host"}}); + auto response = codec_client_->makeHeaderOnlyRequest( + Http::TestRequestHeaderMapImpl{{":method", "CONNECT"}, {":authority", "host"}}); ASSERT_TRUE(response->waitForReset()); EXPECT_FALSE(codec_client_->disconnected()); diff --git a/test/integration/tcp_tunneling_integration_test.cc b/test/integration/tcp_tunneling_integration_test.cc index d45f72d59910c..a8505323b5956 100644 --- a/test/integration/tcp_tunneling_integration_test.cc +++ b/test/integration/tcp_tunneling_integration_test.cc @@ -12,19 +12,16 @@ namespace Envoy { namespace { // Terminating CONNECT and sending raw TCP upstream. 
-class ConnectTerminationIntegrationTest - : public testing::TestWithParam, - public HttpIntegrationTest { +class ConnectTerminationIntegrationTest : public HttpProtocolIntegrationTest { public: - ConnectTerminationIntegrationTest() : HttpIntegrationTest(Http::CodecType::HTTP2, GetParam()) { - enableHalfClose(true); - } + ConnectTerminationIntegrationTest() { enableHalfClose(true); } void initialize() override { config_helper_.addConfigModifier( [&](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager& hcm) { - ConfigHelper::setConnectConfig(hcm, true, allow_post_); + ConfigHelper::setConnectConfig(hcm, true, allow_post_, + downstream_protocol_ == Http::CodecType::HTTP3); if (enable_timeout_) { hcm.mutable_stream_idle_timeout()->set_seconds(0); @@ -69,6 +66,30 @@ class ConnectTerminationIntegrationTest {":protocol", "bytestream"}, {":scheme", "https"}, {":authority", "host:80"}}; + void clearExtendedConnectHeaders() { + connect_headers_.removeProtocol(); + connect_headers_.removePath(); + } + + void sendBidirectionalDataAndCleanShutdown() { + sendBidirectionalData("hello", "hello", "there!", "there!"); + // Send a second set of data to make sure for example headers are only sent once. + sendBidirectionalData(",bye", "hello,bye", "ack", "there!ack"); + + // Send an end stream. This should result in half close upstream. + codec_client_->sendData(*request_encoder_, "", true); + ASSERT_TRUE(fake_raw_upstream_connection_->waitForHalfClose()); + + // Now send a FIN from upstream. This should result in clean shutdown downstream. 
+ ASSERT_TRUE(fake_raw_upstream_connection_->close()); + if (downstream_protocol_ == Http::CodecType::HTTP1) { + ASSERT_TRUE(codec_client_->waitForDisconnect()); + } else { + ASSERT_TRUE(response_->waitForEndStream()); + ASSERT_FALSE(response_->reset()); + } + } + FakeRawConnectionPtr fake_raw_upstream_connection_; IntegrationStreamDecoderPtr response_; bool enable_timeout_{}; @@ -76,22 +97,19 @@ class ConnectTerminationIntegrationTest bool allow_post_{}; }; -TEST_P(ConnectTerminationIntegrationTest, Basic) { +TEST_P(ConnectTerminationIntegrationTest, OriginalStyle) { initialize(); + clearExtendedConnectHeaders(); setUpConnection(); - sendBidirectionalData("hello", "hello", "there!", "there!"); - // Send a second set of data to make sure for example headers are only sent once. - sendBidirectionalData(",bye", "hello,bye", "ack", "there!ack"); + sendBidirectionalDataAndCleanShutdown(); +} - // Send an end stream. This should result in half close upstream. - codec_client_->sendData(*request_encoder_, "", true); - ASSERT_TRUE(fake_raw_upstream_connection_->waitForHalfClose()); +TEST_P(ConnectTerminationIntegrationTest, Basic) { + initialize(); - // Now send a FIN from upstream. This should result in clean shutdown downstream. - ASSERT_TRUE(fake_raw_upstream_connection_->close()); - ASSERT_TRUE(response_->waitForEndStream()); - ASSERT_FALSE(response_->reset()); + setUpConnection(); + sendBidirectionalDataAndCleanShutdown(); } TEST_P(ConnectTerminationIntegrationTest, BasicAllowPost) { @@ -103,18 +121,7 @@ TEST_P(ConnectTerminationIntegrationTest, BasicAllowPost) { connect_headers_.removeProtocol(); setUpConnection(); - sendBidirectionalData("hello", "hello", "there!", "there!"); - // Send a second set of data to make sure for example headers are only sent once. - sendBidirectionalData(",bye", "hello,bye", "ack", "there!ack"); - - // Send an end stream. This should result in half close upstream. 
- codec_client_->sendData(*request_encoder_, "", true); - ASSERT_TRUE(fake_raw_upstream_connection_->waitForHalfClose()); - - // Now send a FIN from upstream. This should result in clean shutdown downstream. - ASSERT_TRUE(fake_raw_upstream_connection_->close()); - ASSERT_TRUE(response_->waitForEndStream()); - ASSERT_FALSE(response_->reset()); + sendBidirectionalDataAndCleanShutdown(); } TEST_P(ConnectTerminationIntegrationTest, UsingHostMatch) { @@ -122,20 +129,10 @@ TEST_P(ConnectTerminationIntegrationTest, UsingHostMatch) { initialize(); connect_headers_.removePath(); + connect_headers_.removeProtocol(); setUpConnection(); - sendBidirectionalData("hello", "hello", "there!", "there!"); - // Send a second set of data to make sure for example headers are only sent once. - sendBidirectionalData(",bye", "hello,bye", "ack", "there!ack"); - - // Send an end stream. This should result in half close upstream. - codec_client_->sendData(*request_encoder_, "", true); - ASSERT_TRUE(fake_raw_upstream_connection_->waitForHalfClose()); - - // Now send a FIN from upstream. This should result in clean shutdown downstream. - ASSERT_TRUE(fake_raw_upstream_connection_->close()); - ASSERT_TRUE(response_->waitForEndStream()); - ASSERT_FALSE(response_->reset()); + sendBidirectionalDataAndCleanShutdown(); } TEST_P(ConnectTerminationIntegrationTest, DownstreamClose) { @@ -150,6 +147,10 @@ TEST_P(ConnectTerminationIntegrationTest, DownstreamClose) { } TEST_P(ConnectTerminationIntegrationTest, DownstreamReset) { + if (downstream_protocol_ == Http::CodecType::HTTP1) { + // Resetting an individual stream requires HTTP/2 or later. + return; + } initialize(); setUpConnection(); @@ -168,7 +169,13 @@ TEST_P(ConnectTerminationIntegrationTest, UpstreamClose) { // Tear down by closing the upstream connection. 
ASSERT_TRUE(fake_raw_upstream_connection_->close()); - ASSERT_TRUE(response_->waitForReset()); + if (downstream_protocol_ == Http::CodecType::HTTP3) { + // In HTTP/3 end stream will be sent when the upstream connection is closed, and + // STOP_SENDING frame sent instead of reset. + ASSERT_TRUE(response_->waitForEndStream()); + } else { + ASSERT_TRUE(response_->waitForReset()); + } } TEST_P(ConnectTerminationIntegrationTest, TestTimeout) { @@ -183,6 +190,9 @@ TEST_P(ConnectTerminationIntegrationTest, TestTimeout) { } TEST_P(ConnectTerminationIntegrationTest, BuggyHeaders) { + if (downstream_protocol_ == Http::CodecType::HTTP1) { + return; + } initialize(); // Sending a header-only request is probably buggy, but rather than having a @@ -239,7 +249,10 @@ class ProxyingConnectIntegrationTest : public HttpProtocolIntegrationTest { void initialize() override { config_helper_.addConfigModifier( [&](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager& - hcm) -> void { ConfigHelper::setConnectConfig(hcm, false, false); }); + hcm) -> void { + ConfigHelper::setConnectConfig(hcm, false, false, + downstream_protocol_ == Http::CodecType::HTTP3); + }); HttpProtocolIntegrationTest::initialize(); } @@ -253,7 +266,10 @@ class ProxyingConnectIntegrationTest : public HttpProtocolIntegrationTest { }; INSTANTIATE_TEST_SUITE_P(Protocols, ProxyingConnectIntegrationTest, - testing::ValuesIn(HttpProtocolIntegrationTest::getProtocolTestParams()), + testing::ValuesIn(HttpProtocolIntegrationTest::getProtocolTestParams( + {Http::CodecType::HTTP1, Http::CodecType::HTTP2, + Http::CodecType::HTTP3}, + {Http::CodecType::HTTP1})), HttpProtocolIntegrationTest::protocolTestParamsToString); TEST_P(ProxyingConnectIntegrationTest, ProxyConnect) { @@ -437,29 +453,20 @@ TEST_P(ProxyingConnectIntegrationTest, ProxyConnectWithIP) { cleanupUpstreamAndDownstream(); } -INSTANTIATE_TEST_SUITE_P(IpVersions, ConnectTerminationIntegrationTest, - 
testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), - TestUtility::ipTestParamsToString); +INSTANTIATE_TEST_SUITE_P(HttpAndIpVersions, ConnectTerminationIntegrationTest, + testing::ValuesIn(HttpProtocolIntegrationTest::getProtocolTestParams( + {Http::CodecType::HTTP1, Http::CodecType::HTTP2, + Http::CodecType::HTTP3}, + {Http::CodecType::HTTP1})), + HttpProtocolIntegrationTest::protocolTestParamsToString); using Params = std::tuple; // Tunneling downstream TCP over an upstream HTTP CONNECT tunnel. -class TcpTunnelingIntegrationTest : public testing::TestWithParam, - public HttpIntegrationTest { +class TcpTunnelingIntegrationTest : public HttpProtocolIntegrationTest { public: - TcpTunnelingIntegrationTest() - : HttpIntegrationTest(Http::CodecType::HTTP2, std::get<0>(GetParam())) {} - - static std::string paramsToString(const testing::TestParamInfo& p) { - return fmt::format( - "{}_{}", std::get<0>(p.param) == Network::Address::IpVersion::v4 ? "IPv4" : "IPv6", - std::get<1>(p.param) == Http::CodecType::HTTP1 ? 
"HTTP1Upstream" : "HTTP2Upstream"); - } - void SetUp() override { enableHalfClose(true); - setDownstreamProtocol(Http::CodecType::HTTP2); - setUpstreamProtocol(std::get<1>(GetParam())); config_helper_.addConfigModifier( [&](envoy::config::bootstrap::v3::Bootstrap& bootstrap) -> void { @@ -471,8 +478,7 @@ class TcpTunnelingIntegrationTest : public testing::TestWithParam, auto* listener = bootstrap.mutable_static_resources()->add_listeners(); listener->set_name("tcp_proxy"); auto* socket_address = listener->mutable_address()->mutable_socket_address(); - socket_address->set_address( - Network::Test::getLoopbackAddressString(std::get<0>(GetParam()))); + socket_address->set_address(Network::Test::getLoopbackAddressString(version_)); socket_address->set_port_value(0); auto* filter_chain = listener->add_filter_chains(); @@ -480,6 +486,7 @@ class TcpTunnelingIntegrationTest : public testing::TestWithParam, filter->mutable_typed_config()->PackFrom(proxy_config); filter->set_name("envoy.filters.network.tcp_proxy"); }); + HttpProtocolIntegrationTest::SetUp(); } }; @@ -735,6 +742,10 @@ TEST_P(TcpTunnelingIntegrationTest, TcpProxyDownstreamFlush) { // Test that an upstream flush works correctly (all data is flushed) TEST_P(TcpTunnelingIntegrationTest, TcpProxyUpstreamFlush) { + if (upstreamProtocol() == Http::CodecType::HTTP3) { + // TODO(alyssawilk) debug. + return; + } // Use a very large size to make sure it is larger than the kernel socket read buffer. const uint32_t size = 50 * 1024 * 1024; config_helper_.setBufferLimits(size, size); @@ -772,8 +783,8 @@ TEST_P(TcpTunnelingIntegrationTest, TcpProxyUpstreamFlush) { } } -// Test that h2 connection is reused. -TEST_P(TcpTunnelingIntegrationTest, H2ConnectionReuse) { +// Test that h2/h3 connection is reused. 
+TEST_P(TcpTunnelingIntegrationTest, ConnectionReuse) { if (upstreamProtocol() == Http::CodecType::HTTP1) { return; } @@ -820,7 +831,7 @@ TEST_P(TcpTunnelingIntegrationTest, H2ConnectionReuse) { // Test that with HTTP1 we have no connection reuse with downstream close. TEST_P(TcpTunnelingIntegrationTest, H1NoConnectionReuse) { - if (upstreamProtocol() == Http::CodecType::HTTP2) { + if (upstreamProtocol() != Http::CodecType::HTTP1) { return; } initialize(); @@ -905,7 +916,7 @@ TEST_P(TcpTunnelingIntegrationTest, H1UpstreamCloseNoConnectionReuse) { } TEST_P(TcpTunnelingIntegrationTest, 2xxStatusCodeValidHttp1) { - if (upstreamProtocol() == Http::CodecType::HTTP2) { + if (upstreamProtocol() != Http::CodecType::HTTP1) { return; } initialize(); @@ -935,7 +946,7 @@ TEST_P(TcpTunnelingIntegrationTest, 2xxStatusCodeValidHttp1) { } TEST_P(TcpTunnelingIntegrationTest, ContentLengthHeaderIgnoredHttp1) { - if (upstreamProtocol() == Http::CodecType::HTTP2) { + if (upstreamProtocol() != Http::CodecType::HTTP1) { return; } initialize(); @@ -964,7 +975,7 @@ TEST_P(TcpTunnelingIntegrationTest, ContentLengthHeaderIgnoredHttp1) { } TEST_P(TcpTunnelingIntegrationTest, TransferEncodingHeaderIgnoredHttp1) { - if (upstreamProtocol() == Http::CodecType::HTTP2) { + if (upstreamProtocol() != Http::CodecType::HTTP1) { return; } initialize(); @@ -1066,11 +1077,11 @@ TEST_P(TcpTunnelingIntegrationTest, UpstreamDisconnectBeforeResponseReceived) { tcp_client->close(); } -INSTANTIATE_TEST_SUITE_P( - IpAndHttpVersions, TcpTunnelingIntegrationTest, - ::testing::Combine(testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), - testing::Values(Http::CodecType::HTTP1, Http::CodecType::HTTP2)), - TcpTunnelingIntegrationTest::paramsToString); - +INSTANTIATE_TEST_SUITE_P(IpAndHttpVersions, TcpTunnelingIntegrationTest, + testing::ValuesIn(HttpProtocolIntegrationTest::getProtocolTestParams( + {Http::CodecType::HTTP1}, + {Http::CodecType::HTTP1, Http::CodecType::HTTP2, + Http::CodecType::HTTP3})), + 
HttpProtocolIntegrationTest::protocolTestParamsToString); } // namespace } // namespace Envoy From 6b888286c6797fc8e259b124416b0658a644e212 Mon Sep 17 00:00:00 2001 From: Takeshi Yoneda Date: Wed, 15 Sep 2021 00:12:33 +0900 Subject: [PATCH 038/121] stats: use re2 for prometheus metrics sanitization. (#18110) Signed-off-by: Takeshi Yoneda Signed-off-by: gayang --- source/server/admin/prometheus_stats.cc | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/source/server/admin/prometheus_stats.cc b/source/server/admin/prometheus_stats.cc index 8d343189fb250..a3d0c7657335a 100644 --- a/source/server/admin/prometheus_stats.cc +++ b/source/server/admin/prometheus_stats.cc @@ -2,6 +2,7 @@ #include "source/common/common/empty_string.h" #include "source/common/common/macros.h" +#include "source/common/common/regex.h" #include "source/common/stats/histogram_impl.h" #include "absl/strings/str_cat.h" @@ -11,18 +12,18 @@ namespace Server { namespace { -// TODO(mathetake) replace with re2 for speed and safety, -// and change the signature of sanitizeName so it accepts string_view. -const std::regex& promRegex() { CONSTRUCT_ON_FIRST_USE(std::regex, "[^a-zA-Z0-9_]"); } +const Regex::CompiledGoogleReMatcher& promRegex() { + CONSTRUCT_ON_FIRST_USE(Regex::CompiledGoogleReMatcher, "[^a-zA-Z0-9_]", false); +} /** * Take a string and sanitize it according to Prometheus conventions. */ -std::string sanitizeName(const std::string& name) { +std::string sanitizeName(const absl::string_view name) { // The name must match the regex [a-zA-Z_][a-zA-Z0-9_]* as required by // prometheus. Refer to https://prometheus.io/docs/concepts/data_model/. // The initial [a-zA-Z_] constraint is always satisfied by the namespace prefix. 
- return std::regex_replace(name, promRegex(), "_"); + return promRegex().replaceAll(name, "_"); } /* @@ -199,7 +200,7 @@ PrometheusStatsFormatter::metricName(const std::string& extracted_name, custom_namespaces.stripRegisteredPrefix(extracted_name); if (custom_namespace_stripped.has_value()) { // This case the name has a custom namespace, and it is a custom metric. - const std::string sanitized_name = sanitizeName(std::string(custom_namespace_stripped.value())); + const std::string sanitized_name = sanitizeName(custom_namespace_stripped.value()); // We expose these metrics without modifying (e.g. without "envoy_"), // so we have to check the "user-defined" stat name complies with the Prometheus naming // convention. Specifically the name must start with the "[a-zA-Z_]" pattern. From 3353b72b95615df7325db7d9406d1a6bbc67f0b5 Mon Sep 17 00:00:00 2001 From: alyssawilk Date: Tue, 14 Sep 2021 11:14:25 -0400 Subject: [PATCH 039/121] factory: cleaning up factory APIs to allow code sharing (#18096) Clean up inspired by #17745 Risk Level: low (interface refactor) Testing: n/a Docs Changes: n/a Release Notes: n/a Signed-off-by: Alyssa Wilk Signed-off-by: gayang --- envoy/server/factory_context.h | 59 +++---- envoy/upstream/cluster_factory.h | 55 +------ .../common/upstream/cluster_factory_impl.cc | 2 +- source/common/upstream/cluster_factory_impl.h | 4 +- .../clusters/dynamic_forward_proxy/cluster.cc | 3 +- .../dynamic_forward_proxy/dns_cache_impl.cc | 19 ++- .../dynamic_forward_proxy/dns_cache_impl.h | 6 +- .../dns_cache_manager_impl.cc | 12 +- .../dns_cache_manager_impl.h | 35 +--- .../http/dynamic_forward_proxy/config.cc | 3 +- .../sni_dynamic_forward_proxy/config.cc | 3 +- .../sni_dynamic_forward_proxy/proxy_filter.cc | 9 +- .../dns_cache_impl_test.cc | 155 ++++++++---------- 13 files changed, 132 insertions(+), 233 deletions(-) diff --git a/envoy/server/factory_context.h b/envoy/server/factory_context.h index 03cbfe9c23ff4..3624f32fe95b6 100644 --- 
a/envoy/server/factory_context.h +++ b/envoy/server/factory_context.h @@ -38,17 +38,15 @@ namespace Envoy { namespace Server { namespace Configuration { -/** - * Common interface for downstream and upstream network filters. - */ -class CommonFactoryContext { +// Shared factory context between server factories and cluster factories +class FactoryContextBase { public: - virtual ~CommonFactoryContext() = default; + virtual ~FactoryContextBase() = default; /** - * @return Upstream::ClusterManager& singleton for use by the entire server. + * @return Server::Options& the command-line options that Envoy was started with. */ - virtual Upstream::ClusterManager& clusterManager() PURE; + virtual const Options& options() PURE; /** * @return Event::Dispatcher& the main thread's dispatcher. This dispatcher should be used @@ -57,9 +55,9 @@ class CommonFactoryContext { virtual Event::Dispatcher& dispatcher() PURE; /** - * @return Server::Options& the command-line options that Envoy was started with. + * @return Api::Api& a reference to the api object. */ - virtual const Options& options() PURE; + virtual Api::Api& api() PURE; /** * @return information about the local environment the server is running in. @@ -67,10 +65,9 @@ class CommonFactoryContext { virtual const LocalInfo::LocalInfo& localInfo() const PURE; /** - * @return ProtobufMessage::ValidationContext& validation visitor for xDS and static configuration - * messages. + * @return Server::Admin& the server's global admin HTTP endpoint. */ - virtual ProtobufMessage::ValidationContext& messageValidationContext() PURE; + virtual Server::Admin& admin() PURE; /** * @return Runtime::Loader& the singleton runtime loader for the server. @@ -78,47 +75,53 @@ class CommonFactoryContext { virtual Envoy::Runtime::Loader& runtime() PURE; /** - * @return Stats::Scope& the filter's stats scope. + * @return Singleton::Manager& the server-wide singleton manager. 
*/ - virtual Stats::Scope& scope() PURE; + virtual Singleton::Manager& singletonManager() PURE; /** - * @return Singleton::Manager& the server-wide singleton manager. + * @return ProtobufMessage::ValidationVisitor& validation visitor for configuration messages. */ - virtual Singleton::Manager& singletonManager() PURE; + virtual ProtobufMessage::ValidationVisitor& messageValidationVisitor() PURE; + + /** + * @return Stats::Scope& the context's stats scope. + */ + virtual Stats::Scope& scope() PURE; /** * @return ThreadLocal::SlotAllocator& the thread local storage engine for the server. This is * used to allow runtime lockless updates to configuration, etc. across multiple threads. */ virtual ThreadLocal::SlotAllocator& threadLocal() PURE; +}; +/** + * Common interface for downstream and upstream network filters. + */ +class CommonFactoryContext : public FactoryContextBase { +public: /** - * @return Server::Admin& the server's global admin HTTP endpoint. + * @return Upstream::ClusterManager& singleton for use by the entire server. */ - virtual Server::Admin& admin() PURE; + virtual Upstream::ClusterManager& clusterManager() PURE; /** - * @return TimeSource& a reference to the time source. + * @return ProtobufMessage::ValidationContext& validation visitor for xDS and static configuration + * messages. */ - virtual TimeSource& timeSource() PURE; + virtual ProtobufMessage::ValidationContext& messageValidationContext() PURE; /** - * @return Api::Api& a reference to the api object. + * @return TimeSource& a reference to the time source. */ - virtual Api::Api& api() PURE; + virtual TimeSource& timeSource() PURE; /** * @return AccessLogManager for use by the entire server. */ virtual AccessLog::AccessLogManager& accessLogManager() PURE; - /** - * @return ProtobufMessage::ValidationVisitor& validation visitor for filter configuration - * messages. 
- */ - virtual ProtobufMessage::ValidationVisitor& messageValidationVisitor() PURE; - /** * @return ServerLifecycleNotifier& the lifecycle notifier for the server. */ diff --git a/envoy/upstream/cluster_factory.h b/envoy/upstream/cluster_factory.h index 6196cef791576..9440e374c0edb 100644 --- a/envoy/upstream/cluster_factory.h +++ b/envoy/upstream/cluster_factory.h @@ -18,6 +18,7 @@ #include "envoy/network/dns.h" #include "envoy/runtime/runtime.h" #include "envoy/server/admin.h" +#include "envoy/server/factory_context.h" #include "envoy/server/options.h" #include "envoy/singleton/manager.h" #include "envoy/ssl/context.h" @@ -35,66 +36,28 @@ namespace Upstream { * Context passed to cluster factory to access envoy resources. Cluster factory should only access * the rest of the server through this context object. */ -class ClusterFactoryContext { +class ClusterFactoryContext : public Server::Configuration::FactoryContextBase { public: - virtual ~ClusterFactoryContext() = default; - /** * @return bool flag indicating whether the cluster is added via api. */ virtual bool addedViaApi() PURE; - /** - * @return Server::Admin& the server's admin interface. - */ - virtual Server::Admin& admin() PURE; - - /** - * @return Api::Api& a reference to the api object. - */ - virtual Api::Api& api() PURE; - /** * @return Upstream::ClusterManager& singleton for use by the entire server. */ - virtual ClusterManager& clusterManager() PURE; - - /** - * @return Event::Dispatcher& the main thread's dispatcher. This dispatcher should be used - * for all singleton processing. - */ - virtual Event::Dispatcher& dispatcher() PURE; + virtual Upstream::ClusterManager& clusterManager() PURE; /** * @return Network::DnsResolverSharedPtr the dns resolver for the server. */ virtual Network::DnsResolverSharedPtr dnsResolver() PURE; - /** - * @return information about the local environment the server is running in. 
- */ - virtual const LocalInfo::LocalInfo& localInfo() PURE; - - /** - * @return Server::Options& the command-line options that Envoy was started with. - */ - virtual const Server::Options& options() PURE; - /** * @return AccessLogManager for use by the entire server. */ virtual AccessLog::AccessLogManager& logManager() PURE; - /** - * @return Runtime::Loader& the singleton runtime loader for the server. - */ - virtual Runtime::Loader& runtime() PURE; - - /** - * @return Singleton::Manager& the server-wide singleton manager. - */ - virtual Singleton::Manager& singletonManager() PURE; - /** * @return Ssl::ContextManager& the SSL context manager. */ @@ -105,21 +68,13 @@ class ClusterFactoryContext { */ virtual Stats::Store& stats() PURE; - /** - * @return the server's TLS slot allocator. - */ - virtual ThreadLocal::SlotAllocator& tls() PURE; - /** * @return Outlier::EventLoggerSharedPtr sink for outlier detection event logs. */ virtual Outlier::EventLoggerSharedPtr outlierEventLogger() PURE; - /** - * @return ProtobufMessage::ValidationVisitor& validation visitor for filter configuration - * messages. 
- */ - virtual ProtobufMessage::ValidationVisitor& messageValidationVisitor() PURE; + // Server::Configuration::FactoryContextBase + Stats::Scope& scope() override { return stats(); } }; /** diff --git a/source/common/upstream/cluster_factory_impl.cc b/source/common/upstream/cluster_factory_impl.cc index 2e90917e28b56..7425d9e546fb6 100644 --- a/source/common/upstream/cluster_factory_impl.cc +++ b/source/common/upstream/cluster_factory_impl.cc @@ -128,7 +128,7 @@ ClusterFactoryImplBase::create(const envoy::config::cluster::v3::Cluster& cluste std::make_unique( context.admin(), context.sslContextManager(), *stats_scope, context.clusterManager(), context.localInfo(), context.dispatcher(), context.stats(), - context.singletonManager(), context.tls(), context.messageValidationVisitor(), + context.singletonManager(), context.threadLocal(), context.messageValidationVisitor(), context.api(), context.options()); std::pair new_cluster_pair = diff --git a/source/common/upstream/cluster_factory_impl.h b/source/common/upstream/cluster_factory_impl.h index a1a2020cc834e..1d7f3a6bc94cb 100644 --- a/source/common/upstream/cluster_factory_impl.h +++ b/source/common/upstream/cluster_factory_impl.h @@ -70,13 +70,13 @@ class ClusterFactoryContextImpl : public ClusterFactoryContext { ClusterManager& clusterManager() override { return cluster_manager_; } Stats::Store& stats() override { return stats_; } - ThreadLocal::SlotAllocator& tls() override { return tls_; } + ThreadLocal::SlotAllocator& threadLocal() override { return tls_; } Network::DnsResolverSharedPtr dnsResolver() override { return dns_resolver_; } Ssl::ContextManager& sslContextManager() override { return ssl_context_manager_; } Runtime::Loader& runtime() override { return runtime_; } Event::Dispatcher& dispatcher() override { return dispatcher_; } AccessLog::AccessLogManager& logManager() override { return log_manager_; } - const LocalInfo::LocalInfo& localInfo() override { return local_info_; } + const 
LocalInfo::LocalInfo& localInfo() const override { return local_info_; } const Server::Options& options() override { return options_; } Server::Admin& admin() override { return admin_; } Singleton::Manager& singletonManager() override { return singleton_manager_; } diff --git a/source/extensions/clusters/dynamic_forward_proxy/cluster.cc b/source/extensions/clusters/dynamic_forward_proxy/cluster.cc index cc72fc392466a..2bef1a7f333b8 100644 --- a/source/extensions/clusters/dynamic_forward_proxy/cluster.cc +++ b/source/extensions/clusters/dynamic_forward_proxy/cluster.cc @@ -177,8 +177,7 @@ ClusterFactory::createClusterWithConfig( Server::Configuration::TransportSocketFactoryContextImpl& socket_factory_context, Stats::ScopePtr&& stats_scope) { Extensions::Common::DynamicForwardProxy::DnsCacheManagerFactoryImpl cache_manager_factory( - context.singletonManager(), context.dispatcher(), context.tls(), context.api(), - context.runtime(), context.stats(), context.messageValidationVisitor()); + context); envoy::config::cluster::v3::Cluster cluster_config = cluster; if (!cluster_config.has_upstream_http_protocol_options()) { // This sets defaults which will only apply if using old style http config. 
diff --git a/source/extensions/common/dynamic_forward_proxy/dns_cache_impl.cc b/source/extensions/common/dynamic_forward_proxy/dns_cache_impl.cc index 64b091090c36b..edadd96bafd95 100644 --- a/source/extensions/common/dynamic_forward_proxy/dns_cache_impl.cc +++ b/source/extensions/common/dynamic_forward_proxy/dns_cache_impl.cc @@ -17,23 +17,24 @@ namespace Common { namespace DynamicForwardProxy { DnsCacheImpl::DnsCacheImpl( - Event::Dispatcher& main_thread_dispatcher, ThreadLocal::SlotAllocator& tls, - Random::RandomGenerator& random, Filesystem::Instance& file_system, Runtime::Loader& loader, - Stats::Scope& root_scope, ProtobufMessage::ValidationVisitor& validation_visitor, + Server::Configuration::FactoryContextBase& context, const envoy::extensions::common::dynamic_forward_proxy::v3::DnsCacheConfig& config) - : main_thread_dispatcher_(main_thread_dispatcher), + : main_thread_dispatcher_(context.dispatcher()), dns_lookup_family_(Upstream::getDnsLookupFamilyFromEnum(config.dns_lookup_family())), - resolver_(selectDnsResolver(config, main_thread_dispatcher)), tls_slot_(tls), - scope_(root_scope.createScope(fmt::format("dns_cache.{}.", config.name()))), + resolver_(selectDnsResolver(config, main_thread_dispatcher_)), + tls_slot_(context.threadLocal()), + scope_(context.scope().createScope(fmt::format("dns_cache.{}.", config.name()))), stats_(generateDnsCacheStats(*scope_)), - resource_manager_(*scope_, loader, config.name(), config.dns_cache_circuit_breaker()), + resource_manager_(*scope_, context.runtime(), config.name(), + config.dns_cache_circuit_breaker()), refresh_interval_(PROTOBUF_GET_MS_OR_DEFAULT(config, dns_refresh_rate, 60000)), timeout_interval_(PROTOBUF_GET_MS_OR_DEFAULT(config, dns_query_timeout, 5000)), failure_backoff_strategy_( Config::Utility::prepareDnsRefreshStrategy< envoy::extensions::common::dynamic_forward_proxy::v3::DnsCacheConfig>( - config, refresh_interval_.count(), random)), - file_system_(file_system), 
validation_visitor_(validation_visitor), + config, refresh_interval_.count(), context.api().randomGenerator())), + file_system_(context.api().fileSystem()), + validation_visitor_(context.messageValidationVisitor()), host_ttl_(PROTOBUF_GET_MS_OR_DEFAULT(config, host_ttl, 300000)), max_hosts_(PROTOBUF_GET_WRAPPED_OR_DEFAULT(config, max_hosts, 1024)) { tls_slot_.set([&](Event::Dispatcher&) { return std::make_shared(*this); }); diff --git a/source/extensions/common/dynamic_forward_proxy/dns_cache_impl.h b/source/extensions/common/dynamic_forward_proxy/dns_cache_impl.h index 4180313e09262..c2526da13222c 100644 --- a/source/extensions/common/dynamic_forward_proxy/dns_cache_impl.h +++ b/source/extensions/common/dynamic_forward_proxy/dns_cache_impl.h @@ -5,6 +5,7 @@ #include "envoy/extensions/common/dynamic_forward_proxy/v3/dns_cache.pb.h" #include "envoy/http/filter.h" #include "envoy/network/dns.h" +#include "envoy/server/factory_context.h" #include "envoy/thread_local/thread_local.h" #include "source/common/common/cleanup.h" @@ -45,10 +46,7 @@ class DnsCacheImplTest; class DnsCacheImpl : public DnsCache, Logger::Loggable { public: - DnsCacheImpl(Event::Dispatcher& main_thread_dispatcher, ThreadLocal::SlotAllocator& tls, - Random::RandomGenerator& random, Filesystem::Instance& file_system, - Runtime::Loader& loader, Stats::Scope& root_scope, - ProtobufMessage::ValidationVisitor& validation_visitor, + DnsCacheImpl(Server::Configuration::FactoryContextBase& context, const envoy::extensions::common::dynamic_forward_proxy::v3::DnsCacheConfig& config); ~DnsCacheImpl() override; static DnsCacheStats generateDnsCacheStats(Stats::Scope& scope); diff --git a/source/extensions/common/dynamic_forward_proxy/dns_cache_manager_impl.cc b/source/extensions/common/dynamic_forward_proxy/dns_cache_manager_impl.cc index 7cb28f80e68de..7dee0887fb44c 100644 --- a/source/extensions/common/dynamic_forward_proxy/dns_cache_manager_impl.cc +++ 
b/source/extensions/common/dynamic_forward_proxy/dns_cache_manager_impl.cc @@ -26,19 +26,15 @@ DnsCacheSharedPtr DnsCacheManagerImpl::getCache( return existing_cache->second.cache_; } - DnsCacheSharedPtr new_cache = - std::make_shared(main_thread_dispatcher_, tls_, random_, file_system_, loader_, - root_scope_, validation_visitor_, config); + DnsCacheSharedPtr new_cache = std::make_shared(context_, config); caches_.emplace(config.name(), ActiveCache{config, new_cache}); return new_cache; } DnsCacheManagerSharedPtr DnsCacheManagerFactoryImpl::get() { - return singleton_manager_.getTyped( - SINGLETON_MANAGER_REGISTERED_NAME(dns_cache_manager), [this] { - return std::make_shared(dispatcher_, tls_, random_, file_system_, - loader_, root_scope_, validation_visitor_); - }); + return context_.singletonManager().getTyped( + SINGLETON_MANAGER_REGISTERED_NAME(dns_cache_manager), + [this] { return std::make_shared(context_); }); } } // namespace DynamicForwardProxy diff --git a/source/extensions/common/dynamic_forward_proxy/dns_cache_manager_impl.h b/source/extensions/common/dynamic_forward_proxy/dns_cache_manager_impl.h index 4b27404366a8c..657279c2323ce 100644 --- a/source/extensions/common/dynamic_forward_proxy/dns_cache_manager_impl.h +++ b/source/extensions/common/dynamic_forward_proxy/dns_cache_manager_impl.h @@ -1,6 +1,7 @@ #pragma once #include "envoy/extensions/common/dynamic_forward_proxy/v3/dns_cache.pb.h" +#include "envoy/server/factory_context.h" #include "source/extensions/common/dynamic_forward_proxy/dns_cache.h" @@ -13,13 +14,7 @@ namespace DynamicForwardProxy { class DnsCacheManagerImpl : public DnsCacheManager, public Singleton::Instance { public: - DnsCacheManagerImpl(Event::Dispatcher& main_thread_dispatcher, ThreadLocal::SlotAllocator& tls, - Random::RandomGenerator& random, Filesystem::Instance& file_system, - Runtime::Loader& loader, Stats::Scope& root_scope, - ProtobufMessage::ValidationVisitor& validation_visitor) - : 
main_thread_dispatcher_(main_thread_dispatcher), tls_(tls), random_(random), - file_system_(file_system), loader_(loader), root_scope_(root_scope), - validation_visitor_(validation_visitor) {} + DnsCacheManagerImpl(Server::Configuration::FactoryContextBase& context) : context_(context) {} // DnsCacheManager DnsCacheSharedPtr getCache( @@ -35,38 +30,20 @@ class DnsCacheManagerImpl : public DnsCacheManager, public Singleton::Instance { DnsCacheSharedPtr cache_; }; - Event::Dispatcher& main_thread_dispatcher_; - ThreadLocal::SlotAllocator& tls_; - Random::RandomGenerator& random_; - Filesystem::Instance& file_system_; - Runtime::Loader& loader_; - Stats::Scope& root_scope_; - ProtobufMessage::ValidationVisitor& validation_visitor_; + Server::Configuration::FactoryContextBase& context_; absl::flat_hash_map caches_; }; class DnsCacheManagerFactoryImpl : public DnsCacheManagerFactory { public: - DnsCacheManagerFactoryImpl(Singleton::Manager& singleton_manager, Event::Dispatcher& dispatcher, - ThreadLocal::SlotAllocator& tls, Api::Api& api, - Runtime::Loader& loader, Stats::Scope& root_scope, - ProtobufMessage::ValidationVisitor& validation_visitor) - : singleton_manager_(singleton_manager), dispatcher_(dispatcher), tls_(tls), - random_(api.randomGenerator()), file_system_(api.fileSystem()), loader_(loader), - root_scope_(root_scope), validation_visitor_(validation_visitor) {} + DnsCacheManagerFactoryImpl(Server::Configuration::FactoryContextBase& context) + : context_(context) {} DnsCacheManagerSharedPtr get() override; private: - Singleton::Manager& singleton_manager_; - Event::Dispatcher& dispatcher_; - ThreadLocal::SlotAllocator& tls_; - Random::RandomGenerator& random_; - Filesystem::Instance& file_system_; - Runtime::Loader& loader_; - Stats::Scope& root_scope_; - ProtobufMessage::ValidationVisitor& validation_visitor_; + Server::Configuration::FactoryContextBase& context_; }; } // namespace DynamicForwardProxy diff --git 
a/source/extensions/filters/http/dynamic_forward_proxy/config.cc b/source/extensions/filters/http/dynamic_forward_proxy/config.cc index 3f58fa19ca06a..75b69d19876d5 100644 --- a/source/extensions/filters/http/dynamic_forward_proxy/config.cc +++ b/source/extensions/filters/http/dynamic_forward_proxy/config.cc @@ -15,8 +15,7 @@ Http::FilterFactoryCb DynamicForwardProxyFilterFactory::createFilterFactoryFromP const envoy::extensions::filters::http::dynamic_forward_proxy::v3::FilterConfig& proto_config, const std::string&, Server::Configuration::FactoryContext& context) { Extensions::Common::DynamicForwardProxy::DnsCacheManagerFactoryImpl cache_manager_factory( - context.singletonManager(), context.dispatcher(), context.threadLocal(), context.api(), - context.runtime(), context.scope(), context.messageValidationVisitor()); + context); ProxyFilterConfigSharedPtr filter_config(std::make_shared( proto_config, cache_manager_factory, context.clusterManager())); return [filter_config](Http::FilterChainFactoryCallbacks& callbacks) -> void { diff --git a/source/extensions/filters/network/sni_dynamic_forward_proxy/config.cc b/source/extensions/filters/network/sni_dynamic_forward_proxy/config.cc index dedff7689748e..14c2396cdf459 100644 --- a/source/extensions/filters/network/sni_dynamic_forward_proxy/config.cc +++ b/source/extensions/filters/network/sni_dynamic_forward_proxy/config.cc @@ -19,8 +19,7 @@ SniDynamicForwardProxyNetworkFilterConfigFactory::createFilterFactoryFromProtoTy const FilterConfig& proto_config, Server::Configuration::FactoryContext& context) { Extensions::Common::DynamicForwardProxy::DnsCacheManagerFactoryImpl cache_manager_factory( - context.singletonManager(), context.dispatcher(), context.threadLocal(), context.api(), - context.runtime(), context.scope(), context.messageValidationVisitor()); + context); ProxyFilterConfigSharedPtr filter_config(std::make_shared( proto_config, cache_manager_factory, context.clusterManager())); diff --git 
a/source/extensions/filters/network/sni_dynamic_forward_proxy/proxy_filter.cc b/source/extensions/filters/network/sni_dynamic_forward_proxy/proxy_filter.cc index 3c8f6e0798906..b04749aabef10 100644 --- a/source/extensions/filters/network/sni_dynamic_forward_proxy/proxy_filter.cc +++ b/source/extensions/filters/network/sni_dynamic_forward_proxy/proxy_filter.cc @@ -51,24 +51,21 @@ Network::FilterStatus ProxyFilter::onNewConnection() { } switch (result.status_) { - case LoadDnsCacheEntryStatus::InCache: { + case LoadDnsCacheEntryStatus::InCache: ASSERT(cache_load_handle_ == nullptr); ENVOY_CONN_LOG(debug, "DNS cache entry already loaded, continuing", read_callbacks_->connection()); return Network::FilterStatus::Continue; - } - case LoadDnsCacheEntryStatus::Loading: { + case LoadDnsCacheEntryStatus::Loading: ASSERT(cache_load_handle_ != nullptr); ENVOY_CONN_LOG(debug, "waiting to load DNS cache entry", read_callbacks_->connection()); return Network::FilterStatus::StopIteration; - } - case LoadDnsCacheEntryStatus::Overflow: { + case LoadDnsCacheEntryStatus::Overflow: ASSERT(cache_load_handle_ == nullptr); ENVOY_CONN_LOG(debug, "DNS cache overflow", read_callbacks_->connection()); read_callbacks_->connection().close(Network::ConnectionCloseType::NoFlush); return Network::FilterStatus::StopIteration; } - } NOT_REACHED_GCOVR_EXCL_LINE; } diff --git a/test/extensions/common/dynamic_forward_proxy/dns_cache_impl_test.cc b/test/extensions/common/dynamic_forward_proxy/dns_cache_impl_test.cc index 022a9b5e7ec6e..c077eab954dc6 100644 --- a/test/extensions/common/dynamic_forward_proxy/dns_cache_impl_test.cc +++ b/test/extensions/common/dynamic_forward_proxy/dns_cache_impl_test.cc @@ -13,6 +13,7 @@ #include "test/mocks/network/mocks.h" #include "test/mocks/protobuf/mocks.h" #include "test/mocks/runtime/mocks.h" +#include "test/mocks/server/factory_context.h" #include "test/mocks/thread_local/mocks.h" #include "test/test_common/registry.h" #include 
"test/test_common/simulated_time_system.h" @@ -44,23 +45,22 @@ class DnsCacheImplTest : public testing::Test, public Event::TestUsingSimulatedT } } - EXPECT_CALL(dispatcher_, isThreadSafe).WillRepeatedly(Return(true)); + EXPECT_CALL(context_.dispatcher_, isThreadSafe).WillRepeatedly(Return(true)); - EXPECT_CALL(dispatcher_, createDnsResolver(_, _)).WillOnce(Return(resolver_)); - dns_cache_ = std::make_unique(dispatcher_, tls_, random_, filesystem_, loader_, - store_, validation_visitor_, config_); + EXPECT_CALL(context_.dispatcher_, createDnsResolver(_, _)).WillOnce(Return(resolver_)); + dns_cache_ = std::make_unique(context_, config_); update_callbacks_handle_ = dns_cache_->addUpdateCallbacks(update_callbacks_); } ~DnsCacheImplTest() override { dns_cache_.reset(); - EXPECT_EQ(0, TestUtility::findGauge(store_, "dns_cache.foo.num_hosts")->value()); + EXPECT_EQ(0, TestUtility::findGauge(context_.scope_, "dns_cache.foo.num_hosts")->value()); } void checkStats(uint64_t query_attempt, uint64_t query_success, uint64_t query_failure, uint64_t address_changed, uint64_t added, uint64_t removed, uint64_t num_hosts) { const auto counter_value = [this](const std::string& name) { - return TestUtility::findCounter(store_, "dns_cache.foo." + name)->value(); + return TestUtility::findCounter(context_.scope_, "dns_cache.foo." 
+ name)->value(); }; EXPECT_EQ(query_attempt, counter_value("dns_query_attempt")); @@ -69,21 +69,16 @@ class DnsCacheImplTest : public testing::Test, public Event::TestUsingSimulatedT EXPECT_EQ(address_changed, counter_value("host_address_changed")); EXPECT_EQ(added, counter_value("host_added")); EXPECT_EQ(removed, counter_value("host_removed")); - EXPECT_EQ(num_hosts, TestUtility::findGauge(store_, "dns_cache.foo.num_hosts")->value()); + EXPECT_EQ(num_hosts, + TestUtility::findGauge(context_.scope_, "dns_cache.foo.num_hosts")->value()); } + NiceMock context_; envoy::extensions::common::dynamic_forward_proxy::v3::DnsCacheConfig config_; - NiceMock dispatcher_; std::shared_ptr resolver_{std::make_shared()}; - NiceMock tls_; - NiceMock random_; - NiceMock filesystem_; - NiceMock loader_; - Stats::IsolatedStoreImpl store_; std::unique_ptr dns_cache_; MockUpdateCallbacks update_callbacks_; DnsCache::AddUpdateCallbacksHandlePtr update_callbacks_handle_; - Envoy::ProtobufMessage::MockValidationVisitor validation_visitor_; }; MATCHER_P3(DnsHostInfoEquals, address, resolved_host, is_ip_address, "") { @@ -148,8 +143,8 @@ TEST_F(DnsCacheImplTest, ResolveSuccess) { MockLoadDnsCacheEntryCallbacks callbacks; Network::DnsResolver::ResolveCb resolve_cb; - Event::MockTimer* resolve_timer = new Event::MockTimer(&dispatcher_); - Event::MockTimer* timeout_timer = new Event::MockTimer(&dispatcher_); + Event::MockTimer* resolve_timer = new Event::MockTimer(&context_.dispatcher_); + Event::MockTimer* timeout_timer = new Event::MockTimer(&context_.dispatcher_); EXPECT_CALL(*timeout_timer, enableTimer(std::chrono::milliseconds(5000), nullptr)); EXPECT_CALL(*resolver_, resolve("foo.com", _, _)) .WillOnce(DoAll(SaveArg<2>(&resolve_cb), Return(&resolver_->active_query_))); @@ -219,8 +214,8 @@ TEST_F(DnsCacheImplTest, Ipv4Address) { MockLoadDnsCacheEntryCallbacks callbacks; Network::DnsResolver::ResolveCb resolve_cb; - Event::MockTimer* resolve_timer = new Event::MockTimer(&dispatcher_); - 
Event::MockTimer* timeout_timer = new Event::MockTimer(&dispatcher_); + Event::MockTimer* resolve_timer = new Event::MockTimer(&context_.dispatcher_); + Event::MockTimer* timeout_timer = new Event::MockTimer(&context_.dispatcher_); EXPECT_CALL(*timeout_timer, enableTimer(std::chrono::milliseconds(5000), nullptr)); EXPECT_CALL(*resolver_, resolve("127.0.0.1", _, _)) .WillOnce(DoAll(SaveArg<2>(&resolve_cb), Return(&resolver_->active_query_))); @@ -247,8 +242,8 @@ TEST_F(DnsCacheImplTest, Ipv4AddressWithPort) { MockLoadDnsCacheEntryCallbacks callbacks; Network::DnsResolver::ResolveCb resolve_cb; - Event::MockTimer* resolve_timer = new Event::MockTimer(&dispatcher_); - Event::MockTimer* timeout_timer = new Event::MockTimer(&dispatcher_); + Event::MockTimer* resolve_timer = new Event::MockTimer(&context_.dispatcher_); + Event::MockTimer* timeout_timer = new Event::MockTimer(&context_.dispatcher_); EXPECT_CALL(*timeout_timer, enableTimer(std::chrono::milliseconds(5000), nullptr)); EXPECT_CALL(*resolver_, resolve("127.0.0.1", _, _)) .WillOnce(DoAll(SaveArg<2>(&resolve_cb), Return(&resolver_->active_query_))); @@ -275,8 +270,8 @@ TEST_F(DnsCacheImplTest, Ipv6Address) { MockLoadDnsCacheEntryCallbacks callbacks; Network::DnsResolver::ResolveCb resolve_cb; - Event::MockTimer* resolve_timer = new Event::MockTimer(&dispatcher_); - Event::MockTimer* timeout_timer = new Event::MockTimer(&dispatcher_); + Event::MockTimer* resolve_timer = new Event::MockTimer(&context_.dispatcher_); + Event::MockTimer* timeout_timer = new Event::MockTimer(&context_.dispatcher_); EXPECT_CALL(*timeout_timer, enableTimer(std::chrono::milliseconds(5000), nullptr)); EXPECT_CALL(*resolver_, resolve("::1", _, _)) .WillOnce(DoAll(SaveArg<2>(&resolve_cb), Return(&resolver_->active_query_))); @@ -301,8 +296,8 @@ TEST_F(DnsCacheImplTest, Ipv6AddressWithPort) { MockLoadDnsCacheEntryCallbacks callbacks; Network::DnsResolver::ResolveCb resolve_cb; - Event::MockTimer* resolve_timer = new 
Event::MockTimer(&dispatcher_); - Event::MockTimer* timeout_timer = new Event::MockTimer(&dispatcher_); + Event::MockTimer* resolve_timer = new Event::MockTimer(&context_.dispatcher_); + Event::MockTimer* timeout_timer = new Event::MockTimer(&context_.dispatcher_); EXPECT_CALL(*timeout_timer, enableTimer(std::chrono::milliseconds(5000), nullptr)); EXPECT_CALL(*resolver_, resolve("::1", _, _)) .WillOnce(DoAll(SaveArg<2>(&resolve_cb), Return(&resolver_->active_query_))); @@ -327,8 +322,8 @@ TEST_F(DnsCacheImplTest, TTL) { MockLoadDnsCacheEntryCallbacks callbacks; Network::DnsResolver::ResolveCb resolve_cb; - Event::MockTimer* resolve_timer = new Event::MockTimer(&dispatcher_); - Event::MockTimer* timeout_timer = new Event::MockTimer(&dispatcher_); + Event::MockTimer* resolve_timer = new Event::MockTimer(&context_.dispatcher_); + Event::MockTimer* timeout_timer = new Event::MockTimer(&context_.dispatcher_); EXPECT_CALL(*timeout_timer, enableTimer(std::chrono::milliseconds(5000), nullptr)); EXPECT_CALL(*resolver_, resolve("foo.com", _, _)) .WillOnce(DoAll(SaveArg<2>(&resolve_cb), Return(&resolver_->active_query_))); @@ -377,8 +372,8 @@ TEST_F(DnsCacheImplTest, TTL) { 1 /* added */, 1 /* removed */, 0 /* num hosts */); // Make sure we don't get a cache hit the next time the host is requested. 
- resolve_timer = new Event::MockTimer(&dispatcher_); - timeout_timer = new Event::MockTimer(&dispatcher_); + new Event::MockTimer(&context_.dispatcher_); // resolve_timer + timeout_timer = new Event::MockTimer(&context_.dispatcher_); EXPECT_CALL(*timeout_timer, enableTimer(std::chrono::milliseconds(5000), nullptr)); EXPECT_CALL(*resolver_, resolve("foo.com", _, _)) .WillOnce(DoAll(SaveArg<2>(&resolve_cb), Return(&resolver_->active_query_))); @@ -400,8 +395,8 @@ TEST_F(DnsCacheImplTest, TTLWithCustomParameters) { MockLoadDnsCacheEntryCallbacks callbacks; Network::DnsResolver::ResolveCb resolve_cb; - Event::MockTimer* resolve_timer = new Event::MockTimer(&dispatcher_); - Event::MockTimer* timeout_timer = new Event::MockTimer(&dispatcher_); + Event::MockTimer* resolve_timer = new Event::MockTimer(&context_.dispatcher_); + Event::MockTimer* timeout_timer = new Event::MockTimer(&context_.dispatcher_); EXPECT_CALL(*timeout_timer, enableTimer(std::chrono::milliseconds(1000), nullptr)); EXPECT_CALL(*resolver_, resolve("foo.com", _, _)) .WillOnce(DoAll(SaveArg<2>(&resolve_cb), Return(&resolver_->active_query_))); @@ -443,14 +438,14 @@ TEST_F(DnsCacheImplTest, InlineResolve) { MockLoadDnsCacheEntryCallbacks callbacks; Event::PostCb post_cb; - EXPECT_CALL(dispatcher_, post(_)).WillOnce(SaveArg<0>(&post_cb)); + EXPECT_CALL(context_.dispatcher_, post(_)).WillOnce(SaveArg<0>(&post_cb)); auto result = dns_cache_->loadDnsCacheEntry("localhost", 80, callbacks); EXPECT_EQ(DnsCache::LoadDnsCacheEntryStatus::Loading, result.status_); EXPECT_NE(result.handle_, nullptr); EXPECT_EQ(absl::nullopt, result.host_info_); - Event::MockTimer* resolve_timer = new Event::MockTimer(&dispatcher_); - Event::MockTimer* timeout_timer = new Event::MockTimer(&dispatcher_); + Event::MockTimer* resolve_timer = new Event::MockTimer(&context_.dispatcher_); + Event::MockTimer* timeout_timer = new Event::MockTimer(&context_.dispatcher_); EXPECT_CALL(*timeout_timer, 
enableTimer(std::chrono::milliseconds(5000), nullptr)); EXPECT_CALL(*resolver_, resolve("localhost", _, _)) .WillOnce(Invoke([](const std::string&, Network::DnsLookupFamily, @@ -476,8 +471,8 @@ TEST_F(DnsCacheImplTest, ResolveTimeout) { MockLoadDnsCacheEntryCallbacks callbacks; Network::DnsResolver::ResolveCb resolve_cb; - Event::MockTimer* resolve_timer = new Event::MockTimer(&dispatcher_); - Event::MockTimer* timeout_timer = new Event::MockTimer(&dispatcher_); + Event::MockTimer* resolve_timer = new Event::MockTimer(&context_.dispatcher_); + Event::MockTimer* timeout_timer = new Event::MockTimer(&context_.dispatcher_); EXPECT_CALL(*timeout_timer, enableTimer(std::chrono::milliseconds(5000), nullptr)); EXPECT_CALL(*resolver_, resolve("foo.com", _, _)) .WillOnce(DoAll(SaveArg<2>(&resolve_cb), Return(&resolver_->active_query_))); @@ -496,7 +491,8 @@ TEST_F(DnsCacheImplTest, ResolveTimeout) { timeout_timer->invokeCallback(); checkStats(1 /* attempt */, 0 /* success */, 1 /* failure */, 0 /* address changed */, 1 /* added */, 0 /* removed */, 1 /* num hosts */); - EXPECT_EQ(1, TestUtility::findCounter(store_, "dns_cache.foo.dns_query_timeout")->value()); + EXPECT_EQ(1, + TestUtility::findCounter(context_.scope_, "dns_cache.foo.dns_query_timeout")->value()); } // Resolve failure that returns no addresses. 
@@ -506,8 +502,8 @@ TEST_F(DnsCacheImplTest, ResolveFailure) { MockLoadDnsCacheEntryCallbacks callbacks; Network::DnsResolver::ResolveCb resolve_cb; - Event::MockTimer* resolve_timer = new Event::MockTimer(&dispatcher_); - Event::MockTimer* timeout_timer = new Event::MockTimer(&dispatcher_); + Event::MockTimer* resolve_timer = new Event::MockTimer(&context_.dispatcher_); + Event::MockTimer* timeout_timer = new Event::MockTimer(&context_.dispatcher_); EXPECT_CALL(*timeout_timer, enableTimer(std::chrono::milliseconds(5000), nullptr)); EXPECT_CALL(*resolver_, resolve("foo.com", _, _)) .WillOnce(DoAll(SaveArg<2>(&resolve_cb), Return(&resolver_->active_query_))); @@ -555,8 +551,8 @@ TEST_F(DnsCacheImplTest, ResolveFailureWithFailureRefreshRate) { MockLoadDnsCacheEntryCallbacks callbacks; Network::DnsResolver::ResolveCb resolve_cb; - Event::MockTimer* resolve_timer = new Event::MockTimer(&dispatcher_); - Event::MockTimer* timeout_timer = new Event::MockTimer(&dispatcher_); + Event::MockTimer* resolve_timer = new Event::MockTimer(&context_.dispatcher_); + Event::MockTimer* timeout_timer = new Event::MockTimer(&context_.dispatcher_); EXPECT_CALL(*timeout_timer, enableTimer(std::chrono::milliseconds(5000), nullptr)); EXPECT_CALL(*resolver_, resolve("foo.com", _, _)) .WillOnce(DoAll(SaveArg<2>(&resolve_cb), Return(&resolver_->active_query_))); @@ -570,7 +566,7 @@ TEST_F(DnsCacheImplTest, ResolveFailureWithFailureRefreshRate) { EXPECT_CALL(*timeout_timer, disableTimer()); EXPECT_CALL(update_callbacks_, onDnsHostAddOrUpdate(_, _)).Times(0); EXPECT_CALL(callbacks, onLoadDnsCacheComplete(DnsHostInfoAddressIsNull())); - ON_CALL(random_, random()).WillByDefault(Return(8000)); + ON_CALL(context_.api_.random_, random()).WillByDefault(Return(8000)); EXPECT_CALL(*resolve_timer, enableTimer(std::chrono::milliseconds(1000), _)); resolve_cb(Network::DnsResolver::ResolutionStatus::Failure, TestUtility::makeDnsResponse({})); checkStats(1 /* attempt */, 0 /* success */, 1 /* failure */, 0 
/* address changed */, @@ -601,8 +597,8 @@ TEST_F(DnsCacheImplTest, ResolveSuccessWithEmptyResult) { MockLoadDnsCacheEntryCallbacks callbacks; Network::DnsResolver::ResolveCb resolve_cb; - Event::MockTimer* resolve_timer = new Event::MockTimer(&dispatcher_); - Event::MockTimer* timeout_timer = new Event::MockTimer(&dispatcher_); + Event::MockTimer* resolve_timer = new Event::MockTimer(&context_.dispatcher_); + Event::MockTimer* timeout_timer = new Event::MockTimer(&context_.dispatcher_); EXPECT_CALL(*timeout_timer, enableTimer(std::chrono::milliseconds(5000), nullptr)); EXPECT_CALL(*resolver_, resolve("foo.com", _, _)) .WillOnce(DoAll(SaveArg<2>(&resolve_cb), Return(&resolver_->active_query_))); @@ -824,7 +820,7 @@ TEST_F(DnsCacheImplTest, MaxHostOverflow) { EXPECT_EQ(DnsCache::LoadDnsCacheEntryStatus::Overflow, result.status_); EXPECT_EQ(result.handle_, nullptr); EXPECT_EQ(absl::nullopt, result.host_info_); - EXPECT_EQ(1, TestUtility::findCounter(store_, "dns_cache.foo.host_overflow")->value()); + EXPECT_EQ(1, TestUtility::findCounter(context_.scope_, "dns_cache.foo.host_overflow")->value()); } TEST_F(DnsCacheImplTest, CircuitBreakersNotInvoked) { @@ -840,17 +836,18 @@ TEST_F(DnsCacheImplTest, DnsCacheCircuitBreakersOverflow) { auto raii_ptr = dns_cache_->canCreateDnsRequest(); EXPECT_EQ(raii_ptr.get(), nullptr); - EXPECT_EQ(1, TestUtility::findCounter(store_, "dns_cache.foo.dns_rq_pending_overflow")->value()); + EXPECT_EQ( + 1, + TestUtility::findCounter(context_.scope_, "dns_cache.foo.dns_rq_pending_overflow")->value()); } TEST_F(DnsCacheImplTest, UseTcpForDnsLookupsOptionSetDeprecatedField) { initialize(); config_.set_use_tcp_for_dns_lookups(true); envoy::config::core::v3::DnsResolverOptions dns_resolver_options; - EXPECT_CALL(dispatcher_, createDnsResolver(_, _)) + EXPECT_CALL(context_.dispatcher_, createDnsResolver(_, _)) .WillOnce(DoAll(SaveArg<1>(&dns_resolver_options), Return(resolver_))); - DnsCacheImpl dns_cache_(dispatcher_, tls_, random_, filesystem_, 
loader_, store_, - validation_visitor_, config_); + DnsCacheImpl dns_cache_(context_, config_); // `true` here means dns_resolver_options.use_tcp_for_dns_lookups is set to true. EXPECT_EQ(true, dns_resolver_options.use_tcp_for_dns_lookups()); } @@ -861,10 +858,9 @@ TEST_F(DnsCacheImplTest, UseTcpForDnsLookupsOptionSet) { ->mutable_dns_resolver_options() ->set_use_tcp_for_dns_lookups(true); envoy::config::core::v3::DnsResolverOptions dns_resolver_options; - EXPECT_CALL(dispatcher_, createDnsResolver(_, _)) + EXPECT_CALL(context_.dispatcher_, createDnsResolver(_, _)) .WillOnce(DoAll(SaveArg<1>(&dns_resolver_options), Return(resolver_))); - DnsCacheImpl dns_cache_(dispatcher_, tls_, random_, filesystem_, loader_, store_, - validation_visitor_, config_); + DnsCacheImpl dns_cache_(context_, config_); // `true` here means dns_resolver_options.use_tcp_for_dns_lookups is set to true. EXPECT_EQ(true, dns_resolver_options.use_tcp_for_dns_lookups()); } @@ -875,10 +871,9 @@ TEST_F(DnsCacheImplTest, NoDefaultSearchDomainOptionSet) { ->mutable_dns_resolver_options() ->set_no_default_search_domain(true); envoy::config::core::v3::DnsResolverOptions dns_resolver_options; - EXPECT_CALL(dispatcher_, createDnsResolver(_, _)) + EXPECT_CALL(context_.dispatcher_, createDnsResolver(_, _)) .WillOnce(DoAll(SaveArg<1>(&dns_resolver_options), Return(resolver_))); - DnsCacheImpl dns_cache_(dispatcher_, tls_, random_, filesystem_, loader_, store_, - validation_visitor_, config_); + DnsCacheImpl dns_cache_(context_, config_); // `true` here means dns_resolver_options.no_default_search_domain is set to true. 
EXPECT_EQ(true, dns_resolver_options.no_default_search_domain()); } @@ -886,10 +881,9 @@ TEST_F(DnsCacheImplTest, NoDefaultSearchDomainOptionSet) { TEST_F(DnsCacheImplTest, UseTcpForDnsLookupsOptionUnSet) { initialize(); envoy::config::core::v3::DnsResolverOptions dns_resolver_options; - EXPECT_CALL(dispatcher_, createDnsResolver(_, _)) + EXPECT_CALL(context_.dispatcher_, createDnsResolver(_, _)) .WillOnce(DoAll(SaveArg<1>(&dns_resolver_options), Return(resolver_))); - DnsCacheImpl dns_cache_(dispatcher_, tls_, random_, filesystem_, loader_, store_, - validation_visitor_, config_); + DnsCacheImpl dns_cache_(context_, config_); // `false` here means dns_resolver_options.use_tcp_for_dns_lookups is set to false. EXPECT_EQ(false, dns_resolver_options.use_tcp_for_dns_lookups()); } @@ -897,24 +891,17 @@ TEST_F(DnsCacheImplTest, UseTcpForDnsLookupsOptionUnSet) { TEST_F(DnsCacheImplTest, NoDefaultSearchDomainOptionUnSet) { initialize(); envoy::config::core::v3::DnsResolverOptions dns_resolver_options; - EXPECT_CALL(dispatcher_, createDnsResolver(_, _)) + EXPECT_CALL(context_.dispatcher_, createDnsResolver(_, _)) .WillOnce(DoAll(SaveArg<1>(&dns_resolver_options), Return(resolver_))); - DnsCacheImpl dns_cache_(dispatcher_, tls_, random_, filesystem_, loader_, store_, - validation_visitor_, config_); + DnsCacheImpl dns_cache_(context_, config_); // `false` here means dns_resolver_options.no_default_search_domain is set to false. EXPECT_EQ(false, dns_resolver_options.no_default_search_domain()); } // DNS cache manager config tests. 
TEST(DnsCacheManagerImplTest, LoadViaConfig) { - NiceMock dispatcher; - NiceMock tls; - NiceMock random; - NiceMock loader; - Stats::IsolatedStoreImpl store; - NiceMock filesystem; - Envoy::ProtobufMessage::MockValidationVisitor visitor; - DnsCacheManagerImpl cache_manager(dispatcher, tls, random, filesystem, loader, store, visitor); + NiceMock context; + DnsCacheManagerImpl cache_manager(context); envoy::extensions::common::dynamic_forward_proxy::v3::DnsCacheConfig config1; config1.set_name("foo"); @@ -940,30 +927,20 @@ TEST(DnsCacheManagerImplTest, LoadViaConfig) { } TEST(DnsCacheConfigOptionsTest, EmtpyDnsResolutionConfig) { - NiceMock dispatcher; + NiceMock context; + envoy::extensions::common::dynamic_forward_proxy::v3::DnsCacheConfig config; std::shared_ptr resolver{std::make_shared()}; - NiceMock tls; - NiceMock random; - NiceMock loader; - Stats::IsolatedStoreImpl store; - envoy::extensions::common::dynamic_forward_proxy::v3::DnsCacheConfig config; std::vector expected_empty_dns_resolvers; - EXPECT_CALL(dispatcher, createDnsResolver(expected_empty_dns_resolvers, _)) + EXPECT_CALL(context.dispatcher_, createDnsResolver(expected_empty_dns_resolvers, _)) .WillOnce(Return(resolver)); - NiceMock filesystem; - Envoy::ProtobufMessage::MockValidationVisitor visitor; - DnsCacheImpl dns_cache(dispatcher, tls, random, filesystem, loader, store, visitor, config); + DnsCacheImpl dns_cache_(context, config); } TEST(DnsCacheConfigOptionsTest, NonEmptyDnsResolutionConfig) { - NiceMock dispatcher; - std::shared_ptr resolver{std::make_shared()}; - NiceMock tls; - NiceMock random; - NiceMock loader; - Stats::IsolatedStoreImpl store; + NiceMock context; envoy::extensions::common::dynamic_forward_proxy::v3::DnsCacheConfig config; + std::shared_ptr resolver{std::make_shared()}; envoy::config::core::v3::Address* dns_resolvers = config.mutable_dns_resolution_config()->add_resolvers(); @@ -972,12 +949,10 @@ TEST(DnsCacheConfigOptionsTest, NonEmptyDnsResolutionConfig) { std::vector 
expected_dns_resolvers; expected_dns_resolvers.push_back(Network::Address::resolveProtoAddress(*dns_resolvers)); - EXPECT_CALL(dispatcher, + EXPECT_CALL(context.dispatcher_, createDnsResolver(CustomDnsResolversSizeEquals(expected_dns_resolvers), _)) .WillOnce(Return(resolver)); - NiceMock filesystem; - Envoy::ProtobufMessage::MockValidationVisitor visitor; - DnsCacheImpl dns_cache_(dispatcher, tls, random, filesystem, loader, store, visitor, config); + DnsCacheImpl dns_cache_(context, config); } // Note: this test is done here, rather than a TYPED_TEST_SUITE in @@ -1052,8 +1027,8 @@ TEST_F(DnsCacheImplTest, ResolveSuccessWithCaching) { MockLoadDnsCacheEntryCallbacks callbacks; Network::DnsResolver::ResolveCb resolve_cb; - Event::MockTimer* resolve_timer = new Event::MockTimer(&dispatcher_); - Event::MockTimer* timeout_timer = new Event::MockTimer(&dispatcher_); + Event::MockTimer* resolve_timer = new Event::MockTimer(&context_.dispatcher_); + Event::MockTimer* timeout_timer = new Event::MockTimer(&context_.dispatcher_); EXPECT_CALL(*timeout_timer, enableTimer(std::chrono::milliseconds(5000), nullptr)); EXPECT_CALL(*resolver_, resolve("foo.com", _, _)) .WillOnce(DoAll(SaveArg<2>(&resolve_cb), Return(&resolver_->active_query_))); From 752e3a97dc7d3981491fd9057aefdd7478b46c37 Mon Sep 17 00:00:00 2001 From: Rohit Agrawal Date: Tue, 14 Sep 2021 11:18:32 -0400 Subject: [PATCH 040/121] router: Added alt_header_name to set an arbitrary header for populating upstream SNI (#17995) Adds a new optional param called override_auto_sni_header which can be used to populate the upstream SNI value from an arbitrary header other than Host/Authority. 
Signed-off-by: Rohit Agrawal Signed-off-by: gayang --- api/envoy/config/core/v3/protocol.proto | 21 ++- docs/root/faq/configuration/sni.rst | 6 +- docs/root/version_history/current.rst | 1 + source/common/router/router.cc | 31 +++- test/common/router/router_test.cc | 159 ++++++++++++------ .../proxy_filter_integration_test.cc | 42 ++++- .../http/router/auto_sni_integration_test.cc | 57 +++++-- 7 files changed, 239 insertions(+), 78 deletions(-) diff --git a/api/envoy/config/core/v3/protocol.proto b/api/envoy/config/core/v3/protocol.proto index 1b96b88be7cd9..74b778b6d30a7 100644 --- a/api/envoy/config/core/v3/protocol.proto +++ b/api/envoy/config/core/v3/protocol.proto @@ -60,15 +60,26 @@ message UpstreamHttpProtocolOptions { "envoy.api.v2.core.UpstreamHttpProtocolOptions"; // Set transport socket `SNI `_ for new - // upstream connections based on the downstream HTTP host/authority header, as seen by the - // :ref:`router filter `. + // upstream connections based on the downstream HTTP host/authority header or any other arbitrary + // header when :ref:`override_auto_sni_header ` + // is set, as seen by the :ref:`router filter `. bool auto_sni = 1; // Automatic validate upstream presented certificate for new upstream connections based on the - // downstream HTTP host/authority header, as seen by the - // :ref:`router filter `. - // This field is intended to set with `auto_sni` field. + // downstream HTTP host/authority header or any other arbitrary header when :ref:`override_auto_sni_header ` + // is set, as seen by the :ref:`router filter `. + // This field is intended to be set with `auto_sni` field. bool auto_san_validation = 2; + + // An optional alternative to the host/authority header to be used for setting the SNI value. + // It should be a valid downstream HTTP header, as seen by the + // :ref:`router filter `. + // If unset, host/authority header will be used for populating the SNI. 
If the specified header + // is not found or the value is empty, host/authority header will be used instead. + // This field is intended to be set with `auto_sni` and/or `auto_san_validation` fields. + // If none of these fields are set then setting this would be a no-op. + string override_auto_sni_header = 3 + [(validate.rules).string = {well_known_regex: HTTP_HEADER_NAME ignore_empty: true}]; } // Configures the alternate protocols cache which tracks alternate protocols that can be used to diff --git a/docs/root/faq/configuration/sni.rst b/docs/root/faq/configuration/sni.rst index 9b33302c595ec..e7bbdf1fb0b9f 100644 --- a/docs/root/faq/configuration/sni.rst +++ b/docs/root/faq/configuration/sni.rst @@ -70,8 +70,10 @@ How do I configure SNI for clusters? ==================================== For clusters, a fixed SNI can be set in :ref:`UpstreamTlsContext `. -To derive SNI from HTTP ``host`` or ``:authority`` header, turn on +To derive SNI from a downstream HTTP header like, ``host`` or ``:authority``, turn on :ref:`auto_sni ` to override the fixed SNI in -`UpstreamTlsContext`. If upstream will present certificates with the hostname in SAN, turn on +`UpstreamTlsContext`. A custom header other than the ``host`` or ``:authority`` can also be supplied using the optional +:ref:`override_auto_sni_header ` field. +If upstream will present certificates with the hostname in SAN, turn on :ref:`auto_san_validation ` too. It still needs a trust CA in validation context in ``UpstreamTlsContext`` for trust anchor. diff --git a/docs/root/version_history/current.rst b/docs/root/version_history/current.rst index 2089506696c3d..a9fd246530972 100644 --- a/docs/root/version_history/current.rst +++ b/docs/root/version_history/current.rst @@ -115,6 +115,7 @@ New Features * overload: add a new overload action that resets streams using a lot of memory. 
To enable the tracking of allocated bytes in buffers that a stream is using we need to configure the minimum threshold for tracking via:ref:`buffer_factory_config `. We have an overload action ``Envoy::Server::OverloadActionNameValues::ResetStreams`` that takes advantage of the tracking to reset the most expensive stream first. * rbac: added :ref:`destination_port_range ` for matching range of destination ports. * route config: added :ref:`dynamic_metadata ` for routing based on dynamic metadata. +* router: added an optional :ref:`override_auto_sni_header ` to support setting SNI value from an arbitrary header other than host/authority. * sxg_filter: added filter to transform response to SXG package to :ref:`contrib images `. This can be enabled by setting :ref:`SXG ` configuration. * thrift_proxy: added support for :ref:`mirroring requests `. diff --git a/source/common/router/router.cc b/source/common/router/router.cc index d1115c8a40df0..c8874c6923373 100644 --- a/source/common/router/router.cc +++ b/source/common/router/router.cc @@ -502,12 +502,33 @@ Http::FilterHeadersStatus Filter::decodeHeaders(Http::RequestHeaderMap& headers, // Fetch a connection pool for the upstream cluster. const auto& upstream_http_protocol_options = cluster_->upstreamHttpProtocolOptions(); - if (upstream_http_protocol_options.has_value()) { - const auto parsed_authority = Http::Utility::parseAuthority(headers.getHostValue()); - if (!parsed_authority.is_ip_address_ && upstream_http_protocol_options.value().auto_sni()) { + if (upstream_http_protocol_options.has_value() && + (upstream_http_protocol_options.value().auto_sni() || + upstream_http_protocol_options.value().auto_san_validation())) { + // Default the header to Host/Authority header. + absl::string_view header_value = headers.getHostValue(); + + // Check whether `override_auto_sni_header` is specified. 
+ const auto override_auto_sni_header = + upstream_http_protocol_options.value().override_auto_sni_header(); + if (!override_auto_sni_header.empty()) { + // Use the header value from `override_auto_sni_header` to set the SNI value. + const auto overridden_header_value = Http::HeaderUtility::getAllOfHeaderAsString( + headers, Http::LowerCaseString(override_auto_sni_header)); + if (overridden_header_value.result().has_value() && + !overridden_header_value.result().value().empty()) { + header_value = overridden_header_value.result().value(); + } + } + const auto parsed_authority = Http::Utility::parseAuthority(header_value); + bool should_set_sni = !parsed_authority.is_ip_address_; + // `host_` returns a string_view so doing this should be safe. + absl::string_view sni_value = parsed_authority.host_; + + if (should_set_sni && upstream_http_protocol_options.value().auto_sni()) { callbacks_->streamInfo().filterState()->setData( Network::UpstreamServerName::key(), - std::make_unique(parsed_authority.host_), + std::make_unique(sni_value), StreamInfo::FilterState::StateType::Mutable); } @@ -515,7 +536,7 @@ Http::FilterHeadersStatus Filter::decodeHeaders(Http::RequestHeaderMap& headers, callbacks_->streamInfo().filterState()->setData( Network::UpstreamSubjectAltNames::key(), std::make_unique( - std::vector{std::string(parsed_authority.host_)}), + std::vector{std::string(sni_value)}), StreamInfo::FilterState::StateType::Mutable); } } diff --git a/test/common/router/router_test.cc b/test/common/router/router_test.cc index 3583a8159f2a9..22c0fa2f2bbf0 100644 --- a/test/common/router/router_test.cc +++ b/test/common/router/router_test.cc @@ -141,65 +141,130 @@ class RouterTest : public RouterTestBase { router_.onDestroy(); } + + void testAutoSniOptions( + absl::optional dummy_option, + Envoy::Http::TestRequestHeaderMapImpl headers, std::string server_name = "host", + bool should_validate_san = false, std::string alt_server_name = "host") { + NiceMock stream_info; + 
ON_CALL(*cm_.thread_local_cluster_.cluster_.info_, upstreamHttpProtocolOptions()) + .WillByDefault(ReturnRef(dummy_option)); + ON_CALL(callbacks_.stream_info_, filterState()) + .WillByDefault(ReturnRef(stream_info.filterState())); + EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_, newStream(_, _)) + .WillOnce(Return(&cancellable_)); + stream_info.filterState()->setData(Network::UpstreamServerName::key(), + std::make_unique("dummy"), + StreamInfo::FilterState::StateType::Mutable); + expectResponseTimerCreate(); + + HttpTestUtility::addDefaultHeaders(headers); + router_.decodeHeaders(headers, true); + EXPECT_EQ(server_name, + stream_info.filterState() + ->getDataReadOnly(Network::UpstreamServerName::key()) + .value()); + if (should_validate_san) { + EXPECT_EQ(alt_server_name, stream_info.filterState() + ->getDataReadOnly( + Network::UpstreamSubjectAltNames::key()) + .value()[0]); + } + EXPECT_CALL(cancellable_, cancel(_)); + router_.onDestroy(); + EXPECT_TRUE(verifyHostUpstreamStats(0, 0)); + EXPECT_EQ(0U, + callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value()); + EXPECT_EQ(0U, + callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value()); + } }; -TEST_F(RouterTest, UpdateServerNameFilterState) { - NiceMock stream_info; +TEST_F(RouterTest, UpdateServerNameFilterStateWithoutHeaderOverride) { auto dummy_option = absl::make_optional(); dummy_option.value().set_auto_sni(true); - ON_CALL(*cm_.thread_local_cluster_.cluster_.info_, upstreamHttpProtocolOptions()) - .WillByDefault(ReturnRef(dummy_option)); - ON_CALL(callbacks_.stream_info_, filterState()) - .WillByDefault(ReturnRef(stream_info.filterState())); - EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_, newStream(_, _)) - .WillOnce(Return(&cancellable_)); - stream_info.filterState()->setData(Network::UpstreamServerName::key(), - std::make_unique("dummy"), - StreamInfo::FilterState::StateType::Mutable); - expectResponseTimerCreate(); - 
Http::TestRequestHeaderMapImpl headers; + Http::TestRequestHeaderMapImpl headers{}; + testAutoSniOptions(dummy_option, headers); +} - HttpTestUtility::addDefaultHeaders(headers); - router_.decodeHeaders(headers, true); - EXPECT_EQ("host", - stream_info.filterState() - ->getDataReadOnly(Network::UpstreamServerName::key()) - .value()); - EXPECT_CALL(cancellable_, cancel(_)); - router_.onDestroy(); - EXPECT_TRUE(verifyHostUpstreamStats(0, 0)); - EXPECT_EQ(0U, - callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value()); - EXPECT_EQ(0U, - callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value()); +TEST_F(RouterTest, UpdateServerNameFilterStateWithHostHeaderOverride) { + auto dummy_option = absl::make_optional(); + dummy_option.value().set_auto_sni(true); + dummy_option.value().set_override_auto_sni_header(":authority"); + + Http::TestRequestHeaderMapImpl headers{}; + testAutoSniOptions(dummy_option, headers); +} + +TEST_F(RouterTest, UpdateServerNameFilterStateWithHeaderOverride) { + auto dummy_option = absl::make_optional(); + dummy_option.value().set_auto_sni(true); + dummy_option.value().set_override_auto_sni_header("x-host"); + + const auto server_name = "foo.bar"; + Http::TestRequestHeaderMapImpl headers{{"x-host", server_name}}; + testAutoSniOptions(dummy_option, headers, server_name); +} + +TEST_F(RouterTest, UpdateServerNameFilterStateWithEmptyValueHeaderOverride) { + auto dummy_option = absl::make_optional(); + dummy_option.value().set_auto_sni(true); + dummy_option.value().set_override_auto_sni_header("x-host"); + + Http::TestRequestHeaderMapImpl headers{{"x-host", ""}}; + testAutoSniOptions(dummy_option, headers); } -TEST_F(RouterTest, UpdateSubjectAltNamesFilterState) { - NiceMock stream_info; +TEST_F(RouterTest, UpdateSubjectAltNamesFilterStateWithoutHeaderOverride) { auto dummy_option = absl::make_optional(); + dummy_option.value().set_auto_sni(true); dummy_option.value().set_auto_san_validation(true); 
- ON_CALL(*cm_.thread_local_cluster_.cluster_.info_, upstreamHttpProtocolOptions()) - .WillByDefault(ReturnRef(dummy_option)); - ON_CALL(callbacks_.stream_info_, filterState()) - .WillByDefault(ReturnRef(stream_info.filterState())); - EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_, newStream(_, _)) - .WillOnce(Return(&cancellable_)); - expectResponseTimerCreate(); - Http::TestRequestHeaderMapImpl headers; + Http::TestRequestHeaderMapImpl headers{}; + testAutoSniOptions(dummy_option, headers, "host", true); +} - HttpTestUtility::addDefaultHeaders(headers); - router_.decodeHeaders(headers, true); - EXPECT_EQ("host", stream_info.filterState() - ->getDataReadOnly( - Network::UpstreamSubjectAltNames::key()) - .value()[0]); - EXPECT_CALL(cancellable_, cancel(_)); - router_.onDestroy(); - EXPECT_TRUE(verifyHostUpstreamStats(0, 0)); - EXPECT_EQ(0U, - callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value()); +TEST_F(RouterTest, UpdateSubjectAltNamesFilterStateWithHostHeaderOverride) { + auto dummy_option = absl::make_optional(); + dummy_option.value().set_auto_sni(true); + dummy_option.value().set_auto_san_validation(true); + dummy_option.value().set_override_auto_sni_header(":authority"); + + Http::TestRequestHeaderMapImpl headers{}; + testAutoSniOptions(dummy_option, headers, "host", true); +} + +TEST_F(RouterTest, UpdateSubjectAltNamesFilterStateWithHeaderOverride) { + auto dummy_option = absl::make_optional(); + dummy_option.value().set_auto_sni(true); + dummy_option.value().set_auto_san_validation(true); + dummy_option.value().set_override_auto_sni_header("x-host"); + + const auto server_name = "foo.bar"; + Http::TestRequestHeaderMapImpl headers{{"x-host", server_name}}; + testAutoSniOptions(dummy_option, headers, server_name, true, server_name); +} + +TEST_F(RouterTest, UpdateSubjectAltNamesFilterStateWithEmptyValueHeaderOverride) { + auto dummy_option = absl::make_optional(); + dummy_option.value().set_auto_sni(true); + 
dummy_option.value().set_auto_san_validation(true); + dummy_option.value().set_override_auto_sni_header("x-host"); + + Http::TestRequestHeaderMapImpl headers{{"x-host", ""}}; + testAutoSniOptions(dummy_option, headers, "host", true); +} + +TEST_F(RouterTest, UpdateSubjectAltNamesFilterStateWithIpHeaderOverride) { + auto dummy_option = absl::make_optional(); + dummy_option.value().set_auto_sni(true); + dummy_option.value().set_auto_san_validation(true); + dummy_option.value().set_override_auto_sni_header("x-host"); + + const auto server_name = "127.0.0.1"; + Http::TestRequestHeaderMapImpl headers{{"x-host", server_name}}; + testAutoSniOptions(dummy_option, headers, "dummy", true, server_name); } TEST_F(RouterTest, RouteNotFound) { diff --git a/test/extensions/filters/http/dynamic_forward_proxy/proxy_filter_integration_test.cc b/test/extensions/filters/http/dynamic_forward_proxy/proxy_filter_integration_test.cc index 4172b76b30e10..7cf64c0bbb608 100644 --- a/test/extensions/filters/http/dynamic_forward_proxy/proxy_filter_integration_test.cc +++ b/test/extensions/filters/http/dynamic_forward_proxy/proxy_filter_integration_test.cc @@ -18,7 +18,8 @@ class ProxyFilterIntegrationTest : public testing::TestWithParammutable_validate_clusters()->set_value(false); }); + [override_auto_sni_header]( + envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager& + hcm) { + hcm.mutable_route_config()->mutable_validate_clusters()->set_value(false); + }); // Setup the initial CDS cluster. 
cluster_.mutable_connect_timeout()->CopyFrom( @@ -64,6 +68,10 @@ name: dynamic_forward_proxy ConfigHelper::HttpProtocolOptions protocol_options; protocol_options.mutable_upstream_http_protocol_options()->set_auto_sni(true); + if (!override_auto_sni_header.empty()) { + protocol_options.mutable_upstream_http_protocol_options()->set_override_auto_sni_header( + override_auto_sni_header); + } protocol_options.mutable_upstream_http_protocol_options()->set_auto_san_validation(true); protocol_options.mutable_explicit_http_config()->mutable_http_protocol_options(); ConfigHelper::setProtocolOptions(cluster_, protocol_options); @@ -280,6 +288,34 @@ TEST_P(ProxyFilterIntegrationTest, UpstreamTls) { checkSimpleRequestSuccess(0, 0, response.get()); } +// Verify that `override_auto_sni_header` can be used along with auto_sni to set +// SNI from an arbitrary header. +TEST_P(ProxyFilterIntegrationTest, UpstreamTlsWithAltHeaderSni) { + upstream_tls_ = true; + initializeWithArgs(1024, 1024, "x-host"); + codec_client_ = makeHttpConnection(lookupPort("http")); + const Http::TestRequestHeaderMapImpl request_headers{ + {":method", "POST"}, + {":path", "/test/long/url"}, + {":scheme", "http"}, + {":authority", + fmt::format("{}:{}", fake_upstreams_[0]->localAddress()->ip()->addressAsString().c_str(), + fake_upstreams_[0]->localAddress()->ip()->port())}, + {"x-host", "localhost"}}; + + auto response = codec_client_->makeHeaderOnlyRequest(request_headers); + waitForNextUpstreamRequest(); + + const Extensions::TransportSockets::Tls::SslHandshakerImpl* ssl_socket = + dynamic_cast( + fake_upstream_connection_->connection().ssl().get()); + EXPECT_STREQ("localhost", SSL_get_servername(ssl_socket->ssl(), TLSEXT_NAMETYPE_host_name)); + + upstream_request_->encodeHeaders(default_response_headers_, true); + ASSERT_TRUE(response->waitForEndStream()); + checkSimpleRequestSuccess(0, 0, response.get()); +} + TEST_P(ProxyFilterIntegrationTest, UpstreamTlsWithIpHost) { upstream_tls_ = true; 
initializeWithArgs(); diff --git a/test/extensions/filters/http/router/auto_sni_integration_test.cc b/test/extensions/filters/http/router/auto_sni_integration_test.cc index 25328614681fe..4c8307125d1a7 100644 --- a/test/extensions/filters/http/router/auto_sni_integration_test.cc +++ b/test/extensions/filters/http/router/auto_sni_integration_test.cc @@ -18,24 +18,29 @@ class AutoSniIntegrationTest : public testing::TestWithParammutable_clusters()->at(0); - ConfigHelper::HttpProtocolOptions protocol_options; - protocol_options.mutable_upstream_http_protocol_options()->set_auto_sni(true); - ConfigHelper::setProtocolOptions(*bootstrap.mutable_static_resources()->mutable_clusters(0), - protocol_options); - - envoy::extensions::transport_sockets::tls::v3::UpstreamTlsContext tls_context; - auto* validation_context = - tls_context.mutable_common_tls_context()->mutable_validation_context(); - validation_context->mutable_trusted_ca()->set_filename( - TestEnvironment::runfilesPath("test/config/integration/certs/upstreamcacert.pem")); - cluster_config.mutable_transport_socket()->set_name("envoy.transport_sockets.tls"); - cluster_config.mutable_transport_socket()->mutable_typed_config()->PackFrom(tls_context); - }); + config_helper_.addConfigModifier( + [override_auto_sni_header](envoy::config::bootstrap::v3::Bootstrap& bootstrap) { + auto& cluster_config = bootstrap.mutable_static_resources()->mutable_clusters()->at(0); + ConfigHelper::HttpProtocolOptions protocol_options; + protocol_options.mutable_upstream_http_protocol_options()->set_auto_sni(true); + if (!override_auto_sni_header.empty()) { + protocol_options.mutable_upstream_http_protocol_options()->set_override_auto_sni_header( + override_auto_sni_header); + } + ConfigHelper::setProtocolOptions( + *bootstrap.mutable_static_resources()->mutable_clusters(0), protocol_options); + + envoy::extensions::transport_sockets::tls::v3::UpstreamTlsContext tls_context; + auto* validation_context = + 
tls_context.mutable_common_tls_context()->mutable_validation_context(); + validation_context->mutable_trusted_ca()->set_filename( + TestEnvironment::runfilesPath("test/config/integration/certs/upstreamcacert.pem")); + cluster_config.mutable_transport_socket()->set_name("envoy.transport_sockets.tls"); + cluster_config.mutable_transport_socket()->mutable_typed_config()->PackFrom(tls_context); + }); HttpIntegrationTest::initialize(); } @@ -83,6 +88,26 @@ TEST_P(AutoSniIntegrationTest, BasicAutoSniTest) { EXPECT_STREQ("localhost", SSL_get_servername(ssl_socket->ssl(), TLSEXT_NAMETYPE_host_name)); } +TEST_P(AutoSniIntegrationTest, AutoSniWithAltHeaderNameTest) { + setup("x-host"); + codec_client_ = makeHttpConnection(lookupPort("http")); + const auto response_ = + sendRequestAndWaitForResponse(Http::TestRequestHeaderMapImpl{{":method", "GET"}, + {":path", "/"}, + {":scheme", "http"}, + {":authority", "localhost"}, + {"x-host", "custom"}}, + 0, default_response_headers_, 0); + + EXPECT_TRUE(upstream_request_->complete()); + EXPECT_TRUE(response_->complete()); + + const Extensions::TransportSockets::Tls::SslHandshakerImpl* ssl_socket = + dynamic_cast( + fake_upstream_connection_->connection().ssl().get()); + EXPECT_STREQ("custom", SSL_get_servername(ssl_socket->ssl(), TLSEXT_NAMETYPE_host_name)); +} + TEST_P(AutoSniIntegrationTest, PassingNotDNS) { setup(); codec_client_ = makeHttpConnection(lookupPort("http")); From b4bf119a88c02d3dc9ea6e0c5ae596dc8467c4cb Mon Sep 17 00:00:00 2001 From: alyssawilk Date: Tue, 14 Sep 2021 13:54:11 -0400 Subject: [PATCH 041/121] test: fix build (#18115) Signed-off-by: Alyssa Wilk Signed-off-by: gayang --- .../common/dynamic_forward_proxy/dns_cache_impl_test.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/extensions/common/dynamic_forward_proxy/dns_cache_impl_test.cc b/test/extensions/common/dynamic_forward_proxy/dns_cache_impl_test.cc index c077eab954dc6..689e9deb9a688 100644 --- 
a/test/extensions/common/dynamic_forward_proxy/dns_cache_impl_test.cc +++ b/test/extensions/common/dynamic_forward_proxy/dns_cache_impl_test.cc @@ -1001,7 +1001,7 @@ TEST(UtilityTest, PrepareDnsRefreshStrategy) { TEST_F(DnsCacheImplTest, ResolveSuccessWithCaching) { auto* time_source = new NiceMock(); - dispatcher_.time_system_.reset(time_source); + context_.dispatcher_.time_system_.reset(time_source); // Configure the cache. MockKeyValueStoreFactory factory; From e87dbf254701421188bd16fe1674db027f6eb209 Mon Sep 17 00:00:00 2001 From: Ryan Hamilton Date: Tue, 14 Sep 2021 13:53:47 -0700 Subject: [PATCH 042/121] tools: Improve the error message in tools/docs/rst_check.py for invalid backticks (#18116) tools: Improve the error message in tools/docs/rst_check.py for invalid backticks Risk Level: Low Testing: None Docs Changes: N/A Release Notes: N/A Platform Specific Features: N/A Signed-off-by: Ryan Hamilton Signed-off-by: gayang --- tools/docs/rst_check.py | 5 +++-- tools/docs/tests/test_rst_check.py | 2 +- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/tools/docs/rst_check.py b/tools/docs/rst_check.py index 8a5b692d036d3..763f2dbff932e 100644 --- a/tools/docs/rst_check.py +++ b/tools/docs/rst_check.py @@ -118,8 +118,9 @@ def check_reflink(self, line: str) -> List[str]: if self.invalid_reflink_re.match(line) else []) def check_ticks(self, line: str) -> List[str]: - return ([f"Backticks should come in pairs (except for links and refs): {line}"] if - (self.backticks_re.match(line)) else []) + return ([ + f"Backticks should come in pairs (``foo``) except for links (`title `_) or refs (ref:`text `): {line}" + ] if (self.backticks_re.match(line)) else []) def run_checks(self) -> Iterator[str]: self.set_tokens() diff --git a/tools/docs/tests/test_rst_check.py b/tools/docs/tests/test_rst_check.py index a2bef68a71f0b..91e91ced246ce 100644 --- a/tools/docs/tests/test_rst_check.py +++ b/tools/docs/tests/test_rst_check.py @@ -249,7 +249,7 @@ def 
test_rst_check_current_version_check_ticks(patches, matches): m_re.return_value.match.return_value = matches assert ( version_file.check_ticks("LINE") - == (["Backticks should come in pairs (except for links and refs): LINE"] + == (["Backticks should come in pairs (``foo``) except for links (`title `_) or refs (ref:`text `): LINE"] if matches else [])) assert ( list(m_re.return_value.match.call_args) From c8ea4bb2cebff8b969eb63e566fd9453d3d1dd3e Mon Sep 17 00:00:00 2001 From: alyssawilk Date: Tue, 14 Sep 2021 18:01:49 -0400 Subject: [PATCH 043/121] coverage: bumping dubbo_proxy (#18121) Signed-off-by: Alyssa Wilk Signed-off-by: gayang --- test/per_file_coverage.sh | 1 - 1 file changed, 1 deletion(-) diff --git a/test/per_file_coverage.sh b/test/per_file_coverage.sh index 49446ee5dc291..6decf6b942758 100755 --- a/test/per_file_coverage.sh +++ b/test/per_file_coverage.sh @@ -38,7 +38,6 @@ declare -a KNOWN_LOW_COVERAGE=( "source/extensions/filters/listener/tls_inspector:92.4" "source/extensions/filters/network/common:96.2" "source/extensions/filters/network/common/redis:96.3" -"source/extensions/filters/network/dubbo_proxy:96.2" "source/extensions/filters/network/mongo_proxy:94.0" "source/extensions/filters/network/sni_cluster:90.3" "source/extensions/filters/network/sni_dynamic_forward_proxy:90.9" From 68c758ad9799dd703a983d1e911017ea6e3fc4ff Mon Sep 17 00:00:00 2001 From: Matt Klein Date: Tue, 14 Sep 2021 15:41:21 -0700 Subject: [PATCH 044/121] router: add per_try_idle_timeout configuration (#18078) Allows ensuring continual progress of individual request attempts. 
Signed-off-by: Matt Klein Signed-off-by: gayang --- .../config/route/v3/route_components.proto | 27 ++- docs/root/faq/configuration/timeouts.rst | 5 + docs/root/version_history/current.rst | 1 + envoy/router/router.h | 7 +- envoy/stream_info/stream_info.h | 2 + envoy/upstream/cluster_manager.h | 2 +- envoy/upstream/upstream.h | 1 + source/common/http/async_client_impl.h | 3 + source/common/router/config_impl.cc | 2 + source/common/router/config_impl.h | 2 + source/common/router/router.cc | 17 +- source/common/router/router.h | 8 +- source/common/router/upstream_request.cc | 25 +++ source/common/router/upstream_request.h | 5 +- test/common/router/config_impl_test.cc | 11 ++ test/common/router/router_test.cc | 168 ++++++++++++++++-- test/common/router/router_test_base.cc | 5 + test/common/router/router_test_base.h | 2 + .../idle_timeout_integration_test.cc | 46 +++++ test/mocks/router/mocks.h | 2 + test/mocks/router/router_filter_interface.h | 1 + 21 files changed, 314 insertions(+), 28 deletions(-) diff --git a/api/envoy/config/route/v3/route_components.proto b/api/envoy/config/route/v3/route_components.proto index e6be0c43ed0ac..4a0b93f3601c2 100644 --- a/api/envoy/config/route/v3/route_components.proto +++ b/api/envoy/config/route/v3/route_components.proto @@ -1164,7 +1164,7 @@ message RouteAction { } // HTTP retry :ref:`architecture overview `. -// [#next-free-field: 12] +// [#next-free-field: 14] message RetryPolicy { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.route.RetryPolicy"; @@ -1305,8 +1305,8 @@ message RetryPolicy { google.protobuf.UInt32Value num_retries = 2 [(udpa.annotations.field_migrate).rename = "max_retries"]; - // Specifies a non-zero upstream timeout per retry attempt. This parameter is optional. The - // same conditions documented for + // Specifies a non-zero upstream timeout per retry attempt (including the initial attempt). This + // parameter is optional. 
The same conditions documented for // :ref:`config_http_filters_router_x-envoy-upstream-rq-per-try-timeout-ms` apply. // // .. note:: @@ -1318,6 +1318,27 @@ message RetryPolicy { // would have been exhausted. google.protobuf.Duration per_try_timeout = 3; + // Specifies an upstream idle timeout per retry attempt (including the initial attempt). This + // parameter is optional and if absent there is no per try idle timeout. The semantics of the per + // try idle timeout are similar to the + // :ref:`route idle timeout ` and + // :ref:`stream idle timeout + // ` + // both enforced by the HTTP connection manager. The difference is that this idle timeout + // is enforced by the router for each individual attempt and thus after all previous filters have + // run, as opposed to *before* all previous filters run for the other idle timeouts. This timeout + // is useful in cases in which total request timeout is bounded by a number of retries and a + // :ref:`per_try_timeout `, but + // there is a desire to ensure each try is making incremental progress. Note also that similar + // to :ref:`per_try_timeout `, + // this idle timeout does not start until after both the entire request has been received by the + // router *and* a connection pool connection has been obtained. Unlike + // :ref:`per_try_timeout `, + // the idle timer continues once the response starts streaming back to the downstream client. + // This ensures that response data continues to make progress without using one of the HTTP + // connection manager idle timeouts. + google.protobuf.Duration per_try_idle_timeout = 13; + // Specifies an implementation of a RetryPriority which is used to determine the // distribution of load across priorities used for retries. Refer to // :ref:`retry plugin configuration ` for more details. 
diff --git a/docs/root/faq/configuration/timeouts.rst b/docs/root/faq/configuration/timeouts.rst index 72c090aac266b..a68963752447d 100644 --- a/docs/root/faq/configuration/timeouts.rst +++ b/docs/root/faq/configuration/timeouts.rst @@ -95,6 +95,11 @@ stream timeouts already introduced above. is sent to the downstream, which normally happens after the upstream has sent response headers. This timeout can be used with streaming endpoints to retry if the upstream fails to begin a response within the timeout. +* The route :ref:`per_try_idle_timeout ` + can be configured to ensure continued response progress of individual retry attempts (including + the first attempt). This is useful in cases where the total upstream request time is bounded + by the number of attempts multiplied by the per try timeout, but while the user wants to + ensure that individual attempts are making progress. * The route :ref:`MaxStreamDuration proto ` can be used to override the HttpConnectionManager's :ref:`max_stream_duration ` diff --git a/docs/root/version_history/current.rst b/docs/root/version_history/current.rst index a9fd246530972..ec8a10f5475f1 100644 --- a/docs/root/version_history/current.rst +++ b/docs/root/version_history/current.rst @@ -115,6 +115,7 @@ New Features * overload: add a new overload action that resets streams using a lot of memory. To enable the tracking of allocated bytes in buffers that a stream is using we need to configure the minimum threshold for tracking via:ref:`buffer_factory_config `. We have an overload action ``Envoy::Server::OverloadActionNameValues::ResetStreams`` that takes advantage of the tracking to reset the most expensive stream first. * rbac: added :ref:`destination_port_range ` for matching range of destination ports. * route config: added :ref:`dynamic_metadata ` for routing based on dynamic metadata. +* router: added :ref:`per_try_idle_timeout ` timeout configuration. 
* router: added an optional :ref:`override_auto_sni_header ` to support setting SNI value from an arbitrary header other than host/authority. * sxg_filter: added filter to transform response to SXG package to :ref:`contrib images `. This can be enabled by setting :ref:`SXG ` configuration. * thrift_proxy: added support for :ref:`mirroring requests `. diff --git a/envoy/router/router.h b/envoy/router/router.h index 57591b2501a8c..c4288f10c3d1d 100644 --- a/envoy/router/router.h +++ b/envoy/router/router.h @@ -204,10 +204,15 @@ class RetryPolicy { virtual ~RetryPolicy() = default; /** - * @return std::chrono::milliseconds timeout per retry attempt. + * @return std::chrono::milliseconds timeout per retry attempt. 0 is disabled. */ virtual std::chrono::milliseconds perTryTimeout() const PURE; + /** + * @return std::chrono::milliseconds the optional per try idle timeout. 0 is disabled. + */ + virtual std::chrono::milliseconds perTryIdleTimeout() const PURE; + /** * @return uint32_t the number of retries to allow against the route. */ diff --git a/envoy/stream_info/stream_info.h b/envoy/stream_info/stream_info.h index 8d01eca502e83..1a48d18e2c949 100644 --- a/envoy/stream_info/stream_info.h +++ b/envoy/stream_info/stream_info.h @@ -158,6 +158,8 @@ struct ResponseCodeDetailValues { const std::string UpstreamTimeout = "upstream_response_timeout"; // The final upstream try timed out. const std::string UpstreamPerTryTimeout = "upstream_per_try_timeout"; + // The final upstream try idle timed out. + const std::string UpstreamPerTryIdleTimeout = "upstream_per_try_idle_timeout"; // The request was destroyed because of user defined max stream duration. const std::string UpstreamMaxStreamDurationReached = "upstream_max_stream_duration_reached"; // The upstream connection was reset before a response was started. 
This diff --git a/envoy/upstream/cluster_manager.h b/envoy/upstream/cluster_manager.h index 16f79117b674f..eeb249affd58b 100644 --- a/envoy/upstream/cluster_manager.h +++ b/envoy/upstream/cluster_manager.h @@ -323,7 +323,7 @@ class ClusterManager { virtual void drainConnections() PURE; /** - * Check if the cluster is active and statically configured, and if not, throw excetion. + * Check if the cluster is active and statically configured, and if not, throw exception. * @param cluster, the cluster to check. */ virtual void checkActiveStaticCluster(const std::string& cluster) PURE; diff --git a/envoy/upstream/upstream.h b/envoy/upstream/upstream.h index 30bf5d8bb211c..caebef0f4e888 100644 --- a/envoy/upstream/upstream.h +++ b/envoy/upstream/upstream.h @@ -575,6 +575,7 @@ class PrioritySet { COUNTER(upstream_rq_pending_overflow) \ COUNTER(upstream_rq_pending_total) \ COUNTER(upstream_rq_per_try_timeout) \ + COUNTER(upstream_rq_per_try_idle_timeout) \ COUNTER(upstream_rq_retry) \ COUNTER(upstream_rq_retry_backoff_exponential) \ COUNTER(upstream_rq_retry_backoff_ratelimited) \ diff --git a/source/common/http/async_client_impl.h b/source/common/http/async_client_impl.h index 0d0273c696b14..27ea7b3abea3d 100644 --- a/source/common/http/async_client_impl.h +++ b/source/common/http/async_client_impl.h @@ -129,6 +129,9 @@ class AsyncStreamImpl : public AsyncClient::Stream, std::chrono::milliseconds perTryTimeout() const override { return std::chrono::milliseconds(0); } + std::chrono::milliseconds perTryIdleTimeout() const override { + return std::chrono::milliseconds(0); + } std::vector retryHostPredicates() const override { return {}; } diff --git a/source/common/router/config_impl.cc b/source/common/router/config_impl.cc index 68ed6e9096035..e71f5171a427c 100644 --- a/source/common/router/config_impl.cc +++ b/source/common/router/config_impl.cc @@ -95,6 +95,8 @@ RetryPolicyImpl::RetryPolicyImpl(const envoy::config::route::v3::RetryPolicy& re 
validation_visitor_(&validation_visitor) { per_try_timeout_ = std::chrono::milliseconds(PROTOBUF_GET_MS_OR_DEFAULT(retry_policy, per_try_timeout, 0)); + per_try_idle_timeout_ = + std::chrono::milliseconds(PROTOBUF_GET_MS_OR_DEFAULT(retry_policy, per_try_idle_timeout, 0)); num_retries_ = PROTOBUF_GET_WRAPPED_OR_DEFAULT(retry_policy, num_retries, 1); retry_on_ = RetryStateImpl::parseRetryOn(retry_policy.retry_on()).first; retry_on_ |= RetryStateImpl::parseRetryGrpcOn(retry_policy.retry_on()).first; diff --git a/source/common/router/config_impl.h b/source/common/router/config_impl.h index f291fd45e7b74..cc7a9a3dc5497 100644 --- a/source/common/router/config_impl.h +++ b/source/common/router/config_impl.h @@ -297,6 +297,7 @@ class RetryPolicyImpl : public RetryPolicy { // Router::RetryPolicy std::chrono::milliseconds perTryTimeout() const override { return per_try_timeout_; } + std::chrono::milliseconds perTryIdleTimeout() const override { return per_try_idle_timeout_; } uint32_t numRetries() const override { return num_retries_; } uint32_t retryOn() const override { return retry_on_; } std::vector retryHostPredicates() const override; @@ -320,6 +321,7 @@ class RetryPolicyImpl : public RetryPolicy { private: std::chrono::milliseconds per_try_timeout_{0}; + std::chrono::milliseconds per_try_idle_timeout_{0}; // We set the number of retries to 1 by default (i.e. when no route or vhost level retry policy is // set) so that when retries get enabled through the x-envoy-retry-on header we default to 1 // retry. 
diff --git a/source/common/router/router.cc b/source/common/router/router.cc index c8874c6923373..8e3178738e411 100644 --- a/source/common/router/router.cc +++ b/source/common/router/router.cc @@ -160,6 +160,7 @@ FilterUtility::finalTimeout(const RouteEntry& route, Http::RequestHeaderMap& req } } timeout.per_try_timeout_ = route.retryPolicy().perTryTimeout(); + timeout.per_try_idle_timeout_ = route.retryPolicy().perTryIdleTimeout(); uint64_t header_timeout; @@ -969,13 +970,24 @@ void Filter::onSoftPerTryTimeout(UpstreamRequest& upstream_request) { } } +void Filter::onPerTryIdleTimeout(UpstreamRequest& upstream_request) { + onPerTryTimeoutCommon(upstream_request, cluster_->stats().upstream_rq_per_try_idle_timeout_, + StreamInfo::ResponseCodeDetails::get().UpstreamPerTryIdleTimeout); +} + void Filter::onPerTryTimeout(UpstreamRequest& upstream_request) { + onPerTryTimeoutCommon(upstream_request, cluster_->stats().upstream_rq_per_try_timeout_, + StreamInfo::ResponseCodeDetails::get().UpstreamPerTryTimeout); +} + +void Filter::onPerTryTimeoutCommon(UpstreamRequest& upstream_request, Stats::Counter& error_counter, + const std::string& response_code_details) { if (hedging_params_.hedge_on_per_try_timeout_) { onSoftPerTryTimeout(upstream_request); return; } - cluster_->stats().upstream_rq_per_try_timeout_.inc(); + error_counter.inc(); if (upstream_request.upstreamHost()) { upstream_request.upstreamHost()->stats().rq_timeout_.inc(); } @@ -993,8 +1005,7 @@ void Filter::onPerTryTimeout(UpstreamRequest& upstream_request) { // Remove this upstream request from the list now that we're done with it. 
upstream_request.removeFromList(upstream_requests_); - onUpstreamTimeoutAbort(StreamInfo::ResponseFlag::UpstreamRequestTimeout, - StreamInfo::ResponseCodeDetails::get().UpstreamPerTryTimeout); + onUpstreamTimeoutAbort(StreamInfo::ResponseFlag::UpstreamRequestTimeout, response_code_details); } void Filter::onStreamMaxDurationReached(UpstreamRequest& upstream_request) { diff --git a/source/common/router/router.h b/source/common/router/router.h index 9a1d057793a26..2e3a21eacda90 100644 --- a/source/common/router/router.h +++ b/source/common/router/router.h @@ -51,6 +51,7 @@ class FilterUtility { struct TimeoutData { std::chrono::milliseconds global_timeout_{0}; std::chrono::milliseconds per_try_timeout_{0}; + std::chrono::milliseconds per_try_idle_timeout_{0}; }; struct HedgingParams { @@ -271,6 +272,7 @@ class RouterFilterInterface { UpstreamRequest& upstream_request) PURE; virtual void onUpstreamHostSelected(Upstream::HostDescriptionConstSharedPtr host) PURE; virtual void onPerTryTimeout(UpstreamRequest& upstream_request) PURE; + virtual void onPerTryIdleTimeout(UpstreamRequest& upstream_request) PURE; virtual void onStreamMaxDurationReached(UpstreamRequest& upstream_request) PURE; virtual Http::StreamDecoderFilterCallbacks* callbacks() PURE; @@ -445,6 +447,7 @@ class Filter : Logger::Loggable, UpstreamRequest& upstream_request) override; void onUpstreamHostSelected(Upstream::HostDescriptionConstSharedPtr host) override; void onPerTryTimeout(UpstreamRequest& upstream_request) override; + void onPerTryIdleTimeout(UpstreamRequest& upstream_request) override; void onStreamMaxDurationReached(UpstreamRequest& upstream_request) override; Http::StreamDecoderFilterCallbacks* callbacks() override { return callbacks_; } Upstream::ClusterInfoConstSharedPtr cluster() override { return cluster_; } @@ -469,8 +472,8 @@ class Filter : Logger::Loggable, private: friend class UpstreamRequest; - RetryStatePtr retry_state_; - + void onPerTryTimeoutCommon(UpstreamRequest& 
upstream_request, Stats::Counter& error_counter, + const std::string& response_code_details); Stats::StatName upstreamZone(Upstream::HostDescriptionConstSharedPtr upstream_host); void chargeUpstreamCode(uint64_t response_status_code, const Http::ResponseHeaderMap& response_headers, @@ -528,6 +531,7 @@ class Filter : Logger::Loggable, uint64_t grpc_to_http_status); Http::Context& httpContext() { return config_.http_context_; } + RetryStatePtr retry_state_; FilterConfig& config_; Http::StreamDecoderFilterCallbacks* callbacks_{}; RouteConstSharedPtr route_; diff --git a/source/common/router/upstream_request.cc b/source/common/router/upstream_request.cc index 35cbfc8f28fcf..147f7af49fd01 100644 --- a/source/common/router/upstream_request.cc +++ b/source/common/router/upstream_request.cc @@ -80,6 +80,10 @@ UpstreamRequest::~UpstreamRequest() { // Allows for testing. per_try_timeout_->disableTimer(); } + if (per_try_idle_timeout_ != nullptr) { + // Allows for testing. + per_try_idle_timeout_->disableTimer(); + } if (max_stream_duration_timer_ != nullptr) { max_stream_duration_timer_->disableTimer(); } @@ -136,6 +140,7 @@ void UpstreamRequest::decode100ContinueHeaders(Http::ResponseHeaderMapPtr&& head void UpstreamRequest::decodeHeaders(Http::ResponseHeaderMapPtr&& headers, bool end_stream) { ScopeTrackerScopeState scope(&parent_.callbacks()->scope(), parent_.callbacks()->dispatcher()); + resetPerTryIdleTimer(); addResponseHeadersSize(headers->byteSize()); // We drop 1xx other than 101 on the floor; 101 upgrade headers need to be passed to the client as @@ -177,6 +182,7 @@ void UpstreamRequest::decodeHeaders(Http::ResponseHeaderMapPtr&& headers, bool e void UpstreamRequest::decodeData(Buffer::Instance& data, bool end_stream) { ScopeTrackerScopeState scope(&parent_.callbacks()->scope(), parent_.callbacks()->dispatcher()); + resetPerTryIdleTimer(); maybeEndDecode(end_stream); stream_info_.addBytesReceived(data.length()); parent_.onUpstreamData(data, *this, end_stream); @@ 
-331,6 +337,12 @@ void UpstreamRequest::resetStream() { } } +void UpstreamRequest::resetPerTryIdleTimer() { + if (per_try_idle_timeout_ != nullptr) { + per_try_idle_timeout_->enableTimer(parent_.timeout().per_try_idle_timeout_); + } +} + void UpstreamRequest::setupPerTryTimeout() { ASSERT(!per_try_timeout_); if (parent_.timeout().per_try_timeout_.count() > 0) { @@ -338,6 +350,19 @@ void UpstreamRequest::setupPerTryTimeout() { parent_.callbacks()->dispatcher().createTimer([this]() -> void { onPerTryTimeout(); }); per_try_timeout_->enableTimer(parent_.timeout().per_try_timeout_); } + + ASSERT(!per_try_idle_timeout_); + if (parent_.timeout().per_try_idle_timeout_.count() > 0) { + per_try_idle_timeout_ = + parent_.callbacks()->dispatcher().createTimer([this]() -> void { onPerTryIdleTimeout(); }); + resetPerTryIdleTimer(); + } +} + +void UpstreamRequest::onPerTryIdleTimeout() { + ENVOY_STREAM_LOG(debug, "upstream per try idle timeout", *parent_.callbacks()); + stream_info_.setResponseFlag(StreamInfo::ResponseFlag::StreamIdleTimeout); + parent_.onPerTryIdleTimeout(*this); } void UpstreamRequest::onPerTryTimeout() { diff --git a/source/common/router/upstream_request.h b/source/common/router/upstream_request.h index f0b07e8bdacd5..98f214c0d277d 100644 --- a/source/common/router/upstream_request.h +++ b/source/common/router/upstream_request.h @@ -46,7 +46,6 @@ class UpstreamRequest : public Logger::Loggable, void resetStream(); void setupPerTryTimeout(); - void onPerTryTimeout(); void maybeEndDecode(bool end_stream); void onUpstreamHostSelected(Upstream::HostDescriptionConstSharedPtr host); @@ -132,11 +131,15 @@ class UpstreamRequest : public Logger::Loggable, void addResponseHeadersSize(uint64_t size) { response_headers_size_ = response_headers_size_.value_or(0) + size; } + void resetPerTryIdleTimer(); + void onPerTryTimeout(); + void onPerTryIdleTimeout(); RouterFilterInterface& parent_; std::unique_ptr conn_pool_; bool grpc_rq_success_deferred_; Event::TimerPtr 
per_try_timeout_; + Event::TimerPtr per_try_idle_timeout_; std::unique_ptr upstream_; absl::optional deferred_reset_reason_; Buffer::InstancePtr buffered_request_body_; diff --git a/test/common/router/config_impl_test.cc b/test/common/router/config_impl_test.cc index 71bcbb2e18633..ccf64a29195ff 100644 --- a/test/common/router/config_impl_test.cc +++ b/test/common/router/config_impl_test.cc @@ -3337,6 +3337,7 @@ TEST_F(RouteMatcherTest, Retry) { cluster: www2 retry_policy: per_try_timeout: 1s + per_try_idle_timeout: 5s num_retries: 3 retry_on: 5xx,gateway-error,connect-failure,reset )EOF"; @@ -3349,6 +3350,11 @@ TEST_F(RouteMatcherTest, Retry) { ->routeEntry() ->retryPolicy() .perTryTimeout()); + EXPECT_EQ(std::chrono::milliseconds(0), + config.route(genHeaders("www.lyft.com", "/foo", "GET"), 0) + ->routeEntry() + ->retryPolicy() + .perTryIdleTimeout()); EXPECT_EQ(1U, config.route(genHeaders("www.lyft.com", "/foo", "GET"), 0) ->routeEntry() ->retryPolicy() @@ -3378,6 +3384,11 @@ TEST_F(RouteMatcherTest, Retry) { ->routeEntry() ->retryPolicy() .perTryTimeout()); + EXPECT_EQ(std::chrono::milliseconds(5000), + config.route(genHeaders("www.lyft.com", "/", "GET"), 0) + ->routeEntry() + ->retryPolicy() + .perTryIdleTimeout()); EXPECT_EQ(3U, config.route(genHeaders("www.lyft.com", "/", "GET"), 0) ->routeEntry() ->retryPolicy() diff --git a/test/common/router/router_test.cc b/test/common/router/router_test.cc index 22c0fa2f2bbf0..9bf90e943e9a6 100644 --- a/test/common/router/router_test.cc +++ b/test/common/router/router_test.cc @@ -64,6 +64,20 @@ using testing::ReturnRef; namespace Envoy { namespace Router { +// Allows verifying the state of the upstream StreamInfo +class TestAccessLog : public AccessLog::Instance { +public: + explicit TestAccessLog(std::function func) : func_(func) {} + + void log(const Http::RequestHeaderMap*, const Http::ResponseHeaderMap*, + const Http::ResponseTrailerMap*, const StreamInfo::StreamInfo& info) override { + func_(info); + } + +private: 
+ std::function func_; +}; + class RouterTest : public RouterTestBase { public: RouterTest() : RouterTestBase(false, false, false, Protobuf::RepeatedPtrField{}) { @@ -1940,6 +1954,140 @@ TEST_F(RouterTest, UpstreamTimeoutWithAltResponse) { EXPECT_TRUE(verifyHostUpstreamStats(0, 1)); } +// Verify the upstream per try idle timeout. +TEST_F(RouterTest, UpstreamPerTryIdleTimeout) { + InSequence s; + + callbacks_.route_->route_entry_.retry_policy_.per_try_idle_timeout_ = + std::chrono::milliseconds(3000); + + // This pattern helps ensure that we're actually invoking the callback. + bool filter_state_verified = false; + router_.config().upstream_logs_.push_back( + std::make_shared([&](const auto& stream_info) { + filter_state_verified = + stream_info.hasResponseFlag(StreamInfo::ResponseFlag::StreamIdleTimeout); + })); + + NiceMock encoder; + Http::ResponseDecoder* response_decoder = nullptr; + Http::ConnectionPool::Callbacks* pool_callbacks; + + EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_, newStream(_, _)) + .WillOnce(Invoke( + [&](Http::ResponseDecoder& decoder, + Http::ConnectionPool::Callbacks& callbacks) -> Http::ConnectionPool::Cancellable* { + response_decoder = &decoder; + pool_callbacks = &callbacks; + return nullptr; + })); + + Http::TestRequestHeaderMapImpl headers; + HttpTestUtility::addDefaultHeaders(headers); + router_.decodeHeaders(headers, false); + + response_timeout_ = new Event::MockTimer(&callbacks_.dispatcher_); + EXPECT_CALL(*response_timeout_, enableTimer(_, _)); + + Buffer::OwnedImpl data; + router_.decodeData(data, true); + + EXPECT_CALL(callbacks_.stream_info_, onUpstreamHostSelected(_)) + .WillOnce(Invoke([&](const Upstream::HostDescriptionConstSharedPtr host) -> void { + EXPECT_EQ(host_address_, host->address()); + })); + + per_try_idle_timeout_ = new Event::MockTimer(&callbacks_.dispatcher_); + EXPECT_CALL(*per_try_idle_timeout_, enableTimer(std::chrono::milliseconds(3000), _)); + EXPECT_EQ(0U, + 
callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value()); + // The per try timeout timer should not be started yet. + pool_callbacks->onPoolReady(encoder, cm_.thread_local_cluster_.conn_pool_.host_, + upstream_stream_info_, Http::Protocol::Http10); + EXPECT_EQ(1U, + callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value()); + + EXPECT_CALL(encoder.stream_, resetStream(Http::StreamResetReason::LocalReset)); + EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_.host_->outlier_detector_, + putResult(Upstream::Outlier::Result::LocalOriginTimeout, _)); + EXPECT_CALL(*per_try_idle_timeout_, disableTimer()); + EXPECT_CALL(callbacks_.stream_info_, + setResponseFlag(StreamInfo::ResponseFlag::UpstreamRequestTimeout)); + EXPECT_CALL(*response_timeout_, disableTimer()); + EXPECT_CALL(callbacks_.stream_info_, setResponseCodeDetails("upstream_per_try_idle_timeout")); + Http::TestResponseHeaderMapImpl response_headers{ + {":status", "504"}, {"content-length", "24"}, {"content-type", "text/plain"}}; + EXPECT_CALL(callbacks_, encodeHeaders_(HeaderMapEqualRef(&response_headers), false)); + EXPECT_CALL(callbacks_, encodeData(_, true)); + per_try_idle_timeout_->invokeCallback(); + + EXPECT_EQ(1U, cm_.thread_local_cluster_.cluster_.info_->stats_store_ + .counter("upstream_rq_per_try_idle_timeout") + .value()); + EXPECT_EQ(1UL, cm_.thread_local_cluster_.conn_pool_.host_->stats().rq_timeout_.value()); + EXPECT_TRUE(verifyHostUpstreamStats(0, 1)); + EXPECT_TRUE(filter_state_verified); +} + +// Verify the upstream per try idle timeout gets reset in the success case. 
+TEST_F(RouterTest, UpstreamPerTryIdleTimeoutSuccess) { + InSequence s; + + callbacks_.route_->route_entry_.retry_policy_.per_try_idle_timeout_ = + std::chrono::milliseconds(3000); + + NiceMock encoder; + Http::ResponseDecoder* response_decoder = nullptr; + Http::ConnectionPool::Callbacks* pool_callbacks; + + EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_, newStream(_, _)) + .WillOnce(Invoke( + [&](Http::ResponseDecoder& decoder, + Http::ConnectionPool::Callbacks& callbacks) -> Http::ConnectionPool::Cancellable* { + response_decoder = &decoder; + pool_callbacks = &callbacks; + return nullptr; + })); + + Http::TestRequestHeaderMapImpl headers; + HttpTestUtility::addDefaultHeaders(headers); + router_.decodeHeaders(headers, false); + + response_timeout_ = new Event::MockTimer(&callbacks_.dispatcher_); + EXPECT_CALL(*response_timeout_, enableTimer(_, _)); + + Buffer::OwnedImpl data; + router_.decodeData(data, true); + + EXPECT_CALL(callbacks_.stream_info_, onUpstreamHostSelected(_)) + .WillOnce(Invoke([&](const Upstream::HostDescriptionConstSharedPtr host) -> void { + EXPECT_EQ(host_address_, host->address()); + })); + + per_try_idle_timeout_ = new Event::MockTimer(&callbacks_.dispatcher_); + EXPECT_CALL(*per_try_idle_timeout_, enableTimer(std::chrono::milliseconds(3000), _)); + EXPECT_EQ(0U, + callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value()); + // The per try timeout timer should not be started yet. 
+ pool_callbacks->onPoolReady(encoder, cm_.thread_local_cluster_.conn_pool_.host_, + upstream_stream_info_, Http::Protocol::Http10); + EXPECT_EQ(1U, + callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value()); + + EXPECT_CALL(*per_try_idle_timeout_, enableTimer(std::chrono::milliseconds(3000), _)); + Http::ResponseHeaderMapPtr response_headers( + new Http::TestResponseHeaderMapImpl{{":status", "200"}}); + response_decoder->decodeHeaders(std::move(response_headers), false); + + EXPECT_CALL(*per_try_idle_timeout_, enableTimer(std::chrono::milliseconds(3000), _)); + response_decoder->decodeData(data, false); + + EXPECT_CALL(*per_try_idle_timeout_, enableTimer(std::chrono::milliseconds(3000), _)); + EXPECT_CALL(*per_try_idle_timeout_, disableTimer()); + EXPECT_CALL(*response_timeout_, disableTimer()); + response_decoder->decodeData(data, true); +} + // Verifies that the per try timeout is initialized once the downstream request has been read. TEST_F(RouterTest, UpstreamPerTryTimeout) { NiceMock encoder; @@ -1964,7 +2112,7 @@ TEST_F(RouterTest, UpstreamPerTryTimeout) { router_.decodeHeaders(headers, false); // We verify that both timeouts are started after decodeData(_, true) is called. This - // verifies that we are not starting the initial per try timeout on the first onPoolReady.FOO + // verifies that we are not starting the initial per try timeout on the first onPoolReady. expectPerTryTimerCreate(); expectResponseTimerCreate(); @@ -1992,7 +2140,7 @@ TEST_F(RouterTest, UpstreamPerTryTimeout) { EXPECT_TRUE(verifyHostUpstreamStats(0, 1)); } -// Verifies that the per try timeout starts when onPoolReady is called when it occursFOO +// Verifies that the per try timeout starts when onPoolReady is called when it occurs // after the downstream request has been read. 
TEST_F(RouterTest, UpstreamPerTryTimeoutDelayedPoolReady) { NiceMock encoder; @@ -2017,7 +2165,7 @@ TEST_F(RouterTest, UpstreamPerTryTimeoutDelayedPoolReady) { Buffer::OwnedImpl data; router_.decodeData(data, true); - // Per try timeout starts when onPoolReady is called.FOO + // Per try timeout starts when onPoolReady is called. expectPerTryTimerCreate(); EXPECT_CALL(callbacks_.stream_info_, onUpstreamHostSelected(_)) .WillOnce(Invoke([&](const Upstream::HostDescriptionConstSharedPtr host) -> void { @@ -4903,20 +5051,6 @@ TEST_F(RouterTest, DirectResponseWithoutLocation) { EXPECT_EQ(1UL, config_.stats_.rq_direct_response_.value()); } -// Allows verifying the state of the upstream StreamInfo -class TestAccessLog : public AccessLog::Instance { -public: - explicit TestAccessLog(std::function func) : func_(func) {} - - void log(const Http::RequestHeaderMap*, const Http::ResponseHeaderMap*, - const Http::ResponseTrailerMap*, const StreamInfo::StreamInfo& info) override { - func_(info); - } - -private: - std::function func_; -}; - // Verifies that we propagate the upstream connection filter state to the upstream and downstream // request filter state. 
TEST_F(RouterTest, PropagatesUpstreamFilterState) { diff --git a/test/common/router/router_test_base.cc b/test/common/router/router_test_base.cc index 671bbee7b588e..dc598c6e4f010 100644 --- a/test/common/router/router_test_base.cc +++ b/test/common/router/router_test_base.cc @@ -51,6 +51,11 @@ void RouterTestBase::expectPerTryTimerCreate() { EXPECT_CALL(*per_try_timeout_, disableTimer()); } +void RouterTestBase::expectPerTryIdleTimerCreate(std::chrono::milliseconds timeout) { + per_try_idle_timeout_ = new Event::MockTimer(&callbacks_.dispatcher_); + EXPECT_CALL(*per_try_idle_timeout_, enableTimer(timeout, _)); +} + void RouterTestBase::expectMaxStreamDurationTimerCreate(std::chrono::milliseconds duration_msec) { max_stream_duration_timer_ = new Event::MockTimer(&callbacks_.dispatcher_); EXPECT_CALL(*max_stream_duration_timer_, enableTimer(Eq(duration_msec), _)); diff --git a/test/common/router/router_test_base.h b/test/common/router/router_test_base.h index 9ac4a94bfc92f..0dff3f0ade1e7 100644 --- a/test/common/router/router_test_base.h +++ b/test/common/router/router_test_base.h @@ -58,6 +58,7 @@ class RouterTestBase : public testing::Test { void expectResponseTimerCreate(); void expectPerTryTimerCreate(); + void expectPerTryIdleTimerCreate(std::chrono::milliseconds timeout); void expectMaxStreamDurationTimerCreate(std::chrono::milliseconds duration_msec); AssertionResult verifyHostUpstreamStats(uint64_t success, uint64_t error); void verifyMetadataMatchCriteriaFromRequest(bool route_entry_has_match); @@ -97,6 +98,7 @@ class RouterTestBase : public testing::Test { RouterTestFilter router_; Event::MockTimer* response_timeout_{}; Event::MockTimer* per_try_timeout_{}; + Event::MockTimer* per_try_idle_timeout_{}; Event::MockTimer* max_stream_duration_timer_{}; Network::Address::InstanceConstSharedPtr host_address_{ Network::Utility::resolveUrl("tcp://10.0.0.5:9211")}; diff --git a/test/integration/idle_timeout_integration_test.cc 
b/test/integration/idle_timeout_integration_test.cc index 46cbe957e6ae5..6d246bf90e5b2 100644 --- a/test/integration/idle_timeout_integration_test.cc +++ b/test/integration/idle_timeout_integration_test.cc @@ -35,6 +35,14 @@ class IdleTimeoutIntegrationTest : public HttpProtocolIntegrationTest { hcm.mutable_request_timeout()->set_seconds(0); hcm.mutable_request_timeout()->set_nanos(RequestTimeoutMs * 1000 * 1000); } + if (enable_per_try_idle_timeout_) { + auto* route_config = hcm.mutable_route_config(); + auto* virtual_host = route_config->mutable_virtual_hosts(0); + auto* route = virtual_host->mutable_routes(0)->mutable_route(); + auto* retry_policy = route->mutable_retry_policy(); + retry_policy->mutable_per_try_idle_timeout()->set_seconds(0); + retry_policy->mutable_per_try_idle_timeout()->set_nanos(IdleTimeoutMs * 1000 * 1000); + } // For validating encode100ContinueHeaders() timer kick. hcm.set_proxy_100_continue(true); @@ -62,6 +70,26 @@ class IdleTimeoutIntegrationTest : public HttpProtocolIntegrationTest { return response; } + IntegrationStreamDecoderPtr setupPerTryIdleTimeoutTest(const char* method = "GET") { + initialize(); + codec_client_ = makeHttpConnection(makeClientConnection((lookupPort("http")))); + auto response = codec_client_->makeHeaderOnlyRequest( + Http::TestRequestHeaderMapImpl{{":method", method}, + {":path", "/test/long/url"}, + {":scheme", "http"}, + {":authority", "host"}}); + AssertionResult result = + fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_); + RELEASE_ASSERT(result, result.message()); + result = fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_); + RELEASE_ASSERT(result, result.message()); + result = upstream_request_->waitForHeadersComplete(); + RELEASE_ASSERT(result, result.message()); + result = upstream_request_->waitForEndStream(*dispatcher_); + RELEASE_ASSERT(result, result.message()); + return response; + } + void sleep() { 
test_time_.timeSystem().advanceTimeWait(std::chrono::milliseconds(IdleTimeoutMs / 2)); } @@ -86,6 +114,7 @@ class IdleTimeoutIntegrationTest : public HttpProtocolIntegrationTest { bool enable_global_idle_timeout_{false}; bool enable_per_stream_idle_timeout_{false}; bool enable_request_timeout_{false}; + bool enable_per_try_idle_timeout_{false}; DangerousDeprecatedTestTime test_time_; }; @@ -275,6 +304,23 @@ TEST_P(IdleTimeoutIntegrationTest, PerStreamIdleTimeoutAfterUpstreamHeaders) { EXPECT_EQ("", response->body()); } +// Per-try idle timeout after upstream headers have been sent. +TEST_P(IdleTimeoutIntegrationTest, PerTryIdleTimeoutAfterUpstreamHeaders) { + enable_per_try_idle_timeout_ = true; + auto response = setupPerTryIdleTimeoutTest(); + + upstream_request_->encodeHeaders(Http::TestResponseHeaderMapImpl{{":status", "200"}}, false); + + waitForTimeout(*response); + test_server_->waitForCounterGe("cluster.cluster_0.upstream_rq_per_try_idle_timeout", 1); + + EXPECT_TRUE(upstream_request_->complete()); + EXPECT_EQ(0U, upstream_request_->bodyLength()); + EXPECT_FALSE(response->complete()); + EXPECT_EQ("200", response->headers().getStatusValue()); + EXPECT_EQ("", response->body()); +} + // Per-stream idle timeout after a sequence of header/data events. 
TEST_P(IdleTimeoutIntegrationTest, PerStreamIdleTimeoutAfterBidiData) { enable_per_stream_idle_timeout_ = true; diff --git a/test/mocks/router/mocks.h b/test/mocks/router/mocks.h index 98c1c6710b201..1bf7e452ed56b 100644 --- a/test/mocks/router/mocks.h +++ b/test/mocks/router/mocks.h @@ -104,6 +104,7 @@ class TestRetryPolicy : public RetryPolicy { // Router::RetryPolicy std::chrono::milliseconds perTryTimeout() const override { return per_try_timeout_; } + std::chrono::milliseconds perTryIdleTimeout() const override { return per_try_idle_timeout_; } uint32_t numRetries() const override { return num_retries_; } uint32_t retryOn() const override { return retry_on_; } MOCK_METHOD(std::vector, retryHostPredicates, (), (const)); @@ -127,6 +128,7 @@ class TestRetryPolicy : public RetryPolicy { } std::chrono::milliseconds per_try_timeout_{0}; + std::chrono::milliseconds per_try_idle_timeout_{0}; uint32_t num_retries_{}; uint32_t retry_on_{}; uint32_t host_selection_max_attempts_; diff --git a/test/mocks/router/router_filter_interface.h b/test/mocks/router/router_filter_interface.h index 55aa4d9a42210..d1349a4103ce1 100644 --- a/test/mocks/router/router_filter_interface.h +++ b/test/mocks/router/router_filter_interface.h @@ -31,6 +31,7 @@ class MockRouterFilterInterface : public RouterFilterInterface { UpstreamRequest& upstream_request)); MOCK_METHOD(void, onUpstreamHostSelected, (Upstream::HostDescriptionConstSharedPtr host)); MOCK_METHOD(void, onPerTryTimeout, (UpstreamRequest & upstream_request)); + MOCK_METHOD(void, onPerTryIdleTimeout, (UpstreamRequest & upstream_request)); MOCK_METHOD(void, onStreamMaxDurationReached, (UpstreamRequest & upstream_request)); MOCK_METHOD(Envoy::Http::StreamDecoderFilterCallbacks*, callbacks, ()); From f5a85410fca9792dc56e20ac1597f8dfc3de7fce Mon Sep 17 00:00:00 2001 From: phlax Date: Wed, 15 Sep 2021 04:09:08 +0100 Subject: [PATCH 045/121] bazel: Add json_data and providers (#18086) Signed-off-by: Ryan Northey Signed-off-by: gayang --- 
api/bazel/BUILD | 7 +++++++ bazel/BUILD | 7 +++++++ bazel/api_binding.bzl | 1 + bazel/utils.bzl | 18 ++++++++++++++++++ contrib/BUILD | 8 ++++++++ source/extensions/BUILD | 8 ++++++++ 6 files changed, 49 insertions(+) create mode 100644 bazel/utils.bzl diff --git a/api/bazel/BUILD b/api/bazel/BUILD index 0e5c8aea75b01..a8b7b161067fd 100644 --- a/api/bazel/BUILD +++ b/api/bazel/BUILD @@ -1,4 +1,6 @@ load("@io_bazel_rules_go//proto:compiler.bzl", "go_proto_compiler") +load("//:utils.bzl", "json_data") +load(":repository_locations.bzl", "REPOSITORY_LOCATIONS_SPEC") licenses(["notice"]) # Apache 2 @@ -15,3 +17,8 @@ go_proto_compiler( valid_archive = False, visibility = ["//visibility:public"], ) + +json_data( + name = "repository_locations", + data = REPOSITORY_LOCATIONS_SPEC, +) diff --git a/bazel/BUILD b/bazel/BUILD index 303ab531bead3..3b22ffc8ff878 100644 --- a/bazel/BUILD +++ b/bazel/BUILD @@ -1,8 +1,10 @@ load("@rules_cc//cc:defs.bzl", "cc_library", "cc_proto_library") load("//bazel:envoy_build_system.bzl", "envoy_package") load("//bazel:envoy_internal.bzl", "envoy_select_force_libcpp") +load("//bazel:utils.bzl", "json_data") load("@bazel_skylib//lib:selects.bzl", "selects") load("@bazel_skylib//rules:common_settings.bzl", "bool_flag") +load(":repository_locations.bzl", "REPOSITORY_LOCATIONS_SPEC") licenses(["notice"]) # Apache 2 @@ -591,3 +593,8 @@ alias( name = "windows", actual = "@bazel_tools//src/conditions:windows", ) + +json_data( + name = "repository_locations", + data = REPOSITORY_LOCATIONS_SPEC, +) diff --git a/bazel/api_binding.bzl b/bazel/api_binding.bzl index 8d46d4c1827b8..97718ef5346b9 100644 --- a/bazel/api_binding.bzl +++ b/bazel/api_binding.bzl @@ -13,6 +13,7 @@ def _default_envoy_api_impl(ctx): ] for d in api_dirs: ctx.symlink(ctx.path(ctx.attr.envoy_root).dirname.get_child(ctx.attr.reldir).get_child(d), d) + ctx.symlink(ctx.path(ctx.attr.envoy_root).dirname.get_child("bazel").get_child("utils.bzl"), "utils.bzl") _default_envoy_api = 
repository_rule( implementation = _default_envoy_api_impl, diff --git a/bazel/utils.bzl b/bazel/utils.bzl new file mode 100644 index 0000000000000..0961f00eb446a --- /dev/null +++ b/bazel/utils.bzl @@ -0,0 +1,18 @@ +load("@bazel_skylib//rules:write_file.bzl", "write_file") + +def json_data( + name, + data, + visibility = ["//visibility:public"], + **kwargs): + """Write a bazel object to a file + + The provided `data` object should be json serializable. + """ + write_file( + name = name, + out = "%s.json" % name, + content = json.encode(data).split("\n"), + visibility = visibility, + **kwargs + ) diff --git a/contrib/BUILD b/contrib/BUILD index aa0691c6142a8..ceedb6dfcaacb 100644 --- a/contrib/BUILD +++ b/contrib/BUILD @@ -1,6 +1,14 @@ +load("//bazel:utils.bzl", "json_data") +load(":contrib_build_config.bzl", "CONTRIB_EXTENSIONS") + licenses(["notice"]) # Apache 2 exports_files([ "extensions_metadata.yaml", "contrib_build_config.bzl", ]) + +json_data( + name = "contrib_extensions_build_config", + data = CONTRIB_EXTENSIONS, +) diff --git a/source/extensions/BUILD b/source/extensions/BUILD index 5d4f6c8a9b745..7105032fd6d61 100644 --- a/source/extensions/BUILD +++ b/source/extensions/BUILD @@ -1,6 +1,14 @@ +load("//bazel:utils.bzl", "json_data") +load(":extensions_build_config.bzl", "EXTENSIONS") + licenses(["notice"]) # Apache 2 exports_files([ "extensions_metadata.yaml", "extensions_build_config.bzl", ]) + +json_data( + name = "extensions_build_config", + data = EXTENSIONS, +) From 823bcc6117bfac76f3171ead0c0898e9e502a6af Mon Sep 17 00:00:00 2001 From: Kenjiro Nakayama Date: Wed, 15 Sep 2021 20:55:22 +0900 Subject: [PATCH 046/121] Add remote_refuse and remote_reset in response code details docs (#18132) These two are missed in the [doc](https://www.envoyproxy.io/docs/envoy/latest/configuration/http/http_conn_man/response_code_details#per-codec-details). 
Risk Level: low Testing: n/a Docs Changes: yes Release Notes: n/a Platform Specific Features: n/a Signed-off-by: Kenjiro Nakayama Signed-off-by: gayang --- .../http/http_conn_man/response_code_details.rst | 4 ++++ source/common/http/http1/codec_impl.cc | 2 +- source/common/http/http2/codec_impl.cc | 2 +- 3 files changed, 6 insertions(+), 2 deletions(-) diff --git a/docs/root/configuration/http/http_conn_man/response_code_details.rst b/docs/root/configuration/http/http_conn_man/response_code_details.rst index 342350c6a4f8c..58acfe7b9c519 100644 --- a/docs/root/configuration/http/http_conn_man/response_code_details.rst +++ b/docs/root/configuration/http/http_conn_man/response_code_details.rst @@ -101,6 +101,8 @@ All http2 details are rooted at *http2.* http2.unexpected_underscore, Envoy was configured to drop requests with header keys beginning with underscores. http2.unknown.nghttp2.error, An unknown error was encountered by nghttp2 http2.violation.of.messaging.rule, The stream was in violation of a HTTP/2 messaging rule. + http2.remote_refuse, The peer refused the stream. + http2.remote_reset, The peer reset the stream. Http3 details ~~~~~~~~~~~~~ @@ -116,4 +118,6 @@ All http3 details are rooted at *http3.* http3.unexpected_underscore, Envoy was configured to drop or reject requests with header keys beginning with underscores. http3.too_many_headers, Either incoming request or response headers contained too many headers. http3.too_many_trailers, Either incoming request or response trailers contained too many entries. + http3.remote_refuse, The peer refused the stream. + http3.remote_reset, The peer reset the stream. 
diff --git a/source/common/http/http1/codec_impl.cc b/source/common/http/http1/codec_impl.cc index de88ae236c440..77bc700656972 100644 --- a/source/common/http/http1/codec_impl.cc +++ b/source/common/http/http1/codec_impl.cc @@ -33,7 +33,7 @@ namespace Http1 { namespace { // Changes or additions to details should be reflected in -// docs/root/configuration/http/http_conn_man/response_code_details_details.rst +// docs/root/configuration/http/http_conn_man/response_code_details.rst struct Http1ResponseCodeDetailValues { const absl::string_view TooManyHeaders = "http1.too_many_headers"; const absl::string_view HeadersTooLarge = "http1.headers_too_large"; diff --git a/source/common/http/http2/codec_impl.cc b/source/common/http/http2/codec_impl.cc index 2ccc1d32f9f46..bf948b8c88455 100644 --- a/source/common/http/http2/codec_impl.cc +++ b/source/common/http/http2/codec_impl.cc @@ -34,7 +34,7 @@ namespace Http { namespace Http2 { // Changes or additions to details should be reflected in -// docs/root/configuration/http/http_conn_man/response_code_details_details.rst +// docs/root/configuration/http/http_conn_man/response_code_details.rst class Http2ResponseCodeDetailValues { public: // Invalid HTTP header field was received and stream is going to be From 19ad6ff1f7a1e805ce127c33110c40fef2900b7b Mon Sep 17 00:00:00 2001 From: Takeshi Yoneda Date: Wed, 15 Sep 2021 21:10:16 +0900 Subject: [PATCH 047/121] admin: optimize prometheus format stat endpoint. (#18124) * admin: optimize prometheus format endpoint. Signed-off-by: Takeshi Yoneda * format_value can be const.
Signed-off-by: Takeshi Yoneda Signed-off-by: gayang --- source/server/admin/stats_handler.cc | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/source/server/admin/stats_handler.cc b/source/server/admin/stats_handler.cc index 0a3c4a5ea0d01..929e9474587dd 100644 --- a/source/server/admin/stats_handler.cc +++ b/source/server/admin/stats_handler.cc @@ -82,6 +82,11 @@ Http::Code StatsHandler::handlerStats(absl::string_view url, return Http::Code::BadRequest; } + const absl::optional format_value = Utility::formatParam(params); + if (format_value.has_value() && format_value.value() == "prometheus") { + return handlerPrometheusStats(url, response_headers, response, admin_stream); + } + std::map all_stats; for (const Stats::CounterSharedPtr& counter : server_.stats().counters()) { if (shouldShowMetric(*counter, used_only, regex)) { @@ -103,7 +108,6 @@ Http::Code StatsHandler::handlerStats(absl::string_view url, } } - absl::optional format_value = Utility::formatParam(params); if (!format_value.has_value()) { // Display plain stats if format query param is not there. 
statsAsText(all_stats, text_readouts, server_.stats().histograms(), used_only, regex, response); @@ -117,10 +121,6 @@ Http::Code StatsHandler::handlerStats(absl::string_view url, return Http::Code::OK; } - if (format_value.value() == "prometheus") { - return handlerPrometheusStats(url, response_headers, response, admin_stream); - } - response.add("usage: /stats?format=json or /stats?format=prometheus \n"); response.add("\n"); return Http::Code::NotFound; From 9877439983dcb7b8840c94fd1ebeeb1fd4d84469 Mon Sep 17 00:00:00 2001 From: alyssawilk Date: Wed, 15 Sep 2021 09:15:57 -0400 Subject: [PATCH 048/121] api: moving the k-v store to common (#18117) As discussed on #18034 Risk Level: medium Testing: n/a Docs Changes: n/a Release Notes: n/a Signed-off-by: Alyssa Wilk Signed-off-by: gayang --- api/BUILD | 2 +- api/envoy/{extensions => config}/common/key_value/v3/BUILD | 0 .../{extensions => config}/common/key_value/v3/config.proto | 6 +++--- api/envoy/extensions/common/dynamic_forward_proxy/v3/BUILD | 2 +- .../common/dynamic_forward_proxy/v3/dns_cache.proto | 4 ++-- api/versioning/BUILD | 2 +- docs/root/api-v3/common_messages/common_messages.rst | 2 +- source/extensions/key_value/file_based/BUILD | 2 +- source/extensions/key_value/file_based/config.cc | 3 +-- source/extensions/key_value/file_based/config.h | 4 ++-- 10 files changed, 13 insertions(+), 14 deletions(-) rename api/envoy/{extensions => config}/common/key_value/v3/BUILD (100%) rename api/envoy/{extensions => config}/common/key_value/v3/config.proto (69%) diff --git a/api/BUILD b/api/BUILD index 93f9184a2b400..0a3d7c74b2943 100644 --- a/api/BUILD +++ b/api/BUILD @@ -68,6 +68,7 @@ proto_library( "//envoy/config/accesslog/v3:pkg", "//envoy/config/bootstrap/v3:pkg", "//envoy/config/cluster/v3:pkg", + "//envoy/config/common/key_value/v3:pkg", "//envoy/config/common/matcher/v3:pkg", "//envoy/config/core/v3:pkg", "//envoy/config/endpoint/v3:pkg", @@ -101,7 +102,6 @@ proto_library( 
"//envoy/extensions/clusters/dynamic_forward_proxy/v3:pkg", "//envoy/extensions/clusters/redis/v3:pkg", "//envoy/extensions/common/dynamic_forward_proxy/v3:pkg", - "//envoy/extensions/common/key_value/v3:pkg", "//envoy/extensions/common/matching/v3:pkg", "//envoy/extensions/common/ratelimit/v3:pkg", "//envoy/extensions/common/tap/v3:pkg", diff --git a/api/envoy/extensions/common/key_value/v3/BUILD b/api/envoy/config/common/key_value/v3/BUILD similarity index 100% rename from api/envoy/extensions/common/key_value/v3/BUILD rename to api/envoy/config/common/key_value/v3/BUILD diff --git a/api/envoy/extensions/common/key_value/v3/config.proto b/api/envoy/config/common/key_value/v3/config.proto similarity index 69% rename from api/envoy/extensions/common/key_value/v3/config.proto rename to api/envoy/config/common/key_value/v3/config.proto index 66a55435437b3..39bf79fe484d2 100644 --- a/api/envoy/extensions/common/key_value/v3/config.proto +++ b/api/envoy/config/common/key_value/v3/config.proto @@ -1,13 +1,13 @@ syntax = "proto3"; -package envoy.extensions.common.key_value.v3; +package envoy.config.common.key_value.v3; import "envoy/config/core/v3/extension.proto"; import "udpa/annotations/status.proto"; import "validate/validate.proto"; -option java_package = "io.envoyproxy.envoy.extensions.common.key_value.v3"; +option java_package = "io.envoyproxy.envoy.config.common.key_value.v3"; option java_outer_classname = "ConfigProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; @@ -18,5 +18,5 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // This shared configuration for Envoy key value stores. 
message KeyValueStoreConfig { // [#extension-category: envoy.common.key_value] - config.core.v3.TypedExtensionConfig config = 1 [(validate.rules).message = {required: true}]; + core.v3.TypedExtensionConfig config = 1 [(validate.rules).message = {required: true}]; } diff --git a/api/envoy/extensions/common/dynamic_forward_proxy/v3/BUILD b/api/envoy/extensions/common/dynamic_forward_proxy/v3/BUILD index 6e07b4a9226bb..b9cc22c7ee67c 100644 --- a/api/envoy/extensions/common/dynamic_forward_proxy/v3/BUILD +++ b/api/envoy/extensions/common/dynamic_forward_proxy/v3/BUILD @@ -8,8 +8,8 @@ api_proto_package( deps = [ "//envoy/annotations:pkg", "//envoy/config/cluster/v3:pkg", + "//envoy/config/common/key_value/v3:pkg", "//envoy/config/core/v3:pkg", - "//envoy/extensions/common/key_value/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) diff --git a/api/envoy/extensions/common/dynamic_forward_proxy/v3/dns_cache.proto b/api/envoy/extensions/common/dynamic_forward_proxy/v3/dns_cache.proto index 4a0d87ff6c3b8..e3904ae287192 100644 --- a/api/envoy/extensions/common/dynamic_forward_proxy/v3/dns_cache.proto +++ b/api/envoy/extensions/common/dynamic_forward_proxy/v3/dns_cache.proto @@ -3,10 +3,10 @@ syntax = "proto3"; package envoy.extensions.common.dynamic_forward_proxy.v3; import "envoy/config/cluster/v3/cluster.proto"; +import "envoy/config/common/key_value/v3/config.proto"; import "envoy/config/core/v3/address.proto"; import "envoy/config/core/v3/extension.proto"; import "envoy/config/core/v3/resolver.proto"; -import "envoy/extensions/common/key_value/v3/config.proto"; import "google/protobuf/duration.proto"; import "google/protobuf/wrappers.proto"; @@ -142,5 +142,5 @@ message DnsCacheConfig { // [#not-implemented-hide:] // Configuration to flush the DNS cache to long term storage. 
- key_value.v3.KeyValueStoreConfig key_value_config = 13; + config.common.key_value.v3.KeyValueStoreConfig key_value_config = 13; } diff --git a/api/versioning/BUILD b/api/versioning/BUILD index 61af4c4764680..c57ddbb5dca5f 100644 --- a/api/versioning/BUILD +++ b/api/versioning/BUILD @@ -20,6 +20,7 @@ proto_library( "//envoy/config/accesslog/v3:pkg", "//envoy/config/bootstrap/v3:pkg", "//envoy/config/cluster/v3:pkg", + "//envoy/config/common/key_value/v3:pkg", "//envoy/config/common/matcher/v3:pkg", "//envoy/config/core/v3:pkg", "//envoy/config/endpoint/v3:pkg", @@ -53,7 +54,6 @@ proto_library( "//envoy/extensions/clusters/dynamic_forward_proxy/v3:pkg", "//envoy/extensions/clusters/redis/v3:pkg", "//envoy/extensions/common/dynamic_forward_proxy/v3:pkg", - "//envoy/extensions/common/key_value/v3:pkg", "//envoy/extensions/common/matching/v3:pkg", "//envoy/extensions/common/ratelimit/v3:pkg", "//envoy/extensions/common/tap/v3:pkg", diff --git a/docs/root/api-v3/common_messages/common_messages.rst b/docs/root/api-v3/common_messages/common_messages.rst index ddfb0fe7bb0c5..d14a59db966c2 100644 --- a/docs/root/api-v3/common_messages/common_messages.rst +++ b/docs/root/api-v3/common_messages/common_messages.rst @@ -20,7 +20,7 @@ Common messages ../config/core/v3/socket_option.proto ../config/core/v3/udp_socket_config.proto ../config/core/v3/substitution_format_string.proto - ../extensions/common/key_value/v3/config.proto + ../config/common/key_value/v3/config.proto ../extensions/common/ratelimit/v3/ratelimit.proto ../extensions/filters/common/fault/v3/fault.proto ../extensions/network/socket_interface/v3/default_socket_interface.proto diff --git a/source/extensions/key_value/file_based/BUILD b/source/extensions/key_value/file_based/BUILD index 4603b869b5908..5237ea3cdb847 100644 --- a/source/extensions/key_value/file_based/BUILD +++ b/source/extensions/key_value/file_based/BUILD @@ -19,7 +19,7 @@ envoy_cc_extension( "//envoy/filesystem:filesystem_interface", 
"//envoy/registry", "//source/common/common:key_value_store_lib", - "@envoy_api//envoy/extensions/common/key_value/v3:pkg_cc_proto", + "@envoy_api//envoy/config/common/key_value/v3:pkg_cc_proto", "@envoy_api//envoy/extensions/key_value/file_based/v3:pkg_cc_proto", ], ) diff --git a/source/extensions/key_value/file_based/config.cc b/source/extensions/key_value/file_based/config.cc index 6fbd99b77cffc..11568c625c1ac 100644 --- a/source/extensions/key_value/file_based/config.cc +++ b/source/extensions/key_value/file_based/config.cc @@ -44,8 +44,7 @@ KeyValueStorePtr FileBasedKeyValueStoreFactory::createStore( const Protobuf::Message& config, ProtobufMessage::ValidationVisitor& validation_visitor, Event::Dispatcher& dispatcher, Filesystem::Instance& file_system) { const auto& typed_config = MessageUtil::downcastAndValidate< - const envoy::extensions::common::key_value::v3::KeyValueStoreConfig&>(config, - validation_visitor); + const envoy::config::common::key_value::v3::KeyValueStoreConfig&>(config, validation_visitor); const auto file_config = MessageUtil::anyConvertAndValidate< envoy::extensions::key_value::file_based::v3::FileBasedKeyValueStoreConfig>( typed_config.config().typed_config(), validation_visitor); diff --git a/source/extensions/key_value/file_based/config.h b/source/extensions/key_value/file_based/config.h index 414b7d7473185..4b1cccec616d1 100644 --- a/source/extensions/key_value/file_based/config.h +++ b/source/extensions/key_value/file_based/config.h @@ -1,6 +1,6 @@ #include "envoy/common/key_value_store.h" -#include "envoy/extensions/common/key_value/v3/config.pb.h" -#include "envoy/extensions/common/key_value/v3/config.pb.validate.h" +#include "envoy/config/common/key_value/v3/config.pb.h" +#include "envoy/config/common/key_value/v3/config.pb.validate.h" #include "envoy/extensions/key_value/file_based/v3/config.pb.h" #include "envoy/extensions/key_value/file_based/v3/config.pb.validate.h" From 653ce5b09c7c5fb305a2a8404905e1280e409bf1 Mon Sep 17 
00:00:00 2001 From: Samra Belachew Date: Wed, 15 Sep 2021 10:35:15 -0700 Subject: [PATCH 049/121] Make contrib filter visibility configurable (#18131) Signed-off-by: Samra Belachew Signed-off-by: gayang --- bazel/envoy_build_system.bzl | 3 ++- bazel/envoy_library.bzl | 3 ++- ci/osx-build-config/extensions_build_config.bzl | 1 + source/extensions/extensions_build_config.bzl | 1 + 4 files changed, 6 insertions(+), 2 deletions(-) diff --git a/bazel/envoy_build_system.bzl b/bazel/envoy_build_system.bzl index 4d671ab9562fa..644824f19e830 100644 --- a/bazel/envoy_build_system.bzl +++ b/bazel/envoy_build_system.bzl @@ -44,6 +44,7 @@ load( ) load( "@envoy_build_config//:extensions_build_config.bzl", + "CONTRIB_EXTENSION_PACKAGE_VISIBILITY", "EXTENSION_PACKAGE_VISIBILITY", ) load("@bazel_skylib//rules:common_settings.bzl", "bool_flag") @@ -65,7 +66,7 @@ def envoy_extension_package(enabled_default = True, default_visibility = EXTENSI ) def envoy_contrib_package(): - envoy_extension_package(default_visibility = ["//:contrib_library"]) + envoy_extension_package(default_visibility = CONTRIB_EXTENSION_PACKAGE_VISIBILITY) # A genrule variant that can output a directory. This is useful when doing things like # generating a fuzz corpus mechanically. 
diff --git a/bazel/envoy_library.bzl b/bazel/envoy_library.bzl index ac74d1be29c96..5b1d674483c8a 100644 --- a/bazel/envoy_library.bzl +++ b/bazel/envoy_library.bzl @@ -12,6 +12,7 @@ load(":envoy_pch.bzl", "envoy_pch_copts") load("@envoy_api//bazel:api_build_system.bzl", "api_cc_py_proto_library") load( "@envoy_build_config//:extensions_build_config.bzl", + "CONTRIB_EXTENSION_PACKAGE_VISIBILITY", "EXTENSION_CONFIG_VISIBILITY", ) @@ -75,7 +76,7 @@ def envoy_cc_contrib_extension( name, tags = [], extra_visibility = [], - visibility = ["//:contrib_library"], + visibility = CONTRIB_EXTENSION_PACKAGE_VISIBILITY, **kwargs): envoy_cc_extension(name, tags, extra_visibility, visibility, **kwargs) diff --git a/ci/osx-build-config/extensions_build_config.bzl b/ci/osx-build-config/extensions_build_config.bzl index 40c8fee0685e8..379d6748e5a95 100644 --- a/ci/osx-build-config/extensions_build_config.bzl +++ b/ci/osx-build-config/extensions_build_config.bzl @@ -14,3 +14,4 @@ EXTENSIONS = { WINDOWS_EXTENSIONS = {} EXTENSION_CONFIG_VISIBILITY = ["//:extension_config"] EXTENSION_PACKAGE_VISIBILITY = ["//:extension_library"] +CONTRIB_EXTENSION_PACKAGE_VISIBILITY = ["//:contrib_library"] diff --git a/source/extensions/extensions_build_config.bzl b/source/extensions/extensions_build_config.bzl index 907828f07f8c6..d7447fbc82c8a 100644 --- a/source/extensions/extensions_build_config.bzl +++ b/source/extensions/extensions_build_config.bzl @@ -303,3 +303,4 @@ EXTENSIONS = { # need to directly reference Envoy extensions. 
EXTENSION_CONFIG_VISIBILITY = ["//:extension_config"] EXTENSION_PACKAGE_VISIBILITY = ["//:extension_library"] +CONTRIB_EXTENSION_PACKAGE_VISIBILITY = ["//:contrib_library"] From 1ac55e7d7a95b1faf2dccabfde3347939c6a6ecc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E7=99=BD=E6=B3=BD?= Date: Thu, 16 Sep 2021 02:53:54 +0800 Subject: [PATCH 050/121] lua: Fix printing Lua string containing hex characters (#17994) Signed-off-by: chaojiang Signed-off-by: Patrick Signed-off-by: gayang --- docs/root/version_history/current.rst | 1 + source/extensions/filters/common/lua/lua.h | 19 +++++ .../extensions/filters/common/lua/wrappers.cc | 3 +- .../extensions/filters/http/lua/lua_filter.cc | 19 +++-- .../extensions/filters/http/lua/lua_filter.h | 2 +- .../filters/http/lua/lua_filter_test.cc | 76 ++++++++++++++++++- 6 files changed, 105 insertions(+), 15 deletions(-) diff --git a/docs/root/version_history/current.rst b/docs/root/version_history/current.rst index ec8a10f5475f1..edcd675f4dfd0 100644 --- a/docs/root/version_history/current.rst +++ b/docs/root/version_history/current.rst @@ -84,6 +84,7 @@ Bug Fixes * hcm: remove deprecation for :ref:`xff_num_trusted_hops ` and forbid mixing ip detection extensions with old related knobs. * http: limit use of deferred resets in the http2 codec to server-side connections. Use of deferred reset for client connections can result in incorrect behavior and performance problems. * listener: fixed an issue on Windows where connections are not handled by all worker threads. +* lua: fix ``BodyBuffer`` setting a Lua string and printing Lua string containing hex characters. Previously, ``BodyBuffer`` setting a Lua string or printing strings with hex characters will be truncated. * xray: fix the AWS X-Ray tracer bug where span's error, fault and throttle information was not reported properly as per the `AWS X-Ray documentation `_. Before this fix, server error was reported under 'annotations' section of the segment data. 
Removed Config or Runtime diff --git a/source/extensions/filters/common/lua/lua.h b/source/extensions/filters/common/lua/lua.h index 54bdcabe33bc5..5271ce089f5c4 100644 --- a/source/extensions/filters/common/lua/lua.h +++ b/source/extensions/filters/common/lua/lua.h @@ -69,6 +69,25 @@ namespace Lua { lua_pushnumber(state, val); \ lua_settable(state, -3); +/** + * Get absl::string_view from Lua string. This checks if the argument at index is a string + * and build an absl::string_view from it. + * @param state the current Lua state. + * @param index the index of argument. + * @return absl::string_view of Lua string with proper string length. + **/ +inline absl::string_view getStringViewFromLuaString(lua_State* state, int index) { + size_t input_size = 0; + // When the argument at index in Lua state is not a string, for example, giving a table to + // logTrace (which uses this function under the hood), Lua script exits with an error like the + // following: "[string \"...\"]:3: bad argument #1 to 'logTrace' (string expected, got table)". + // However,`luaL_checklstring` accepts a number as its argument and implicitly converts it to a + // string, since Lua provides automatic conversion between string and number values at run time + // (https://www.lua.org/manual/5.1/manual.html#2.2.1). + const char* input = luaL_checklstring(state, index, &input_size); + return absl::string_view(input, input_size); +} + /** * Calculate the maximum space needed to be aligned. 
*/ diff --git a/source/extensions/filters/common/lua/wrappers.cc b/source/extensions/filters/common/lua/wrappers.cc index cb148e18bcfe2..bfb2a1c473259 100644 --- a/source/extensions/filters/common/lua/wrappers.cc +++ b/source/extensions/filters/common/lua/wrappers.cc @@ -42,7 +42,6 @@ int64_t timestampInSeconds(const absl::optional& system_time) { .count() : 0; } - } // namespace int BufferWrapper::luaLength(lua_State* state) { @@ -67,7 +66,7 @@ int BufferWrapper::luaGetBytes(lua_State* state) { int BufferWrapper::luaSetBytes(lua_State* state) { data_.drain(data_.length()); - absl::string_view bytes = luaL_checkstring(state, 2); + absl::string_view bytes = getStringViewFromLuaString(state, 2); data_.add(bytes); lua_pushnumber(state, data_.length()); return 1; diff --git a/source/extensions/filters/http/lua/lua_filter.cc b/source/extensions/filters/http/lua/lua_filter.cc index 5c78826978676..095f9e7bc7738 100644 --- a/source/extensions/filters/http/lua/lua_filter.cc +++ b/source/extensions/filters/http/lua/lua_filter.cc @@ -552,37 +552,37 @@ int StreamHandleWrapper::luaConnection(lua_State* state) { } int StreamHandleWrapper::luaLogTrace(lua_State* state) { - const char* message = luaL_checkstring(state, 2); + absl::string_view message = Filters::Common::Lua::getStringViewFromLuaString(state, 2); filter_.scriptLog(spdlog::level::trace, message); return 0; } int StreamHandleWrapper::luaLogDebug(lua_State* state) { - const char* message = luaL_checkstring(state, 2); + absl::string_view message = Filters::Common::Lua::getStringViewFromLuaString(state, 2); filter_.scriptLog(spdlog::level::debug, message); return 0; } int StreamHandleWrapper::luaLogInfo(lua_State* state) { - const char* message = luaL_checkstring(state, 2); + absl::string_view message = Filters::Common::Lua::getStringViewFromLuaString(state, 2); filter_.scriptLog(spdlog::level::info, message); return 0; } int StreamHandleWrapper::luaLogWarn(lua_State* state) { - const char* message = 
luaL_checkstring(state, 2); + absl::string_view message = Filters::Common::Lua::getStringViewFromLuaString(state, 2); filter_.scriptLog(spdlog::level::warn, message); return 0; } int StreamHandleWrapper::luaLogErr(lua_State* state) { - const char* message = luaL_checkstring(state, 2); + absl::string_view message = Filters::Common::Lua::getStringViewFromLuaString(state, 2); filter_.scriptLog(spdlog::level::err, message); return 0; } int StreamHandleWrapper::luaLogCritical(lua_State* state) { - const char* message = luaL_checkstring(state, 2); + absl::string_view message = Filters::Common::Lua::getStringViewFromLuaString(state, 2); filter_.scriptLog(spdlog::level::critical, message); return 0; } @@ -649,9 +649,8 @@ int StreamHandleWrapper::luaImportPublicKey(lua_State* state) { } int StreamHandleWrapper::luaBase64Escape(lua_State* state) { - size_t input_size; - const char* input = luaL_checklstring(state, 2, &input_size); - auto output = absl::Base64Escape(absl::string_view(input, input_size)); + absl::string_view input = Filters::Common::Lua::getStringViewFromLuaString(state, 2); + auto output = absl::Base64Escape(input); lua_pushlstring(state, output.data(), output.length()); return 1; @@ -783,7 +782,7 @@ void Filter::scriptError(const Filters::Common::Lua::LuaException& e) { response_stream_wrapper_.reset(); } -void Filter::scriptLog(spdlog::level::level_enum level, const char* message) { +void Filter::scriptLog(spdlog::level::level_enum level, absl::string_view message) { switch (level) { case spdlog::level::trace: ENVOY_LOG(trace, "script log: {}", message); diff --git a/source/extensions/filters/http/lua/lua_filter.h b/source/extensions/filters/http/lua/lua_filter.h index e779d9fdf4245..17c9338627965 100644 --- a/source/extensions/filters/http/lua/lua_filter.h +++ b/source/extensions/filters/http/lua/lua_filter.h @@ -441,7 +441,7 @@ class Filter : public Http::StreamFilter, Logger::Loggable { Upstream::ClusterManager& clusterManager() { return 
config_->cluster_manager_; } void scriptError(const Filters::Common::Lua::LuaException& e); - virtual void scriptLog(spdlog::level::level_enum level, const char* message); + virtual void scriptLog(spdlog::level::level_enum level, absl::string_view message); // Http::StreamFilterBase void onDestroy() override; diff --git a/test/extensions/filters/http/lua/lua_filter_test.cc b/test/extensions/filters/http/lua/lua_filter_test.cc index a7ae58a4f2003..8c0bd5bdc8687 100644 --- a/test/extensions/filters/http/lua/lua_filter_test.cc +++ b/test/extensions/filters/http/lua/lua_filter_test.cc @@ -41,7 +41,7 @@ class TestFilter : public Filter { public: using Filter::Filter; - MOCK_METHOD(void, scriptLog, (spdlog::level::level_enum level, const char* message)); + MOCK_METHOD(void, scriptLog, (spdlog::level::level_enum level, absl::string_view message)); }; class LuaHttpFilterTest : public testing::Test { @@ -818,6 +818,7 @@ TEST_F(LuaHttpFilterTest, HttpCall) { {":method", "POST"}, {":path", "/"}, {":authority", "foo"}, + {"set-cookie", "flavor=chocolate; Path=/"}, {"set-cookie", "variant=chewy; Path=/"}, {"content-length", "11"}}; @@ -841,7 +842,7 @@ TEST_F(LuaHttpFilterTest, HttpCall) { response_message->body().add(response, 8); EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq(":status 200"))); EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq("8"))); - EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq("resp"))); + EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq(std::string("resp\0nse", 8)))); EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq("0"))); EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq("nse"))); EXPECT_CALL(decoder_callbacks_, continueDecoding()); @@ -2392,6 +2393,77 @@ TEST_F(LuaHttpFilterTest, LuaFilterSetResponseBufferChunked) { EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->encodeData(response_body, true)); } +// BodyBuffer should not truncated when bodyBuffer set hex character 
+TEST_F(LuaHttpFilterTest, LuaBodyBufferSetBytesWithHex) { + const std::string SCRIPT{R"EOF( + function envoy_on_response(response_handle) + local bodyBuffer = response_handle:body() + bodyBuffer:setBytes("\x471111") + local body_str = bodyBuffer:getBytes(0, bodyBuffer:length()) + response_handle:logTrace(body_str) + end + )EOF"}; + + InSequence s; + setup(SCRIPT); + + Http::TestRequestHeaderMapImpl request_headers{{":path", "/"}}; + EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers, true)); + + Http::TestResponseHeaderMapImpl response_headers{{":status", "200"}}; + EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, + filter_->encodeHeaders(response_headers, false)); + + Buffer::OwnedImpl response_body(""); + EXPECT_CALL(*filter_, scriptLog(spdlog::level::trace, StrEq("G1111"))); + EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->encodeData(response_body, true)); + EXPECT_EQ(5, encoder_callbacks_.buffer_->length()); +} + +// BodyBuffer should not truncated when bodyBuffer set zero +TEST_F(LuaHttpFilterTest, LuaBodyBufferSetBytesWithZero) { + const std::string SCRIPT{R"EOF( + function envoy_on_response(response_handle) + local bodyBuffer = response_handle:body() + bodyBuffer:setBytes("\0") + end + )EOF"}; + + InSequence s; + setup(SCRIPT); + + Http::TestRequestHeaderMapImpl request_headers{{":path", "/"}}; + EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers, true)); + + Http::TestResponseHeaderMapImpl response_headers{{":status", "200"}}; + EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, + filter_->encodeHeaders(response_headers, false)); + + Buffer::OwnedImpl response_body("1111"); + EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->encodeData(response_body, true)); + EXPECT_EQ(1, encoder_callbacks_.buffer_->length()); +} + +// Script logging a table instead of the expected string. 
+TEST_F(LuaHttpFilterTest, LogTableInsteadOfString) { + const std::string LOG_TABLE{R"EOF( + function envoy_on_request(request_handle) + request_handle:logTrace({}) + end + )EOF"}; + + InSequence s; + setup(LOG_TABLE); + + Http::TestRequestHeaderMapImpl request_headers{{":path", "/"}}; + EXPECT_CALL( + *filter_, + scriptLog( + spdlog::level::err, + StrEq("[string \"...\"]:3: bad argument #1 to 'logTrace' (string expected, got table)"))); + EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers, true)); +} + } // namespace } // namespace Lua } // namespace HttpFilters From c65f0ca05a88a5c4fd2d21794f106770ad12388e Mon Sep 17 00:00:00 2001 From: Matt Klein Date: Wed, 15 Sep 2021 12:18:40 -0700 Subject: [PATCH 051/121] router: add support for retry options predicate extensions (#18058) This was built for Envoy Mobile, but will allow generic modification of router behavior between retries. Currently it only supports modifying upstream socket options (to in practice impact interface binding), but in the future is likely to be extended to modify timeouts, retry back off times, request headers, etc. 
Signed-off-by: Matt Klein Signed-off-by: gayang --- .../config/route/v3/route_components.proto | 5 + docs/root/version_history/current.rst | 4 + envoy/router/router.h | 7 ++ envoy/upstream/cluster_manager.h | 5 + envoy/upstream/retry.h | 64 ++++++++++++- source/common/http/async_client_impl.cc | 6 +- source/common/http/async_client_impl.h | 58 ++---------- source/common/network/BUILD | 1 + .../addr_family_aware_socket_option_impl.h | 8 +- source/common/network/socket_option_impl.cc | 9 ++ source/common/network/socket_option_impl.h | 5 +- .../win32_redirect_records_option_impl.h | 2 - source/common/router/BUILD | 1 + source/common/router/config_impl.cc | 26 ++++- source/common/router/config_impl.h | 11 ++- source/common/router/router.cc | 14 +++ source/common/router/router.h | 1 + source/common/upstream/BUILD | 8 ++ source/common/upstream/cluster_manager_impl.h | 1 + source/common/upstream/retry_factory.h | 21 +++++ test/common/http/async_client_impl_test.cc | 8 +- .../common/network/socket_option_impl_test.cc | 4 + test/common/router/config_impl_test.cc | 45 ++++++++- test/common/router/router_test.cc | 34 ++++++- test/common/upstream/test_cluster_manager.h | 1 + .../http/original_src/original_src_test.cc | 14 ++- test/integration/integration_test.cc | 94 +++++++++++++++++++ test/mocks/router/mocks.h | 5 + test/mocks/upstream/BUILD | 2 + test/mocks/upstream/cluster_manager_factory.h | 5 + 30 files changed, 385 insertions(+), 84 deletions(-) create mode 100644 source/common/upstream/retry_factory.h diff --git a/api/envoy/config/route/v3/route_components.proto b/api/envoy/config/route/v3/route_components.proto index 4a0b93f3601c2..ec27e627d346d 100644 --- a/api/envoy/config/route/v3/route_components.proto +++ b/api/envoy/config/route/v3/route_components.proto @@ -1350,6 +1350,11 @@ message RetryPolicy { // details. repeated RetryHostPredicate retry_host_predicate = 5; + // Retry options predicates that will be applied prior to retrying a request. 
These predicates + // allow customizing request behavior between retries. + // [#comment: add [#extension-category: envoy.retry_options_predicates] when there are built-in extensions] + repeated core.v3.TypedExtensionConfig retry_options_predicates = 12; + // The maximum number of times host selection will be reattempted before giving up, at which // point the host that was last selected will be routed to. If unspecified, this will default to // retrying once. diff --git a/docs/root/version_history/current.rst b/docs/root/version_history/current.rst index edcd675f4dfd0..c1e3b4b2c4174 100644 --- a/docs/root/version_history/current.rst +++ b/docs/root/version_history/current.rst @@ -116,6 +116,10 @@ New Features * overload: add a new overload action that resets streams using a lot of memory. To enable the tracking of allocated bytes in buffers that a stream is using we need to configure the minimum threshold for tracking via:ref:`buffer_factory_config `. We have an overload action ``Envoy::Server::OverloadActionNameValues::ResetStreams`` that takes advantage of the tracking to reset the most expensive stream first. * rbac: added :ref:`destination_port_range ` for matching range of destination ports. * route config: added :ref:`dynamic_metadata ` for routing based on dynamic metadata. +* router: added retry options predicate extensions configured via + :ref:` `. These + extensions allow modification of requests between retries at the router level. There are not + currently any built-in extensions that implement this extension point. * router: added :ref:`per_try_idle_timeout ` timeout configuration. * router: added an optional :ref:`override_auto_sni_header ` to support setting SNI value from an arbitrary header other than host/authority. * sxg_filter: added filter to transform response to SXG package to :ref:`contrib images `. This can be enabled by setting :ref:`SXG ` configuration. 
diff --git a/envoy/router/router.h b/envoy/router/router.h index c4288f10c3d1d..b7d8f8db2f3a9 100644 --- a/envoy/router/router.h +++ b/envoy/router/router.h @@ -236,6 +236,13 @@ class RetryPolicy { */ virtual Upstream::RetryPrioritySharedPtr retryPriority() const PURE; + /** + * @return the retry options predicates for this policy. Each policy will be applied prior + * to retrying a request, allowing for request behavior to be customized. + */ + virtual absl::Span + retryOptionsPredicates() const PURE; + /** * Number of times host selection should be reattempted when selecting a host * for a retry attempt. diff --git a/envoy/upstream/cluster_manager.h b/envoy/upstream/cluster_manager.h index eeb249affd58b..e24790038d241 100644 --- a/envoy/upstream/cluster_manager.h +++ b/envoy/upstream/cluster_manager.h @@ -412,6 +412,11 @@ class ClusterManagerFactory { * Returns the secret manager. */ virtual Secret::SecretManager& secretManager() PURE; + + /** + * Returns the singleton manager. + */ + virtual Singleton::Manager& singletonManager() PURE; }; /** diff --git a/envoy/upstream/retry.h b/envoy/upstream/retry.h index f772d54029179..9e1a8de57995f 100644 --- a/envoy/upstream/retry.h +++ b/envoy/upstream/retry.h @@ -1,6 +1,7 @@ #pragma once #include "envoy/config/typed_config.h" +#include "envoy/singleton/manager.h" #include "envoy/upstream/types.h" #include "envoy/upstream/upstream.h" @@ -92,13 +93,58 @@ class RetryHostPredicate { using RetryHostPredicateSharedPtr = std::shared_ptr; +/** + * A predicate that is applied prior to retrying a request. Each predicate can customize request + * behavior prior to the request being retried. + */ +class RetryOptionsPredicate { +public: + struct UpdateOptionsParameters { + // Stream info for the previous request attempt that is about to be retried. 
+ const StreamInfo::StreamInfo& retriable_request_stream_info_; + // The current upstream socket options that were used for connection pool selection on the + // previous attempt, or the result of an updated set of options from a previously run + // retry options predicate. + Network::Socket::OptionsSharedPtr current_upstream_socket_options_; + }; + + struct UpdateOptionsReturn { + // New upstream socket options to apply to the next request attempt. If changed, will affect + // connection pool selection similar to that which was done for the initial request. + absl::optional new_upstream_socket_options_; + }; + + virtual ~RetryOptionsPredicate() = default; + + /** + * Update request options. + * @param parameters supplies the update parameters. + * @return the new options to apply. Each option is wrapped in an optional and is only applied + * if valid. + */ + virtual UpdateOptionsReturn updateOptions(const UpdateOptionsParameters& parameters) const PURE; +}; + +using RetryOptionsPredicateConstSharedPtr = std::shared_ptr; + +/** + * Context for all retry extensions. + */ +class RetryExtensionFactoryContext { +public: + virtual ~RetryExtensionFactoryContext() = default; + + /** + * @return Singleton::Manager& the server-wide singleton manager. + */ + virtual Singleton::Manager& singletonManager() PURE; +}; + /** * Factory for RetryPriority. 
*/ class RetryPriorityFactory : public Config::TypedFactory { public: - ~RetryPriorityFactory() override = default; - virtual RetryPrioritySharedPtr createRetryPriority(const Protobuf::Message& config, ProtobufMessage::ValidationVisitor& validation_visitor, @@ -112,13 +158,23 @@ class RetryPriorityFactory : public Config::TypedFactory { */ class RetryHostPredicateFactory : public Config::TypedFactory { public: - ~RetryHostPredicateFactory() override = default; - virtual RetryHostPredicateSharedPtr createHostPredicate(const Protobuf::Message& config, uint32_t retry_count) PURE; std::string category() const override { return "envoy.retry_host_predicates"; } }; +/** + * Factory for RetryOptionsPredicate. + */ +class RetryOptionsPredicateFactory : public Config::TypedFactory { +public: + virtual RetryOptionsPredicateConstSharedPtr + createOptionsPredicate(const Protobuf::Message& config, + RetryExtensionFactoryContext& context) PURE; + + std::string category() const override { return "envoy.retry_options_predicates"; } +}; + } // namespace Upstream } // namespace Envoy diff --git a/source/common/http/async_client_impl.cc b/source/common/http/async_client_impl.cc index f913103750301..2c1da999f255b 100644 --- a/source/common/http/async_client_impl.cc +++ b/source/common/http/async_client_impl.cc @@ -42,7 +42,7 @@ AsyncClientImpl::AsyncClientImpl(Upstream::ClusterInfoConstSharedPtr cluster, config_(http_context.asyncClientStatPrefix(), local_info, stats_store, cm, runtime, random, std::move(shadow_writer), true, false, false, false, false, {}, dispatcher.timeSource(), http_context, router_context), - dispatcher_(dispatcher) {} + dispatcher_(dispatcher), singleton_manager_(cm.clusterManagerFactory().singletonManager()) {} AsyncClientImpl::~AsyncClientImpl() { while (!active_streams_.empty()) { @@ -81,8 +81,8 @@ AsyncStreamImpl::AsyncStreamImpl(AsyncClientImpl& parent, AsyncClient::StreamCal router_(parent.config_), stream_info_(Protocol::Http11, 
parent.dispatcher().timeSource(), nullptr), tracing_config_(Tracing::EgressConfig::get()), - route_(std::make_shared(parent_.cluster_->name(), options.timeout, - options.hash_policy, options.retry_policy)), + route_(std::make_shared(parent_, options.timeout, options.hash_policy, + options.retry_policy)), send_xff_(options.send_xff) { stream_info_.dynamicMetadata().MergeFrom(options.metadata); diff --git a/source/common/http/async_client_impl.h b/source/common/http/async_client_impl.h index 27ea7b3abea3d..608813cc01722 100644 --- a/source/common/http/async_client_impl.h +++ b/source/common/http/async_client_impl.h @@ -40,6 +40,7 @@ #include "source/common/router/router.h" #include "source/common/stream_info/stream_info_impl.h" #include "source/common/tracing/http_tracer_impl.h" +#include "source/common/upstream/retry_factory.h" namespace Envoy { namespace Http { @@ -67,6 +68,7 @@ class AsyncClientImpl final : public AsyncClient { Router::FilterConfig config_; Event::Dispatcher& dispatcher_; std::list> active_streams_; + Singleton::Manager& singleton_manager_; friend class AsyncStreamImpl; friend class AsyncRequestImpl; @@ -124,48 +126,6 @@ class AsyncStreamImpl : public AsyncClient::Stream, rate_limit_policy_entry_; }; - struct NullRetryPolicy : public Router::RetryPolicy { - // Router::RetryPolicy - std::chrono::milliseconds perTryTimeout() const override { - return std::chrono::milliseconds(0); - } - std::chrono::milliseconds perTryIdleTimeout() const override { - return std::chrono::milliseconds(0); - } - std::vector retryHostPredicates() const override { - return {}; - } - Upstream::RetryPrioritySharedPtr retryPriority() const override { return {}; } - - uint32_t hostSelectionMaxAttempts() const override { return 1; } - uint32_t numRetries() const override { return 1; } - uint32_t retryOn() const override { return 0; } - const std::vector& retriableStatusCodes() const override { - return retriable_status_codes_; - } - const std::vector& retriableHeaders() const 
override { - return retriable_headers_; - } - const std::vector& retriableRequestHeaders() const override { - return retriable_request_headers_; - } - absl::optional baseInterval() const override { - return absl::nullopt; - } - absl::optional maxInterval() const override { return absl::nullopt; } - const std::vector& resetHeaders() const override { - return reset_headers_; - } - std::chrono::milliseconds resetMaxInterval() const override { - return std::chrono::milliseconds(300000); - } - - const std::vector retriable_status_codes_{}; - const std::vector retriable_headers_{}; - const std::vector retriable_request_headers_{}; - const std::vector reset_headers_{}; - }; - struct NullConfig : public Router::Config { Router::RouteConstSharedPtr route(const Http::RequestHeaderMap&, const StreamInfo::StreamInfo&, uint64_t) const override { @@ -211,20 +171,21 @@ class AsyncStreamImpl : public AsyncClient::Stream, struct RouteEntryImpl : public Router::RouteEntry { RouteEntryImpl( - const std::string& cluster_name, const absl::optional& timeout, + AsyncClientImpl& parent, const absl::optional& timeout, const Protobuf::RepeatedPtrField& hash_policy, const absl::optional& retry_policy) - : cluster_name_(cluster_name), timeout_(timeout) { + : cluster_name_(parent.cluster_->name()), timeout_(timeout) { if (!hash_policy.empty()) { hash_policy_ = std::make_unique(hash_policy); } if (retry_policy.has_value()) { // ProtobufMessage::getStrictValidationVisitor() ? how often do we do this? 
+ Upstream::RetryExtensionFactoryContextImpl factory_context(parent.singleton_manager_); retry_policy_ = std::make_unique( - retry_policy.value(), ProtobufMessage::getNullValidationVisitor()); + retry_policy.value(), ProtobufMessage::getNullValidationVisitor(), factory_context); } else { - retry_policy_ = std::make_unique(); + retry_policy_ = std::make_unique(); } } @@ -330,12 +291,11 @@ class AsyncStreamImpl : public AsyncClient::Stream, }; struct RouteImpl : public Router::Route { - RouteImpl(const std::string& cluster_name, - const absl::optional& timeout, + RouteImpl(AsyncClientImpl& parent, const absl::optional& timeout, const Protobuf::RepeatedPtrField& hash_policy, const absl::optional& retry_policy) - : route_entry_(cluster_name, timeout, hash_policy, retry_policy), typed_metadata_({}) {} + : route_entry_(parent, timeout, hash_policy, retry_policy), typed_metadata_({}) {} // Router::Route const Router::DirectResponseEntry* directResponseEntry() const override { return nullptr; } diff --git a/source/common/network/BUILD b/source/common/network/BUILD index 6d85e13184c8a..114142e7040a8 100644 --- a/source/common/network/BUILD +++ b/source/common/network/BUILD @@ -343,6 +343,7 @@ envoy_cc_library( "//source/common/api:os_sys_calls_lib", "//source/common/common:assert_lib", "//source/common/common:minimal_logger_lib", + "//source/common/common:scalar_to_byte_vector_lib", "//source/common/common:utility_lib", "@envoy_api//envoy/config/core/v3:pkg_cc_proto", ], diff --git a/source/common/network/addr_family_aware_socket_option_impl.h b/source/common/network/addr_family_aware_socket_option_impl.h index 749788a4d9fa4..85a1075371eac 100644 --- a/source/common/network/addr_family_aware_socket_option_impl.h +++ b/source/common/network/addr_family_aware_socket_option_impl.h @@ -24,9 +24,11 @@ class AddrFamilyAwareSocketOptionImpl : public Socket::Option, // Socket::Option bool setOption(Socket& socket, envoy::config::core::v3::SocketOption::SocketState state) const 
override; - // The common socket options don't require a hash key. - void hashKey(std::vector&) const override {} - + void hashKey(std::vector& hash_key) const override { + // Add both sub-options to the hash. + ipv4_option_->hashKey(hash_key); + ipv6_option_->hashKey(hash_key); + } absl::optional
getOptionDetails(const Socket& socket, envoy::config::core::v3::SocketOption::SocketState state) const override; diff --git a/source/common/network/socket_option_impl.cc b/source/common/network/socket_option_impl.cc index 929979c8fa128..ba9ff7362dc6d 100644 --- a/source/common/network/socket_option_impl.cc +++ b/source/common/network/socket_option_impl.cc @@ -5,6 +5,7 @@ #include "source/common/api/os_sys_calls_impl.h" #include "source/common/common/assert.h" +#include "source/common/common/scalar_to_byte_vector.h" #include "source/common/common/utility.h" #include "source/common/network/address_impl.h" @@ -32,6 +33,14 @@ bool SocketOptionImpl::setOption(Socket& socket, return true; } +void SocketOptionImpl::hashKey(std::vector& hash_key) const { + if (optname_.hasValue()) { + pushScalarToByteVector(optname_.level(), hash_key); + pushScalarToByteVector(optname_.option(), hash_key); + hash_key.insert(hash_key.end(), value_.begin(), value_.end()); + } +} + absl::optional SocketOptionImpl::getOptionDetails(const Socket&, envoy::config::core::v3::SocketOption::SocketState state) const { diff --git a/source/common/network/socket_option_impl.h b/source/common/network/socket_option_impl.h index 4c47dde2c08a7..fd42517c7bd90 100644 --- a/source/common/network/socket_option_impl.h +++ b/source/common/network/socket_option_impl.h @@ -134,10 +134,7 @@ class SocketOptionImpl : public Socket::Option, Logger::Loggable&) const override {} - + void hashKey(std::vector& hash_key) const override; absl::optional
getOptionDetails(const Socket& socket, envoy::config::core::v3::SocketOption::SocketState state) const override; diff --git a/source/common/network/win32_redirect_records_option_impl.h b/source/common/network/win32_redirect_records_option_impl.h index 3fbe5f6fc5855..efa88048d9705 100644 --- a/source/common/network/win32_redirect_records_option_impl.h +++ b/source/common/network/win32_redirect_records_option_impl.h @@ -20,8 +20,6 @@ class Win32RedirectRecordsOptionImpl : public Socket::Option, // Socket::Option bool setOption(Socket& socket, envoy::config::core::v3::SocketOption::SocketState state) const override; - - // The common socket options don't require a hash key. void hashKey(std::vector&) const override; absl::optional
diff --git a/source/common/router/BUILD b/source/common/router/BUILD index ffea1d8aa65e4..4e0bbe9145706 100644 --- a/source/common/router/BUILD +++ b/source/common/router/BUILD @@ -63,6 +63,7 @@ envoy_cc_library( "//source/common/http:utility_lib", "//source/common/protobuf:utility_lib", "//source/common/tracing:http_tracer_lib", + "//source/common/upstream:retry_factory_lib", "//source/extensions/filters/http/common:utility_lib", "@envoy_api//envoy/config/core/v3:pkg_cc_proto", "@envoy_api//envoy/config/route/v3:pkg_cc_proto", diff --git a/source/common/router/config_impl.cc b/source/common/router/config_impl.cc index e71f5171a427c..adf6b70edeaba 100644 --- a/source/common/router/config_impl.cc +++ b/source/common/router/config_impl.cc @@ -37,6 +37,7 @@ #include "source/common/router/retry_state_impl.h" #include "source/common/runtime/runtime_features.h" #include "source/common/tracing/http_tracer_impl.h" +#include "source/common/upstream/retry_factory.h" #include "source/extensions/filters/http/common/utility.h" #include "absl/strings/match.h" @@ -87,7 +88,8 @@ HedgePolicyImpl::HedgePolicyImpl(const envoy::config::route::v3::HedgePolicy& he HedgePolicyImpl::HedgePolicyImpl() : initial_requests_(1), hedge_on_per_try_timeout_(false) {} RetryPolicyImpl::RetryPolicyImpl(const envoy::config::route::v3::RetryPolicy& retry_policy, - ProtobufMessage::ValidationVisitor& validation_visitor) + ProtobufMessage::ValidationVisitor& validation_visitor, + Upstream::RetryExtensionFactoryContext& factory_context) : retriable_headers_( Http::HeaderUtility::buildHeaderMatcherVector(retry_policy.retriable_headers())), retriable_request_headers_( @@ -118,6 +120,16 @@ RetryPolicyImpl::RetryPolicyImpl(const envoy::config::route::v3::RetryPolicy& re retry_priority, validation_visitor, factory)); } + for (const auto& options_predicate : retry_policy.retry_options_predicates()) { + auto& factory = + Envoy::Config::Utility::getAndCheckFactory( + options_predicate); + 
retry_options_predicates_.emplace_back( + factory.createOptionsPredicate(*Envoy::Config::Utility::translateToFactoryConfig( + options_predicate, validation_visitor, factory), + factory_context)); + } + auto host_selection_attempts = retry_policy.host_selection_retry_max_attempts(); if (host_selection_attempts) { host_selection_attempts_ = host_selection_attempts; @@ -350,7 +362,8 @@ RouteEntryImplBase::RouteEntryImplBase(const VirtualHostImpl& vhost, prefix_rewrite_redirect_(route.redirect().prefix_rewrite()), strip_query_(route.redirect().strip_query()), hedge_policy_(buildHedgePolicy(vhost.hedgePolicy(), route.route())), - retry_policy_(buildRetryPolicy(vhost.retryPolicy(), route.route(), validator)), + retry_policy_( + buildRetryPolicy(vhost.retryPolicy(), route.route(), validator, factory_context)), internal_redirect_policy_( buildInternalRedirectPolicy(route.route(), validator, route.name())), rate_limit_policy_(route.route().rate_limits(), validator), @@ -893,15 +906,18 @@ HedgePolicyImpl RouteEntryImplBase::buildHedgePolicy( RetryPolicyImpl RouteEntryImplBase::buildRetryPolicy( const absl::optional& vhost_retry_policy, const envoy::config::route::v3::RouteAction& route_config, - ProtobufMessage::ValidationVisitor& validation_visitor) const { + ProtobufMessage::ValidationVisitor& validation_visitor, + Server::Configuration::ServerFactoryContext& factory_context) const { + Upstream::RetryExtensionFactoryContextImpl retry_factory_context( + factory_context.singletonManager()); // Route specific policy wins, if available. if (route_config.has_retry_policy()) { - return RetryPolicyImpl(route_config.retry_policy(), validation_visitor); + return RetryPolicyImpl(route_config.retry_policy(), validation_visitor, retry_factory_context); } // If not, we fallback to the virtual host policy if there is one. 
if (vhost_retry_policy) { - return RetryPolicyImpl(vhost_retry_policy.value(), validation_visitor); + return RetryPolicyImpl(vhost_retry_policy.value(), validation_visitor, retry_factory_context); } // Otherwise, an empty policy will do. diff --git a/source/common/router/config_impl.h b/source/common/router/config_impl.h index cc7a9a3dc5497..6688b14a548cd 100644 --- a/source/common/router/config_impl.h +++ b/source/common/router/config_impl.h @@ -292,7 +292,8 @@ class RetryPolicyImpl : public RetryPolicy { public: RetryPolicyImpl(const envoy::config::route::v3::RetryPolicy& retry_policy, - ProtobufMessage::ValidationVisitor& validation_visitor); + ProtobufMessage::ValidationVisitor& validation_visitor, + Upstream::RetryExtensionFactoryContext& factory_context); RetryPolicyImpl() = default; // Router::RetryPolicy @@ -302,6 +303,10 @@ class RetryPolicyImpl : public RetryPolicy { uint32_t retryOn() const override { return retry_on_; } std::vector retryHostPredicates() const override; Upstream::RetryPrioritySharedPtr retryPriority() const override; + absl::Span + retryOptionsPredicates() const override { + return retry_options_predicates_; + } uint32_t hostSelectionMaxAttempts() const override { return host_selection_attempts_; } const std::vector& retriableStatusCodes() const override { return retriable_status_codes_; @@ -344,6 +349,7 @@ class RetryPolicyImpl : public RetryPolicy { std::vector reset_headers_{}; std::chrono::milliseconds reset_max_interval_{300000}; ProtobufMessage::ValidationVisitor* validation_visitor_{}; + std::vector retry_options_predicates_; }; /** @@ -849,7 +855,8 @@ class RouteEntryImplBase : public RouteEntry, RetryPolicyImpl buildRetryPolicy(const absl::optional& vhost_retry_policy, const envoy::config::route::v3::RouteAction& route_config, - ProtobufMessage::ValidationVisitor& validation_visitor) const; + ProtobufMessage::ValidationVisitor& validation_visitor, + Server::Configuration::ServerFactoryContext& factory_context) const; 
InternalRedirectPolicyImpl buildInternalRedirectPolicy(const envoy::config::route::v3::RouteAction& route_config, diff --git a/source/common/router/router.cc b/source/common/router/router.cc index 8e3178738e411..054b6a6858ecd 100644 --- a/source/common/router/router.cc +++ b/source/common/router/router.cc @@ -953,6 +953,7 @@ void Filter::onSoftPerTryTimeout(UpstreamRequest& upstream_request) { retry_state_->shouldHedgeRetryPerTryTimeout([this]() -> void { doRetry(); }); if (retry_status == RetryStatus::Yes) { + runRetryOptionsPredicates(upstream_request); pending_retries_++; // Don't increment upstream_host->stats().rq_error_ here, we'll do that @@ -1103,6 +1104,7 @@ bool Filter::maybeRetryReset(Http::StreamResetReason reset_reason, const RetryStatus retry_status = retry_state_->shouldRetryReset(reset_reason, [this]() -> void { doRetry(); }); if (retry_status == RetryStatus::Yes) { + runRetryOptionsPredicates(upstream_request); pending_retries_++; if (upstream_request.upstreamHost()) { @@ -1320,6 +1322,7 @@ void Filter::onUpstreamHeaders(uint64_t response_code, Http::ResponseHeaderMapPt const RetryStatus retry_status = retry_state_->shouldRetryHeaders(*headers, [this]() -> void { doRetry(); }); if (retry_status == RetryStatus::Yes) { + runRetryOptionsPredicates(upstream_request); pending_retries_++; upstream_request.upstreamHost()->stats().rq_error_.inc(); Http::CodeStats& code_stats = httpContext().codeStats(); @@ -1651,6 +1654,17 @@ bool Filter::convertRequestHeadersForInternalRedirect(Http::RequestHeaderMap& do return true; } +void Filter::runRetryOptionsPredicates(UpstreamRequest& retriable_request) { + for (const auto& options_predicate : route_entry_->retryPolicy().retryOptionsPredicates()) { + const Upstream::RetryOptionsPredicate::UpdateOptionsParameters parameters{ + retriable_request.streamInfo(), upstreamSocketOptions()}; + auto ret = options_predicate->updateOptions(parameters); + if (ret.new_upstream_socket_options_.has_value()) { + upstream_options_ = 
ret.new_upstream_socket_options_.value(); + } + } +} + void Filter::doRetry() { ENVOY_STREAM_LOG(debug, "performing retry", *callbacks_); diff --git a/source/common/router/router.h b/source/common/router/router.h index 2e3a21eacda90..e9d0234aed5d0 100644 --- a/source/common/router/router.h +++ b/source/common/router/router.h @@ -524,6 +524,7 @@ class Filter : Logger::Loggable, void updateOutlierDetection(Upstream::Outlier::Result result, UpstreamRequest& upstream_request, absl::optional code); void doRetry(); + void runRetryOptionsPredicates(UpstreamRequest& retriable_request); // Called immediately after a non-5xx header is received from upstream, performs stats accounting // and handle difference between gRPC and non-gRPC requests. void handleNon5xxResponseHeaders(absl::optional grpc_status, diff --git a/source/common/upstream/BUILD b/source/common/upstream/BUILD index cca323285187e..eeed927f16837 100644 --- a/source/common/upstream/BUILD +++ b/source/common/upstream/BUILD @@ -106,6 +106,14 @@ envoy_cc_library( ], ) +envoy_cc_library( + name = "retry_factory_lib", + hdrs = ["retry_factory.h"], + deps = [ + "//envoy/upstream:retry_interface", + ], +) + envoy_cc_library( name = "conn_pool_map", hdrs = ["conn_pool_map.h"], diff --git a/source/common/upstream/cluster_manager_impl.h b/source/common/upstream/cluster_manager_impl.h index ce5480b1ea920..f96b417d9ae61 100644 --- a/source/common/upstream/cluster_manager_impl.h +++ b/source/common/upstream/cluster_manager_impl.h @@ -88,6 +88,7 @@ class ProdClusterManagerFactory : public ClusterManagerFactory { const xds::core::v3::ResourceLocator* cds_resources_locator, ClusterManager& cm) override; Secret::SecretManager& secretManager() override { return secret_manager_; } + Singleton::Manager& singletonManager() override { return singleton_manager_; } protected: Event::Dispatcher& main_thread_dispatcher_; diff --git a/source/common/upstream/retry_factory.h b/source/common/upstream/retry_factory.h new file mode 100644 
index 0000000000000..7c335116cb663 --- /dev/null +++ b/source/common/upstream/retry_factory.h @@ -0,0 +1,21 @@ +#pragma once + +#include "envoy/upstream/retry.h" + +namespace Envoy { +namespace Upstream { + +class RetryExtensionFactoryContextImpl : public Upstream::RetryExtensionFactoryContext { +public: + RetryExtensionFactoryContextImpl(Singleton::Manager& singleton_manager) + : singleton_manager_(singleton_manager) {} + + // Upstream::RetryExtensionFactoryContext + Singleton::Manager& singletonManager() override { return singleton_manager_; } + +private: + Singleton::Manager& singleton_manager_; +}; + +} // namespace Upstream +} // namespace Envoy diff --git a/test/common/http/async_client_impl_test.cc b/test/common/http/async_client_impl_test.cc index 5728e0fa31ada..28d78009c2fdb 100644 --- a/test/common/http/async_client_impl_test.cc +++ b/test/common/http/async_client_impl_test.cc @@ -1544,10 +1544,10 @@ TEST_F(AsyncClientImplTest, DumpState) { } // namespace // Must not be in anonymous namespace for friend to work. -class AsyncClientImplUnitTest : public testing::Test { +class AsyncClientImplUnitTest : public AsyncClientImplTest { public: std::unique_ptr route_impl_{new AsyncStreamImpl::RouteImpl( - "foo", absl::nullopt, + client_, absl::nullopt, Protobuf::RepeatedPtrField(), absl::nullopt)}; AsyncStreamImpl::NullVirtualHost vhost_; @@ -1559,7 +1559,7 @@ class AsyncClientImplUnitTest : public testing::Test { TestUtility::loadFromYaml(yaml_config, retry_policy); route_impl_ = std::make_unique( - "foo", absl::nullopt, + client_, absl::nullopt, Protobuf::RepeatedPtrField(), std::move(retry_policy)); } @@ -1567,7 +1567,6 @@ class AsyncClientImplUnitTest : public testing::Test { // Test the extended fake route that AsyncClient uses. 
TEST_F(AsyncClientImplUnitTest, NullRouteImplInitTest) { - auto& route_entry = *(route_impl_->routeEntry()); EXPECT_EQ(nullptr, route_impl_->decorator()); @@ -1598,7 +1597,6 @@ TEST_F(AsyncClientImplUnitTest, NullRouteImplInitTest) { } TEST_F(AsyncClientImplUnitTest, RouteImplInitTestWithRetryPolicy) { - const std::string yaml = R"EOF( per_try_timeout: 30s num_retries: 10 diff --git a/test/common/network/socket_option_impl_test.cc b/test/common/network/socket_option_impl_test.cc index c2736caed50d9..6979b7b7f3fb9 100644 --- a/test/common/network/socket_option_impl_test.cc +++ b/test/common/network/socket_option_impl_test.cc @@ -41,6 +41,10 @@ TEST_F(SocketOptionImplTest, HasName) { EXPECT_LOG_CONTAINS( "warning", "Setting SOL_SOCKET/SO_SNDBUF option on socket failed", socket_option.setOption(socket_, envoy::config::core::v3::SocketOption::STATE_PREBIND)); + + std::vector hash_key; + socket_option.hashKey(hash_key); + EXPECT_FALSE(hash_key.empty()); } TEST_F(SocketOptionImplTest, SetOptionSuccessTrue) { diff --git a/test/common/router/config_impl_test.cc b/test/common/router/config_impl_test.cc index ccf64a29195ff..9961f532b698c 100644 --- a/test/common/router/config_impl_test.cc +++ b/test/common/router/config_impl_test.cc @@ -3401,25 +3401,56 @@ TEST_F(RouteMatcherTest, Retry) { .retryOn()); } +class TestRetryOptionsPredicateFactory : public Upstream::RetryOptionsPredicateFactory { +public: + Upstream::RetryOptionsPredicateConstSharedPtr + createOptionsPredicate(const Protobuf::Message&, + Upstream::RetryExtensionFactoryContext&) override { + return nullptr; + } + + ProtobufTypes::MessagePtr createEmptyConfigProto() override { + // Using Struct instead of a custom empty config proto. This is only allowed in tests. 
+ return ProtobufTypes::MessagePtr{new Envoy::ProtobufWkt::Struct()}; + } + + std::string name() const override { return "test_retry_options_predicate_factory"; } +}; + TEST_F(RouteMatcherTest, RetryVirtualHostLevel) { const std::string yaml = R"EOF( virtual_hosts: - domains: [www.lyft.com] per_request_buffer_limit_bytes: 8 name: www - retry_policy: {num_retries: 3, per_try_timeout: 1s, retry_on: '5xx,gateway-error,connect-failure,reset'} + retry_policy: + num_retries: 3 + per_try_timeout: 1s + retry_on: '5xx,gateway-error,connect-failure,reset' + retry_options_predicates: + - name: test_retry_options_predicate_factory + typed_config: + "@type": type.googleapis.com/google.protobuf.Struct routes: - match: {prefix: /foo} per_request_buffer_limit_bytes: 7 route: cluster: www - retry_policy: {retry_on: connect-failure} + retry_policy: + retry_on: connect-failure + retry_options_predicates: + - name: test_retry_options_predicate_factory + typed_config: + "@type": type.googleapis.com/google.protobuf.Struct - match: {prefix: /bar} route: {cluster: www} - match: {prefix: /} route: {cluster: www} )EOF"; + TestRetryOptionsPredicateFactory factory; + Registry::InjectFactory registered(factory); + factory_context_.cluster_manager_.initializeClusters({"www"}, {}); TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true); @@ -3441,6 +3472,11 @@ TEST_F(RouteMatcherTest, RetryVirtualHostLevel) { EXPECT_EQ(7U, config.route(genHeaders("www.lyft.com", "/foo", "GET"), 0) ->routeEntry() ->retryShadowBufferLimit()); + EXPECT_EQ(1U, config.route(genHeaders("www.lyft.com", "/foo", "GET"), 0) + ->routeEntry() + ->retryPolicy() + .retryOptionsPredicates() + .size()); // Virtual Host level retry policy kicks in. 
EXPECT_EQ(std::chrono::milliseconds(1000), @@ -3476,6 +3512,11 @@ TEST_F(RouteMatcherTest, RetryVirtualHostLevel) { EXPECT_EQ(8U, config.route(genHeaders("www.lyft.com", "/", "GET"), 0) ->routeEntry() ->retryShadowBufferLimit()); + EXPECT_EQ(1U, config.route(genHeaders("www.lyft.com", "/", "GET"), 0) + ->routeEntry() + ->retryPolicy() + .retryOptionsPredicates() + .size()); } TEST_F(RouteMatcherTest, GrpcRetry) { diff --git a/test/common/router/router_test.cc b/test/common/router/router_test.cc index 9bf90e943e9a6..1ff5661d20e03 100644 --- a/test/common/router/router_test.cc +++ b/test/common/router/router_test.cc @@ -911,7 +911,18 @@ TEST_F(RouterTest, EnvoyAttemptCountInRequestNotOverwritten) { /* expected_count */ 123); } +class MockRetryOptionsPredicate : public Upstream::RetryOptionsPredicate { +public: + MOCK_METHOD(UpdateOptionsReturn, updateOptions, (const UpdateOptionsParameters& parameters), + (const)); +}; + +// Also verify retry options predicates work. TEST_F(RouterTest, EnvoyAttemptCountInRequestUpdatedInRetries) { + auto retry_options_predicate = std::make_shared(); + callbacks_.route_->route_entry_.retry_policy_.retry_options_predicates_.emplace_back( + retry_options_predicate); + setIncludeAttemptCountInRequest(true); NiceMock encoder1; @@ -938,13 +949,21 @@ TEST_F(RouterTest, EnvoyAttemptCountInRequestUpdatedInRetries) { // 5xx response. 
router_.retry_state_->expectHeadersRetry(); + Upstream::RetryOptionsPredicate::UpdateOptionsReturn update_options_return{ + std::make_shared()}; + EXPECT_CALL(*retry_options_predicate, updateOptions(_)).WillOnce(Return(update_options_return)); Http::ResponseHeaderMapPtr response_headers1( new Http::TestResponseHeaderMapImpl{{":status", "503"}}); EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_.host_->outlier_detector_, putHttpResponseCode(503)); + // NOLINTNEXTLINE(clang-analyzer-core.CallAndMessage) response_decoder->decodeHeaders(std::move(response_headers1), true); EXPECT_TRUE(verifyHostUpstreamStats(0, 1)); + // Verify retry options predicate return values have been updated. + EXPECT_EQ(update_options_return.new_upstream_socket_options_.value(), + router_.upstreamSocketOptions()); + // We expect the 5xx response to kick off a new request. EXPECT_CALL(encoder1.stream_, resetStream(_)).Times(0); NiceMock encoder2; @@ -2260,8 +2279,12 @@ TEST_F(RouterTest, UpstreamPerTryTimeoutExcludesNewStream) { // Tests that a retry is sent after the first request hits the per try timeout, but then // headers received in response to the first request are still used (and the 2nd request -// canceled). +// canceled). Also verify retry options predicates work. 
TEST_F(RouterTest, HedgedPerTryTimeoutFirstRequestSucceeds) { + auto retry_options_predicate = std::make_shared(); + callbacks_.route_->route_entry_.retry_policy_.retry_options_predicates_.emplace_back( + retry_options_predicate); + enableHedgeOnPerTryTimeout(); NiceMock encoder1; @@ -2296,6 +2319,7 @@ TEST_F(RouterTest, HedgedPerTryTimeoutFirstRequestSucceeds) { NiceMock encoder2; Http::ResponseDecoder* response_decoder2 = nullptr; router_.retry_state_->expectHedgedPerTryTimeoutRetry(); + EXPECT_CALL(*retry_options_predicate, updateOptions(_)); per_try_timeout_->invokeCallback(); EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_, newStream(_, _)) @@ -2714,8 +2738,12 @@ TEST_F(RouterTest, BadHeadersDroppedIfPreviousRetryScheduled) { } // Test retrying a request, when the first attempt fails before the client -// has sent any of the body. +// has sent any of the body. Also verify retry options predicates work. TEST_F(RouterTest, RetryRequestBeforeBody) { + auto retry_options_predicate = std::make_shared(); + callbacks_.route_->route_entry_.retry_policy_.retry_options_predicates_.emplace_back( + retry_options_predicate); + NiceMock encoder1; Http::ResponseDecoder* response_decoder = nullptr; EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_, newStream(_, _)) @@ -2735,6 +2763,7 @@ TEST_F(RouterTest, RetryRequestBeforeBody) { router_.decodeHeaders(headers, false); router_.retry_state_->expectResetRetry(); + EXPECT_CALL(*retry_options_predicate, updateOptions(_)); encoder1.stream_.resetStream(Http::StreamResetReason::RemoteReset); NiceMock encoder2; @@ -2766,6 +2795,7 @@ TEST_F(RouterTest, RetryRequestBeforeBody) { .WillOnce(Invoke([&](Http::ResponseHeaderMap& headers, bool) -> void { EXPECT_EQ(headers.Status()->value(), "200"); })); + // NOLINTNEXTLINE(clang-analyzer-core.CallAndMessage) response_decoder->decodeHeaders(std::move(response_headers), true); EXPECT_TRUE(verifyHostUpstreamStats(1, 1)); } diff --git a/test/common/upstream/test_cluster_manager.h 
b/test/common/upstream/test_cluster_manager.h index 463f4fb2a9222..8bc91e7ac8772 100644 --- a/test/common/upstream/test_cluster_manager.h +++ b/test/common/upstream/test_cluster_manager.h @@ -115,6 +115,7 @@ class TestClusterManagerFactory : public ClusterManagerFactory { } Secret::SecretManager& secretManager() override { return secret_manager_; } + Singleton::Manager& singletonManager() override { return singleton_manager_; } MOCK_METHOD(ClusterManager*, clusterManagerFromProto_, (const envoy::config::bootstrap::v3::Bootstrap& bootstrap)); diff --git a/test/extensions/filters/http/original_src/original_src_test.cc b/test/extensions/filters/http/original_src/original_src_test.cc index 5839baa88b9e5..a26db42297c79 100644 --- a/test/extensions/filters/http/original_src/original_src_test.cc +++ b/test/extensions/filters/http/original_src/original_src_test.cc @@ -110,9 +110,17 @@ TEST_F(OriginalSrcHttpTest, DecodeHeadersIpv4AddressUsesCorrectAddress) { option->hashKey(key); } - std::vector expected_key = {1, 2, 3, 4}; - - EXPECT_EQ(key, expected_key); + // The first part of the hash is the address. Then come the other options. On Windows there + // is only the single option. On other platforms there are more that get hashed. 
+ EXPECT_EQ(key[0], 1); + EXPECT_EQ(key[1], 2); + EXPECT_EQ(key[2], 3); + EXPECT_EQ(key[3], 4); +#ifndef WIN32 + EXPECT_GT(key.size(), 4); +#else + EXPECT_EQ(key.size(), 4); +#endif } TEST_F(OriginalSrcHttpTest, DecodeHeadersIpv4AddressBleachesPort) { diff --git a/test/integration/integration_test.cc b/test/integration/integration_test.cc index 794a49adda065..cc004f0a616f9 100644 --- a/test/integration/integration_test.cc +++ b/test/integration/integration_test.cc @@ -9,6 +9,7 @@ #include "source/common/http/header_map_impl.h" #include "source/common/http/headers.h" +#include "source/common/network/socket_option_factory.h" #include "source/common/network/socket_option_impl.h" #include "source/common/network/utility.h" #include "source/common/protobuf/utility.h" @@ -20,6 +21,7 @@ #include "test/mocks/http/mocks.h" #include "test/test_common/network_utility.h" #include "test/test_common/printers.h" +#include "test/test_common/registry.h" #include "test/test_common/utility.h" #include "gtest/gtest.h" @@ -2097,6 +2099,98 @@ TEST_P(IntegrationTest, RandomPreconnect) { } } +class TestRetryOptionsPredicateFactory : public Upstream::RetryOptionsPredicateFactory { +public: + Upstream::RetryOptionsPredicateConstSharedPtr + createOptionsPredicate(const Protobuf::Message&, + Upstream::RetryExtensionFactoryContext&) override { + return std::make_shared(); + } + + ProtobufTypes::MessagePtr createEmptyConfigProto() override { + // Using Struct instead of a custom empty config proto. This is only allowed in tests. 
+ return ProtobufTypes::MessagePtr{new Envoy::ProtobufWkt::Struct()}; + } + + std::string name() const override { return "test_retry_options_predicate_factory"; } + +private: + struct TestPredicate : public Upstream::RetryOptionsPredicate { + UpdateOptionsReturn updateOptions(const UpdateOptionsParameters&) const override { + UpdateOptionsReturn ret; + Network::TcpKeepaliveConfig tcp_keepalive_config; + tcp_keepalive_config.keepalive_probes_ = 1; + tcp_keepalive_config.keepalive_time_ = 1; + tcp_keepalive_config.keepalive_interval_ = 1; + ret.new_upstream_socket_options_ = + Network::SocketOptionFactory::buildTcpKeepaliveOptions(tcp_keepalive_config); + return ret; + } + }; +}; + +// Verify that a test retry options predicate starts a new connection pool with a new connection. +TEST_P(IntegrationTest, RetryOptionsPredicate) { + TestRetryOptionsPredicateFactory factory; + Registry::InjectFactory registered(factory); + + config_helper_.addConfigModifier( + [](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager& + hcm) { + auto* route_config = hcm.mutable_route_config(); + auto* virtual_host = route_config->mutable_virtual_hosts(0); + auto* route = virtual_host->mutable_routes(0)->mutable_route(); + auto* retry_policy = route->mutable_retry_policy(); + retry_policy->set_retry_on("5xx"); + auto* predicate = retry_policy->add_retry_options_predicates(); + predicate->set_name("test_retry_options_predicate_factory"); + predicate->mutable_typed_config()->set_type_url( + "type.googleapis.com/google.protobuf.Struct"); + }); + + initialize(); + + Http::TestRequestHeaderMapImpl request_headers{ + {":method", "GET"}, + {":path", "/some/path"}, + {":scheme", "http"}, + {":authority", "cluster_0"}, + }; + + codec_client_ = makeHttpConnection(lookupPort("http")); + auto response = codec_client_->makeHeaderOnlyRequest(request_headers); + AssertionResult result = + fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_); 
+ RELEASE_ASSERT(result, result.message()); + result = fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_); + RELEASE_ASSERT(result, result.message()); + result = upstream_request_->waitForEndStream(*dispatcher_); + RELEASE_ASSERT(result, result.message()); + + // Force a retry and run the predicate + upstream_request_->encodeHeaders(Http::TestResponseHeaderMapImpl{{":status", "503"}}, true); + + // Using a different socket option will cause a new connection pool to be used and a new + // connection. + FakeHttpConnectionPtr new_upstream_connection; + FakeStreamPtr new_upstream_request; + result = fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, new_upstream_connection); + RELEASE_ASSERT(result, result.message()); + result = new_upstream_connection->waitForNewStream(*dispatcher_, new_upstream_request); + RELEASE_ASSERT(result, result.message()); + result = new_upstream_request->waitForEndStream(*dispatcher_); + RELEASE_ASSERT(result, result.message()); + + new_upstream_request->encodeHeaders(Http::TestResponseHeaderMapImpl{{":status", "200"}}, true); + result = response->waitForEndStream(); + RELEASE_ASSERT(result, result.message()); + + result = new_upstream_connection->close(); + RELEASE_ASSERT(result, result.message()); + result = new_upstream_connection->waitForDisconnect(); + RELEASE_ASSERT(result, result.message()); +} + // Tests that a filter (set-route-filter) using the setRoute callback and DelegatingRoute mechanism // successfully overrides the cached route, and subsequently, the request's upstream cluster // selection. 
diff --git a/test/mocks/router/mocks.h b/test/mocks/router/mocks.h index 1bf7e452ed56b..e9ccd1de05a2e 100644 --- a/test/mocks/router/mocks.h +++ b/test/mocks/router/mocks.h @@ -109,6 +109,10 @@ class TestRetryPolicy : public RetryPolicy { uint32_t retryOn() const override { return retry_on_; } MOCK_METHOD(std::vector, retryHostPredicates, (), (const)); MOCK_METHOD(Upstream::RetryPrioritySharedPtr, retryPriority, (), (const)); + absl::Span + retryOptionsPredicates() const override { + return retry_options_predicates_; + } uint32_t hostSelectionMaxAttempts() const override { return host_selection_max_attempts_; } const std::vector& retriableStatusCodes() const override { return retriable_status_codes_; @@ -139,6 +143,7 @@ class TestRetryPolicy : public RetryPolicy { absl::optional max_interval_{}; std::vector reset_headers_{}; std::chrono::milliseconds reset_max_interval_{300000}; + std::vector retry_options_predicates_; }; class MockInternalRedirectPolicy : public InternalRedirectPolicy { diff --git a/test/mocks/upstream/BUILD b/test/mocks/upstream/BUILD index e7fcdef38adcf..e6c41713f5458 100644 --- a/test/mocks/upstream/BUILD +++ b/test/mocks/upstream/BUILD @@ -224,7 +224,9 @@ envoy_cc_mock( hdrs = ["cluster_manager_factory.h"], deps = [ "//envoy/upstream:cluster_manager_interface", + "//source/common/singleton:manager_impl_lib", "//test/mocks/secret:secret_mocks", + "//test/test_common:thread_factory_for_test_lib", ], ) diff --git a/test/mocks/upstream/cluster_manager_factory.h b/test/mocks/upstream/cluster_manager_factory.h index a9354c97998ed..b4328b31beb12 100644 --- a/test/mocks/upstream/cluster_manager_factory.h +++ b/test/mocks/upstream/cluster_manager_factory.h @@ -2,7 +2,10 @@ #include "envoy/upstream/cluster_manager.h" +#include "source/common/singleton/manager_impl.h" + #include "test/mocks/secret/mocks.h" +#include "test/test_common/thread_factory_for_test.h" #include "gmock/gmock.h" #include "gtest/gtest.h" @@ -16,6 +19,7 @@ class 
MockClusterManagerFactory : public ClusterManagerFactory { ~MockClusterManagerFactory() override; Secret::MockSecretManager& secretManager() override { return secret_manager_; }; + Singleton::Manager& singletonManager() override { return singleton_manager_; } MOCK_METHOD(ClusterManagerPtr, clusterManagerFromProto, (const envoy::config::bootstrap::v3::Bootstrap& bootstrap)); @@ -44,6 +48,7 @@ class MockClusterManagerFactory : public ClusterManagerFactory { private: NiceMock secret_manager_; + Singleton::ManagerImpl singleton_manager_{Thread::threadFactoryForTest()}; }; } // namespace Upstream } // namespace Envoy From 79d470a44996f258bfe5e131e18731a61489865f Mon Sep 17 00:00:00 2001 From: phlax Date: Wed, 15 Sep 2021 20:23:03 +0100 Subject: [PATCH 052/121] tooling: Use upstream checkers (#18087) Signed-off-by: Ryan Northey Signed-off-by: gayang --- ci/do_ci.sh | 2 +- ci/format_pre.sh | 2 +- tools/base/requirements.in | 2 + tools/base/requirements.txt | 71 ++-- tools/code_format/BUILD | 15 +- tools/code_format/python_check.py | 140 ++----- tools/code_format/tests/test_python_check.py | 384 ------------------- tools/dependency/BUILD | 11 +- tools/dependency/pip_check.py | 103 ++--- tools/dependency/tests/test_pip_check.py | 195 ---------- 10 files changed, 119 insertions(+), 806 deletions(-) delete mode 100644 tools/code_format/tests/test_python_check.py delete mode 100644 tools/dependency/tests/test_pip_check.py diff --git a/ci/do_ci.sh b/ci/do_ci.sh index 7f10e6612ea02..7e1350525a603 100755 --- a/ci/do_ci.sh +++ b/ci/do_ci.sh @@ -459,7 +459,7 @@ elif [[ "$CI_TARGET" == "deps" ]]; then "${ENVOY_SRCDIR}"/ci/check_repository_locations.sh # Run pip requirements tests - bazel run "${BAZEL_BUILD_OPTIONS[@]}" //tools/dependency:pip_check "${ENVOY_SRCDIR}" + bazel run "${BAZEL_BUILD_OPTIONS[@]}" //tools/dependency:pip_check exit 0 elif [[ "$CI_TARGET" == "cve_scan" ]]; then diff --git a/ci/format_pre.sh b/ci/format_pre.sh index 831e57ca4a298..08808386b16b2 100755 --- 
a/ci/format_pre.sh +++ b/ci/format_pre.sh @@ -53,7 +53,7 @@ CURRENT=configs bazel run "${BAZEL_BUILD_OPTIONS[@]}" //configs:example_configs_validation CURRENT=python -bazel run "${BAZEL_BUILD_OPTIONS[@]}" //tools/code_format:python_check -- --diff-file="$DIFF_OUTPUT" --fix "$(pwd)" +bazel run "${BAZEL_BUILD_OPTIONS[@]}" //tools/code_format:python_check -- --diff-file="$DIFF_OUTPUT" --fix CURRENT=extensions bazel run "${BAZEL_BUILD_OPTIONS[@]}" //tools/extensions:extensions_check diff --git a/tools/base/requirements.in b/tools/base/requirements.in index 8ec1dbb9be567..b12ec7e8bc1b1 100644 --- a/tools/base/requirements.in +++ b/tools/base/requirements.in @@ -3,6 +3,8 @@ colorama coloredlogs coverage envoy.base.utils +envoy.code_format.python_check>=0.0.4 +envoy.dependency.pip_check>=0.0.4 envoy.distribution.release envoy.distribution.verify envoy.gpg.sign diff --git a/tools/base/requirements.txt b/tools/base/requirements.txt index 06e91962896a6..9e737332415a1 100644 --- a/tools/base/requirements.txt +++ b/tools/base/requirements.txt @@ -2,7 +2,7 @@ # This file is autogenerated by pip-compile # To update, run: # -# pip-compile --allow-unsafe --generate-hashes tools/base/requirements.in +# pip-compile --allow-unsafe --generate-hashes requirements.in # abstracts==0.0.12 \ --hash=sha256:acc01ff56c8a05fb88150dff62e295f9071fc33388c42f1dfc2787a8d1c755ff @@ -10,6 +10,8 @@ abstracts==0.0.12 \ # aio.functional # envoy.abstract.command # envoy.base.utils + # envoy.code-format.python-check + # envoy.dependency.pip-check # envoy.github.abstract # envoy.github.release aio.functional==0.0.9 \ @@ -22,9 +24,13 @@ aio.functional==0.0.9 \ aio.stream==0.0.2 \ --hash=sha256:6f5baaff48f6319db134cd56c06ccf89db1f7c5f67a26382e081efc96f2f675d # via envoy.github.release +aio.subprocess==0.0.4 \ + --hash=sha256:fd504a7c02423c40fde19ad87b62932b9eaa091f5a22d26b89b452059a728750 + # via envoy.code-format.python-check aio.tasks==0.0.4 \ 
--hash=sha256:9abd4b0881edb292c4f91a2f63b1dea7a9829a4bd4e8440225a1a412a90461fc # via + # envoy.code-format.python-check # envoy.github.abstract # envoy.github.release aiodocker==0.21.0 \ @@ -263,6 +269,8 @@ envoy.abstract.command==0.0.3 \ envoy.base.checker==0.0.2 \ --hash=sha256:2ac81efa20fd01fff644ff7dc7fadeac1c3e4dbb6210881ac7a7919ec0e048d8 # via + # envoy.code-format.python-check + # envoy.dependency.pip-check # envoy.distribution.distrotest # envoy.distribution.verify envoy.base.runner==0.0.4 \ @@ -276,9 +284,17 @@ envoy.base.utils==0.0.8 \ --hash=sha256:b82e18ab0535207b7136d6980239c9350f7113fa5da7dda781bcb6ad1e05b3ab # via # -r requirements.in + # envoy.code-format.python-check + # envoy.dependency.pip-check # envoy.distribution.distrotest # envoy.github.release # envoy.gpg.sign +envoy.code-format.python-check==0.0.4 \ + --hash=sha256:5e166102d1f873f0c14640bcef87b46147cbad1cb68888c977acfde7fce96e04 + # via -r requirements.in +envoy.dependency.pip-check==0.0.4 \ + --hash=sha256:3213d77959f65c3c97e9b5d74cb14c02bc02dae64bac2e7c3cb829a2f4e5e40e + # via -r requirements.in envoy.distribution.distrotest==0.0.3 \ --hash=sha256:c094adbd959eb1336f93afc00aedb7ee4e68e8252e2365be816a6f9ede8a3de7 # via envoy.distribution.verify @@ -305,17 +321,18 @@ envoy.gpg.identity==0.0.2 \ envoy.gpg.sign==0.0.3 \ --hash=sha256:31667931f5d7ff05fd809b89748f277511486311c777652af4cb8889bd641049 # via -r requirements.in +flake8-polyfill==1.0.2 \ + --hash=sha256:12be6a34ee3ab795b19ca73505e7b55826d5f6ad7230d31b18e106400169b9e9 \ + --hash=sha256:e44b087597f6da52ec6393a709e7108b2905317d0c0b744cdca6208e670d8eda + # via pep8-naming flake8==3.9.2 \ --hash=sha256:07528381786f2a6237b061f6e96610a4167b226cb926e2aa2b6b1d78057c576b \ --hash=sha256:bf8fd333346d844f616e8d47905ef3a3384edae6b4e9beb0c5101e25e3110907 # via # -r requirements.in + # envoy.code-format.python-check # flake8-polyfill # pep8-naming -flake8-polyfill==1.0.2 \ - 
--hash=sha256:12be6a34ee3ab795b19ca73505e7b55826d5f6ad7230d31b18e106400169b9e9 \ - --hash=sha256:e44b087597f6da52ec6393a709e7108b2905317d0c0b744cdca6208e670d8eda - # via pep8-naming frozendict==2.0.6 \ --hash=sha256:3f00de72805cf4c9e81b334f3f04809278b967d2fed84552313a0fcce511beb1 \ --hash=sha256:5d3f75832c35d4df041f0e19c268964cbef29c1eb34cd3517cf883f1c2d089b9 @@ -471,7 +488,9 @@ packaging==21.0 \ pep8-naming==0.12.1 \ --hash=sha256:4a8daeaeb33cfcde779309fc0c9c0a68a3bbe2ad8a8308b763c5068f86eb9f37 \ --hash=sha256:bb2455947757d162aa4cad55dba4ce029005cd1692f2899a21d51d8630ca7841 - # via -r requirements.in + # via + # -r requirements.in + # envoy.code-format.python-check pluggy==1.0.0 \ --hash=sha256:4224373bacce55f955a878bf9cfa763c1e360858e330072059e10bad68531159 \ --hash=sha256:74134bbf457f031a36d68416e1509f34bd5ccc019f0bcc952c7b909d06b37bd3 @@ -535,14 +554,6 @@ pyparsing==2.4.7 \ pyreadline==2.1 \ --hash=sha256:4530592fc2e85b25b1a9f79664433da09237c1a270e4d78ea5aa3a2c7229e2d1 # via -r requirements.in -pytest==6.2.5 \ - --hash=sha256:131b36680866a76e6781d13f101efb86cf674ebb9762eb70d3082b6f29889e89 \ - --hash=sha256:7310f8d27bc79ced999e760ca304d69f6ba6c6649c0b60fb0e04a4a77cacc134 - # via - # -r requirements.in - # pytest-asyncio - # pytest-cov - # pytest-patches pytest-asyncio==0.15.1 \ --hash=sha256:2564ceb9612bbd560d19ca4b41347b54e7835c2f792c504f698e05395ed63f6f \ --hash=sha256:3042bcdf1c5d978f6b74d96a151c4cfb9dcece65006198389ccd7e6c60eb1eea @@ -554,6 +565,14 @@ pytest-cov==2.12.1 \ pytest-patches==0.0.3 \ --hash=sha256:6f8cdc8641c708c4812f58ae48d410f373a6fd16cd6cc4dc4d3fb8951df9c92a # via -r requirements.in +pytest==6.2.5 \ + --hash=sha256:131b36680866a76e6781d13f101efb86cf674ebb9762eb70d3082b6f29889e89 \ + --hash=sha256:7310f8d27bc79ced999e760ca304d69f6ba6c6649c0b60fb0e04a4a77cacc134 + # via + # -r requirements.in + # pytest-asyncio + # pytest-cov + # pytest-patches python-gnupg==0.4.7 \ --hash=sha256:2061f56b1942c29b92727bf9aecbd3cea3893acc9cccbdc7eb4604285efe4ac7 
\ --hash=sha256:3ff5b1bf5e397de6e1fe41a7c0f403dad4e242ac92b345f440eaecfb72a7ebae @@ -615,16 +634,6 @@ snowballstemmer==2.1.0 \ --hash=sha256:b51b447bea85f9968c13b650126a888aabd4cb4463fca868ec596826325dedc2 \ --hash=sha256:e997baa4f2e9139951b6f4c631bad912dfd3c792467e2f03d7239464af90e914 # via sphinx -sphinx==4.1.2 \ - --hash=sha256:3092d929cd807926d846018f2ace47ba2f3b671b309c7a89cd3306e80c826b13 \ - --hash=sha256:46d52c6cee13fec44744b8c01ed692c18a640f6910a725cbb938bc36e8d64544 - # via - # -r requirements.in - # sphinx-copybutton - # sphinx-rtd-theme - # sphinx-tabs - # sphinxcontrib-httpdomain - # sphinxext-rediraffe sphinx-copybutton==0.4.0 \ --hash=sha256:4340d33c169dac6dd82dce2c83333412aa786a42dd01a81a8decac3b130dc8b0 \ --hash=sha256:8daed13a87afd5013c3a9af3575cc4d5bec052075ccd3db243f895c07a689386 @@ -637,6 +646,16 @@ sphinx-tabs==3.2.0 \ --hash=sha256:1e1b1846c80137bd81a78e4a69b02664b98b1e1da361beb30600b939dfc75065 \ --hash=sha256:33137914ed9b276e6a686d7a337310ee77b1dae316fdcbce60476913a152e0a4 # via -r requirements.in +sphinx==4.1.2 \ + --hash=sha256:3092d929cd807926d846018f2ace47ba2f3b671b309c7a89cd3306e80c826b13 \ + --hash=sha256:46d52c6cee13fec44744b8c01ed692c18a640f6910a725cbb938bc36e8d64544 + # via + # -r requirements.in + # sphinx-copybutton + # sphinx-rtd-theme + # sphinx-tabs + # sphinxcontrib-httpdomain + # sphinxext-rediraffe sphinxcontrib-applehelp==1.0.2 \ --hash=sha256:806111e5e962be97c29ec4c1e7fe277bfd19e9652fb1a4392105b43e01af885a \ --hash=sha256:a072735ec80e7675e3f432fcae8610ecf509c5f1869d17e2eecff44389cdbc58 @@ -710,7 +729,9 @@ wrapt==1.12.1 \ yapf==0.31.0 \ --hash=sha256:408fb9a2b254c302f49db83c59f9aa0b4b0fd0ec25be3a5c51181327922ff63d \ --hash=sha256:e3a234ba8455fe201eaa649cdac872d590089a18b661e39bbac7020978dd9c2e - # via -r requirements.in + # via + # -r requirements.in + # envoy.code-format.python-check yarl==1.6.3 \ --hash=sha256:00d7ad91b6583602eb9c1d085a2cf281ada267e9a197e8b7cae487dadbfa293e \ 
--hash=sha256:0355a701b3998dcd832d0dc47cc5dedf3874f966ac7f870e0f3a6788d802d434 \ diff --git a/tools/code_format/BUILD b/tools/code_format/BUILD index ba9de5fce8557..11416d9ca8415 100644 --- a/tools/code_format/BUILD +++ b/tools/code_format/BUILD @@ -1,6 +1,6 @@ load("@base_pip3//:requirements.bzl", "requirement") +load("@rules_python//python:defs.bzl", "py_binary") load("//bazel:envoy_build_system.bzl", "envoy_package") -load("//tools/base:envoy_python.bzl", "envoy_py_binary") licenses(["notice"]) # Apache 2 @@ -12,14 +12,11 @@ exports_files([ "envoy_build_fixer.py", ]) -envoy_py_binary( - name = "tools.code_format.python_check", +py_binary( + name = "python_check", + srcs = ["python_check.py"], deps = [ - "//tools/base:aio", - "//tools/base:checker", - "//tools/base:utils", - requirement("flake8"), - requirement("pep8-naming"), - requirement("yapf"), + "@envoy_repo", + requirement("envoy.code_format.python_check"), ], ) diff --git a/tools/code_format/python_check.py b/tools/code_format/python_check.py index e3a00f45b0cb5..135b7e9fd3ffe 100755 --- a/tools/code_format/python_check.py +++ b/tools/code_format/python_check.py @@ -4,135 +4,51 @@ # # with bazel: # -# bazel run //tools/code_format:python_check -- -h +# $ bazel run //tools/code_format:python_check -- -h # -# alternatively, if you have the necessary python deps available +# $ bazel run //tools/code_format:python_check # -# PYTHONPATH=. ./tools/code_format/python_check.py -h +# with pip: # -# python requires: flake8, yapf +# $ pip install envoy.code_format.python_check +# $ envoy.code_format.python_check -h +# +# usage with pip requires a path, eg +# +# $ envoy.code_format.python_check . 
+# +# The upstream lib is maintained here: +# +# https://github.com/envoyproxy/pytooling/tree/main/envoy.code_format.python_check +# +# Please submit issues/PRs to the pytooling repo: +# +# https://github.com/envoyproxy/pytooling # -import argparse import pathlib import sys from functools import cached_property -from typing import Iterable, List, Optional, Tuple -from flake8.main.application import Application as Flake8Application # type:ignore +import abstracts -import yapf # type:ignore +from envoy.code_format import python_check -from tools.base import aio, checker, utils +import envoy_repo -FLAKE8_CONFIG = '.flake8' -YAPF_CONFIG = '.style.yapf' -# TODO(phlax): add checks for: -# - isort - - -class PythonChecker(checker.AsyncChecker): - checks = ("flake8", "yapf") - - @property - def diff_file_path(self) -> Optional[pathlib.Path]: - return pathlib.Path(self.args.diff_file) if self.args.diff_file else None +@abstracts.implementer(python_check.APythonChecker) +class EnvoyPythonChecker: @cached_property - def flake8_app(self) -> Flake8Application: - flake8_app = Flake8Application() - flake8_app.initialize(self.flake8_args) - return flake8_app - - @property - def flake8_args(self) -> Tuple[str, ...]: - return ("--config", str(self.flake8_config_path), str(self.path)) - - @property - def flake8_config_path(self) -> pathlib.Path: - return self.path.joinpath(FLAKE8_CONFIG) - - @property - def recurse(self) -> bool: - """Flag to determine whether to apply checks recursively""" - return self.args.recurse - - @property - def yapf_config_path(self) -> pathlib.Path: - return self.path.joinpath(YAPF_CONFIG) - - @property - def yapf_files(self) -> List[str]: - return yapf.file_resources.GetCommandLineFiles( - self.args.paths, - recursive=self.recurse, - exclude=yapf.file_resources.GetExcludePatternsForDir(str(self.path))) - - def add_arguments(self, parser: argparse.ArgumentParser) -> None: - super().add_arguments(parser) - parser.add_argument( - "--recurse", - "-r", - 
choices=["yes", "no"], - default="yes", - help="Recurse path or paths directories") - parser.add_argument( - "--diff-file", default=None, help="Specify the path to a diff file with fixes") - - async def check_flake8(self) -> None: - """Run flake8 on files and/or repo""" - errors: List[str] = [] - with utils.buffered(stdout=errors, mangle=self._strip_lines): - self.flake8_app.run_checks() - self.flake8_app.report() - if errors: - self.error("flake8", errors) - - async def check_yapf(self) -> None: - """Run flake8 on files and/or repo""" - futures = aio.concurrent(self.yapf_format(python_file) for python_file in self.yapf_files) - - async for (python_file, (reformatted, encoding, changed)) in futures: - self.yapf_result(python_file, reformatted, changed) - - async def on_check_run(self, check: str) -> None: - if check not in self.failed and check not in self.warned: - self.succeed(check, [check]) - - async def on_checks_complete(self) -> int: - if self.diff_file_path and self.has_failed: - result = await aio.async_subprocess.run(["git", "diff", "HEAD"], - cwd=self.path, - capture_output=True) - self.diff_file_path.write_bytes(result.stdout) - return await super().on_checks_complete() - - async def yapf_format(self, python_file: str) -> tuple: - return python_file, yapf.yapf_api.FormatFile( - python_file, - style_config=str(self.yapf_config_path), - in_place=self.fix, - print_diff=not self.fix) - - def yapf_result(self, python_file: str, reformatted: str, changed: bool) -> None: - if not changed: - return self.succeed("yapf", [python_file]) - if self.fix: - return self.warn("yapf", [f"{python_file}: reformatted"]) - if reformatted: - return self.warn("yapf", [f"{python_file}: diff\n{reformatted}"]) - self.error("yapf", [python_file]) - - def _strip_line(self, line: str) -> str: - return line[len(str(self.path)) + 1:] if line.startswith(f"{self.path}/") else line - - def _strip_lines(self, lines: Iterable[str]) -> List[str]: - return [self._strip_line(line) for line in 
lines if line] + def path(self) -> pathlib.Path: + if self.args.paths: + return pathlib.Path(self.args.paths[0]) + return pathlib.Path(envoy_repo.PATH) -def main(*args: str) -> Optional[int]: - return PythonChecker(*args).run() +def main(*args) -> int: + return EnvoyPythonChecker(*args).run() if __name__ == "__main__": diff --git a/tools/code_format/tests/test_python_check.py b/tools/code_format/tests/test_python_check.py deleted file mode 100644 index 7cf39577d4bb7..0000000000000 --- a/tools/code_format/tests/test_python_check.py +++ /dev/null @@ -1,384 +0,0 @@ -import types -from contextlib import contextmanager -from unittest.mock import AsyncMock, patch, MagicMock, PropertyMock - -import pytest - -from tools.code_format import python_check - - -def test_python_checker_constructor(): - checker = python_check.PythonChecker("path1", "path2", "path3") - assert checker.checks == ("flake8", "yapf") - assert checker.args.paths == ['path1', 'path2', 'path3'] - - -@pytest.mark.parametrize("diff_path", ["", None, "PATH"]) -def test_python_diff_path(patches, diff_path): - checker = python_check.PythonChecker("path1", "path2", "path3") - patched = patches( - "pathlib", - ("PythonChecker.args", dict(new_callable=PropertyMock)), - prefix="tools.code_format.python_check") - - with patched as (m_plib, m_args): - m_args.return_value.diff_file = diff_path - assert checker.diff_file_path == (m_plib.Path.return_value if diff_path else None) - - if diff_path: - assert ( - list(m_plib.Path.call_args) - == [(m_args.return_value.diff_file, ), {}]) - else: - assert not m_plib.Path.called - - -def test_python_flake8_app(patches): - checker = python_check.PythonChecker("path1", "path2", "path3") - patched = patches( - ("PythonChecker.flake8_args", dict(new_callable=PropertyMock)), - "Flake8Application", - prefix="tools.code_format.python_check") - - with patched as (m_flake8_args, m_flake8_app): - assert checker.flake8_app == m_flake8_app.return_value - - assert ( - 
list(m_flake8_app.call_args) - == [(), {}]) - assert ( - list(m_flake8_app.return_value.initialize.call_args) - == [(m_flake8_args.return_value,), {}]) - - -def test_python_flake8_args(patches): - checker = python_check.PythonChecker("path1", "path2", "path3") - patched = patches( - ("PythonChecker.flake8_config_path", dict(new_callable=PropertyMock)), - ("PythonChecker.path", dict(new_callable=PropertyMock)), - prefix="tools.code_format.python_check") - - with patched as (m_flake8_config, m_path): - assert ( - checker.flake8_args - == ('--config', - str(m_flake8_config.return_value), - str(m_path.return_value))) - - -def test_python_flake8_config_path(patches): - checker = python_check.PythonChecker("path1", "path2", "path3") - patched = patches( - ("PythonChecker.path", dict(new_callable=PropertyMock)), - prefix="tools.code_format.python_check") - - with patched as (m_path, ): - assert checker.flake8_config_path == m_path.return_value.joinpath.return_value - - assert ( - list(m_path.return_value.joinpath.call_args) - == [(python_check.FLAKE8_CONFIG, ), {}]) - - -def test_python_yapf_config_path(patches): - checker = python_check.PythonChecker("path1", "path2", "path3") - patched = patches( - ("PythonChecker.path", dict(new_callable=PropertyMock)), - prefix="tools.code_format.python_check") - - with patched as (m_path, ): - assert checker.yapf_config_path == m_path.return_value.joinpath.return_value - - assert ( - list(m_path.return_value.joinpath.call_args) - == [(python_check.YAPF_CONFIG, ), {}]) - - -def test_python_yapf_files(patches): - checker = python_check.PythonChecker("path1", "path2", "path3") - - patched = patches( - ("PythonChecker.args", dict(new_callable=PropertyMock)), - ("PythonChecker.path", dict(new_callable=PropertyMock)), - "yapf.file_resources.GetCommandLineFiles", - "yapf.file_resources.GetExcludePatternsForDir", - prefix="tools.code_format.python_check") - - with patched as (m_args, m_path, m_yapf_files, m_yapf_exclude): - assert 
checker.yapf_files == m_yapf_files.return_value - - assert ( - list(m_yapf_files.call_args) - == [(m_args.return_value.paths,), - {'recursive': m_args.return_value.recurse, - 'exclude': m_yapf_exclude.return_value}]) - assert ( - list(m_yapf_exclude.call_args) - == [(str(m_path.return_value),), {}]) - - -def test_python_add_arguments(patches): - checker = python_check.PythonChecker("path1", "path2", "path3") - add_mock = patch("tools.code_format.python_check.checker.AsyncChecker.add_arguments") - m_parser = MagicMock() - - with add_mock as m_add: - checker.add_arguments(m_parser) - - assert ( - list(m_add.call_args) - == [(m_parser,), {}]) - assert ( - list(list(c) for c in m_parser.add_argument.call_args_list) - == [[('--recurse', '-r'), - {'choices': ['yes', 'no'], - 'default': 'yes', - 'help': 'Recurse path or paths directories'}], - [('--diff-file',), - {'default': None, 'help': 'Specify the path to a diff file with fixes'}]]) - - -@pytest.mark.asyncio -@pytest.mark.parametrize("errors", [[], ["err1", "err2"]]) -async def test_python_check_flake8(patches, errors): - checker = python_check.PythonChecker("path1", "path2", "path3") - - patched = patches( - "utils.buffered", - "PythonChecker.error", - "PythonChecker._strip_lines", - ("PythonChecker.flake8_app", dict(new_callable=PropertyMock)), - prefix="tools.code_format.python_check") - - @contextmanager - def mock_buffered(stdout=None, mangle=None): - yield - stdout.extend(errors) - - with patched as (m_buffered, m_error, m_mangle, m_flake8_app): - m_buffered.side_effect = mock_buffered - assert not await checker.check_flake8() - - assert ( - list(m_buffered.call_args) - == [(), {'stdout': errors, 'mangle': m_mangle}]) - assert ( - list(m_flake8_app.return_value.run_checks.call_args) - == [(), {}]) - assert ( - list(m_flake8_app.return_value.report.call_args) - == [(), {}]) - - if errors: - assert ( - list(m_error.call_args) - == [('flake8', ['err1', 'err2']), {}]) - else: - assert not m_error.called - - -def 
test_python_check_recurse(): - checker = python_check.PythonChecker("path1", "path2", "path3") - args_mock = patch( - "tools.code_format.python_check.PythonChecker.args", - new_callable=PropertyMock) - - with args_mock as m_args: - assert checker.recurse == m_args.return_value.recurse - assert "recurse" not in checker.__dict__ - - -@pytest.mark.asyncio -async def test_python_check_yapf(patches): - checker = python_check.PythonChecker("path1", "path2", "path3") - patched = patches( - "aio", - ("PythonChecker.yapf_format", dict(new_callable=MagicMock)), - "PythonChecker.yapf_result", - ("PythonChecker.yapf_files", dict(new_callable=PropertyMock)), - prefix="tools.code_format.python_check") - files = ["file1", "file2", "file3"] - - async def concurrent(iters): - assert isinstance(iters, types.GeneratorType) - for i, format_result in enumerate(iters): - yield (format_result, (f"REFORMAT{i}", f"ENCODING{i}", f"CHANGED{i}")) - - with patched as (m_aio, m_yapf_format, m_yapf_result, m_yapf_files): - m_yapf_files.return_value = files - m_aio.concurrent.side_effect = concurrent - assert not await checker.check_yapf() - - assert ( - list(list(c) for c in m_yapf_format.call_args_list) - == [[(file,), {}] for file in files]) - assert ( - list(list(c) for c in m_yapf_result.call_args_list) - == [[(m_yapf_format.return_value, f"REFORMAT{i}", f"CHANGED{i}"), {}] for i, _ in enumerate(files)]) - - -@pytest.mark.asyncio -@pytest.mark.parametrize("errors", [[], ["check2", "check3"], ["check1", "check3"]]) -@pytest.mark.parametrize("warnings", [[], ["check4", "check5"], ["check1", "check5"]]) -async def test_python_on_check_run(patches, errors, warnings): - checker = python_check.PythonChecker("path1", "path2", "path3") - checkname = "check1" - patched = patches( - "PythonChecker.succeed", - ("PythonChecker.name", dict(new_callable=PropertyMock)), - ("PythonChecker.failed", dict(new_callable=PropertyMock)), - ("PythonChecker.warned", dict(new_callable=PropertyMock)), - 
prefix="tools.code_format.python_check") - - with patched as (m_succeed, m_name, m_failed, m_warned): - m_failed.return_value = errors - m_warned.return_value = warnings - assert not await checker.on_check_run(checkname) - - if checkname in warnings or checkname in errors: - assert not m_succeed.called - else: - assert ( - list(m_succeed.call_args) - == [(checkname, [checkname]), {}]) - - -@pytest.mark.asyncio -@pytest.mark.parametrize("diff_path", ["", "DIFF1"]) -@pytest.mark.parametrize("failed", [True, False]) -async def test_python_on_checks_complete(patches, diff_path, failed): - checker = python_check.PythonChecker("path1", "path2", "path3") - patched = patches( - "aio", - ("checker.AsyncChecker.on_checks_complete", dict(new_callable=AsyncMock)), - ("PythonChecker.diff_file_path", dict(new_callable=PropertyMock)), - ("PythonChecker.has_failed", dict(new_callable=PropertyMock)), - ("PythonChecker.path", dict(new_callable=PropertyMock)), - prefix="tools.code_format.python_check") - - with patched as (m_aio, m_super, m_diff, m_failed, m_path): - m_aio.async_subprocess.run = AsyncMock() - if not diff_path: - m_diff.return_value = None - m_failed.return_value = failed - assert await checker.on_checks_complete() == m_super.return_value - - if diff_path and failed: - assert ( - list(m_aio.async_subprocess.run.call_args) - == [(['git', 'diff', 'HEAD'],), - dict(capture_output=True, cwd=m_path.return_value)]) - assert ( - list(m_diff.return_value.write_bytes.call_args) - == [(m_aio.async_subprocess.run.return_value.stdout,), {}]) - else: - assert not m_aio.async_subprocess.run.called - - assert ( - list(m_super.call_args) - == [(), {}]) - - -@pytest.mark.asyncio -@pytest.mark.parametrize("fix", [True, False]) -async def test_python_yapf_format(patches, fix): - checker = python_check.PythonChecker("path1", "path2", "path3") - patched = patches( - "yapf.yapf_api.FormatFile", - ("PythonChecker.yapf_config_path", dict(new_callable=PropertyMock)), - ("PythonChecker.fix", 
dict(new_callable=PropertyMock)), - prefix="tools.code_format.python_check") - - with patched as (m_format, m_config, m_fix): - m_fix.return_value = fix - assert await checker.yapf_format("FILENAME") == ("FILENAME", m_format.return_value) - - assert ( - list(m_format.call_args) - == [('FILENAME',), - {'style_config': str(m_config.return_value), - 'in_place': fix, - 'print_diff': not fix}]) - assert ( - list(list(c) for c in m_fix.call_args_list) - == [[(), {}], [(), {}]]) - - -@pytest.mark.parametrize("reformatted", ["", "REFORMAT"]) -@pytest.mark.parametrize("fix", [True, False]) -@pytest.mark.parametrize("changed", [True, False]) -def test_python_yapf_result(patches, reformatted, fix, changed): - checker = python_check.PythonChecker("path1", "path2", "path3") - patched = patches( - "PythonChecker.succeed", - "PythonChecker.warn", - "PythonChecker.error", - ("PythonChecker.fix", dict(new_callable=PropertyMock)), - prefix="tools.code_format.python_check") - - with patched as (m_succeed, m_warn, m_error, m_fix): - m_fix.return_value = fix - checker.yapf_result("FILENAME", reformatted, changed) - - if not changed: - assert ( - list(m_succeed.call_args) - == [('yapf', ['FILENAME']), {}]) - assert not m_warn.called - assert not m_error.called - assert not m_fix.called - return - assert not m_succeed.called - if fix: - assert not m_error.called - assert len(m_warn.call_args_list) == 1 - assert ( - list(m_warn.call_args) - == [('yapf', [f'FILENAME: reformatted']), {}]) - return - if reformatted: - assert not m_error.called - assert len(m_warn.call_args_list) == 1 - assert ( - list(m_warn.call_args) - == [('yapf', [f'FILENAME: diff\n{reformatted}']), {}]) - return - assert not m_warn.called - assert ( - list(m_error.call_args) - == [('yapf', ['FILENAME']), {}]) - - -def test_python_strip_lines(): - checker = python_check.PythonChecker("path1", "path2", "path3") - strip_mock = patch("tools.code_format.python_check.PythonChecker._strip_line") - lines = ["", "foo", "", 
"bar", "", "", "baz", "", ""] - - with strip_mock as m_strip: - assert ( - checker._strip_lines(lines) - == [m_strip.return_value] * 3) - - assert ( - list(list(c) for c in m_strip.call_args_list) - == [[('foo',), {}], [('bar',), {}], [('baz',), {}]]) - - -@pytest.mark.parametrize("line", ["REMOVE/foo", "REMOVE", "bar", "other", "REMOVE/baz", "baz"]) -def test_python_strip_line(line): - checker = python_check.PythonChecker("path1", "path2", "path3") - path_mock = patch( - "tools.code_format.python_check.PythonChecker.path", - new_callable=PropertyMock) - - with path_mock as m_path: - m_path.return_value = "REMOVE" - assert ( - checker._strip_line(line) - == line[7:] if line.startswith(f"REMOVE/") else line) - - -def test_python_checker_main(command_main): - command_main( - python_check.main, - "tools.code_format.python_check.PythonChecker") diff --git a/tools/dependency/BUILD b/tools/dependency/BUILD index 2909e36a8b8a9..b1e0af18c2aea 100644 --- a/tools/dependency/BUILD +++ b/tools/dependency/BUILD @@ -1,6 +1,6 @@ load("@rules_python//python:defs.bzl", "py_binary", "py_library") load("//bazel:envoy_build_system.bzl", "envoy_package") -load("//tools/base:envoy_python.bzl", "envoy_py_binary") +load("@base_pip3//:requirements.bzl", "requirement") licenses(["notice"]) # Apache 2 @@ -41,10 +41,11 @@ py_binary( ], ) -envoy_py_binary( - name = "tools.dependency.pip_check", +py_binary( + name = "pip_check", + srcs = ["pip_check.py"], deps = [ - "//tools/base:checker", - "//tools/base:utils", + "@envoy_repo", + requirement("envoy.dependency.pip_check"), ], ) diff --git a/tools/dependency/pip_check.py b/tools/dependency/pip_check.py index 91a8456fc2854..4da2ac5f8fb4e 100755 --- a/tools/dependency/pip_check.py +++ b/tools/dependency/pip_check.py @@ -4,96 +4,51 @@ # # with bazel: # -# bazel //tools/dependency:pip_check -- -h +# $ bazel run //tools/dependency:pip_check -- -h # -# alternatively, if you have the necessary python deps available +# $ bazel run 
//tools/dependency:pip_check # -# ./tools/dependency/pip_check.py -h +# with pip: +# +# $ pip install envoy.dependency.pip_check +# $ envoy.dependency.pip_check -h +# +# usage with pip requires a path, eg +# +# $ envoy.dependency.pip_check . +# +# The upstream lib is maintained here: +# +# https://github.com/envoyproxy/pytooling/tree/main/envoy.dependency.pip_check +# +# Please submit issues/PRs to the pytooling repo: +# +# https://github.com/envoyproxy/pytooling # +import pathlib import sys from functools import cached_property -from typing import Iterable, Set - -from tools.base import checker, utils - -DEPENDABOT_CONFIG = ".github/dependabot.yml" -REQUIREMENTS_FILENAME = "requirements.txt" -# TODO(phlax): add checks for: -# - requirements can be installed together -# - pip-compile formatting +import abstracts +from envoy.dependency import pip_check -class PipConfigurationError(Exception): - pass +import envoy_repo -class PipChecker(checker.Checker): - checks = ("dependabot",) - _dependabot_config = DEPENDABOT_CONFIG - _requirements_filename = REQUIREMENTS_FILENAME +@abstracts.implementer(pip_check.APipChecker) +class EnvoyPipChecker: @cached_property - def config_requirements(self) -> set: - """Set of configured pip dependabot directories""" - return set( - update['directory'] - for update in self.dependabot_config["updates"] - if update["package-ecosystem"] == "pip") - - @cached_property - def dependabot_config(self) -> dict: - """Parsed dependabot config""" - result = utils.from_yaml(self.path.joinpath(self.dependabot_config_path)) - if not isinstance(result, dict): - raise PipConfigurationError( - f"Unable to parse dependabot config: {self.dependabot_config_path}") - return result - - @property - def dependabot_config_path(self) -> str: - return self._dependabot_config - - @cached_property - def requirements_dirs(self) -> Set[str]: - """Set of found directories in the repo containing requirements.txt""" - return set( - f"/{f.parent.relative_to(self.path)}" 
for f in self.path.glob("**/*") - if f.name == self.requirements_filename) - - @property - def requirements_filename(self) -> str: - return self._requirements_filename - - def check_dependabot(self) -> None: - """Check that dependabot config matches requirements.txt files found in repo""" - missing_dirs = self.config_requirements.difference(self.requirements_dirs) - missing_config = self.requirements_dirs.difference(self.config_requirements) - correct = self.requirements_dirs.intersection(self.config_requirements) - if correct: - self.dependabot_success(correct) - if missing_dirs: - self.dependabot_errors( - missing_dirs, - f"Missing {self.requirements_filename} dir, specified in dependabot config") - if missing_config: - self.dependabot_errors( - missing_config, - f"Missing dependabot config for {self.requirements_filename} in dir") - - def dependabot_success(self, correct: Iterable) -> None: - self.succeed( - "dependabot", - ([f"{self.requirements_filename}: {dirname}" for dirname in sorted(correct)])) - - def dependabot_errors(self, missing: Iterable, msg: str) -> None: - for dirname in sorted(missing): - self.error("dependabot", [f"{msg}: {dirname}"]) + def path(self) -> pathlib.Path: + if self.args.paths: + return pathlib.Path(self.args.paths[0]) + return pathlib.Path(envoy_repo.PATH) def main(*args) -> int: - return PipChecker(*args).run() + return EnvoyPipChecker(*args).run() if __name__ == "__main__": diff --git a/tools/dependency/tests/test_pip_check.py b/tools/dependency/tests/test_pip_check.py deleted file mode 100644 index 0c3458626cc89..0000000000000 --- a/tools/dependency/tests/test_pip_check.py +++ /dev/null @@ -1,195 +0,0 @@ -from unittest.mock import MagicMock, patch, PropertyMock - -import pytest - -from tools.dependency import pip_check - - -def test_pip_checker_constructor(): - checker = pip_check.PipChecker("path1", "path2", "path3") - assert checker.checks == ("dependabot",) - assert checker.dependabot_config_path == 
pip_check.DEPENDABOT_CONFIG == ".github/dependabot.yml" - assert checker.requirements_filename == pip_check.REQUIREMENTS_FILENAME == "requirements.txt" - assert checker.args.paths == ['path1', 'path2', 'path3'] - - -def test_pip_checker_config_requirements(): - checker = pip_check.PipChecker("path1", "path2", "path3") - - config_mock = patch( - "tools.dependency.pip_check.PipChecker.dependabot_config", - new_callable=PropertyMock) - - with config_mock as m_config: - m_config.return_value.__getitem__.return_value = [ - {"package-ecosystem": "pip", "directory": "dir1"}, - {"package-ecosystem": "not-pip", "directory": "dir2"}, - {"package-ecosystem": "pip", "directory": "dir3"}] - assert checker.config_requirements == {'dir1', 'dir3'} - assert ( - list(m_config.return_value.__getitem__.call_args) - == [('updates',), {}]) - - -@pytest.mark.parametrize("isdict", [True, False]) -def test_pip_checker_dependabot_config(patches, isdict): - checker = pip_check.PipChecker("path1", "path2", "path3") - patched = patches( - "utils", - ("PipChecker.path", dict(new_callable=PropertyMock)), - prefix="tools.dependency.pip_check") - - with patched as (m_utils, m_path): - if isdict: - m_utils.from_yaml.return_value = {} - - if isdict: - assert checker.dependabot_config == m_utils.from_yaml.return_value - else: - with pytest.raises(pip_check.PipConfigurationError) as e: - checker.dependabot_config - - assert ( - e.value.args[0] - == f'Unable to parse dependabot config: {checker.dependabot_config_path}') - - assert ( - list(m_path.return_value.joinpath.call_args) - == [(checker._dependabot_config, ), {}]) - assert ( - list(m_utils.from_yaml.call_args) - == [(m_path.return_value.joinpath.return_value,), {}]) - - -def test_pip_checker_requirements_dirs(patches): - checker = pip_check.PipChecker("path1", "path2", "path3") - dummy_glob = [ - "FILE1", "FILE2", "FILE3", - "REQUIREMENTS_FILE", "FILE4", - "REQUIREMENTS_FILE", "FILE5"] - patched = patches( - ("PipChecker.requirements_filename", 
dict(new_callable=PropertyMock)), - ("PipChecker.path", dict(new_callable=PropertyMock)), - prefix="tools.dependency.pip_check") - expected = [] - - with patched as (m_reqs, m_path): - m_reqs.return_value = "REQUIREMENTS_FILE" - _glob = [] - - for fname in dummy_glob: - _mock = MagicMock() - _mock.name = fname - if fname == "REQUIREMENTS_FILE": - expected.append(_mock) - _glob.append(_mock) - - m_path.return_value.glob.return_value = _glob - assert checker.requirements_dirs == {f"/{f.parent.relative_to.return_value}" for f in expected} - - for exp in expected: - assert ( - list(exp.parent.relative_to.call_args) - == [(m_path.return_value,), {}]) - assert "requirements_dirs" in checker.__dict__ - - -TEST_REQS = ( - (set(), set()), - (set(["A", "B"]), set()), - (set(["A", "B"]), set(["B", "C"])), - (set(["A", "B", "C"]), set(["A", "B", "C"])), - (set(), set(["B", "C"]))) - - -@pytest.mark.parametrize("requirements", TEST_REQS) -def test_pip_checker_check_dependabot(patches, requirements): - config, dirs = requirements - checker = pip_check.PipChecker("path1", "path2", "path3") - - patched = patches( - ("PipChecker.config_requirements", dict(new_callable=PropertyMock)), - ("PipChecker.requirements_dirs", dict(new_callable=PropertyMock)), - ("PipChecker.requirements_filename", dict(new_callable=PropertyMock)), - "PipChecker.dependabot_success", - "PipChecker.dependabot_errors", - prefix="tools.dependency.pip_check") - - with patched as (m_config, m_dirs, m_fname, m_success, m_errors): - m_config.return_value = config - m_dirs.return_value = dirs - assert not checker.check_dependabot() - - if config & dirs: - assert ( - list(m_success.call_args) - == [(config & dirs, ), {}]) - else: - assert not m_success.called - - if config - dirs: - assert ( - [(config - dirs, f"Missing {m_fname.return_value} dir, specified in dependabot config"), {}] - in list(list(c) for c in m_errors.call_args_list)) - - if dirs - config: - assert ( - [(dirs - config, f"Missing dependabot config 
for {m_fname.return_value} in dir"), {}] - in list(list(c) for c in m_errors.call_args_list)) - - if not config - dirs and not dirs - config: - assert not m_errors.called - - -def test_pip_checker_dependabot_success(patches): - checker = pip_check.PipChecker("path1", "path2", "path3") - succeed_mock = patch - success = set(["C", "D", "B", "A"]) - - patched = patches( - "PipChecker.succeed", - ("PipChecker.requirements_filename", dict(new_callable=PropertyMock)), - prefix="tools.dependency.pip_check") - - with patched as (m_succeed, m_fname): - checker.dependabot_success(success) - - assert ( - list(m_succeed.call_args) - == [('dependabot', - [f"{m_fname.return_value}: {x}" for x in sorted(success)]), {}]) - - -def test_pip_checker_dependabot_errors(patches): - checker = pip_check.PipChecker("path1", "path2", "path3") - succeed_mock = patch - errors = set(["C", "D", "B", "A"]) - MSG = "ERROR MESSAGE" - - patched = patches( - "PipChecker.error", - ("PipChecker.name", dict(new_callable=PropertyMock)), - prefix="tools.dependency.pip_check") - - with patched as (m_error, m_name): - checker.dependabot_errors(errors, MSG) - - assert ( - list(list(c) for c in list(m_error.call_args_list)) - == [[('dependabot', [f'ERROR MESSAGE: {x}']), {}] for x in sorted(errors)]) - - -def test_pip_checker_main(): - class_mock = patch("tools.dependency.pip_check.PipChecker") - - with class_mock as m_class: - assert ( - pip_check.main("arg0", "arg1", "arg2") - == m_class.return_value.run.return_value) - - assert ( - list(m_class.call_args) - == [('arg0', 'arg1', 'arg2'), {}]) - assert ( - list(m_class.return_value.run.call_args) - == [(), {}]) From 75037116d3f6542032f719f7a590a26090fe3c22 Mon Sep 17 00:00:00 2001 From: Ryan Hamilton Date: Wed, 15 Sep 2021 15:42:37 -0700 Subject: [PATCH 053/121] Quiche merge (#18136) Signed-off-by: Ryan Hamilton Signed-off-by: gayang --- bazel/repository_locations.bzl | 6 +++--- source/common/quic/platform/quiche_flags_impl.cc | 1 - 
test/common/quic/envoy_quic_client_session_test.cc | 6 ++---- test/common/quic/envoy_quic_server_session_test.cc | 6 ++---- 4 files changed, 7 insertions(+), 12 deletions(-) diff --git a/bazel/repository_locations.bzl b/bazel/repository_locations.bzl index b01a14f881ba1..f088dfd3d05f4 100644 --- a/bazel/repository_locations.bzl +++ b/bazel/repository_locations.bzl @@ -822,12 +822,12 @@ REPOSITORY_LOCATIONS_SPEC = dict( project_name = "QUICHE", project_desc = "QUICHE (QUIC, HTTP/2, Etc) is Google‘s implementation of QUIC and related protocols", project_url = "https://github.com/google/quiche", - version = "e8ddc3873182355137862b4d6417add2b2b8a31d", - sha256 = "f1d17b033a9e7449ef84f0c7392319061981439fa15c5be3007c4dea4b58ebc3", + version = "744a8d60762acb1b7d73fc4f169bdf2258ca35d8", + sha256 = "5840ef8d4767a6d51302c75964f05e77bc9c504b7c6a283b9762980b3892e23f", urls = ["https://github.com/google/quiche/archive/{version}.tar.gz"], strip_prefix = "quiche-{version}", use_category = ["dataplane_core"], - release_date = "2021-09-09", + release_date = "2021-09-15", cpe = "N/A", ), com_googlesource_googleurl = dict( diff --git a/source/common/quic/platform/quiche_flags_impl.cc b/source/common/quic/platform/quiche_flags_impl.cc index 6dcd07e3f0815..af607a83adc49 100644 --- a/source/common/quic/platform/quiche_flags_impl.cc +++ b/source/common/quic/platform/quiche_flags_impl.cc @@ -34,7 +34,6 @@ absl::flat_hash_map makeFlagMap() { #undef QUIC_FLAG // Disable IETF draft 29 implementation. Envoy only supports RFC-v1. FLAGS_quic_reloadable_flag_quic_disable_version_draft_29->setValue(true); - FLAGS_quic_reloadable_flag_quic_decline_server_push_stream->setValue(true); #define QUIC_PROTOCOL_FLAG(type, flag, ...) 
flags.emplace(FLAGS_##flag->name(), FLAGS_##flag); #include "quiche/quic/core/quic_protocol_flags_list.h" diff --git a/test/common/quic/envoy_quic_client_session_test.cc b/test/common/quic/envoy_quic_client_session_test.cc index 92f54a87f948c..56ee37b1a29c0 100644 --- a/test/common/quic/envoy_quic_client_session_test.cc +++ b/test/common/quic/envoy_quic_client_session_test.cc @@ -70,10 +70,8 @@ class EnvoyQuicClientSessionTest : public testing::Test { EnvoyQuicClientSessionTest() : api_(Api::createApiForTest(time_system_)), dispatcher_(api_->allocateDispatcher("test_thread")), connection_helper_(*dispatcher_), - alarm_factory_(*dispatcher_, *connection_helper_.GetClock()), quic_version_([]() { - SetQuicReloadableFlag(quic_decline_server_push_stream, true); - return quic::CurrentSupportedHttp3Versions(); - }()), + alarm_factory_(*dispatcher_, *connection_helper_.GetClock()), + quic_version_([]() { return quic::CurrentSupportedHttp3Versions(); }()), peer_addr_(Network::Utility::getAddressWithPort(*Network::Utility::getIpv6LoopbackAddress(), 12345)), self_addr_(Network::Utility::getAddressWithPort(*Network::Utility::getIpv6LoopbackAddress(), diff --git a/test/common/quic/envoy_quic_server_session_test.cc b/test/common/quic/envoy_quic_server_session_test.cc index f44b359cc2b78..8ac32ca898135 100644 --- a/test/common/quic/envoy_quic_server_session_test.cc +++ b/test/common/quic/envoy_quic_server_session_test.cc @@ -151,10 +151,8 @@ class EnvoyQuicServerSessionTest : public testing::Test { EnvoyQuicServerSessionTest() : api_(Api::createApiForTest(time_system_)), dispatcher_(api_->allocateDispatcher("test_thread")), connection_helper_(*dispatcher_), - alarm_factory_(*dispatcher_, *connection_helper_.GetClock()), quic_version_({[]() { - SetQuicReloadableFlag(quic_decline_server_push_stream, true); - return quic::CurrentSupportedHttp3Versions()[0]; - }()}), + alarm_factory_(*dispatcher_, *connection_helper_.GetClock()), + quic_version_({[]() { return 
quic::CurrentSupportedHttp3Versions()[0]; }()}), quic_stat_names_(listener_config_.listenerScope().symbolTable()), quic_connection_(new MockEnvoyQuicServerConnection( connection_helper_, alarm_factory_, writer_, quic_version_, *listener_config_.socket_)), From deaac569f90614c5df8daf9bd86f752549dbeeee Mon Sep 17 00:00:00 2001 From: phlax Date: Thu, 16 Sep 2021 08:23:59 +0100 Subject: [PATCH 054/121] bazel: Add implementation of ABazelQuery (#18021) Signed-off-by: Ryan Northey Signed-off-by: gayang --- tools/base/BUILD | 11 +++++++ tools/base/bazel_query.py | 63 +++++++++++++++++++++++++++++++++++++++ 2 files changed, 74 insertions(+) create mode 100644 tools/base/bazel_query.py diff --git a/tools/base/BUILD b/tools/base/BUILD index c1d243c119ed7..0a4488ddb4d33 100644 --- a/tools/base/BUILD +++ b/tools/base/BUILD @@ -1,3 +1,4 @@ +load("@rules_python//python:defs.bzl", "py_binary") load("@base_pip3//:requirements.bzl", "requirement") load("//bazel:envoy_build_system.bzl", "envoy_package") load("//tools/base:envoy_python.bzl", "envoy_py_library") @@ -37,3 +38,13 @@ envoy_py_library( requirement("setuptools"), ], ) + +py_binary( + name = "bazel_query", + srcs = ["bazel_query.py"], + main = "bazel_query.py", + deps = [ + "@envoy_repo", + requirement("envoy.base.utils"), + ], +) diff --git a/tools/base/bazel_query.py b/tools/base/bazel_query.py new file mode 100644 index 0000000000000..48825838de45e --- /dev/null +++ b/tools/base/bazel_query.py @@ -0,0 +1,63 @@ +#!/usr/bin/env python3 +"""Envoy Bazel query implementation. + +This module can be used either as a `py_binary` or a `py_library`. + +cli usage (outputs to json): + +```console +$ bazel run //tools/base:bazel_query "deps(source/...)" | jq "." +``` + +python usage: + +```python +from tools.base.bazel_query import query + +result = query("deps(source/...)") +``` + +NB: This allows running queries that do not define scope and cannot be +run as genqueries. **It should not therefore be used in build rules**. 
+""" + +# The upstream lib is maintained here: +# +# https://github.com/envoyproxy/pytooling/tree/main/envoy.base.utils +# +# Please submit issues/PRs to the pytooling repo: +# +# https://github.com/envoyproxy/pytooling +# + +import json +import pathlib +import sys +from functools import cached_property + +import abstracts + +from envoy.base.utils import ABazelQuery + +import envoy_repo + + +@abstracts.implementer(ABazelQuery) +class EnvoyBazelQuery: + + @cached_property + def path(self) -> pathlib.Path: + return pathlib.Path(envoy_repo.PATH) + + +query = EnvoyBazelQuery().query + + +def main(*args): + print(json.dumps(query(*args[0:1]))) + + +if __name__ == "__main__": + sys.exit(main(*sys.argv[1:])) + +__all__ = ("query",) From e9c4905b36289f0530a83d0e11cebb76258cc0c6 Mon Sep 17 00:00:00 2001 From: Ryan Hamilton Date: Thu, 16 Sep 2021 05:51:54 -0700 Subject: [PATCH 055/121] https: Unhide alt svc cache (#18101) http: Unhide the AlternateProtocolCache configuration and move some docs from http3_upstream.md to connection_pooling.rst Risk Level: N/A - Docs only Testing: N/A - Docs only Docs Changes: Unhide the AlternateProtocolCache configuration and move some docs from http3_upstream.md to connection_pooling.rst Release Notes: Added Platform Specific Features: N/A Signed-off-by: Ryan Hamilton Signed-off-by: gayang --- .../v3/alternate_protocols_cache.proto | 3 --- .../http/v3/http_protocol_options.proto | 1 - .../upstream/connection_pooling.rst | 22 ++++++++++++++----- docs/root/version_history/current.rst | 1 + 4 files changed, 17 insertions(+), 10 deletions(-) diff --git a/api/envoy/extensions/filters/http/alternate_protocols_cache/v3/alternate_protocols_cache.proto b/api/envoy/extensions/filters/http/alternate_protocols_cache/v3/alternate_protocols_cache.proto index e628a6ca73fbb..0f0609b6e55ed 100644 --- a/api/envoy/extensions/filters/http/alternate_protocols_cache/v3/alternate_protocols_cache.proto +++ 
b/api/envoy/extensions/filters/http/alternate_protocols_cache/v3/alternate_protocols_cache.proto @@ -15,10 +15,7 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // Configuration for the alternate protocols cache HTTP filter. // [#extension: envoy.filters.http.alternate_protocols_cache] -// TODO(RyanTheOptimist): Move content from source/docs/http3_upstream.md to -// docs/root/intro/arch_overview/upstream/connection_pooling.rst when unhiding the proto. message FilterConfig { - // [#not-implemented-hide:] // If set, causes the use of the alternate protocols cache, which is responsible for // parsing and caching HTTP Alt-Svc headers. This enables the use of HTTP/3 for upstream // servers that advertise supporting it. diff --git a/api/envoy/extensions/upstreams/http/v3/http_protocol_options.proto b/api/envoy/extensions/upstreams/http/v3/http_protocol_options.proto index 271dcfbe49cec..217d343f47d0a 100644 --- a/api/envoy/extensions/upstreams/http/v3/http_protocol_options.proto +++ b/api/envoy/extensions/upstreams/http/v3/http_protocol_options.proto @@ -118,7 +118,6 @@ message HttpProtocolOptions { // is alpha is not guaranteed to be API-stable. config.core.v3.Http3ProtocolOptions http3_protocol_options = 3; - // [#not-implemented-hide:] // The presence of alternate protocols cache options causes the use of the // alternate protocols cache, which is responsible for parsing and caching // HTTP Alt-Svc headers. This enables the use of HTTP/3 for origins that diff --git a/docs/root/intro/arch_overview/upstream/connection_pooling.rst b/docs/root/intro/arch_overview/upstream/connection_pooling.rst index 147171f498e13..b6d93cf3c0e12 100644 --- a/docs/root/intro/arch_overview/upstream/connection_pooling.rst +++ b/docs/root/intro/arch_overview/upstream/connection_pooling.rst @@ -50,8 +50,7 @@ pool will drain the affected connection. Once a connection reaches its :ref:`max stream limit `, it will be marked as busy until a stream is available. 
New connections are established anytime there is a pending request without a connection that can be dispatched to (up to circuit breaker limits for -connections). HTTP/3 upstream support is currently only usable in situations where HTTP/3 is guaranteed -to work, but automatic failover to TCP is coming soon!. +connections). Automatic protocol selection ---------------------------- @@ -69,10 +68,21 @@ then 300ms later, if a QUIC connection is not established, will also attempt to Whichever handshake succeeds will be used for the initial stream, but if both TCP and QUIC connections are established, QUIC will eventually be preferred. -Upcoming versions of HTTP/3 support will include only selecting HTTP/3 if the upstream advertises support -either via `HTTP Alternative Services `_, -`HTTPS DNS RR `_, or "QUIC hints" which -will be manually configured. This path is alpha and rapidly undergoing improvements with the goal of having +If an alternate protocol cache is configured via +:ref:`alternate_protocols_cache_options ` +then HTTP/3 connections will only be attempted to servers which +advertise HTTP/3 support either via `HTTP Alternative Services `, (eventually +the `HTTPS DNS resource record` or "QUIC hints" +which will be manually configured). +If no such advertisement exists, then HTTP/2 or HTTP/1 will be used instead. + +If no alternate protocol cache is configured, then HTTP/3 connections will be attempted to +all servers, even those which do not advertise HTTP/3. + +Further, HTTP/3 runs over QUIC (which uses UDP) and not over TCP (which HTTP/1 and HTTP/2 use). +It is not uncommon for network devices to block UDP traffic, and hence block HTTP/3. This +means that upstream HTTP/3 connection attempts might be blocked by the network and will fall +back to using HTTP/2 or HTTP/1. 
This path is alpha and rapidly undergoing improvements with the goal of having the default behavior result in optimal latency for internet environments, so please be patient and follow along with Envoy release notes to stay aprised of the latest and greatest changes. diff --git a/docs/root/version_history/current.rst b/docs/root/version_history/current.rst index c1e3b4b2c4174..c5a2739343e6b 100644 --- a/docs/root/version_history/current.rst +++ b/docs/root/version_history/current.rst @@ -104,6 +104,7 @@ New Features * bootstrap: added :ref:`inline_headers ` in the bootstrap to make custom inline headers bootstrap configurable. * contrib: added new :ref:`contrib images ` which contain contrib extensions. * grpc reverse bridge: added a new :ref:`option ` to support streaming response bodies when withholding gRPC frames from the upstream. +* http: added :ref:`alternate_protocols_cache_options ` for enabling HTTP/3 connections to servers which advertise HTTP/3 support via `HTTP Alternative Services `_. * http: added :ref:`string_match ` in the header matcher. * http: added :ref:`x-envoy-upstream-stream-duration-ms ` that allows configuring the max stream duration via a request header. * http: added support for :ref:`max_requests_per_connection ` for both upstream and downstream connections. 
From f0dd5a3790a5310978a5b4f0995166233efb12c5 Mon Sep 17 00:00:00 2001 From: phlax Date: Thu, 16 Sep 2021 13:56:49 +0100 Subject: [PATCH 056/121] repokitteh: Ignore non-envoy repo prs (#18141) Signed-off-by: Ryan Northey Signed-off-by: gayang --- ci/repokitteh/modules/newcontributor.star | 43 ----------------------- ci/repokitteh/modules/newpr.star | 9 ++++- 2 files changed, 8 insertions(+), 44 deletions(-) delete mode 100644 ci/repokitteh/modules/newcontributor.star diff --git a/ci/repokitteh/modules/newcontributor.star b/ci/repokitteh/modules/newcontributor.star deleted file mode 100644 index 865e5e90c7624..0000000000000 --- a/ci/repokitteh/modules/newcontributor.star +++ /dev/null @@ -1,43 +0,0 @@ - -NEW_CONTRIBUTOR_MESSAGE = """ -Hi @%s, welcome and thank you for your contribution. - -We will try to review your Pull Request as quickly as possible. - -In the meantime, please take a look at the [contribution guidelines](https://github.com/envoyproxy/envoy/blob/main/CONTRIBUTING.md) if you have not done so already. - -""" - -DRAFT_MESSAGE = """ -As a reminder, PRs marked as draft will not be automatically assigned reviewers, -or be handled by maintainer-oncall triage. - -Please mark your PR as ready when you want it to be reviewed! 
-""" - - -def get_pr_author_association(issue_number): - return github.call( - method="GET", - path="repos/envoyproxy/envoy/pulls/%s" % issue_number)["json"]["author_association"] - -def is_newcontributor(issue_number): - return ( - get_pr_author_association(issue_number) - in ["NONE", "FIRST_TIME_CONTRIBUTOR", "FIRST_TIMER"]) - -def should_message_newcontributor(action, issue_number): - return ( - action == 'opened' - and is_newcontributor(issue_number)) - -def send_newcontributor_message(sender): - github.issue_create_comment(NEW_CONTRIBUTOR_MESSAGE % sender) - -def _pr(action, issue_number, sender, config, draft): - if should_message_newcontributor(action, issue_number): - send_newcontributor_message(sender) - if action == 'opened' and draft: - github.issue_create_comment(DRAFT_MESSAGE) - -handlers.pull_request(func=_pr) diff --git a/ci/repokitteh/modules/newpr.star b/ci/repokitteh/modules/newpr.star index 865e5e90c7624..4c4797f442262 100644 --- a/ci/repokitteh/modules/newpr.star +++ b/ci/repokitteh/modules/newpr.star @@ -34,7 +34,14 @@ def should_message_newcontributor(action, issue_number): def send_newcontributor_message(sender): github.issue_create_comment(NEW_CONTRIBUTOR_MESSAGE % sender) -def _pr(action, issue_number, sender, config, draft): +def is_envoy_repo(repo_owner, repo_name): + return ( + repo_owner == "envoyproxy" + and repo_name == "envoy") + +def _pr(action, issue_number, sender, config, draft, repo_owner, repo_name): + if not is_envoy_repo(repo_owner, repo_name): + return if should_message_newcontributor(action, issue_number): send_newcontributor_message(sender) if action == 'opened' and draft: From ac0adb2f7c088f6a57375d99e3742f0ed203e1b8 Mon Sep 17 00:00:00 2001 From: Rohit Agrawal Date: Thu, 16 Sep 2021 10:31:41 -0400 Subject: [PATCH 057/121] fix: De-dupe current version history entries and address some minor typos (#18089) Signed-off-by: Rohit Agrawal Signed-off-by: gayang --- docs/root/version_history/current.rst | 21 ++++++++++----------- 
1 file changed, 10 insertions(+), 11 deletions(-) diff --git a/docs/root/version_history/current.rst b/docs/root/version_history/current.rst index c5a2739343e6b..29a0f443fbb57 100644 --- a/docs/root/version_history/current.rst +++ b/docs/root/version_history/current.rst @@ -38,11 +38,11 @@ Minor Behavior Changes for "gRPC config stream closed" is now reduced to debug when the status is ``Ok`` or has been retriable (``DeadlineExceeded``, ``ResourceExhausted``, or ``Unavailable``) for less than 30 seconds. -* grpc: gRPC async client can be cached and shared accross filter instances in the same thread, this feature is turned off by default, can be turned on by setting runtime guard ``envoy.reloadable_features.enable_grpc_async_client_cache`` to true. +* grpc: gRPC async client can be cached and shared across filter instances in the same thread, this feature is turned off by default, can be turned on by setting runtime guard ``envoy.reloadable_features.enable_grpc_async_client_cache`` to true. * http: correct the use of the ``x-forwarded-proto`` header and the ``:scheme`` header. Where they differ (which is rare) ``:scheme`` will now be used for serving redirect URIs and cached content. This behavior can be reverted by setting runtime guard ``correct_scheme_and_xfp`` to false. -* http: reject requests with #fragment in the URI path. The fragment is not allowed to be part of request +* http: reject requests with #fragment in the URI path. The fragment is not allowed to be part of the request URI according to RFC3986 (3.5), RFC7230 (5.1) and RFC 7540 (8.1.2.3). Rejection of requests can be changed to stripping the #fragment instead by setting the runtime guard ``envoy.reloadable_features.http_reject_path_with_fragment`` to false. 
This behavior can further be changed to the deprecated behavior of keeping the fragment by setting the runtime guard @@ -56,24 +56,23 @@ Minor Behavior Changes * http: stop processing pending H/2 frames if connection transitioned to a closed state. This behavior can be temporarily reverted by setting the ``envoy.reloadable_features.skip_dispatching_frames_for_closed_connection`` to false. * listener: added the :ref:`enable_reuse_port ` field and changed the default for reuse_port from false to true, as the feature is now well - supported on the majority of production Linux kernels in use. The default change is aware of hot - restart, as otherwise the change would not be backwards compatible between restarts. This means - that hot restarting on to a new binary will retain the default of false until the binary undergoes + supported on the majority of production Linux kernels in use. The default change is aware of the hot + restart, as otherwise, the change would not be backward compatible between restarts. This means + that hot restarting onto a new binary will retain the default of false until the binary undergoes a full restart. To retain the previous behavior, either explicitly set the new configuration field to false, or set the runtime feature flag ``envoy.reloadable_features.listener_reuse_port_default_enabled`` to false. As part of this change, the use of reuse_port for TCP listeners on both macOS and Windows has been disabled due to suboptimal behavior. See the field documentation for more information. -* listener: destroy per network filter chain stats when a network filter chain is removed during the listener in place update. -* quic: enables IETF connection migration. This feature requires stable UDP packet routine in the L4 load balancer with the same first-4-bytes in connection id. It can be turned off by setting runtime guard ``envoy.reloadable_features.FLAGS_quic_reloadable_flag_quic_connection_migration_use_new_cid_v2`` to false. 
+* listener: destroy per network filter chain stats when a network filter chain is removed during the listener in-place update. +* quic: enables IETF connection migration. This feature requires a stable UDP packet routine in the L4 load balancer with the same first-4-bytes in connection id. It can be turned off by setting runtime guard ``envoy.reloadable_features.FLAGS_quic_reloadable_flag_quic_connection_migration_use_new_cid_v2`` to false. Bug Fixes --------- *Changes expected to improve the state of the world and are unlikely to have negative effects* * access log: fix ``%UPSTREAM_CLUSTER%`` when used in http upstream access logs. Previously, it was always logging as an unset value. -* access log: fix ``%UPSTREAM_CLUSTER%`` when used in http upstream access logs. Previously, it was always logging as an unset value. -* aws request signer: fix the AWS Request Signer extension to correctly normalize the path and query string to be signed according to AWS' guidelines, so that the hash on the server side matches. See `AWS SigV4 documentaion `_. +* aws request signer: fix the AWS Request Signer extension to correctly normalize the path and query string to be signed according to AWS' guidelines, so that the hash on the server side matches. See `AWS SigV4 documentation `_. * cluster: delete pools when they're idle to fix unbounded memory use when using PROXY protocol upstream with tcp_proxy. This behavior can be temporarily reverted by setting the ``envoy.reloadable_features.conn_pool_delete_when_idle`` runtime guard to false. * cluster: finish cluster warming even if hosts are removed before health check initialization. This only affected clusters with :ref:`ignore_health_on_host_removal `. * compressor: fix a bug where if trailers were added and a subsequent filter paused the filter chain, the request could be stalled. This behavior can be reverted by setting ``envoy.reloadable_features.fix_added_trailers`` to false. 
@@ -85,7 +84,7 @@ Bug Fixes * http: limit use of deferred resets in the http2 codec to server-side connections. Use of deferred reset for client connections can result in incorrect behavior and performance problems. * listener: fixed an issue on Windows where connections are not handled by all worker threads. * lua: fix ``BodyBuffer`` setting a Lua string and printing Lua string containing hex characters. Previously, ``BodyBuffer`` setting a Lua string or printing strings with hex characters will be truncated. -* xray: fix the AWS X-Ray tracer bug where span's error, fault and throttle information was not reported properly as per the `AWS X-Ray documentation `_. Before this fix, server error was reported under 'annotations' section of the segment data. +* xray: fix the AWS X-Ray tracer bug where span's error, fault and throttle information was not reported properly as per the `AWS X-Ray documentation `_. Before this fix, server error was reported under the 'annotations' section of the segment data. Removed Config or Runtime ------------------------- @@ -114,7 +113,7 @@ New Features * jwt_authn: added support for extracting JWTs from request cookies using :ref:`from_cookies `. * listener: new listener metric ``downstream_cx_transport_socket_connect_timeout`` to track transport socket timeouts. * matcher: added :ref:`invert ` for inverting the match result in the metadata matcher. -* overload: add a new overload action that resets streams using a lot of memory. To enable the tracking of allocated bytes in buffers that a stream is using we need to configure the minimum threshold for tracking via:ref:`buffer_factory_config `. We have an overload action ``Envoy::Server::OverloadActionNameValues::ResetStreams`` that takes advantage of the tracking to reset the most expensive stream first. +* overload: add a new overload action that resets streams using a lot of memory. 
To enable the tracking of allocated bytes in buffers that a stream is using we need to configure the minimum threshold for tracking via:ref:`buffer_factory_config `. We have an overload action ``Envoy::Server::OverloadActionNameValues::ResetStreams`` that takes advantage of the tracking to reset the most expensive stream first. * rbac: added :ref:`destination_port_range ` for matching range of destination ports. * route config: added :ref:`dynamic_metadata ` for routing based on dynamic metadata. * router: added retry options predicate extensions configured via From 5b15e7beaebc3a771fad06d51edec6fe92d0bbb1 Mon Sep 17 00:00:00 2001 From: phlax Date: Thu, 16 Sep 2021 16:48:36 +0100 Subject: [PATCH 058/121] bazel: Shift deprecate_* tools to bazel and cleanup requirements (#18073) Signed-off-by: Ryan Northey Signed-off-by: gayang --- .github/dependabot.yml | 10 -- GOVERNANCE.md | 2 +- tools/deprecate_features/BUILD | 9 ++ .../deprecate_features/deprecate_features.py | 6 +- .../deprecate_features/deprecate_features.sh | 7 - tools/deprecate_features/requirements.txt | 3 - tools/deprecate_version/BUILD | 13 ++ tools/deprecate_version/deprecate_version.py | 12 +- tools/deprecate_version/deprecate_version.sh | 7 - tools/deprecate_version/requirements.txt | 138 ------------------ 10 files changed, 29 insertions(+), 178 deletions(-) create mode 100644 tools/deprecate_features/BUILD delete mode 100644 tools/deprecate_features/deprecate_features.sh delete mode 100644 tools/deprecate_features/requirements.txt create mode 100644 tools/deprecate_version/BUILD delete mode 100755 tools/deprecate_version/deprecate_version.sh delete mode 100644 tools/deprecate_version/requirements.txt diff --git a/.github/dependabot.yml b/.github/dependabot.yml index 4aeb246c9db95..bf057a3add5a2 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -36,16 +36,6 @@ updates: schedule: interval: "daily" -- package-ecosystem: "pip" - directory: "/tools/deprecate_features" - schedule: - 
interval: "daily" - -- package-ecosystem: "pip" - directory: "/tools/deprecate_version" - schedule: - interval: "daily" - - package-ecosystem: "pip" directory: "/ci/flaky_test" schedule: diff --git a/GOVERNANCE.md b/GOVERNANCE.md index 13342260c7bc1..afe7242b20cb5 100644 --- a/GOVERNANCE.md +++ b/GOVERNANCE.md @@ -140,7 +140,7 @@ New Features Deprecated ---------- ``` -* Run the deprecate_versions.py script (e.g. `sh tools/deprecate_version/deprecate_version.sh`) +* Run the deprecate_versions.py script (e.g. `bazel run //tools/deprecate_version:deprecate_version`) to file tracking issues for runtime guarded code which can be removed. * Check source/common/runtime/runtime_features.cc and see if any runtime guards in disabled_runtime_features should be reassessed, and ping on the relevant issues. diff --git a/tools/deprecate_features/BUILD b/tools/deprecate_features/BUILD new file mode 100644 index 0000000000000..c3429723995d9 --- /dev/null +++ b/tools/deprecate_features/BUILD @@ -0,0 +1,9 @@ +load("@rules_python//python:defs.bzl", "py_binary") + +licenses(["notice"]) # Apache 2 + +py_binary( + name = "deprecate_features", + srcs = ["deprecate_features.py"], + deps = ["@envoy_repo"], +) diff --git a/tools/deprecate_features/deprecate_features.py b/tools/deprecate_features/deprecate_features.py index c7468e6784585..aa1d3ff0458f2 100644 --- a/tools/deprecate_features/deprecate_features.py +++ b/tools/deprecate_features/deprecate_features.py @@ -4,13 +4,15 @@ import re import subprocess import fileinput -from six.moves import input + +import envoy_repo # Sorts out the list of deprecated proto fields which should be disallowed and returns a tuple of # email and code changes. 
def deprecate_proto(): - grep_output = subprocess.check_output('grep -r "deprecated = true" api/*', shell=True) + grep_output = subprocess.check_output( + 'grep -r "deprecated = true" api/*', shell=True, cwd=envoy_repo.PATH) filenames_and_fields = set() diff --git a/tools/deprecate_features/deprecate_features.sh b/tools/deprecate_features/deprecate_features.sh deleted file mode 100644 index 661b348e0f0d7..0000000000000 --- a/tools/deprecate_features/deprecate_features.sh +++ /dev/null @@ -1,7 +0,0 @@ -#!/bin/bash - -. tools/shell_utils.sh - -set -e - -python_venv deprecate_features diff --git a/tools/deprecate_features/requirements.txt b/tools/deprecate_features/requirements.txt deleted file mode 100644 index 643fcd2d4a395..0000000000000 --- a/tools/deprecate_features/requirements.txt +++ /dev/null @@ -1,3 +0,0 @@ -six==1.16.0 \ - --hash=sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254 \ - --hash=sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926 diff --git a/tools/deprecate_version/BUILD b/tools/deprecate_version/BUILD new file mode 100644 index 0000000000000..abe06de4b7f8f --- /dev/null +++ b/tools/deprecate_version/BUILD @@ -0,0 +1,13 @@ +load("@rules_python//python:defs.bzl", "py_binary") +load("@base_pip3//:requirements.bzl", "requirement") + +licenses(["notice"]) # Apache 2 + +py_binary( + name = "deprecate_version", + srcs = ["deprecate_version.py"], + deps = [ + requirement("gitpython"), + requirement("pygithub"), + ], +) diff --git a/tools/deprecate_version/deprecate_version.py b/tools/deprecate_version/deprecate_version.py index 0e7ad87a82ba3..69e275666f143 100644 --- a/tools/deprecate_version/deprecate_version.py +++ b/tools/deprecate_version/deprecate_version.py @@ -1,14 +1,6 @@ -# Script for automating cleanup PR creation for deprecated runtime features +# Bazel usage # -# sh tools/deprecate_version/deprecate_version.sh -# -# Direct usage (not recommended): -# -# python 
tools/deprecate_version/deprecate_version.py -# -# e.g -# -# python tools/deprecate_version/deprecate_version.py +# bazel run //tools/deprecate_version:deprecate_version # # A GitHub access token must be set in GITHUB_TOKEN. To create one, go to # Settings -> Developer settings -> Personal access tokens in GitHub and create diff --git a/tools/deprecate_version/deprecate_version.sh b/tools/deprecate_version/deprecate_version.sh deleted file mode 100755 index 5421f66565b54..0000000000000 --- a/tools/deprecate_version/deprecate_version.sh +++ /dev/null @@ -1,7 +0,0 @@ -#!/bin/bash - -. tools/shell_utils.sh - -set -e - -python_venv deprecate_version diff --git a/tools/deprecate_version/requirements.txt b/tools/deprecate_version/requirements.txt deleted file mode 100644 index e3ce651eb5f15..0000000000000 --- a/tools/deprecate_version/requirements.txt +++ /dev/null @@ -1,138 +0,0 @@ -# -# This file is autogenerated by pip-compile -# To update, run: -# -# pip-compile --generate-hashes tools/deprecate_version/requirements.txt -# -certifi==2021.5.30 \ - --hash=sha256:50b1e4f8446b06f41be7dd6338db18e0990601dce795c2b1686458aa7e8fa7d8 \ - --hash=sha256:2bbf76fd432960138b3ef6dda3dde0544f27cbf8546c458e60baf371917ba9ee - # via - # -r tools/deprecate_version/requirements.txt - # requests -cffi==1.14.5 \ - --hash=sha256:005a36f41773e148deac64b08f233873a4d0c18b053d37da83f6af4d9087b813 \ - --hash=sha256:0857f0ae312d855239a55c81ef453ee8fd24136eaba8e87a2eceba644c0d4c06 \ - --hash=sha256:1071534bbbf8cbb31b498d5d9db0f274f2f7a865adca4ae429e147ba40f73dea \ - --hash=sha256:158d0d15119b4b7ff6b926536763dc0714313aa59e320ddf787502c70c4d4bee \ - --hash=sha256:1f436816fc868b098b0d63b8920de7d208c90a67212546d02f84fe78a9c26396 \ - --hash=sha256:2894f2df484ff56d717bead0a5c2abb6b9d2bf26d6960c4604d5c48bbc30ee73 \ - --hash=sha256:29314480e958fd8aab22e4a58b355b629c59bf5f2ac2492b61e3dc06d8c7a315 \ - --hash=sha256:34eff4b97f3d982fb93e2831e6750127d1355a923ebaeeb565407b3d2f8d41a1 \ - 
--hash=sha256:35f27e6eb43380fa080dccf676dece30bef72e4a67617ffda586641cd4508d49 \ - --hash=sha256:3d3dd4c9e559eb172ecf00a2a7517e97d1e96de2a5e610bd9b68cea3925b4892 \ - --hash=sha256:43e0b9d9e2c9e5d152946b9c5fe062c151614b262fda2e7b201204de0b99e482 \ - --hash=sha256:48e1c69bbacfc3d932221851b39d49e81567a4d4aac3b21258d9c24578280058 \ - --hash=sha256:51182f8927c5af975fece87b1b369f722c570fe169f9880764b1ee3bca8347b5 \ - --hash=sha256:58e3f59d583d413809d60779492342801d6e82fefb89c86a38e040c16883be53 \ - --hash=sha256:5de7970188bb46b7bf9858eb6890aad302577a5f6f75091fd7cdd3ef13ef3045 \ - --hash=sha256:65fa59693c62cf06e45ddbb822165394a288edce9e276647f0046e1ec26920f3 \ - --hash=sha256:69e395c24fc60aad6bb4fa7e583698ea6cc684648e1ffb7fe85e3c1ca131a7d5 \ - --hash=sha256:6c97d7350133666fbb5cf4abdc1178c812cb205dc6f41d174a7b0f18fb93337e \ - --hash=sha256:6e4714cc64f474e4d6e37cfff31a814b509a35cb17de4fb1999907575684479c \ - --hash=sha256:72d8d3ef52c208ee1c7b2e341f7d71c6fd3157138abf1a95166e6165dd5d4369 \ - --hash=sha256:8ae6299f6c68de06f136f1f9e69458eae58f1dacf10af5c17353eae03aa0d827 \ - --hash=sha256:8b198cec6c72df5289c05b05b8b0969819783f9418e0409865dac47288d2a053 \ - --hash=sha256:99cd03ae7988a93dd00bcd9d0b75e1f6c426063d6f03d2f90b89e29b25b82dfa \ - --hash=sha256:9cf8022fb8d07a97c178b02327b284521c7708d7c71a9c9c355c178ac4bbd3d4 \ - --hash=sha256:9de2e279153a443c656f2defd67769e6d1e4163952b3c622dcea5b08a6405322 \ - --hash=sha256:9e93e79c2551ff263400e1e4be085a1210e12073a31c2011dbbda14bda0c6132 \ - --hash=sha256:9ff227395193126d82e60319a673a037d5de84633f11279e336f9c0f189ecc62 \ - --hash=sha256:a465da611f6fa124963b91bf432d960a555563efe4ed1cc403ba5077b15370aa \ - --hash=sha256:ad17025d226ee5beec591b52800c11680fca3df50b8b29fe51d882576e039ee0 \ - --hash=sha256:afb29c1ba2e5a3736f1c301d9d0abe3ec8b86957d04ddfa9d7a6a42b9367e396 \ - --hash=sha256:b85eb46a81787c50650f2392b9b4ef23e1f126313b9e0e9013b35c15e4288e2e \ - --hash=sha256:bb89f306e5da99f4d922728ddcd6f7fcebb3241fc40edebcb7284d7514741991 \ - 
--hash=sha256:cbde590d4faaa07c72bf979734738f328d239913ba3e043b1e98fe9a39f8b2b6 \ - --hash=sha256:cd2868886d547469123fadc46eac7ea5253ea7fcb139f12e1dfc2bbd406427d1 \ - --hash=sha256:d42b11d692e11b6634f7613ad8df5d6d5f8875f5d48939520d351007b3c13406 \ - --hash=sha256:f2d45f97ab6bb54753eab54fffe75aaf3de4ff2341c9daee1987ee1837636f1d \ - --hash=sha256:fd78e5fee591709f32ef6edb9a015b4aa1a5022598e36227500c8f4e02328d9c - # via pynacl -chardet==4.0.0 \ - --hash=sha256:0d6f53a15db4120f2b08c94f11e7d93d2c911ee118b6b30a04ec3ee8310179fa \ - --hash=sha256:f864054d66fd9118f2e67044ac8981a54775ec5b67aed0441892edb553d21da5 - # via - # -r tools/deprecate_version/requirements.txt - # requests -deprecated==1.2.13 \ - --hash=sha256:64756e3e14c8c5eea9795d93c524551432a0be75629f8f29e67ab8caf076c76d \ - --hash=sha256:43ac5335da90c31c24ba028af536a91d41d53f9e6901ddb021bcc572ce44e38d - # via - # -r tools/deprecate_version/requirements.txt - # pygithub -gitdb==4.0.7 \ - --hash=sha256:6c4cc71933456991da20917998acbe6cf4fb41eeaab7d6d67fbc05ecd4c865b0 \ - --hash=sha256:96bf5c08b157a666fec41129e6d327235284cca4c81e92109260f353ba138005 - # via - # -r tools/deprecate_version/requirements.txt - # gitpython -gitpython==3.1.18 \ - --hash=sha256:fce760879cd2aebd2991b3542876dc5c4a909b30c9d69dfc488e504a8db37ee8 \ - --hash=sha256:b838a895977b45ab6f0cc926a9045c8d1c44e2b653c1fcc39fe91f42c6e8f05b - # via -r tools/deprecate_version/requirements.txt -idna==2.10 \ - --hash=sha256:b307872f855b18632ce0c21c5e45be78c0ea7ae4c15c828c20788b26921eb3f6 \ - --hash=sha256:b97d804b1e9b523befed77c48dacec60e6dcb0b5391d57af6a65a312a90648c0 - # via - # -r tools/deprecate_version/requirements.txt - # requests -pycparser==2.20 \ - --hash=sha256:2d475327684562c3a96cc71adf7dc8c4f0565175cf86b6d7a404ff4c771f15f0 \ - --hash=sha256:7582ad22678f0fcd81102833f60ef8d0e57288b6b5fb00323d101be910e35705 - # via cffi -pygithub==1.55 \ - --hash=sha256:1bbfff9372047ff3f21d5cd8e07720f3dbfdaf6462fcaed9d815f528f1ba7283 \ - 
--hash=sha256:2caf0054ea079b71e539741ae56c5a95e073b81fa472ce222e81667381b9601b - # via -r tools/deprecate_version/requirements.txt -pyjwt==2.1.0 \ - --hash=sha256:934d73fbba91b0483d3857d1aff50e96b2a892384ee2c17417ed3203f173fca1 \ - --hash=sha256:fba44e7898bbca160a2b2b501f492824fc8382485d3a6f11ba5d0c1937ce6130 - # via pygithub -pynacl==1.4.0 \ - --hash=sha256:06cbb4d9b2c4bd3c8dc0d267416aaed79906e7b33f114ddbf0911969794b1cc4 \ - --hash=sha256:11335f09060af52c97137d4ac54285bcb7df0cef29014a1a4efe64ac065434c4 \ - --hash=sha256:2fe0fc5a2480361dcaf4e6e7cea00e078fcda07ba45f811b167e3f99e8cff574 \ - --hash=sha256:30f9b96db44e09b3304f9ea95079b1b7316b2b4f3744fe3aaecccd95d547063d \ - --hash=sha256:4e10569f8cbed81cb7526ae137049759d2a8d57726d52c1a000a3ce366779634 \ - --hash=sha256:511d269ee845037b95c9781aa702f90ccc36036f95d0f31373a6a79bd8242e25 \ - --hash=sha256:537a7ccbea22905a0ab36ea58577b39d1fa9b1884869d173b5cf111f006f689f \ - --hash=sha256:54e9a2c849c742006516ad56a88f5c74bf2ce92c9f67435187c3c5953b346505 \ - --hash=sha256:757250ddb3bff1eecd7e41e65f7f833a8405fede0194319f87899690624f2122 \ - --hash=sha256:7757ae33dae81c300487591c68790dfb5145c7d03324000433d9a2c141f82af7 \ - --hash=sha256:7c6092102219f59ff29788860ccb021e80fffd953920c4a8653889c029b2d420 \ - --hash=sha256:8122ba5f2a2169ca5da936b2e5a511740ffb73979381b4229d9188f6dcb22f1f \ - --hash=sha256:9c4a7ea4fb81536c1b1f5cc44d54a296f96ae78c1ebd2311bd0b60be45a48d96 \ - --hash=sha256:c914f78da4953b33d4685e3cdc7ce63401247a21425c16a39760e282075ac4a6 \ - --hash=sha256:cd401ccbc2a249a47a3a1724c2918fcd04be1f7b54eb2a5a71ff915db0ac51c6 \ - --hash=sha256:d452a6746f0a7e11121e64625109bc4468fc3100452817001dbe018bb8b08514 \ - --hash=sha256:ea6841bc3a76fa4942ce00f3bda7d436fda21e2d91602b9e21b7ca9ecab8f3ff \ - --hash=sha256:f8851ab9041756003119368c1e6cd0b9c631f46d686b3904b18c0139f4419f80 - # via pygithub -requests==2.25.1 \ - --hash=sha256:27973dd4a904a4f13b263a19c866c13b92a39ed1c964655f025f3f8d3d75b804 \ - 
--hash=sha256:c210084e36a42ae6b9219e00e48287def368a26d03a048ddad7bfee44f75871e - # via - # -r tools/deprecate_version/requirements.txt - # pygithub -six==1.16.0 \ - --hash=sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254 \ - --hash=sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926 - # via pynacl -smmap==4.0.0 \ - --hash=sha256:7e65386bd122d45405ddf795637b7f7d2b532e7e401d46bbe3fb49b9986d5182 \ - --hash=sha256:a9a7479e4c572e2e775c404dcd3080c8dc49f39918c2cf74913d30c4c478e3c2 - # via - # -r tools/deprecate_version/requirements.txt - # gitdb -urllib3==1.26.6 \ - --hash=sha256:39fb8672126159acb139a7718dd10806104dec1e2f0f6c88aab05d17df10c8d4 \ - --hash=sha256:f57b4c16c62fa2760b7e3d97c35b255512fb6b59a259730f36ba32ce9f8e342f - # via - # -r tools/deprecate_version/requirements.txt - # requests -wrapt==1.12.1 \ - --hash=sha256:b62ffa81fb85f4332a4f609cab4ac40709470da05643a082ec1eb88e6d9b97d7 - # via - # -r tools/deprecate_version/requirements.txt - # deprecated From 2635e546751d3812be8b5203a59bfa99c43e118e Mon Sep 17 00:00:00 2001 From: alyssawilk Date: Fri, 17 Sep 2021 10:16:53 +0800 Subject: [PATCH 059/121] factories: renaming to mainThreadDispatcher (#18122) factories: renaming to mainThreadDispatcher Also using the new factory API in one more place which I can back out if you prefer. 
Risk Level: Low Testing: n/a Docs Changes: n/a Release Notes: n/a Signed-off-by: Alyssa Wilk Signed-off-by: gayang --- .../filters/network/source/config.cc | 4 +- .../network/test/active_message_test.cc | 2 +- .../filters/network/test/conn_manager_test.cc | 6 +-- .../filters/network/test/router_test.cc | 2 +- contrib/sxg/filters/http/test/filter_test.cc | 2 +- envoy/server/factory_context.h | 2 +- envoy/server/health_checker_config.h | 2 +- envoy/server/resource_monitor_config.h | 2 +- envoy/server/transport_socket_config.h | 2 +- source/common/router/config_impl.cc | 2 +- source/common/router/rds_impl.cc | 8 +-- source/common/router/scoped_rds.cc | 4 +- source/common/secret/sds_api.h | 20 +++++--- source/common/upstream/BUILD | 1 + .../common/upstream/cluster_factory_impl.cc | 8 +-- source/common/upstream/cluster_factory_impl.h | 2 +- .../common/upstream/cluster_manager_impl.cc | 44 +++++++++-------- source/common/upstream/cluster_manager_impl.h | 17 +++---- source/common/upstream/eds.cc | 4 +- source/common/upstream/health_checker_impl.cc | 2 +- source/common/upstream/logical_dns_cluster.cc | 4 +- .../common/upstream/original_dst_cluster.cc | 4 +- source/common/upstream/static_cluster.cc | 4 +- source/common/upstream/strict_dns_cluster.cc | 6 +-- source/common/upstream/upstream_impl.cc | 8 +-- .../extensions/access_loggers/wasm/config.cc | 4 +- source/extensions/bootstrap/wasm/config.cc | 8 +-- .../extensions/clusters/aggregate/cluster.cc | 2 +- .../clusters/dynamic_forward_proxy/cluster.cc | 3 +- .../clusters/redis/redis_cluster.cc | 7 +-- .../dynamic_forward_proxy/dns_cache_impl.cc | 2 +- .../http/adaptive_concurrency/config.cc | 2 +- .../http/alternate_protocols_cache/config.cc | 8 +-- .../filters/http/aws_lambda/config.cc | 3 +- .../http/aws_request_signing/config.cc | 2 +- .../extensions/filters/http/dynamo/config.cc | 2 +- .../filters/http/health_check/config.cc | 2 +- .../filters/http/jwt_authn/filter_config.cc | 2 +- 
.../http/jwt_authn/jwks_async_fetcher.cc | 3 +- .../filters/http/local_ratelimit/config.cc | 5 +- source/extensions/filters/http/lua/config.cc | 2 +- .../extensions/filters/http/lua/lua_filter.cc | 2 +- source/extensions/filters/http/tap/config.cc | 7 +-- .../filters/http/wasm/wasm_filter.cc | 4 +- .../filters/network/client_ssl_auth/config.cc | 2 +- .../filters/network/dubbo_proxy/config.cc | 5 +- .../network/http_connection_manager/config.cc | 8 +-- .../filters/network/local_ratelimit/config.cc | 2 +- .../filters/network/mongo_proxy/config.cc | 5 +- .../filters/network/redis_proxy/config.cc | 2 +- .../filters/network/thrift_proxy/config.cc | 5 +- .../network/thrift_proxy/router/config.cc | 2 +- .../filters/network/wasm/wasm_filter.cc | 4 +- .../filters/network/zookeeper_proxy/config.cc | 2 +- .../health_checkers/redis/config.cc | 4 +- .../injected_resource_monitor.cc | 2 +- source/extensions/stat_sinks/wasm/config.cc | 8 +-- .../transport_sockets/tap/config.cc | 14 +++--- source/server/BUILD | 8 +++ source/server/api_listener_impl.h | 2 +- .../config_validation/cluster_manager.cc | 5 +- source/server/factory_context_base_impl.h | 49 +++++++++++++++++++ source/server/filter_chain_manager_impl.cc | 6 +-- source/server/filter_chain_manager_impl.h | 4 +- source/server/listener_impl.cc | 8 +-- source/server/listener_impl.h | 4 +- source/server/resource_monitor_config_impl.h | 2 +- source/server/server.h | 2 +- source/server/transport_socket_config_impl.h | 2 +- .../common/secret/secret_manager_impl_test.cc | 18 +++---- .../http/jwt_authn/filter_config_test.cc | 3 +- .../filters/http/oauth2/filter_test.cc | 2 +- .../filters/http/wasm/config_test.cc | 2 +- .../network/common/fuzz/uber_readfilter.cc | 2 +- .../network/common/fuzz/uber_writefilter.cc | 2 +- .../filters/network/common/fuzz/utils/fakes.h | 2 +- .../network/dubbo_proxy/conn_manager_test.cc | 2 +- .../config_test_base.h | 2 +- .../filters/network/wasm/config_test.cc | 2 +- 
.../injected_resource_monitor_test.cc | 2 +- .../tls/context_impl_test.cc | 10 ++-- .../transport_sockets/tls/ssl_socket_test.cc | 4 +- .../clusters/custom_static_cluster.h | 2 +- test/integration/fake_resource_monitor.cc | 2 +- test/mocks/server/factory_context.cc | 2 +- test/mocks/server/factory_context.h | 2 +- .../server/health_checker_factory_context.cc | 2 +- .../server/health_checker_factory_context.h | 2 +- test/mocks/server/instance.cc | 2 +- test/mocks/server/instance.h | 2 +- test/mocks/server/listener_factory_context.cc | 2 +- test/mocks/server/listener_factory_context.h | 2 +- .../server/transport_socket_factory_context.h | 2 +- test/server/overload_manager_impl_test.cc | 2 +- test/tools/router_check/router.cc | 6 +-- 95 files changed, 277 insertions(+), 195 deletions(-) create mode 100644 source/server/factory_context_base_impl.h diff --git a/contrib/rocketmq_proxy/filters/network/source/config.cc b/contrib/rocketmq_proxy/filters/network/source/config.cc index dbb63f91a292a..25d630d6d1239 100644 --- a/contrib/rocketmq_proxy/filters/network/source/config.cc +++ b/contrib/rocketmq_proxy/filters/network/source/config.cc @@ -23,8 +23,8 @@ Network::FilterFactoryCb RocketmqProxyFilterConfigFactory::createFilterFactoryFr Server::Configuration::FactoryContext& context) { std::shared_ptr filter_config = std::make_shared(proto_config, context); return [filter_config, &context](Network::FilterManager& filter_manager) -> void { - filter_manager.addReadFilter( - std::make_shared(*filter_config, context.dispatcher().timeSource())); + filter_manager.addReadFilter(std::make_shared( + *filter_config, context.mainThreadDispatcher().timeSource())); }; } diff --git a/contrib/rocketmq_proxy/filters/network/test/active_message_test.cc b/contrib/rocketmq_proxy/filters/network/test/active_message_test.cc index 8b98a7be887b6..8ac6645efc7b2 100644 --- a/contrib/rocketmq_proxy/filters/network/test/active_message_test.cc +++ 
b/contrib/rocketmq_proxy/filters/network/test/active_message_test.cc @@ -24,7 +24,7 @@ class ActiveMessageTest : public testing::Test { ActiveMessageTest() : stats_(RocketmqFilterStats::generateStats("test.", store_)), config_(rocketmq_proxy_config_, factory_context_), - connection_manager_(config_, factory_context_.dispatcher().timeSource()) { + connection_manager_(config_, factory_context_.mainThreadDispatcher().timeSource()) { connection_manager_.initializeReadFilterCallbacks(filter_callbacks_); } diff --git a/contrib/rocketmq_proxy/filters/network/test/conn_manager_test.cc b/contrib/rocketmq_proxy/filters/network/test/conn_manager_test.cc index 30b783c4a0d98..e2c7d835c745c 100644 --- a/contrib/rocketmq_proxy/filters/network/test/conn_manager_test.cc +++ b/contrib/rocketmq_proxy/filters/network/test/conn_manager_test.cc @@ -54,11 +54,11 @@ class RocketmqConnectionManagerTest : public Event::TestUsingSimulatedTime, publ TestUtility::validate(proto_config_); } config_ = std::make_unique(proto_config_, factory_context_, stats_); - conn_manager_ = - std::make_unique(*config_, factory_context_.dispatcher().timeSource()); + conn_manager_ = std::make_unique( + *config_, factory_context_.mainThreadDispatcher().timeSource()); conn_manager_->initializeReadFilterCallbacks(filter_callbacks_); conn_manager_->onNewConnection(); - current_ = factory_context_.dispatcher().timeSource().monotonicTime(); + current_ = factory_context_.mainThreadDispatcher().timeSource().monotonicTime(); } void initializeCluster() { diff --git a/contrib/rocketmq_proxy/filters/network/test/router_test.cc b/contrib/rocketmq_proxy/filters/network/test/router_test.cc index 3cee446599e4e..681439d9212ae 100644 --- a/contrib/rocketmq_proxy/filters/network/test/router_test.cc +++ b/contrib/rocketmq_proxy/filters/network/test/router_test.cc @@ -25,7 +25,7 @@ class RocketmqRouterTestBase { cluster_info_(std::make_shared()) { context_.cluster_manager_.initializeThreadLocalClusters({"fake_cluster"}); 
conn_manager_ = - std::make_unique(config_, context_.dispatcher().timeSource()); + std::make_unique(config_, context_.mainThreadDispatcher().timeSource()); conn_manager_->initializeReadFilterCallbacks(filter_callbacks_); } diff --git a/contrib/sxg/filters/http/test/filter_test.cc b/contrib/sxg/filters/http/test/filter_test.cc index 0ec8fedcdd5fe..7853ac0590b9b 100644 --- a/contrib/sxg/filters/http/test/filter_test.cc +++ b/contrib/sxg/filters/http/test/filter_test.cc @@ -365,7 +365,7 @@ TEST_F(FilterTest, SdsDynamicGenericSecret) { NiceMock dispatcher; EXPECT_CALL(secret_context, localInfo()).WillRepeatedly(ReturnRef(local_info)); EXPECT_CALL(secret_context, api()).WillRepeatedly(ReturnRef(*api)); - EXPECT_CALL(secret_context, dispatcher()).WillRepeatedly(ReturnRef(dispatcher)); + EXPECT_CALL(secret_context, mainThreadDispatcher()).WillRepeatedly(ReturnRef(dispatcher)); EXPECT_CALL(secret_context, stats()).WillRepeatedly(ReturnRef(stats)); EXPECT_CALL(secret_context, initManager()).WillRepeatedly(ReturnRef(init_manager)); EXPECT_CALL(init_manager, add(_)) diff --git a/envoy/server/factory_context.h b/envoy/server/factory_context.h index 3624f32fe95b6..7a05d09b6ac20 100644 --- a/envoy/server/factory_context.h +++ b/envoy/server/factory_context.h @@ -52,7 +52,7 @@ class FactoryContextBase { * @return Event::Dispatcher& the main thread's dispatcher. This dispatcher should be used * for all singleton processing. */ - virtual Event::Dispatcher& dispatcher() PURE; + virtual Event::Dispatcher& mainThreadDispatcher() PURE; /** * @return Api::Api& a reference to the api object. diff --git a/envoy/server/health_checker_config.h b/envoy/server/health_checker_config.h index 00584f5176517..82f27123db8c3 100644 --- a/envoy/server/health_checker_config.h +++ b/envoy/server/health_checker_config.h @@ -28,7 +28,7 @@ class HealthCheckerFactoryContext { * @return Event::Dispatcher& the main thread's dispatcher. This dispatcher should be used * for all singleton processing. 
*/ - virtual Event::Dispatcher& dispatcher() PURE; + virtual Event::Dispatcher& mainThreadDispatcher() PURE; /* * @return Upstream::HealthCheckEventLoggerPtr the health check event logger for the diff --git a/envoy/server/resource_monitor_config.h b/envoy/server/resource_monitor_config.h index 18e211e3801f1..9f680f44f8f2a 100644 --- a/envoy/server/resource_monitor_config.h +++ b/envoy/server/resource_monitor_config.h @@ -22,7 +22,7 @@ class ResourceMonitorFactoryContext { * @return Event::Dispatcher& the main thread's dispatcher. This dispatcher should be used * for all singleton processing. */ - virtual Event::Dispatcher& dispatcher() PURE; + virtual Event::Dispatcher& mainThreadDispatcher() PURE; /** * @return Server::Options& the command-line options that Envoy was started with. diff --git a/envoy/server/transport_socket_config.h b/envoy/server/transport_socket_config.h index 38308a9f1a642..703116ef250cc 100644 --- a/envoy/server/transport_socket_config.h +++ b/envoy/server/transport_socket_config.h @@ -62,7 +62,7 @@ class TransportSocketFactoryContext { /** * @return Event::Dispatcher& the main thread's dispatcher. */ - virtual Event::Dispatcher& dispatcher() PURE; + virtual Event::Dispatcher& mainThreadDispatcher() PURE; /** * @return Server::Options& the command-line options that Envoy was started with. 
diff --git a/source/common/router/config_impl.cc b/source/common/router/config_impl.cc index adf6b70edeaba..e0f9f4245521f 100644 --- a/source/common/router/config_impl.cc +++ b/source/common/router/config_impl.cc @@ -389,7 +389,7 @@ RouteEntryImplBase::RouteEntryImplBase(const VirtualHostImpl& vhost, vhost_.globalRouteConfig().maxDirectResponseBodySizeBytes())), per_filter_configs_(route.typed_per_filter_config(), optional_http_filters, factory_context, validator), - route_name_(route.name()), time_source_(factory_context.dispatcher().timeSource()) { + route_name_(route.name()), time_source_(factory_context.mainThreadDispatcher().timeSource()) { if (route.route().has_metadata_match()) { const auto filter_it = route.route().metadata_match().filter_metadata().find( Envoy::Config::MetadataFilters::get().ENVOY_LB); diff --git a/source/common/router/rds_impl.cc b/source/common/router/rds_impl.cc index 049e0f752fded..62f47d9221a11 100644 --- a/source/common/router/rds_impl.cc +++ b/source/common/router/rds_impl.cc @@ -309,9 +309,11 @@ void RdsRouteConfigProviderImpl::requestVirtualHostsUpdate( // execute the callback. still_alive shared_ptr will be deallocated when the current instance of // the RdsRouteConfigProviderImpl is deallocated; we rely on a weak_ptr to still_alive flag to // determine if the RdsRouteConfigProviderImpl instance is still valid. 
- factory_context_.dispatcher().post([this, maybe_still_alive = std::weak_ptr(still_alive_), - alias, &thread_local_dispatcher, - route_config_updated_cb]() -> void { + factory_context_.mainThreadDispatcher().post([this, + maybe_still_alive = + std::weak_ptr(still_alive_), + alias, &thread_local_dispatcher, + route_config_updated_cb]() -> void { if (maybe_still_alive.lock()) { subscription_->updateOnDemand(alias); config_update_callbacks_.push_back({alias, thread_local_dispatcher, route_config_updated_cb}); diff --git a/source/common/router/scoped_rds.cc b/source/common/router/scoped_rds.cc index eac4557b0ceb8..bbf3bec0e716c 100644 --- a/source/common/router/scoped_rds.cc +++ b/source/common/router/scoped_rds.cc @@ -487,8 +487,8 @@ void ScopedRdsConfigSubscription::onDemandRdsUpdate( std::shared_ptr scope_key, Event::Dispatcher& thread_local_dispatcher, Http::RouteConfigUpdatedCallback&& route_config_updated_cb, std::weak_ptr weak_subscription) { - factory_context_.dispatcher().post([this, &thread_local_dispatcher, scope_key, - route_config_updated_cb, weak_subscription]() { + factory_context_.mainThreadDispatcher().post([this, &thread_local_dispatcher, scope_key, + route_config_updated_cb, weak_subscription]() { // If the subscription has been destroyed, return immediately. 
if (!weak_subscription.lock()) { thread_local_dispatcher.post([route_config_updated_cb] { route_config_updated_cb(false); }); diff --git a/source/common/secret/sds_api.h b/source/common/secret/sds_api.h index f2ed3301ab61a..110e5acce70b6 100644 --- a/source/common/secret/sds_api.h +++ b/source/common/secret/sds_api.h @@ -140,9 +140,10 @@ class TlsCertificateSdsApi : public SdsApi, public TlsCertificateConfigProvider Config::Utility::checkLocalInfo("TlsCertificateSdsApi", secret_provider_context.localInfo()); return std::make_shared( sds_config, sds_config_name, secret_provider_context.clusterManager().subscriptionFactory(), - secret_provider_context.dispatcher().timeSource(), + secret_provider_context.mainThreadDispatcher().timeSource(), secret_provider_context.messageValidationVisitor(), secret_provider_context.stats(), - destructor_cb, secret_provider_context.dispatcher(), secret_provider_context.api()); + destructor_cb, secret_provider_context.mainThreadDispatcher(), + secret_provider_context.api()); } TlsCertificateSdsApi(const envoy::config::core::v3::ConfigSource& sds_config, @@ -226,9 +227,10 @@ class CertificateValidationContextSdsApi : public SdsApi, secret_provider_context.localInfo()); return std::make_shared( sds_config, sds_config_name, secret_provider_context.clusterManager().subscriptionFactory(), - secret_provider_context.dispatcher().timeSource(), + secret_provider_context.mainThreadDispatcher().timeSource(), secret_provider_context.messageValidationVisitor(), secret_provider_context.stats(), - destructor_cb, secret_provider_context.dispatcher(), secret_provider_context.api()); + destructor_cb, secret_provider_context.mainThreadDispatcher(), + secret_provider_context.api()); } CertificateValidationContextSdsApi(const envoy::config::core::v3::ConfigSource& sds_config, const std::string& sds_config_name, @@ -320,9 +322,10 @@ class TlsSessionTicketKeysSdsApi : public SdsApi, public TlsSessionTicketKeysCon secret_provider_context.localInfo()); return 
std::make_shared( sds_config, sds_config_name, secret_provider_context.clusterManager().subscriptionFactory(), - secret_provider_context.dispatcher().timeSource(), + secret_provider_context.mainThreadDispatcher().timeSource(), secret_provider_context.messageValidationVisitor(), secret_provider_context.stats(), - destructor_cb, secret_provider_context.dispatcher(), secret_provider_context.api()); + destructor_cb, secret_provider_context.mainThreadDispatcher(), + secret_provider_context.api()); } TlsSessionTicketKeysSdsApi(const envoy::config::core::v3::ConfigSource& sds_config, @@ -392,9 +395,10 @@ class GenericSecretSdsApi : public SdsApi, public GenericSecretConfigProvider { Config::Utility::checkLocalInfo("GenericSecretSdsApi", secret_provider_context.localInfo()); return std::make_shared( sds_config, sds_config_name, secret_provider_context.clusterManager().subscriptionFactory(), - secret_provider_context.dispatcher().timeSource(), + secret_provider_context.mainThreadDispatcher().timeSource(), secret_provider_context.messageValidationVisitor(), secret_provider_context.stats(), - destructor_cb, secret_provider_context.dispatcher(), secret_provider_context.api()); + destructor_cb, secret_provider_context.mainThreadDispatcher(), + secret_provider_context.api()); } GenericSecretSdsApi(const envoy::config::core::v3::ConfigSource& sds_config, diff --git a/source/common/upstream/BUILD b/source/common/upstream/BUILD index eeed927f16837..aca75b4cf2b61 100644 --- a/source/common/upstream/BUILD +++ b/source/common/upstream/BUILD @@ -87,6 +87,7 @@ envoy_cc_library( "//source/common/upstream:priority_conn_pool_map_impl_lib", "//source/common/upstream:upstream_lib", "//source/common/quic:quic_stat_names_lib", + "//source/server:factory_context_base_impl_lib", "@envoy_api//envoy/admin/v3:pkg_cc_proto", "@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto", "@envoy_api//envoy/config/cluster/v3:pkg_cc_proto", diff --git a/source/common/upstream/cluster_factory_impl.cc 
b/source/common/upstream/cluster_factory_impl.cc index 7425d9e546fb6..f3f2f2f06e77f 100644 --- a/source/common/upstream/cluster_factory_impl.cc +++ b/source/common/upstream/cluster_factory_impl.cc @@ -113,7 +113,7 @@ ClusterFactoryImplBase::selectDnsResolver(const envoy::config::cluster::v3::Clus resolvers.push_back(Network::Address::resolveProtoAddress(resolver_addr)); } } - return context.dispatcher().createDnsResolver(resolvers, dns_resolver_options); + return context.mainThreadDispatcher().createDnsResolver(resolvers, dns_resolver_options); } return context.dnsResolver(); @@ -127,7 +127,7 @@ ClusterFactoryImplBase::create(const envoy::config::cluster::v3::Cluster& cluste transport_factory_context = std::make_unique( context.admin(), context.sslContextManager(), *stats_scope, context.clusterManager(), - context.localInfo(), context.dispatcher(), context.stats(), + context.localInfo(), context.mainThreadDispatcher(), context.stats(), context.singletonManager(), context.threadLocal(), context.messageValidationVisitor(), context.api(), context.options()); @@ -141,13 +141,13 @@ ClusterFactoryImplBase::create(const envoy::config::cluster::v3::Cluster& cluste } else { new_cluster_pair.first->setHealthChecker(HealthCheckerFactory::create( cluster.health_checks()[0], *new_cluster_pair.first, context.runtime(), - context.dispatcher(), context.logManager(), context.messageValidationVisitor(), + context.mainThreadDispatcher(), context.logManager(), context.messageValidationVisitor(), context.api())); } } new_cluster_pair.first->setOutlierDetector(Outlier::DetectorImplFactory::createForCluster( - *new_cluster_pair.first, cluster, context.dispatcher(), context.runtime(), + *new_cluster_pair.first, cluster, context.mainThreadDispatcher(), context.runtime(), context.outlierEventLogger())); new_cluster_pair.first->setTransportFactoryContext(std::move(transport_factory_context)); diff --git a/source/common/upstream/cluster_factory_impl.h 
b/source/common/upstream/cluster_factory_impl.h index 1d7f3a6bc94cb..1ff81b8503285 100644 --- a/source/common/upstream/cluster_factory_impl.h +++ b/source/common/upstream/cluster_factory_impl.h @@ -74,7 +74,7 @@ class ClusterFactoryContextImpl : public ClusterFactoryContext { Network::DnsResolverSharedPtr dnsResolver() override { return dns_resolver_; } Ssl::ContextManager& sslContextManager() override { return ssl_context_manager_; } Runtime::Loader& runtime() override { return runtime_; } - Event::Dispatcher& dispatcher() override { return dispatcher_; } + Event::Dispatcher& mainThreadDispatcher() override { return dispatcher_; } AccessLog::AccessLogManager& logManager() override { return log_manager_; } const LocalInfo::LocalInfo& localInfo() const override { return local_info_; } const Server::Options& options() override { return options_; } diff --git a/source/common/upstream/cluster_manager_impl.cc b/source/common/upstream/cluster_manager_impl.cc index d8d7507fd7948..e41451f8658d9 100644 --- a/source/common/upstream/cluster_manager_impl.cc +++ b/source/common/upstream/cluster_manager_impl.cc @@ -1643,8 +1643,9 @@ void ClusterManagerImpl::ThreadLocalClusterManagerImpl::tcpConnPoolIsIdle( ClusterManagerPtr ProdClusterManagerFactory::clusterManagerFromProto( const envoy::config::bootstrap::v3::Bootstrap& bootstrap) { return ClusterManagerPtr{new ClusterManagerImpl( - bootstrap, *this, stats_, tls_, runtime_, local_info_, log_manager_, main_thread_dispatcher_, - admin_, validation_context_, api_, http_context_, grpc_context_, router_context_)}; + bootstrap, *this, stats_, tls_, context_.runtime(), context_.localInfo(), log_manager_, + context_.mainThreadDispatcher(), context_.admin(), validation_context_, context_.api(), + http_context_, grpc_context_, router_context_)}; } Http::ConnectionPool::InstancePtr ProdClusterManagerFactory::allocateConnPool( @@ -1655,7 +1656,8 @@ Http::ConnectionPool::InstancePtr ProdClusterManagerFactory::allocateConnPool( const 
Network::ConnectionSocket::OptionsSharedPtr& options, const Network::TransportSocketOptionsConstSharedPtr& transport_socket_options, TimeSource& source, ClusterConnectivityState& state) { - if (protocols.size() == 3 && runtime_.snapshot().featureEnabled("upstream.use_http3", 100)) { + if (protocols.size() == 3 && + context_.runtime().snapshot().featureEnabled("upstream.use_http3", 100)) { ASSERT(contains(protocols, {Http::Protocol::Http11, Http::Protocol::Http2, Http::Protocol::Http3})); Http::AlternateProtocolsCacheSharedPtr alternate_protocols_cache; @@ -1667,9 +1669,9 @@ Http::ConnectionPool::InstancePtr ProdClusterManagerFactory::allocateConnPool( // TODO(RyanTheOptimist): Plumb an actual alternate protocols cache. Envoy::Http::ConnectivityGrid::ConnectivityOptions coptions{protocols}; return std::make_unique( - dispatcher, api_.randomGenerator(), host, priority, options, transport_socket_options, - state, source, alternate_protocols_cache, std::chrono::milliseconds(300), coptions, - quic_stat_names_, stats_); + dispatcher, context_.api().randomGenerator(), host, priority, options, + transport_socket_options, state, source, alternate_protocols_cache, + std::chrono::milliseconds(300), coptions, quic_stat_names_, stats_); #else // Should be blocked by configuration checking at an earlier point. 
NOT_REACHED_GCOVR_EXCL_LINE; @@ -1677,20 +1679,20 @@ Http::ConnectionPool::InstancePtr ProdClusterManagerFactory::allocateConnPool( } if (protocols.size() >= 2) { ASSERT(contains(protocols, {Http::Protocol::Http11, Http::Protocol::Http2})); - return std::make_unique(dispatcher, api_.randomGenerator(), host, - priority, options, - transport_socket_options, state); + return std::make_unique( + dispatcher, context_.api().randomGenerator(), host, priority, options, + transport_socket_options, state); } if (protocols.size() == 1 && protocols[0] == Http::Protocol::Http2 && - runtime_.snapshot().featureEnabled("upstream.use_http2", 100)) { - return Http::Http2::allocateConnPool(dispatcher, api_.randomGenerator(), host, priority, - options, transport_socket_options, state); + context_.runtime().snapshot().featureEnabled("upstream.use_http2", 100)) { + return Http::Http2::allocateConnPool(dispatcher, context_.api().randomGenerator(), host, + priority, options, transport_socket_options, state); } if (protocols.size() == 1 && protocols[0] == Http::Protocol::Http3 && - runtime_.snapshot().featureEnabled("upstream.use_http3", 100)) { + context_.runtime().snapshot().featureEnabled("upstream.use_http3", 100)) { #ifdef ENVOY_ENABLE_QUIC - return Http::Http3::allocateConnPool(dispatcher, api_.randomGenerator(), host, priority, - options, transport_socket_options, state, source, + return Http::Http3::allocateConnPool(dispatcher, context_.api().randomGenerator(), host, + priority, options, transport_socket_options, state, source, quic_stat_names_, stats_); #else UNREFERENCED_PARAMETER(source); @@ -1699,8 +1701,8 @@ Http::ConnectionPool::InstancePtr ProdClusterManagerFactory::allocateConnPool( #endif } ASSERT(protocols.size() == 1 && protocols[0] == Http::Protocol::Http11); - return Http::Http1::allocateConnPool(dispatcher, api_.randomGenerator(), host, priority, options, - transport_socket_options, state); + return Http::Http1::allocateConnPool(dispatcher, 
context_.api().randomGenerator(), host, priority, + options, transport_socket_options, state); } Tcp::ConnectionPool::InstancePtr ProdClusterManagerFactory::allocateTcpConnPool( @@ -1722,12 +1724,12 @@ std::pair ProdClusterManagerFactor const envoy::config::cluster::v3::Cluster& cluster, ClusterManager& cm, Outlier::EventLoggerSharedPtr outlier_event_logger, bool added_via_api) { return ClusterFactoryImplBase::create( - cluster, cm, stats_, tls_, dns_resolver_, ssl_context_manager_, runtime_, - main_thread_dispatcher_, log_manager_, local_info_, admin_, singleton_manager_, - outlier_event_logger, added_via_api, + cluster, cm, stats_, tls_, dns_resolver_, ssl_context_manager_, context_.runtime(), + context_.mainThreadDispatcher(), log_manager_, context_.localInfo(), admin_, + singleton_manager_, outlier_event_logger, added_via_api, added_via_api ? validation_context_.dynamicValidationVisitor() : validation_context_.staticValidationVisitor(), - api_, options_); + context_.api(), context_.options()); } CdsApiPtr diff --git a/source/common/upstream/cluster_manager_impl.h b/source/common/upstream/cluster_manager_impl.h index f96b417d9ae61..949d62ef5cd20 100644 --- a/source/common/upstream/cluster_manager_impl.h +++ b/source/common/upstream/cluster_manager_impl.h @@ -36,6 +36,7 @@ #include "source/common/upstream/load_stats_reporter.h" #include "source/common/upstream/priority_conn_pool_map.h" #include "source/common/upstream/upstream_impl.h" +#include "source/server/factory_context_base_impl.h" namespace Envoy { namespace Upstream { @@ -54,12 +55,13 @@ class ProdClusterManagerFactory : public ClusterManagerFactory { Http::Context& http_context, Grpc::Context& grpc_context, Router::Context& router_context, AccessLog::AccessLogManager& log_manager, Singleton::Manager& singleton_manager, const Server::Options& options, Quic::QuicStatNames& quic_stat_names) - : main_thread_dispatcher_(main_thread_dispatcher), validation_context_(validation_context), - api_(api), 
http_context_(http_context), grpc_context_(grpc_context), - router_context_(router_context), admin_(admin), runtime_(runtime), stats_(stats), tls_(tls), - dns_resolver_(dns_resolver), ssl_context_manager_(ssl_context_manager), + : context_(options, main_thread_dispatcher, api, local_info, admin, runtime, + singleton_manager, validation_context.staticValidationVisitor(), stats, tls), + validation_context_(validation_context), http_context_(http_context), + grpc_context_(grpc_context), router_context_(router_context), admin_(admin), stats_(stats), + tls_(tls), dns_resolver_(dns_resolver), ssl_context_manager_(ssl_context_manager), local_info_(local_info), secret_manager_(secret_manager), log_manager_(log_manager), - singleton_manager_(singleton_manager), options_(options), quic_stat_names_(quic_stat_names), + singleton_manager_(singleton_manager), quic_stat_names_(quic_stat_names), alternate_protocols_cache_manager_factory_(singleton_manager, main_thread_dispatcher.timeSource(), tls_), alternate_protocols_cache_manager_(alternate_protocols_cache_manager_factory_.get()) {} @@ -91,14 +93,12 @@ class ProdClusterManagerFactory : public ClusterManagerFactory { Singleton::Manager& singletonManager() override { return singleton_manager_; } protected: - Event::Dispatcher& main_thread_dispatcher_; + Server::FactoryContextBaseImpl context_; ProtobufMessage::ValidationContext& validation_context_; - Api::Api& api_; Http::Context& http_context_; Grpc::Context& grpc_context_; Router::Context& router_context_; Server::Admin& admin_; - Runtime::Loader& runtime_; Stats::Store& stats_; ThreadLocal::Instance& tls_; Network::DnsResolverSharedPtr dns_resolver_; @@ -107,7 +107,6 @@ class ProdClusterManagerFactory : public ClusterManagerFactory { Secret::SecretManager& secret_manager_; AccessLog::AccessLogManager& log_manager_; Singleton::Manager& singleton_manager_; - const Server::Options& options_; Quic::QuicStatNames& quic_stat_names_; Http::AlternateProtocolsCacheManagerFactoryImpl 
alternate_protocols_cache_manager_factory_; Http::AlternateProtocolsCacheManagerSharedPtr alternate_protocols_cache_manager_; diff --git a/source/common/upstream/eds.cc b/source/common/upstream/eds.cc index b25bdc07e5824..b95df7924a194 100644 --- a/source/common/upstream/eds.cc +++ b/source/common/upstream/eds.cc @@ -18,14 +18,14 @@ EdsClusterImpl::EdsClusterImpl( Server::Configuration::TransportSocketFactoryContextImpl& factory_context, Stats::ScopePtr&& stats_scope, bool added_via_api) : BaseDynamicClusterImpl(cluster, runtime, factory_context, std::move(stats_scope), - added_via_api, factory_context.dispatcher().timeSource()), + added_via_api, factory_context.mainThreadDispatcher().timeSource()), Envoy::Config::SubscriptionBase( factory_context.messageValidationVisitor(), "cluster_name"), local_info_(factory_context.localInfo()), cluster_name_(cluster.eds_cluster_config().service_name().empty() ? cluster.name() : cluster.eds_cluster_config().service_name()) { - Event::Dispatcher& dispatcher = factory_context.dispatcher(); + Event::Dispatcher& dispatcher = factory_context.mainThreadDispatcher(); assignment_timeout_ = dispatcher.createTimer([this]() -> void { onAssignmentTimeout(); }); const auto& eds_config = cluster.eds_cluster_config().eds_config(); if (eds_config.config_source_specifier_case() == diff --git a/source/common/upstream/health_checker_impl.cc b/source/common/upstream/health_checker_impl.cc index 2e7a1689b5191..09d8ebeca47e8 100644 --- a/source/common/upstream/health_checker_impl.cc +++ b/source/common/upstream/health_checker_impl.cc @@ -69,7 +69,7 @@ class HealthCheckerFactoryContextImpl : public Server::Configuration::HealthChec } Upstream::Cluster& cluster() override { return cluster_; } Envoy::Runtime::Loader& runtime() override { return runtime_; } - Event::Dispatcher& dispatcher() override { return dispatcher_; } + Event::Dispatcher& mainThreadDispatcher() override { return dispatcher_; } HealthCheckEventLoggerPtr eventLogger() override { 
return std::move(event_logger_); } ProtobufMessage::ValidationVisitor& messageValidationVisitor() override { return validation_visitor_; diff --git a/source/common/upstream/logical_dns_cluster.cc b/source/common/upstream/logical_dns_cluster.cc index ca9c9a809664b..c51bc3c94386d 100644 --- a/source/common/upstream/logical_dns_cluster.cc +++ b/source/common/upstream/logical_dns_cluster.cc @@ -49,13 +49,13 @@ LogicalDnsCluster::LogicalDnsCluster( Server::Configuration::TransportSocketFactoryContextImpl& factory_context, Stats::ScopePtr&& stats_scope, bool added_via_api) : ClusterImplBase(cluster, runtime, factory_context, std::move(stats_scope), added_via_api, - factory_context.dispatcher().timeSource()), + factory_context.mainThreadDispatcher().timeSource()), dns_resolver_(dns_resolver), dns_refresh_rate_ms_( std::chrono::milliseconds(PROTOBUF_GET_MS_OR_DEFAULT(cluster, dns_refresh_rate, 5000))), respect_dns_ttl_(cluster.respect_dns_ttl()), resolve_timer_( - factory_context.dispatcher().createTimer([this]() -> void { startResolve(); })), + factory_context.mainThreadDispatcher().createTimer([this]() -> void { startResolve(); })), local_info_(factory_context.localInfo()), load_assignment_(convertPriority(cluster.load_assignment())) { failure_backoff_strategy_ = diff --git a/source/common/upstream/original_dst_cluster.cc b/source/common/upstream/original_dst_cluster.cc index 9cf6887aff530..00561a544a165 100644 --- a/source/common/upstream/original_dst_cluster.cc +++ b/source/common/upstream/original_dst_cluster.cc @@ -110,8 +110,8 @@ OriginalDstCluster::OriginalDstCluster( Server::Configuration::TransportSocketFactoryContextImpl& factory_context, Stats::ScopePtr&& stats_scope, bool added_via_api) : ClusterImplBase(config, runtime, factory_context, std::move(stats_scope), added_via_api, - factory_context.dispatcher().timeSource()), - dispatcher_(factory_context.dispatcher()), + factory_context.mainThreadDispatcher().timeSource()), + 
dispatcher_(factory_context.mainThreadDispatcher()), cleanup_interval_ms_( std::chrono::milliseconds(PROTOBUF_GET_MS_OR_DEFAULT(config, cleanup_interval, 5000))), cleanup_timer_(dispatcher_.createTimer([this]() -> void { cleanup(); })), diff --git a/source/common/upstream/static_cluster.cc b/source/common/upstream/static_cluster.cc index a8741f7a8f592..d8ce7b1a6ea02 100644 --- a/source/common/upstream/static_cluster.cc +++ b/source/common/upstream/static_cluster.cc @@ -12,7 +12,7 @@ StaticClusterImpl::StaticClusterImpl( Server::Configuration::TransportSocketFactoryContextImpl& factory_context, Stats::ScopePtr&& stats_scope, bool added_via_api) : ClusterImplBase(cluster, runtime, factory_context, std::move(stats_scope), added_via_api, - factory_context.dispatcher().timeSource()), + factory_context.mainThreadDispatcher().timeSource()), priority_state_manager_( new PriorityStateManager(*this, factory_context.localInfo(), nullptr)) { const envoy::config::endpoint::v3::ClusterLoadAssignment& cluster_load_assignment = @@ -20,7 +20,7 @@ StaticClusterImpl::StaticClusterImpl( overprovisioning_factor_ = PROTOBUF_GET_WRAPPED_OR_DEFAULT( cluster_load_assignment.policy(), overprovisioning_factor, kDefaultOverProvisioningFactor); - Event::Dispatcher& dispatcher = factory_context.dispatcher(); + Event::Dispatcher& dispatcher = factory_context.mainThreadDispatcher(); for (const auto& locality_lb_endpoint : cluster_load_assignment.endpoints()) { validateEndpointsForZoneAwareRouting(locality_lb_endpoint); diff --git a/source/common/upstream/strict_dns_cluster.cc b/source/common/upstream/strict_dns_cluster.cc index 0d03ea0a00f63..44cb3e83485a1 100644 --- a/source/common/upstream/strict_dns_cluster.cc +++ b/source/common/upstream/strict_dns_cluster.cc @@ -14,7 +14,7 @@ StrictDnsClusterImpl::StrictDnsClusterImpl( Server::Configuration::TransportSocketFactoryContextImpl& factory_context, Stats::ScopePtr&& stats_scope, bool added_via_api) : BaseDynamicClusterImpl(cluster, runtime, 
factory_context, std::move(stats_scope), - added_via_api, factory_context.dispatcher().timeSource()), + added_via_api, factory_context.mainThreadDispatcher().timeSource()), load_assignment_(cluster.load_assignment()), local_info_(factory_context.localInfo()), dns_resolver_(dns_resolver), dns_refresh_rate_ms_( @@ -37,8 +37,8 @@ StrictDnsClusterImpl::StrictDnsClusterImpl( const std::string& url = fmt::format("tcp://{}:{}", socket_address.address(), socket_address.port_value()); - resolve_targets.emplace_back(new ResolveTarget(*this, factory_context.dispatcher(), url, - locality_lb_endpoint, lb_endpoint)); + resolve_targets.emplace_back(new ResolveTarget(*this, factory_context.mainThreadDispatcher(), + url, locality_lb_endpoint, lb_endpoint)); } } resolve_targets_ = std::move(resolve_targets); diff --git a/source/common/upstream/upstream_impl.cc b/source/common/upstream/upstream_impl.cc index 2eec93be78914..6bb9a4ffc2cbf 100644 --- a/source/common/upstream/upstream_impl.cc +++ b/source/common/upstream/upstream_impl.cc @@ -677,12 +677,12 @@ class FactoryContextImpl : public Server::Configuration::CommonFactoryContext { FactoryContextImpl(Stats::Scope& stats_scope, Envoy::Runtime::Loader& runtime, Server::Configuration::TransportSocketFactoryContext& c) : admin_(c.admin()), stats_scope_(stats_scope), cluster_manager_(c.clusterManager()), - local_info_(c.localInfo()), dispatcher_(c.dispatcher()), runtime_(runtime), + local_info_(c.localInfo()), dispatcher_(c.mainThreadDispatcher()), runtime_(runtime), singleton_manager_(c.singletonManager()), tls_(c.threadLocal()), api_(c.api()), options_(c.options()), message_validation_visitor_(c.messageValidationVisitor()) {} Upstream::ClusterManager& clusterManager() override { return cluster_manager_; } - Event::Dispatcher& dispatcher() override { return dispatcher_; } + Event::Dispatcher& mainThreadDispatcher() override { return dispatcher_; } const Server::Options& options() override { return options_; } const 
LocalInfo::LocalInfo& localInfo() const override { return local_info_; } Envoy::Runtime::Loader& runtime() override { return runtime_; } @@ -1017,14 +1017,14 @@ ClusterImplBase::ClusterImplBase( local_cluster_(factory_context.clusterManager().localClusterName().value_or("") == cluster.name()), const_metadata_shared_pool_(Config::Metadata::getConstMetadataSharedPool( - factory_context.singletonManager(), factory_context.dispatcher())) { + factory_context.singletonManager(), factory_context.mainThreadDispatcher())) { factory_context.setInitManager(init_manager_); auto socket_factory = createTransportSocketFactory(cluster, factory_context); auto* raw_factory_pointer = socket_factory.get(); auto socket_matcher = std::make_unique( cluster.transport_socket_matches(), factory_context, socket_factory, *stats_scope); - auto& dispatcher = factory_context.dispatcher(); + auto& dispatcher = factory_context.mainThreadDispatcher(); info_ = std::shared_ptr( new ClusterInfoImpl(cluster, factory_context.clusterManager().bindConfig(), runtime, std::move(socket_matcher), std::move(stats_scope), added_via_api, diff --git a/source/extensions/access_loggers/wasm/config.cc b/source/extensions/access_loggers/wasm/config.cc index 721b0c062f46a..467398b7369ac 100644 --- a/source/extensions/access_loggers/wasm/config.cc +++ b/source/extensions/access_loggers/wasm/config.cc @@ -41,8 +41,8 @@ AccessLog::InstanceSharedPtr WasmAccessLogFactory::createAccessLogInstance( }; if (!Common::Wasm::createWasm(plugin, context.scope().createScope(""), context.clusterManager(), - context.initManager(), context.dispatcher(), context.api(), - context.lifecycleNotifier(), remote_data_provider_, + context.initManager(), context.mainThreadDispatcher(), + context.api(), context.lifecycleNotifier(), remote_data_provider_, std::move(callback))) { throw Common::Wasm::WasmException( fmt::format("Unable to create Wasm access log {}", plugin->name_)); diff --git a/source/extensions/bootstrap/wasm/config.cc 
b/source/extensions/bootstrap/wasm/config.cc index e87ddecebd34f..4f34f7e88dab4 100644 --- a/source/extensions/bootstrap/wasm/config.cc +++ b/source/extensions/bootstrap/wasm/config.cc @@ -32,8 +32,8 @@ void WasmServiceExtension::createWasm(Server::Configuration::ServerFactoryContex if (config_.singleton()) { // Return a Wasm VM which will be stored as a singleton by the Server. wasm_service_ = std::make_unique( - plugin, - Common::Wasm::getOrCreateThreadLocalPlugin(base_wasm, plugin, context.dispatcher())); + plugin, Common::Wasm::getOrCreateThreadLocalPlugin(base_wasm, plugin, + context.mainThreadDispatcher())); return; } // Per-thread WASM VM. @@ -49,8 +49,8 @@ void WasmServiceExtension::createWasm(Server::Configuration::ServerFactoryContex }; if (!Common::Wasm::createWasm(plugin, context.scope().createScope(""), context.clusterManager(), - context.initManager(), context.dispatcher(), context.api(), - context.lifecycleNotifier(), remote_data_provider_, + context.initManager(), context.mainThreadDispatcher(), + context.api(), context.lifecycleNotifier(), remote_data_provider_, std::move(callback))) { // NB: throw if we get a synchronous configuration failures as this is how such failures are // reported to xDS. 
diff --git a/source/extensions/clusters/aggregate/cluster.cc b/source/extensions/clusters/aggregate/cluster.cc index c7f21b5abdbd3..6e6b7ab77cab3 100644 --- a/source/extensions/clusters/aggregate/cluster.cc +++ b/source/extensions/clusters/aggregate/cluster.cc @@ -19,7 +19,7 @@ Cluster::Cluster(const envoy::config::cluster::v3::Cluster& cluster, Server::Configuration::TransportSocketFactoryContextImpl& factory_context, Stats::ScopePtr&& stats_scope, bool added_via_api) : Upstream::ClusterImplBase(cluster, runtime, factory_context, std::move(stats_scope), - added_via_api, factory_context.dispatcher().timeSource()), + added_via_api, factory_context.mainThreadDispatcher().timeSource()), cluster_manager_(cluster_manager), runtime_(runtime), random_(random), clusters_(std::make_shared(config.clusters().begin(), config.clusters().end())) {} diff --git a/source/extensions/clusters/dynamic_forward_proxy/cluster.cc b/source/extensions/clusters/dynamic_forward_proxy/cluster.cc index 2bef1a7f333b8..62209fbd8b98c 100644 --- a/source/extensions/clusters/dynamic_forward_proxy/cluster.cc +++ b/source/extensions/clusters/dynamic_forward_proxy/cluster.cc @@ -22,7 +22,8 @@ Cluster::Cluster( Server::Configuration::TransportSocketFactoryContextImpl& factory_context, Stats::ScopePtr&& stats_scope, bool added_via_api) : Upstream::BaseDynamicClusterImpl(cluster, runtime, factory_context, std::move(stats_scope), - added_via_api, factory_context.dispatcher().timeSource()), + added_via_api, + factory_context.mainThreadDispatcher().timeSource()), dns_cache_manager_(cache_manager_factory.get()), dns_cache_(dns_cache_manager_->getCache(config.dns_cache_config())), update_callbacks_handle_(dns_cache_->addUpdateCallbacks(*this)), local_info_(local_info) {} diff --git a/source/extensions/clusters/redis/redis_cluster.cc b/source/extensions/clusters/redis/redis_cluster.cc index 59d6f7ad0d3ca..e497a62a9ccf7 100644 --- a/source/extensions/clusters/redis/redis_cluster.cc +++ 
b/source/extensions/clusters/redis/redis_cluster.cc @@ -25,7 +25,8 @@ RedisCluster::RedisCluster( Stats::ScopePtr&& stats_scope, bool added_via_api, ClusterSlotUpdateCallBackSharedPtr lb_factory) : Upstream::BaseDynamicClusterImpl(cluster, runtime, factory_context, std::move(stats_scope), - added_via_api, factory_context.dispatcher().timeSource()), + added_via_api, + factory_context.mainThreadDispatcher().timeSource()), cluster_manager_(cluster_manager), cluster_refresh_rate_(std::chrono::milliseconds( PROTOBUF_GET_MS_OR_DEFAULT(redis_cluster, cluster_refresh_rate, 5000))), @@ -37,7 +38,7 @@ RedisCluster::RedisCluster( PROTOBUF_GET_WRAPPED_OR_DEFAULT(redis_cluster, redirect_refresh_threshold, 5)), failure_refresh_threshold_(redis_cluster.failure_refresh_threshold()), host_degraded_refresh_threshold_(redis_cluster.host_degraded_refresh_threshold()), - dispatcher_(factory_context.dispatcher()), dns_resolver_(std::move(dns_resolver)), + dispatcher_(factory_context.mainThreadDispatcher()), dns_resolver_(std::move(dns_resolver)), dns_lookup_family_(Upstream::getDnsLookupFamilyFromCluster(cluster)), load_assignment_(cluster.load_assignment()), local_info_(factory_context.localInfo()), random_(api.randomGenerator()), redis_discovery_session_(*this, redis_client_factory), @@ -48,7 +49,7 @@ RedisCluster::RedisCluster( NetworkFilters::RedisProxy::ProtocolOptionsConfigImpl::authPassword(info(), api)), cluster_name_(cluster.name()), refresh_manager_(Common::Redis::getClusterRefreshManager( - factory_context.singletonManager(), factory_context.dispatcher(), + factory_context.singletonManager(), factory_context.mainThreadDispatcher(), factory_context.clusterManager(), factory_context.api().timeSource())), registration_handle_(refresh_manager_->registerCluster( cluster_name_, redirect_refresh_interval_, redirect_refresh_threshold_, diff --git a/source/extensions/common/dynamic_forward_proxy/dns_cache_impl.cc b/source/extensions/common/dynamic_forward_proxy/dns_cache_impl.cc index 
edadd96bafd95..15f1fa6a9b07a 100644 --- a/source/extensions/common/dynamic_forward_proxy/dns_cache_impl.cc +++ b/source/extensions/common/dynamic_forward_proxy/dns_cache_impl.cc @@ -19,7 +19,7 @@ namespace DynamicForwardProxy { DnsCacheImpl::DnsCacheImpl( Server::Configuration::FactoryContextBase& context, const envoy::extensions::common::dynamic_forward_proxy::v3::DnsCacheConfig& config) - : main_thread_dispatcher_(context.dispatcher()), + : main_thread_dispatcher_(context.mainThreadDispatcher()), dns_lookup_family_(Upstream::getDnsLookupFamilyFromEnum(config.dns_lookup_family())), resolver_(selectDnsResolver(config, main_thread_dispatcher_)), tls_slot_(context.threadLocal()), diff --git a/source/extensions/filters/http/adaptive_concurrency/config.cc b/source/extensions/filters/http/adaptive_concurrency/config.cc index 5abc39bf6fd97..ed2182bafcde4 100644 --- a/source/extensions/filters/http/adaptive_concurrency/config.cc +++ b/source/extensions/filters/http/adaptive_concurrency/config.cc @@ -25,7 +25,7 @@ Http::FilterFactoryCb AdaptiveConcurrencyFilterFactory::createFilterFactoryFromP auto gradient_controller_config = Controller::GradientControllerConfig(config.gradient_controller_config(), context.runtime()); controller = std::make_shared( - std::move(gradient_controller_config), context.dispatcher(), context.runtime(), + std::move(gradient_controller_config), context.mainThreadDispatcher(), context.runtime(), acc_stats_prefix + "gradient_controller.", context.scope(), context.api().randomGenerator(), context.timeSource()); diff --git a/source/extensions/filters/http/alternate_protocols_cache/config.cc b/source/extensions/filters/http/alternate_protocols_cache/config.cc index 61327b29b1d92..dea1f9904b0a5 100644 --- a/source/extensions/filters/http/alternate_protocols_cache/config.cc +++ b/source/extensions/filters/http/alternate_protocols_cache/config.cc @@ -16,9 +16,11 @@ Http::FilterFactoryCb AlternateProtocolsCacheFilterFactory::createFilterFactoryF 
proto_config, const std::string&, Server::Configuration::FactoryContext& context) { Http::AlternateProtocolsCacheManagerFactoryImpl alternate_protocol_cache_manager_factory( - context.singletonManager(), context.dispatcher().timeSource(), context.threadLocal()); - FilterConfigSharedPtr filter_config(std::make_shared( - proto_config, alternate_protocol_cache_manager_factory, context.dispatcher().timeSource())); + context.singletonManager(), context.mainThreadDispatcher().timeSource(), + context.threadLocal()); + FilterConfigSharedPtr filter_config( + std::make_shared(proto_config, alternate_protocol_cache_manager_factory, + context.mainThreadDispatcher().timeSource())); return [filter_config](Http::FilterChainFactoryCallbacks& callbacks) -> void { callbacks.addStreamEncoderFilter(std::make_shared(filter_config)); }; diff --git a/source/extensions/filters/http/aws_lambda/config.cc b/source/extensions/filters/http/aws_lambda/config.cc index a538f07847ddd..a3b1838ff1949 100644 --- a/source/extensions/filters/http/aws_lambda/config.cc +++ b/source/extensions/filters/http/aws_lambda/config.cc @@ -47,7 +47,8 @@ Http::FilterFactoryCb AwsLambdaFilterFactory::createFilterFactoryFromProtoTyped( } const std::string region = arn->region(); auto signer = std::make_shared( - service_name, region, std::move(credentials_provider), context.dispatcher().timeSource()); + service_name, region, std::move(credentials_provider), + context.mainThreadDispatcher().timeSource()); FilterSettings filter_settings{*arn, getInvocationMode(proto_config), proto_config.payload_passthrough()}; diff --git a/source/extensions/filters/http/aws_request_signing/config.cc b/source/extensions/filters/http/aws_request_signing/config.cc index 02408ec90a091..0d2e9bc97c1f4 100644 --- a/source/extensions/filters/http/aws_request_signing/config.cc +++ b/source/extensions/filters/http/aws_request_signing/config.cc @@ -23,7 +23,7 @@ Http::FilterFactoryCb AwsRequestSigningFilterFactory::createFilterFactoryFromPro 
context.api(), Extensions::Common::Aws::Utility::metadataFetcher); auto signer = std::make_unique( config.service_name(), config.region(), credentials_provider, - context.dispatcher().timeSource()); + context.mainThreadDispatcher().timeSource()); auto filter_config = std::make_shared(std::move(signer), stats_prefix, context.scope(), diff --git a/source/extensions/filters/http/dynamo/config.cc b/source/extensions/filters/http/dynamo/config.cc index 0850dc9b70836..eaf33d18e73f2 100644 --- a/source/extensions/filters/http/dynamo/config.cc +++ b/source/extensions/filters/http/dynamo/config.cc @@ -19,7 +19,7 @@ Http::FilterFactoryCb DynamoFilterConfig::createFilterFactoryFromProtoTyped( auto stats = std::make_shared(context.scope(), stats_prefix); return [&context, stats](Http::FilterChainFactoryCallbacks& callbacks) -> void { callbacks.addStreamFilter(std::make_shared( - context.runtime(), stats, context.dispatcher().timeSource())); + context.runtime(), stats, context.mainThreadDispatcher().timeSource())); }; } diff --git a/source/extensions/filters/http/health_check/config.cc b/source/extensions/filters/http/health_check/config.cc index faad4fce070c3..3cf363721e65b 100644 --- a/source/extensions/filters/http/health_check/config.cc +++ b/source/extensions/filters/http/health_check/config.cc @@ -33,7 +33,7 @@ Http::FilterFactoryCb HealthCheckFilterConfig::createFilterFactoryFromProtoTyped HealthCheckCacheManagerSharedPtr cache_manager; if (cache_time_ms > 0) { cache_manager = std::make_shared( - context.dispatcher(), std::chrono::milliseconds(cache_time_ms)); + context.mainThreadDispatcher(), std::chrono::milliseconds(cache_time_ms)); } ClusterMinHealthyPercentagesConstSharedPtr cluster_min_healthy_percentages; diff --git a/source/extensions/filters/http/jwt_authn/filter_config.cc b/source/extensions/filters/http/jwt_authn/filter_config.cc index 8d2874290678f..8ead6c17fd52a 100644 --- a/source/extensions/filters/http/jwt_authn/filter_config.cc +++ 
b/source/extensions/filters/http/jwt_authn/filter_config.cc @@ -15,7 +15,7 @@ FilterConfigImpl::FilterConfigImpl( envoy::extensions::filters::http::jwt_authn::v3::JwtAuthentication proto_config, const std::string& stats_prefix, Server::Configuration::FactoryContext& context) : proto_config_(std::move(proto_config)), stats_(generateStats(stats_prefix, context.scope())), - cm_(context.clusterManager()), time_source_(context.dispatcher().timeSource()) { + cm_(context.clusterManager()), time_source_(context.mainThreadDispatcher().timeSource()) { ENVOY_LOG(debug, "Loaded JwtAuthConfig: {}", proto_config_.DebugString()); diff --git a/source/extensions/filters/http/jwt_authn/jwks_async_fetcher.cc b/source/extensions/filters/http/jwt_authn/jwks_async_fetcher.cc index 9521fc214cb63..5e1acb2763705 100644 --- a/source/extensions/filters/http/jwt_authn/jwks_async_fetcher.cc +++ b/source/extensions/filters/http/jwt_authn/jwks_async_fetcher.cc @@ -28,7 +28,8 @@ JwksAsyncFetcher::JwksAsyncFetcher(const RemoteJwks& remote_jwks, return; } - cache_duration_timer_ = context_.dispatcher().createTimer([this]() -> void { fetch(); }); + cache_duration_timer_ = + context_.mainThreadDispatcher().createTimer([this]() -> void { fetch(); }); // For fast_listener, just trigger a fetch, not register with init_manager. 
if (remote_jwks_.async_fetch().fast_listener()) { diff --git a/source/extensions/filters/http/local_ratelimit/config.cc b/source/extensions/filters/http/local_ratelimit/config.cc index 15c7107276960..d70e8f8aac49c 100644 --- a/source/extensions/filters/http/local_ratelimit/config.cc +++ b/source/extensions/filters/http/local_ratelimit/config.cc @@ -16,7 +16,8 @@ Http::FilterFactoryCb LocalRateLimitFilterConfig::createFilterFactoryFromProtoTy const envoy::extensions::filters::http::local_ratelimit::v3::LocalRateLimit& proto_config, const std::string&, Server::Configuration::FactoryContext& context) { FilterConfigSharedPtr filter_config = std::make_shared( - proto_config, context.localInfo(), context.dispatcher(), context.scope(), context.runtime()); + proto_config, context.localInfo(), context.mainThreadDispatcher(), context.scope(), + context.runtime()); return [filter_config](Http::FilterChainFactoryCallbacks& callbacks) -> void { callbacks.addStreamFilter(std::make_shared(filter_config)); }; @@ -27,7 +28,7 @@ LocalRateLimitFilterConfig::createRouteSpecificFilterConfigTyped( const envoy::extensions::filters::http::local_ratelimit::v3::LocalRateLimit& proto_config, Server::Configuration::ServerFactoryContext& context, ProtobufMessage::ValidationVisitor&) { return std::make_shared(proto_config, context.localInfo(), - context.dispatcher(), context.scope(), + context.mainThreadDispatcher(), context.scope(), context.runtime(), true); } diff --git a/source/extensions/filters/http/lua/config.cc b/source/extensions/filters/http/lua/config.cc index 6ed6ba6ddcd98..822eecc2c773a 100644 --- a/source/extensions/filters/http/lua/config.cc +++ b/source/extensions/filters/http/lua/config.cc @@ -16,7 +16,7 @@ Http::FilterFactoryCb LuaFilterConfig::createFilterFactoryFromProtoTyped( Server::Configuration::FactoryContext& context) { FilterConfigConstSharedPtr filter_config(new FilterConfig{ proto_config, context.threadLocal(), context.clusterManager(), context.api()}); - auto& 
time_source = context.dispatcher().timeSource(); + auto& time_source = context.mainThreadDispatcher().timeSource(); return [filter_config, &time_source](Http::FilterChainFactoryCallbacks& callbacks) -> void { callbacks.addStreamFilter(std::make_shared(filter_config, time_source)); }; diff --git a/source/extensions/filters/http/lua/lua_filter.cc b/source/extensions/filters/http/lua/lua_filter.cc index 095f9e7bc7738..54decd6dd467a 100644 --- a/source/extensions/filters/http/lua/lua_filter.cc +++ b/source/extensions/filters/http/lua/lua_filter.cc @@ -699,7 +699,7 @@ FilterConfig::FilterConfig(const envoy::extensions::filters::http::lua::v3::Lua& FilterConfigPerRoute::FilterConfigPerRoute( const envoy::extensions::filters::http::lua::v3::LuaPerRoute& config, Server::Configuration::ServerFactoryContext& context) - : main_thread_dispatcher_(context.dispatcher()), disabled_(config.disabled()), + : main_thread_dispatcher_(context.mainThreadDispatcher()), disabled_(config.disabled()), name_(config.name()) { if (disabled_ || !name_.empty()) { return; diff --git a/source/extensions/filters/http/tap/config.cc b/source/extensions/filters/http/tap/config.cc index ab992a89a40ce..5c573924f768e 100644 --- a/source/extensions/filters/http/tap/config.cc +++ b/source/extensions/filters/http/tap/config.cc @@ -26,9 +26,10 @@ class HttpTapConfigFactoryImpl : public Extensions::Common::Tap::TapConfigFactor Http::FilterFactoryCb TapFilterFactory::createFilterFactoryFromProtoTyped( const envoy::extensions::filters::http::tap::v3::Tap& proto_config, const std::string& stats_prefix, Server::Configuration::FactoryContext& context) { - FilterConfigSharedPtr filter_config(new FilterConfigImpl( - proto_config, stats_prefix, std::make_unique(), context.scope(), - context.admin(), context.singletonManager(), context.threadLocal(), context.dispatcher())); + FilterConfigSharedPtr filter_config( + new FilterConfigImpl(proto_config, stats_prefix, std::make_unique(), + context.scope(), context.admin(), 
context.singletonManager(), + context.threadLocal(), context.mainThreadDispatcher())); return [filter_config](Http::FilterChainFactoryCallbacks& callbacks) -> void { auto filter = std::make_shared(filter_config); callbacks.addStreamFilter(filter); diff --git a/source/extensions/filters/http/wasm/wasm_filter.cc b/source/extensions/filters/http/wasm/wasm_filter.cc index 4296be89d40b3..75e06e69b735a 100644 --- a/source/extensions/filters/http/wasm/wasm_filter.cc +++ b/source/extensions/filters/http/wasm/wasm_filter.cc @@ -21,8 +21,8 @@ FilterConfig::FilterConfig(const envoy::extensions::filters::http::wasm::v3::Was }; if (!Common::Wasm::createWasm(plugin, context.scope().createScope(""), context.clusterManager(), - context.initManager(), context.dispatcher(), context.api(), - context.lifecycleNotifier(), remote_data_provider_, + context.initManager(), context.mainThreadDispatcher(), + context.api(), context.lifecycleNotifier(), remote_data_provider_, std::move(callback))) { throw Common::Wasm::WasmException( fmt::format("Unable to create Wasm HTTP filter {}", plugin->name_)); diff --git a/source/extensions/filters/network/client_ssl_auth/config.cc b/source/extensions/filters/network/client_ssl_auth/config.cc index f974b275274fa..dcd2db9f47a43 100644 --- a/source/extensions/filters/network/client_ssl_auth/config.cc +++ b/source/extensions/filters/network/client_ssl_auth/config.cc @@ -19,7 +19,7 @@ Network::FilterFactoryCb ClientSslAuthConfigFactory::createFilterFactoryFromProt ASSERT(!proto_config.stat_prefix().empty()); ClientSslAuthConfigSharedPtr filter_config(ClientSslAuthConfig::create( - proto_config, context.threadLocal(), context.clusterManager(), context.dispatcher(), + proto_config, context.threadLocal(), context.clusterManager(), context.mainThreadDispatcher(), context.scope(), context.api().randomGenerator())); return [filter_config](Network::FilterManager& filter_manager) -> void { filter_manager.addReadFilter(std::make_shared(filter_config)); diff --git 
a/source/extensions/filters/network/dubbo_proxy/config.cc b/source/extensions/filters/network/dubbo_proxy/config.cc index 8bc19cd46663a..ad379489f362f 100644 --- a/source/extensions/filters/network/dubbo_proxy/config.cc +++ b/source/extensions/filters/network/dubbo_proxy/config.cc @@ -22,8 +22,9 @@ Network::FilterFactoryCb DubboProxyFilterConfigFactory::createFilterFactoryFromP std::shared_ptr filter_config(std::make_shared(proto_config, context)); return [filter_config, &context](Network::FilterManager& filter_manager) -> void { - filter_manager.addReadFilter(std::make_shared( - *filter_config, context.api().randomGenerator(), context.dispatcher().timeSource())); + filter_manager.addReadFilter( + std::make_shared(*filter_config, context.api().randomGenerator(), + context.mainThreadDispatcher().timeSource())); }; } diff --git a/source/extensions/filters/network/http_connection_manager/config.cc b/source/extensions/filters/network/http_connection_manager/config.cc index 53223b12160c3..42e3da563f829 100644 --- a/source/extensions/filters/network/http_connection_manager/config.cc +++ b/source/extensions/filters/network/http_connection_manager/config.cc @@ -151,8 +151,8 @@ Utility::Singletons Utility::createSingletons(Server::Configuration::FactoryCont std::shared_ptr date_provider = context.singletonManager().getTyped( SINGLETON_MANAGER_REGISTERED_NAME(date_provider), [&context] { - return std::make_shared(context.dispatcher(), - context.threadLocal()); + return std::make_shared( + context.mainThreadDispatcher(), context.threadLocal()); }); Router::RouteConfigProviderManagerSharedPtr route_config_provider_manager = @@ -227,7 +227,7 @@ HttpConnectionManagerFilterConfigFactory::createFilterFactoryFromProtoAndHopByHo auto hcm = std::make_shared( *filter_config, context.drainDecision(), context.api().randomGenerator(), context.httpContext(), context.runtime(), context.localInfo(), context.clusterManager(), - context.overloadManager(), context.dispatcher().timeSource()); + 
context.overloadManager(), context.mainThreadDispatcher().timeSource()); if (!clear_hop_by_hop_headers) { hcm->setClearHopByHopResponseHeaders(false); } @@ -826,7 +826,7 @@ HttpConnectionManagerFactory::createHttpConnectionManagerFactoryFromProto( auto conn_manager = std::make_unique( *filter_config, context.drainDecision(), context.api().randomGenerator(), context.httpContext(), context.runtime(), context.localInfo(), context.clusterManager(), - context.overloadManager(), context.dispatcher().timeSource()); + context.overloadManager(), context.mainThreadDispatcher().timeSource()); if (!clear_hop_by_hop_headers) { conn_manager->setClearHopByHopResponseHeaders(false); } diff --git a/source/extensions/filters/network/local_ratelimit/config.cc b/source/extensions/filters/network/local_ratelimit/config.cc index e9d2c907bd102..0ae1e20adfb98 100644 --- a/source/extensions/filters/network/local_ratelimit/config.cc +++ b/source/extensions/filters/network/local_ratelimit/config.cc @@ -14,7 +14,7 @@ Network::FilterFactoryCb LocalRateLimitConfigFactory::createFilterFactoryFromPro const envoy::extensions::filters::network::local_ratelimit::v3::LocalRateLimit& proto_config, Server::Configuration::FactoryContext& context) { ConfigSharedPtr filter_config( - new Config(proto_config, context.dispatcher(), context.scope(), context.runtime())); + new Config(proto_config, context.mainThreadDispatcher(), context.scope(), context.runtime())); return [filter_config](Network::FilterManager& filter_manager) -> void { filter_manager.addReadFilter(std::make_shared(filter_config)); }; diff --git a/source/extensions/filters/network/mongo_proxy/config.cc b/source/extensions/filters/network/mongo_proxy/config.cc index 79597c01087ed..4a1fad3d227ea 100644 --- a/source/extensions/filters/network/mongo_proxy/config.cc +++ b/source/extensions/filters/network/mongo_proxy/config.cc @@ -25,7 +25,7 @@ Network::FilterFactoryCb MongoProxyFilterConfigFactory::createFilterFactoryFromP AccessLogSharedPtr 
access_log; if (!proto_config.access_log().empty()) { access_log = std::make_shared(proto_config.access_log(), context.accessLogManager(), - context.dispatcher().timeSource()); + context.mainThreadDispatcher().timeSource()); } Filters::Common::Fault::FaultDelayConfigSharedPtr fault_config; @@ -45,7 +45,8 @@ Network::FilterFactoryCb MongoProxyFilterConfigFactory::createFilterFactoryFromP stats](Network::FilterManager& filter_manager) -> void { filter_manager.addFilter(std::make_shared( stat_prefix, context.scope(), context.runtime(), access_log, fault_config, - context.drainDecision(), context.dispatcher().timeSource(), emit_dynamic_metadata, stats)); + context.drainDecision(), context.mainThreadDispatcher().timeSource(), emit_dynamic_metadata, + stats)); }; } diff --git a/source/extensions/filters/network/redis_proxy/config.cc b/source/extensions/filters/network/redis_proxy/config.cc index b356418325799..bdef53f8fbcb6 100644 --- a/source/extensions/filters/network/redis_proxy/config.cc +++ b/source/extensions/filters/network/redis_proxy/config.cc @@ -38,7 +38,7 @@ Network::FilterFactoryCb RedisProxyFilterConfigFactory::createFilterFactoryFromP Extensions::Common::Redis::ClusterRefreshManagerSharedPtr refresh_manager = Extensions::Common::Redis::getClusterRefreshManager( - context.singletonManager(), context.dispatcher(), context.clusterManager(), + context.singletonManager(), context.mainThreadDispatcher(), context.clusterManager(), context.timeSource()); ProxyFilterConfigSharedPtr filter_config(std::make_shared( diff --git a/source/extensions/filters/network/thrift_proxy/config.cc b/source/extensions/filters/network/thrift_proxy/config.cc index ae272ed1b480f..180503969176f 100644 --- a/source/extensions/filters/network/thrift_proxy/config.cc +++ b/source/extensions/filters/network/thrift_proxy/config.cc @@ -102,8 +102,9 @@ Network::FilterFactoryCb ThriftProxyFilterConfigFactory::createFilterFactoryFrom std::shared_ptr filter_config(new ConfigImpl(proto_config, 
context)); return [filter_config, &context](Network::FilterManager& filter_manager) -> void { - filter_manager.addReadFilter(std::make_shared( - *filter_config, context.api().randomGenerator(), context.dispatcher().timeSource())); + filter_manager.addReadFilter( + std::make_shared(*filter_config, context.api().randomGenerator(), + context.mainThreadDispatcher().timeSource())); }; } diff --git a/source/extensions/filters/network/thrift_proxy/router/config.cc b/source/extensions/filters/network/thrift_proxy/router/config.cc index 3e651b2886b7f..a6b28cc58c051 100644 --- a/source/extensions/filters/network/thrift_proxy/router/config.cc +++ b/source/extensions/filters/network/thrift_proxy/router/config.cc @@ -20,7 +20,7 @@ ThriftFilters::FilterFactoryCb RouterFilterConfig::createFilterFactoryFromProtoT auto shadow_writer = std::make_shared(context.clusterManager(), stat_prefix, context.scope(), - context.dispatcher(), context.threadLocal()); + context.mainThreadDispatcher(), context.threadLocal()); return [&context, stat_prefix, shadow_writer](ThriftFilters::FilterChainFactoryCallbacks& callbacks) -> void { diff --git a/source/extensions/filters/network/wasm/wasm_filter.cc b/source/extensions/filters/network/wasm/wasm_filter.cc index 2ee4ea033a36a..d5f1a6a3ba382 100644 --- a/source/extensions/filters/network/wasm/wasm_filter.cc +++ b/source/extensions/filters/network/wasm/wasm_filter.cc @@ -21,8 +21,8 @@ FilterConfig::FilterConfig(const envoy::extensions::filters::network::wasm::v3:: }; if (!Common::Wasm::createWasm(plugin, context.scope().createScope(""), context.clusterManager(), - context.initManager(), context.dispatcher(), context.api(), - context.lifecycleNotifier(), remote_data_provider_, + context.initManager(), context.mainThreadDispatcher(), + context.api(), context.lifecycleNotifier(), remote_data_provider_, std::move(callback))) { throw Common::Wasm::WasmException( fmt::format("Unable to create Wasm network filter {}", plugin->name_)); diff --git 
a/source/extensions/filters/network/zookeeper_proxy/config.cc b/source/extensions/filters/network/zookeeper_proxy/config.cc index 0a980cf96152d..833eb4563b321 100644 --- a/source/extensions/filters/network/zookeeper_proxy/config.cc +++ b/source/extensions/filters/network/zookeeper_proxy/config.cc @@ -30,7 +30,7 @@ Network::FilterFactoryCb ZooKeeperConfigFactory::createFilterFactoryFromProtoTyp ZooKeeperFilterConfigSharedPtr filter_config( std::make_shared(stat_prefix, max_packet_bytes, context.scope())); - auto& time_source = context.dispatcher().timeSource(); + auto& time_source = context.mainThreadDispatcher().timeSource(); return [filter_config, &time_source](Network::FilterManager& filter_manager) -> void { filter_manager.addFilter(std::make_shared(filter_config, time_source)); diff --git a/source/extensions/health_checkers/redis/config.cc b/source/extensions/health_checkers/redis/config.cc index 880c2b34de770..06f73368ee7eb 100644 --- a/source/extensions/health_checkers/redis/config.cc +++ b/source/extensions/health_checkers/redis/config.cc @@ -18,8 +18,8 @@ Upstream::HealthCheckerSharedPtr RedisHealthCheckerFactory::createCustomHealthCh Server::Configuration::HealthCheckerFactoryContext& context) { return std::make_shared( context.cluster(), config, - getRedisHealthCheckConfig(config, context.messageValidationVisitor()), context.dispatcher(), - context.runtime(), context.eventLogger(), context.api(), + getRedisHealthCheckConfig(config, context.messageValidationVisitor()), + context.mainThreadDispatcher(), context.runtime(), context.eventLogger(), context.api(), NetworkFilters::Common::Redis::Client::ClientFactoryImpl::instance_); }; diff --git a/source/extensions/resource_monitors/injected_resource/injected_resource_monitor.cc b/source/extensions/resource_monitors/injected_resource/injected_resource_monitor.cc index 6aa0aeb76858d..d6797ac85ec2f 100644 --- a/source/extensions/resource_monitors/injected_resource/injected_resource_monitor.cc +++ 
b/source/extensions/resource_monitors/injected_resource/injected_resource_monitor.cc @@ -16,7 +16,7 @@ InjectedResourceMonitor::InjectedResourceMonitor( config, Server::Configuration::ResourceMonitorFactoryContext& context) : filename_(config.filename()), file_changed_(true), - watcher_(context.dispatcher().createFilesystemWatcher()), api_(context.api()) { + watcher_(context.mainThreadDispatcher().createFilesystemWatcher()), api_(context.api()) { watcher_->addWatch(filename_, Filesystem::Watcher::Events::MovedTo, [this](uint32_t) { onFileChanged(); }); } diff --git a/source/extensions/stat_sinks/wasm/config.cc b/source/extensions/stat_sinks/wasm/config.cc index 96c9814505683..be72fb3d18610 100644 --- a/source/extensions/stat_sinks/wasm/config.cc +++ b/source/extensions/stat_sinks/wasm/config.cc @@ -36,13 +36,13 @@ WasmSinkFactory::createStatsSink(const Protobuf::Message& proto_config, } return; } - wasm_sink->setSingleton( - Common::Wasm::getOrCreateThreadLocalPlugin(base_wasm, plugin, context.dispatcher())); + wasm_sink->setSingleton(Common::Wasm::getOrCreateThreadLocalPlugin( + base_wasm, plugin, context.mainThreadDispatcher())); }; if (!Common::Wasm::createWasm(plugin, context.scope().createScope(""), context.clusterManager(), - context.initManager(), context.dispatcher(), context.api(), - context.lifecycleNotifier(), remote_data_provider_, + context.initManager(), context.mainThreadDispatcher(), + context.api(), context.lifecycleNotifier(), remote_data_provider_, std::move(callback))) { throw Common::Wasm::WasmException( fmt::format("Unable to create Wasm Stat Sink {}", plugin->name_)); diff --git a/source/extensions/transport_sockets/tap/config.cc b/source/extensions/transport_sockets/tap/config.cc index f389c8633c22d..26255d00151d4 100644 --- a/source/extensions/transport_sockets/tap/config.cc +++ b/source/extensions/transport_sockets/tap/config.cc @@ -44,9 +44,10 @@ Network::TransportSocketFactoryPtr UpstreamTapSocketConfigFactory::createTranspo auto 
inner_transport_factory = inner_config_factory.createTransportSocketFactory(*inner_factory_config, context); return std::make_unique( - outer_config, std::make_unique(context.dispatcher().timeSource()), - context.admin(), context.singletonManager(), context.threadLocal(), context.dispatcher(), - std::move(inner_transport_factory)); + outer_config, + std::make_unique(context.mainThreadDispatcher().timeSource()), + context.admin(), context.singletonManager(), context.threadLocal(), + context.mainThreadDispatcher(), std::move(inner_transport_factory)); } Network::TransportSocketFactoryPtr DownstreamTapSocketConfigFactory::createTransportSocketFactory( @@ -63,9 +64,10 @@ Network::TransportSocketFactoryPtr DownstreamTapSocketConfigFactory::createTrans auto inner_transport_factory = inner_config_factory.createTransportSocketFactory( *inner_factory_config, context, server_names); return std::make_unique( - outer_config, std::make_unique(context.dispatcher().timeSource()), - context.admin(), context.singletonManager(), context.threadLocal(), context.dispatcher(), - std::move(inner_transport_factory)); + outer_config, + std::make_unique(context.mainThreadDispatcher().timeSource()), + context.admin(), context.singletonManager(), context.threadLocal(), + context.mainThreadDispatcher(), std::move(inner_transport_factory)); } ProtobufTypes::MessagePtr TapSocketConfigFactory::createEmptyConfigProto() { diff --git a/source/server/BUILD b/source/server/BUILD index b29bb1a84e333..d78e4c163193b 100644 --- a/source/server/BUILD +++ b/source/server/BUILD @@ -538,6 +538,14 @@ envoy_cc_library( ], ) +envoy_cc_library( + name = "factory_context_base_impl_lib", + hdrs = ["factory_context_base_impl.h"], + deps = [ + "//envoy/server:factory_context_interface", + ], +) + envoy_cc_library( name = "server_lib", srcs = ["server.cc"], diff --git a/source/server/api_listener_impl.h b/source/server/api_listener_impl.h index 6d2b41bcbd3fc..088ec965f21fc 100644 --- 
a/source/server/api_listener_impl.h +++ b/source/server/api_listener_impl.h @@ -112,7 +112,7 @@ class ApiListenerImplBase : public ApiListener, bool isHalfCloseEnabled() override { NOT_IMPLEMENTED_GCOVR_EXCL_LINE; } void close(Network::ConnectionCloseType) override {} Event::Dispatcher& dispatcher() override { - return parent_.parent_.factory_context_.dispatcher(); + return parent_.parent_.factory_context_.mainThreadDispatcher(); } uint64_t id() const override { return 12345; } void hashKey(std::vector&) const override {} diff --git a/source/server/config_validation/cluster_manager.cc b/source/server/config_validation/cluster_manager.cc index 16ab99e868fc0..742fb4bb3b5ed 100644 --- a/source/server/config_validation/cluster_manager.cc +++ b/source/server/config_validation/cluster_manager.cc @@ -11,8 +11,9 @@ namespace Upstream { ClusterManagerPtr ValidationClusterManagerFactory::clusterManagerFromProto( const envoy::config::bootstrap::v3::Bootstrap& bootstrap) { return std::make_unique( - bootstrap, *this, stats_, tls_, runtime_, local_info_, log_manager_, main_thread_dispatcher_, - admin_, validation_context_, api_, http_context_, grpc_context_, router_context_); + bootstrap, *this, stats_, tls_, context_.runtime(), local_info_, log_manager_, + context_.mainThreadDispatcher(), admin_, validation_context_, context_.api(), http_context_, + grpc_context_, router_context_); } CdsApiPtr ValidationClusterManagerFactory::createCds( diff --git a/source/server/factory_context_base_impl.h b/source/server/factory_context_base_impl.h new file mode 100644 index 0000000000000..4b56a110ccf21 --- /dev/null +++ b/source/server/factory_context_base_impl.h @@ -0,0 +1,49 @@ +#pragma once + +#include "envoy/server/factory_context.h" + +namespace Envoy { +namespace Server { + +class FactoryContextBaseImpl : public Configuration::FactoryContextBase { +public: + FactoryContextBaseImpl(const Server::Options& options, Event::Dispatcher& main_thread_dispatcher, + Api::Api& api, const 
LocalInfo::LocalInfo& local_info, + Server::Admin& admin, Runtime::Loader& runtime, + Singleton::Manager& singleton_manager, + ProtobufMessage::ValidationVisitor& validation_visitor, + Stats::Store& scope, ThreadLocal::Instance& local) + : options_(options), main_thread_dispatcher_(main_thread_dispatcher), api_(api), + local_info_(local_info), admin_(admin), runtime_(runtime), + singleton_manager_(singleton_manager), validation_visitor_(validation_visitor), + scope_(scope), thread_local_(local) {} + + // FactoryContextBase + const Options& options() override { return options_; }; + Event::Dispatcher& mainThreadDispatcher() override { return main_thread_dispatcher_; }; + Api::Api& api() override { return api_; }; + const LocalInfo::LocalInfo& localInfo() const override { return local_info_; }; + Server::Admin& admin() override { return admin_; }; + Envoy::Runtime::Loader& runtime() override { return runtime_; }; + Singleton::Manager& singletonManager() override { return singleton_manager_; }; + ProtobufMessage::ValidationVisitor& messageValidationVisitor() override { + return validation_visitor_; + }; + Stats::Scope& scope() override { return scope_; }; + ThreadLocal::SlotAllocator& threadLocal() override { return thread_local_; }; + +private: + const Server::Options& options_; + Event::Dispatcher& main_thread_dispatcher_; + Api::Api& api_; + const LocalInfo::LocalInfo& local_info_; + Server::Admin& admin_; + Runtime::Loader& runtime_; + Singleton::Manager& singleton_manager_; + ProtobufMessage::ValidationVisitor& validation_visitor_; + Stats::Store& scope_; + ThreadLocal::Instance& thread_local_; +}; + +} // namespace Server +} // namespace Envoy diff --git a/source/server/filter_chain_manager_impl.cc b/source/server/filter_chain_manager_impl.cc index f1a74a70e78cd..f99ed1f1858c0 100644 --- a/source/server/filter_chain_manager_impl.cc +++ b/source/server/filter_chain_manager_impl.cc @@ -69,8 +69,8 @@ Upstream::ClusterManager& 
PerFilterChainFactoryContextImpl::clusterManager() { return parent_context_.clusterManager(); } -Event::Dispatcher& PerFilterChainFactoryContextImpl::dispatcher() { - return parent_context_.dispatcher(); +Event::Dispatcher& PerFilterChainFactoryContextImpl::mainThreadDispatcher() { + return parent_context_.mainThreadDispatcher(); } const Server::Options& PerFilterChainFactoryContextImpl::options() { @@ -755,7 +755,7 @@ AccessLog::AccessLogManager& FactoryContextImpl::accessLogManager() { return server_.accessLogManager(); } Upstream::ClusterManager& FactoryContextImpl::clusterManager() { return server_.clusterManager(); } -Event::Dispatcher& FactoryContextImpl::dispatcher() { return server_.dispatcher(); } +Event::Dispatcher& FactoryContextImpl::mainThreadDispatcher() { return server_.dispatcher(); } const Server::Options& FactoryContextImpl::options() { return server_.options(); } Grpc::Context& FactoryContextImpl::grpcContext() { return server_.grpcContext(); } Router::Context& FactoryContextImpl::routerContext() { return server_.routerContext(); } diff --git a/source/server/filter_chain_manager_impl.h b/source/server/filter_chain_manager_impl.h index 8218e89777127..af3be80f7752f 100644 --- a/source/server/filter_chain_manager_impl.h +++ b/source/server/filter_chain_manager_impl.h @@ -55,7 +55,7 @@ class PerFilterChainFactoryContextImpl : public Configuration::FilterChainFactor // Configuration::FactoryContext AccessLog::AccessLogManager& accessLogManager() override; Upstream::ClusterManager& clusterManager() override; - Event::Dispatcher& dispatcher() override; + Event::Dispatcher& mainThreadDispatcher() override; const Server::Options& options() override; Network::DrainDecision& drainDecision() override; Grpc::Context& grpcContext() override; @@ -141,7 +141,7 @@ class FactoryContextImpl : public Configuration::FactoryContext { // Configuration::FactoryContext AccessLog::AccessLogManager& accessLogManager() override; Upstream::ClusterManager& clusterManager() 
override; - Event::Dispatcher& dispatcher() override; + Event::Dispatcher& mainThreadDispatcher() override; const Server::Options& options() override; Grpc::Context& grpcContext() override; Router::Context& routerContext() override; diff --git a/source/server/listener_impl.cc b/source/server/listener_impl.cc index c6a666477ce35..df53a49ec6535 100644 --- a/source/server/listener_impl.cc +++ b/source/server/listener_impl.cc @@ -223,7 +223,9 @@ AccessLog::AccessLogManager& ListenerFactoryContextBaseImpl::accessLogManager() Upstream::ClusterManager& ListenerFactoryContextBaseImpl::clusterManager() { return server_.clusterManager(); } -Event::Dispatcher& ListenerFactoryContextBaseImpl::dispatcher() { return server_.dispatcher(); } +Event::Dispatcher& ListenerFactoryContextBaseImpl::mainThreadDispatcher() { + return server_.dispatcher(); +} const Server::Options& ListenerFactoryContextBaseImpl::options() { return server_.options(); } Grpc::Context& ListenerFactoryContextBaseImpl::grpcContext() { return server_.grpcContext(); } bool ListenerFactoryContextBaseImpl::healthCheckFailed() { return server_.healthCheckFailed(); } @@ -631,8 +633,8 @@ AccessLog::AccessLogManager& PerListenerFactoryContextImpl::accessLogManager() { Upstream::ClusterManager& PerListenerFactoryContextImpl::clusterManager() { return listener_factory_context_base_->clusterManager(); } -Event::Dispatcher& PerListenerFactoryContextImpl::dispatcher() { - return listener_factory_context_base_->dispatcher(); +Event::Dispatcher& PerListenerFactoryContextImpl::mainThreadDispatcher() { + return listener_factory_context_base_->mainThreadDispatcher(); } const Server::Options& PerListenerFactoryContextImpl::options() { return listener_factory_context_base_->options(); diff --git a/source/server/listener_impl.h b/source/server/listener_impl.h index 0114ff9c9e33e..f621731d7d632 100644 --- a/source/server/listener_impl.h +++ b/source/server/listener_impl.h @@ -104,7 +104,7 @@ class ListenerFactoryContextBaseImpl 
final : public Configuration::FactoryContex Server::DrainManagerPtr drain_manager); AccessLog::AccessLogManager& accessLogManager() override; Upstream::ClusterManager& clusterManager() override; - Event::Dispatcher& dispatcher() override; + Event::Dispatcher& mainThreadDispatcher() override; const Server::Options& options() override; Network::DrainDecision& drainDecision() override; Grpc::Context& grpcContext() override; @@ -177,7 +177,7 @@ class PerListenerFactoryContextImpl : public Configuration::ListenerFactoryConte // FactoryContext AccessLog::AccessLogManager& accessLogManager() override; Upstream::ClusterManager& clusterManager() override; - Event::Dispatcher& dispatcher() override; + Event::Dispatcher& mainThreadDispatcher() override; const Options& options() override; Network::DrainDecision& drainDecision() override; Grpc::Context& grpcContext() override; diff --git a/source/server/resource_monitor_config_impl.h b/source/server/resource_monitor_config_impl.h index 03ef1c0170941..5bfc8d521e69f 100644 --- a/source/server/resource_monitor_config_impl.h +++ b/source/server/resource_monitor_config_impl.h @@ -14,7 +14,7 @@ class ResourceMonitorFactoryContextImpl : public ResourceMonitorFactoryContext { : dispatcher_(dispatcher), options_(options), api_(api), validation_visitor_(validation_visitor) {} - Event::Dispatcher& dispatcher() override { return dispatcher_; } + Event::Dispatcher& mainThreadDispatcher() override { return dispatcher_; } const Server::Options& options() override { return options_; } diff --git a/source/server/server.h b/source/server/server.h index 9234f6c2ae7b7..d855b5ae46ecd 100644 --- a/source/server/server.h +++ b/source/server/server.h @@ -174,7 +174,7 @@ class ServerFactoryContextImpl : public Configuration::ServerFactoryContext, // Configuration::ServerFactoryContext Upstream::ClusterManager& clusterManager() override { return server_.clusterManager(); } - Event::Dispatcher& dispatcher() override { return server_.dispatcher(); } + 
Event::Dispatcher& mainThreadDispatcher() override { return server_.dispatcher(); } const Server::Options& options() override { return server_.options(); } const LocalInfo::LocalInfo& localInfo() const override { return server_.localInfo(); } ProtobufMessage::ValidationContext& messageValidationContext() override { diff --git a/source/server/transport_socket_config_impl.h b/source/server/transport_socket_config_impl.h index 600c2e9386826..7a94bd110cecf 100644 --- a/source/server/transport_socket_config_impl.h +++ b/source/server/transport_socket_config_impl.h @@ -40,7 +40,7 @@ class TransportSocketFactoryContextImpl : public TransportSocketFactoryContext { } Upstream::ClusterManager& clusterManager() override { return cluster_manager_; } const LocalInfo::LocalInfo& localInfo() const override { return local_info_; } - Event::Dispatcher& dispatcher() override { return dispatcher_; } + Event::Dispatcher& mainThreadDispatcher() override { return dispatcher_; } Stats::Store& stats() override { return stats_; } Init::Manager& initManager() override { ASSERT(init_manager_ != nullptr); diff --git a/test/common/secret/secret_manager_impl_test.cc b/test/common/secret/secret_manager_impl_test.cc index 15fc8fa59421b..a73f143ba8613 100644 --- a/test/common/secret/secret_manager_impl_test.cc +++ b/test/common/secret/secret_manager_impl_test.cc @@ -279,7 +279,7 @@ TEST_F(SecretManagerImplTest, DeduplicateDynamicTlsCertificateSecretProvider) { })); EXPECT_CALL(secret_context, stats()).WillRepeatedly(ReturnRef(stats)); EXPECT_CALL(secret_context, initManager()).WillRepeatedly(ReturnRef(init_manager)); - EXPECT_CALL(secret_context, dispatcher()).WillRepeatedly(ReturnRef(dispatcher)); + EXPECT_CALL(secret_context, mainThreadDispatcher()).WillRepeatedly(ReturnRef(dispatcher)); EXPECT_CALL(secret_context, localInfo()).WillRepeatedly(ReturnRef(local_info)); envoy::config::core::v3::ConfigSource config_source; @@ -362,7 +362,7 @@ TEST_F(SecretManagerImplTest, 
SdsDynamicSecretUpdateSuccess) { })); EXPECT_CALL(secret_context, stats()).WillOnce(ReturnRef(stats)); EXPECT_CALL(secret_context, initManager()).WillRepeatedly(ReturnRef(init_manager)); - EXPECT_CALL(secret_context, dispatcher()).WillRepeatedly(ReturnRef(*dispatcher_)); + EXPECT_CALL(secret_context, mainThreadDispatcher()).WillRepeatedly(ReturnRef(*dispatcher_)); EXPECT_CALL(secret_context, localInfo()).WillOnce(ReturnRef(local_info)); EXPECT_CALL(secret_context, api()).WillRepeatedly(ReturnRef(*api_)); @@ -408,7 +408,7 @@ TEST_F(SecretManagerImplTest, SdsDynamicGenericSecret) { Init::TargetHandlePtr init_target_handle; NiceMock init_watcher; - EXPECT_CALL(secret_context, dispatcher()).WillRepeatedly(ReturnRef(*dispatcher_)); + EXPECT_CALL(secret_context, mainThreadDispatcher()).WillRepeatedly(ReturnRef(*dispatcher_)); EXPECT_CALL(secret_context, messageValidationVisitor()).WillOnce(ReturnRef(validation_visitor)); EXPECT_CALL(secret_context, stats()).WillOnce(ReturnRef(stats)); EXPECT_CALL(secret_context, initManager()).WillRepeatedly(ReturnRef(init_manager)); @@ -461,7 +461,7 @@ TEST_F(SecretManagerImplTest, ConfigDumpHandler) { })); EXPECT_CALL(secret_context, stats()).WillRepeatedly(ReturnRef(stats)); EXPECT_CALL(secret_context, initManager()).WillRepeatedly(ReturnRef(init_manager)); - EXPECT_CALL(secret_context, dispatcher()).WillRepeatedly(ReturnRef(dispatcher)); + EXPECT_CALL(secret_context, mainThreadDispatcher()).WillRepeatedly(ReturnRef(dispatcher)); EXPECT_CALL(secret_context, localInfo()).WillRepeatedly(ReturnRef(local_info)); auto secret_provider = @@ -731,7 +731,7 @@ TEST_F(SecretManagerImplTest, ConfigDumpHandlerWarmingSecrets) { })); EXPECT_CALL(secret_context, stats()).WillRepeatedly(ReturnRef(stats)); EXPECT_CALL(secret_context, initManager()).WillRepeatedly(ReturnRef(init_manager)); - EXPECT_CALL(secret_context, dispatcher()).WillRepeatedly(ReturnRef(dispatcher)); + EXPECT_CALL(secret_context, 
mainThreadDispatcher()).WillRepeatedly(ReturnRef(dispatcher)); EXPECT_CALL(secret_context, localInfo()).WillRepeatedly(ReturnRef(local_info)); auto secret_provider = @@ -879,7 +879,7 @@ TEST_F(SecretManagerImplTest, ConfigDumpHandlerStaticSecrets) { })); EXPECT_CALL(secret_context, stats()).WillRepeatedly(ReturnRef(stats)); EXPECT_CALL(secret_context, initManager()).WillRepeatedly(ReturnRef(init_manager)); - EXPECT_CALL(secret_context, dispatcher()).WillRepeatedly(ReturnRef(dispatcher)); + EXPECT_CALL(secret_context, mainThreadDispatcher()).WillRepeatedly(ReturnRef(dispatcher)); EXPECT_CALL(secret_context, localInfo()).WillRepeatedly(ReturnRef(local_info)); const std::string tls_certificate = @@ -956,7 +956,7 @@ TEST_F(SecretManagerImplTest, ConfigDumpHandlerStaticValidationContext) { })); EXPECT_CALL(secret_context, stats()).WillRepeatedly(ReturnRef(stats)); EXPECT_CALL(secret_context, initManager()).WillRepeatedly(ReturnRef(init_manager)); - EXPECT_CALL(secret_context, dispatcher()).WillRepeatedly(ReturnRef(dispatcher)); + EXPECT_CALL(secret_context, mainThreadDispatcher()).WillRepeatedly(ReturnRef(dispatcher)); EXPECT_CALL(secret_context, localInfo()).WillRepeatedly(ReturnRef(local_info)); const std::string validation_context = @@ -1004,7 +1004,7 @@ TEST_F(SecretManagerImplTest, ConfigDumpHandlerStaticSessionTicketsContext) { })); EXPECT_CALL(secret_context, stats()).WillRepeatedly(ReturnRef(stats)); EXPECT_CALL(secret_context, initManager()).WillRepeatedly(ReturnRef(init_manager)); - EXPECT_CALL(secret_context, dispatcher()).WillRepeatedly(ReturnRef(dispatcher)); + EXPECT_CALL(secret_context, mainThreadDispatcher()).WillRepeatedly(ReturnRef(dispatcher)); EXPECT_CALL(secret_context, localInfo()).WillRepeatedly(ReturnRef(local_info)); const std::string stek_context = @@ -1085,7 +1085,7 @@ TEST_F(SecretManagerImplTest, SdsDynamicSecretPrivateKeyProviderUpdateSuccess) { })); EXPECT_CALL(secret_context, stats()).WillOnce(ReturnRef(stats)); 
EXPECT_CALL(secret_context, initManager()).WillRepeatedly(ReturnRef(init_manager)); - EXPECT_CALL(secret_context, dispatcher()).WillRepeatedly(ReturnRef(*dispatcher_)); + EXPECT_CALL(secret_context, mainThreadDispatcher()).WillRepeatedly(ReturnRef(*dispatcher_)); EXPECT_CALL(secret_context, localInfo()).WillOnce(ReturnRef(local_info)); EXPECT_CALL(secret_context, api()).WillRepeatedly(ReturnRef(*api_)); diff --git a/test/extensions/filters/http/jwt_authn/filter_config_test.cc b/test/extensions/filters/http/jwt_authn/filter_config_test.cc index c651c932e6db6..a3c8343fc686c 100644 --- a/test/extensions/filters/http/jwt_authn/filter_config_test.cc +++ b/test/extensions/filters/http/jwt_authn/filter_config_test.cc @@ -175,7 +175,8 @@ TEST(HttpJwtAuthnFilterConfigTest, VerifyTLSLifetime) { // The threadLocal, dispatcher and api that are used by the filter config, actually belong to // the server factory context that who's lifetime is longer. We simulate that by returning // their instances from outside the scope. 
- ON_CALL(context, dispatcher()).WillByDefault(ReturnRef(server_context.dispatcher())); + ON_CALL(context, mainThreadDispatcher()) + .WillByDefault(ReturnRef(server_context.mainThreadDispatcher())); ON_CALL(context, api()).WillByDefault(ReturnRef(server_context.api())); ON_CALL(context, threadLocal()).WillByDefault(ReturnRef(server_context.threadLocal())); diff --git a/test/extensions/filters/http/oauth2/filter_test.cc b/test/extensions/filters/http/oauth2/filter_test.cc index d232adb224d24..dc4f4402cd79d 100644 --- a/test/extensions/filters/http/oauth2/filter_test.cc +++ b/test/extensions/filters/http/oauth2/filter_test.cc @@ -170,7 +170,7 @@ TEST_F(OAuth2Test, SdsDynamicGenericSecret) { NiceMock dispatcher; EXPECT_CALL(secret_context, localInfo()).WillRepeatedly(ReturnRef(local_info)); EXPECT_CALL(secret_context, api()).WillRepeatedly(ReturnRef(*api)); - EXPECT_CALL(secret_context, dispatcher()).WillRepeatedly(ReturnRef(dispatcher)); + EXPECT_CALL(secret_context, mainThreadDispatcher()).WillRepeatedly(ReturnRef(dispatcher)); EXPECT_CALL(secret_context, stats()).WillRepeatedly(ReturnRef(stats)); EXPECT_CALL(secret_context, initManager()).WillRepeatedly(ReturnRef(init_manager)); EXPECT_CALL(init_manager, add(_)) diff --git a/test/extensions/filters/http/wasm/config_test.cc b/test/extensions/filters/http/wasm/config_test.cc index 52d3a737c98ff..8e3305fc51033 100644 --- a/test/extensions/filters/http/wasm/config_test.cc +++ b/test/extensions/filters/http/wasm/config_test.cc @@ -38,7 +38,7 @@ class WasmFilterConfigTest : public Event::TestUsingSimulatedTime, ON_CALL(context_, listenerMetadata()).WillByDefault(ReturnRef(listener_metadata_)); EXPECT_CALL(context_, initManager()).WillRepeatedly(ReturnRef(init_manager_)); ON_CALL(context_, clusterManager()).WillByDefault(ReturnRef(cluster_manager_)); - ON_CALL(context_, dispatcher()).WillByDefault(ReturnRef(dispatcher_)); + ON_CALL(context_, mainThreadDispatcher()).WillByDefault(ReturnRef(dispatcher_)); } void SetUp() 
override { Envoy::Extensions::Common::Wasm::clearCodeCacheForTesting(); } diff --git a/test/extensions/filters/network/common/fuzz/uber_readfilter.cc b/test/extensions/filters/network/common/fuzz/uber_readfilter.cc index ca021727c2d17..cb906f81c2680 100644 --- a/test/extensions/filters/network/common/fuzz/uber_readfilter.cc +++ b/test/extensions/filters/network/common/fuzz/uber_readfilter.cc @@ -127,7 +127,7 @@ void UberFilterFuzzer::fuzz( case test::extensions::filters::network::Action::kAdvanceTime: { time_source_.advanceTimeAndRun( std::chrono::milliseconds(action.advance_time().milliseconds()), - factory_context_.dispatcher(), Event::Dispatcher::RunType::NonBlock); + factory_context_.mainThreadDispatcher(), Event::Dispatcher::RunType::NonBlock); break; } default: { diff --git a/test/extensions/filters/network/common/fuzz/uber_writefilter.cc b/test/extensions/filters/network/common/fuzz/uber_writefilter.cc index 6772fe995e22c..941d5b121acc9 100644 --- a/test/extensions/filters/network/common/fuzz/uber_writefilter.cc +++ b/test/extensions/filters/network/common/fuzz/uber_writefilter.cc @@ -103,7 +103,7 @@ void UberWriteFilterFuzzer::fuzz( case test::extensions::filters::network::WriteAction::kAdvanceTime: { time_source_.advanceTimeAndRun( std::chrono::milliseconds(action.advance_time().milliseconds()), - factory_context_.dispatcher(), Event::Dispatcher::RunType::NonBlock); + factory_context_.mainThreadDispatcher(), Event::Dispatcher::RunType::NonBlock); break; } default: { diff --git a/test/extensions/filters/network/common/fuzz/utils/fakes.h b/test/extensions/filters/network/common/fuzz/utils/fakes.h index e9edf5bb5bba0..44193a979a9e0 100644 --- a/test/extensions/filters/network/common/fuzz/utils/fakes.h +++ b/test/extensions/filters/network/common/fuzz/utils/fakes.h @@ -13,7 +13,7 @@ class FakeFactoryContext : public MockFactoryContext { } AccessLog::AccessLogManager& accessLogManager() override { return access_log_manager_; } Upstream::ClusterManager& 
clusterManager() override { return cluster_manager_; } - Event::Dispatcher& dispatcher() override { return *dispatcher_; } + Event::Dispatcher& mainThreadDispatcher() override { return *dispatcher_; } const Network::DrainDecision& drainDecision() override { return drain_manager_; } Init::Manager& initManager() override { return init_manager_; } ServerLifecycleNotifier& lifecycleNotifier() override { return lifecycle_notifier_; } diff --git a/test/extensions/filters/network/dubbo_proxy/conn_manager_test.cc b/test/extensions/filters/network/dubbo_proxy/conn_manager_test.cc index 56c60e93bb3f9..8cdbe6162927f 100644 --- a/test/extensions/filters/network/dubbo_proxy/conn_manager_test.cc +++ b/test/extensions/filters/network/dubbo_proxy/conn_manager_test.cc @@ -120,7 +120,7 @@ class ConnectionManagerTest : public testing::Test { filter_callbacks_.connection_.dispatcher_.clearDeferredDeleteList(); } - TimeSource& timeSystem() { return factory_context_.dispatcher().timeSource(); } + TimeSource& timeSystem() { return factory_context_.mainThreadDispatcher().timeSource(); } void initializeFilter() { initializeFilter(""); } diff --git a/test/extensions/filters/network/http_connection_manager/config_test_base.h b/test/extensions/filters/network/http_connection_manager/config_test_base.h index 995cb9842e146..9241da2bff892 100644 --- a/test/extensions/filters/network/http_connection_manager/config_test_base.h +++ b/test/extensions/filters/network/http_connection_manager/config_test_base.h @@ -34,7 +34,7 @@ parseHttpConnectionManagerFromYaml(const std::string& yaml) { class HttpConnectionManagerConfigTest : public testing::Test { public: NiceMock context_; - Http::SlowDateProviderImpl date_provider_{context_.dispatcher().timeSource()}; + Http::SlowDateProviderImpl date_provider_{context_.mainThreadDispatcher().timeSource()}; NiceMock route_config_provider_manager_; NiceMock scoped_routes_config_provider_manager_; NiceMock http_tracer_manager_; diff --git 
a/test/extensions/filters/network/wasm/config_test.cc b/test/extensions/filters/network/wasm/config_test.cc index e77a38efad53e..9673c4d598a39 100644 --- a/test/extensions/filters/network/wasm/config_test.cc +++ b/test/extensions/filters/network/wasm/config_test.cc @@ -30,7 +30,7 @@ class WasmNetworkFilterConfigTest : public testing::TestWithParam { ON_CALL(context_, listenerMetadata()).WillByDefault(ReturnRef(listener_metadata_)); ON_CALL(context_, initManager()).WillByDefault(ReturnRef(init_manager_)); ON_CALL(context_, clusterManager()).WillByDefault(ReturnRef(cluster_manager_)); - ON_CALL(context_, dispatcher()).WillByDefault(ReturnRef(dispatcher_)); + ON_CALL(context_, mainThreadDispatcher()).WillByDefault(ReturnRef(dispatcher_)); } void SetUp() override { Envoy::Extensions::Common::Wasm::clearCodeCacheForTesting(); } diff --git a/test/extensions/resource_monitors/injected_resource/injected_resource_monitor_test.cc b/test/extensions/resource_monitors/injected_resource/injected_resource_monitor_test.cc index 400f97a4d11b2..5d05210ad4f11 100644 --- a/test/extensions/resource_monitors/injected_resource/injected_resource_monitor_test.cc +++ b/test/extensions/resource_monitors/injected_resource/injected_resource_monitor_test.cc @@ -26,7 +26,7 @@ class TestableInjectedResourceMonitor : public InjectedResourceMonitor { const envoy::extensions::resource_monitors::injected_resource::v3::InjectedResourceConfig& config, Server::Configuration::ResourceMonitorFactoryContext& context) - : InjectedResourceMonitor(config, context), dispatcher_(context.dispatcher()) {} + : InjectedResourceMonitor(config, context), dispatcher_(context.mainThreadDispatcher()) {} protected: void onFileChanged() override { diff --git a/test/extensions/transport_sockets/tls/context_impl_test.cc b/test/extensions/transport_sockets/tls/context_impl_test.cc index 5e2de45ee3819..0967e77d25b05 100644 --- a/test/extensions/transport_sockets/tls/context_impl_test.cc +++ 
b/test/extensions/transport_sockets/tls/context_impl_test.cc @@ -813,7 +813,7 @@ TEST_F(SslServerContextImplTicketTest, TicketKeySdsNotReady) { NiceMock cluster_manager; NiceMock init_manager; EXPECT_CALL(factory_context_, localInfo()).WillOnce(ReturnRef(local_info)); - EXPECT_CALL(factory_context_, dispatcher()).WillRepeatedly(ReturnRef(dispatcher)); + EXPECT_CALL(factory_context_, mainThreadDispatcher()).WillRepeatedly(ReturnRef(dispatcher)); // EXPECT_CALL(factory_context_, random()).WillOnce(ReturnRef(random)); EXPECT_CALL(factory_context_, stats()).WillOnce(ReturnRef(stats)); EXPECT_CALL(factory_context_, clusterManager()).WillOnce(ReturnRef(cluster_manager)); @@ -1219,7 +1219,7 @@ TEST_F(ClientContextConfigImplTest, SecretNotReady) { EXPECT_CALL(factory_context_, localInfo()).WillOnce(ReturnRef(local_info)); EXPECT_CALL(factory_context_, stats()).WillOnce(ReturnRef(stats)); EXPECT_CALL(factory_context_, initManager()).WillRepeatedly(ReturnRef(init_manager)); - EXPECT_CALL(factory_context_, dispatcher()).WillRepeatedly(ReturnRef(dispatcher)); + EXPECT_CALL(factory_context_, mainThreadDispatcher()).WillRepeatedly(ReturnRef(dispatcher)); auto sds_secret_configs = tls_context.mutable_common_tls_context()->mutable_tls_certificate_sds_secret_configs()->Add(); sds_secret_configs->set_name("abc.com"); @@ -1251,7 +1251,7 @@ TEST_F(ClientContextConfigImplTest, ValidationContextNotReady) { EXPECT_CALL(factory_context_, localInfo()).WillOnce(ReturnRef(local_info)); EXPECT_CALL(factory_context_, stats()).WillOnce(ReturnRef(stats)); EXPECT_CALL(factory_context_, initManager()).WillRepeatedly(ReturnRef(init_manager)); - EXPECT_CALL(factory_context_, dispatcher()).WillRepeatedly(ReturnRef(dispatcher)); + EXPECT_CALL(factory_context_, mainThreadDispatcher()).WillRepeatedly(ReturnRef(dispatcher)); auto sds_secret_configs = tls_context.mutable_common_tls_context()->mutable_validation_context_sds_secret_config(); sds_secret_configs->set_name("abc.com"); @@ -1557,7 +1557,7 @@ 
TEST_F(ServerContextConfigImplTest, SecretNotReady) { EXPECT_CALL(factory_context_, localInfo()).WillOnce(ReturnRef(local_info)); EXPECT_CALL(factory_context_, stats()).WillOnce(ReturnRef(stats)); EXPECT_CALL(factory_context_, initManager()).WillRepeatedly(ReturnRef(init_manager)); - EXPECT_CALL(factory_context_, dispatcher()).WillRepeatedly(ReturnRef(dispatcher)); + EXPECT_CALL(factory_context_, mainThreadDispatcher()).WillRepeatedly(ReturnRef(dispatcher)); auto sds_secret_configs = tls_context.mutable_common_tls_context()->mutable_tls_certificate_sds_secret_configs()->Add(); sds_secret_configs->set_name("abc.com"); @@ -1589,7 +1589,7 @@ TEST_F(ServerContextConfigImplTest, ValidationContextNotReady) { EXPECT_CALL(factory_context_, localInfo()).WillOnce(ReturnRef(local_info)); EXPECT_CALL(factory_context_, stats()).WillOnce(ReturnRef(stats)); EXPECT_CALL(factory_context_, initManager()).WillRepeatedly(ReturnRef(init_manager)); - EXPECT_CALL(factory_context_, dispatcher()).WillRepeatedly(ReturnRef(dispatcher)); + EXPECT_CALL(factory_context_, mainThreadDispatcher()).WillRepeatedly(ReturnRef(dispatcher)); auto sds_secret_configs = tls_context.mutable_common_tls_context()->mutable_validation_context_sds_secret_config(); sds_secret_configs->set_name("abc.com"); diff --git a/test/extensions/transport_sockets/tls/ssl_socket_test.cc b/test/extensions/transport_sockets/tls/ssl_socket_test.cc index db8cd6b6cec52..24b2e1422c7ee 100644 --- a/test/extensions/transport_sockets/tls/ssl_socket_test.cc +++ b/test/extensions/transport_sockets/tls/ssl_socket_test.cc @@ -4720,7 +4720,7 @@ TEST_P(SslSocketTest, DownstreamNotReadySslSocket) { testing::NiceMock factory_context; NiceMock init_manager; NiceMock dispatcher; - EXPECT_CALL(factory_context, dispatcher()).WillRepeatedly(ReturnRef(dispatcher)); + EXPECT_CALL(factory_context, mainThreadDispatcher()).WillRepeatedly(ReturnRef(dispatcher)); EXPECT_CALL(factory_context, localInfo()).WillOnce(ReturnRef(local_info)); 
EXPECT_CALL(factory_context, stats()).WillOnce(ReturnRef(stats_store)); EXPECT_CALL(factory_context, initManager()).WillRepeatedly(ReturnRef(init_manager)); @@ -4760,7 +4760,7 @@ TEST_P(SslSocketTest, UpstreamNotReadySslSocket) { EXPECT_CALL(factory_context, localInfo()).WillOnce(ReturnRef(local_info)); EXPECT_CALL(factory_context, stats()).WillOnce(ReturnRef(stats_store)); EXPECT_CALL(factory_context, initManager()).WillRepeatedly(ReturnRef(init_manager)); - EXPECT_CALL(factory_context, dispatcher()).WillRepeatedly(ReturnRef(dispatcher)); + EXPECT_CALL(factory_context, mainThreadDispatcher()).WillRepeatedly(ReturnRef(dispatcher)); envoy::extensions::transport_sockets::tls::v3::UpstreamTlsContext tls_context; auto sds_secret_configs = diff --git a/test/integration/clusters/custom_static_cluster.h b/test/integration/clusters/custom_static_cluster.h index 5570338047634..6daeb9220eda4 100644 --- a/test/integration/clusters/custom_static_cluster.h +++ b/test/integration/clusters/custom_static_cluster.h @@ -29,7 +29,7 @@ class CustomStaticCluster : public Upstream::ClusterImplBase { Stats::ScopePtr&& stats_scope, bool added_via_api, uint32_t priority, std::string address, uint32_t port) : ClusterImplBase(cluster, runtime, factory_context, std::move(stats_scope), added_via_api, - factory_context.dispatcher().timeSource()), + factory_context.mainThreadDispatcher().timeSource()), priority_(priority), address_(std::move(address)), port_(port), host_(makeHost()) {} InitializePhase initializePhase() const override { return InitializePhase::Primary; } diff --git a/test/integration/fake_resource_monitor.cc b/test/integration/fake_resource_monitor.cc index 9dc8e4b0060ab..85a11084f6e14 100644 --- a/test/integration/fake_resource_monitor.cc +++ b/test/integration/fake_resource_monitor.cc @@ -16,7 +16,7 @@ void FakeResourceMonitorFactory::onMonitorDestroyed(FakeResourceMonitor* monitor } Server::ResourceMonitorPtr FakeResourceMonitorFactory::createResourceMonitor( const 
Protobuf::Message&, Server::Configuration::ResourceMonitorFactoryContext& context) { - auto monitor = std::make_unique(context.dispatcher(), *this); + auto monitor = std::make_unique(context.mainThreadDispatcher(), *this); monitor_ = monitor.get(); return monitor; } diff --git a/test/mocks/server/factory_context.cc b/test/mocks/server/factory_context.cc index c8a3e082414cf..5e59724def957 100644 --- a/test/mocks/server/factory_context.cc +++ b/test/mocks/server/factory_context.cc @@ -20,7 +20,7 @@ MockFactoryContext::MockFactoryContext() ON_CALL(*this, getServerFactoryContext()).WillByDefault(ReturnRef(server_factory_context_)); ON_CALL(*this, accessLogManager()).WillByDefault(ReturnRef(access_log_manager_)); ON_CALL(*this, clusterManager()).WillByDefault(ReturnRef(cluster_manager_)); - ON_CALL(*this, dispatcher()).WillByDefault(ReturnRef(dispatcher_)); + ON_CALL(*this, mainThreadDispatcher()).WillByDefault(ReturnRef(dispatcher_)); ON_CALL(*this, drainDecision()).WillByDefault(ReturnRef(drain_manager_)); ON_CALL(*this, getTransportSocketFactoryContext()) .WillByDefault(ReturnRef(transport_socket_factory_context_)); diff --git a/test/mocks/server/factory_context.h b/test/mocks/server/factory_context.h index ec7e6f8659ef0..0de704bbee3c7 100644 --- a/test/mocks/server/factory_context.h +++ b/test/mocks/server/factory_context.h @@ -25,7 +25,7 @@ class MockFactoryContext : public virtual FactoryContext { MOCK_METHOD(TransportSocketFactoryContext&, getTransportSocketFactoryContext, (), (const)); MOCK_METHOD(AccessLog::AccessLogManager&, accessLogManager, ()); MOCK_METHOD(Upstream::ClusterManager&, clusterManager, ()); - MOCK_METHOD(Event::Dispatcher&, dispatcher, ()); + MOCK_METHOD(Event::Dispatcher&, mainThreadDispatcher, ()); MOCK_METHOD(const Server::Options&, options, ()); MOCK_METHOD(const Network::DrainDecision&, drainDecision, ()); MOCK_METHOD(bool, healthCheckFailed, ()); diff --git a/test/mocks/server/health_checker_factory_context.cc 
b/test/mocks/server/health_checker_factory_context.cc index f6a17d962e2ac..93d5611cdd0f7 100644 --- a/test/mocks/server/health_checker_factory_context.cc +++ b/test/mocks/server/health_checker_factory_context.cc @@ -14,7 +14,7 @@ using ::testing::ReturnRef; MockHealthCheckerFactoryContext::MockHealthCheckerFactoryContext() { event_logger_ = new testing::NiceMock(); ON_CALL(*this, cluster()).WillByDefault(ReturnRef(cluster_)); - ON_CALL(*this, dispatcher()).WillByDefault(ReturnRef(dispatcher_)); + ON_CALL(*this, mainThreadDispatcher()).WillByDefault(ReturnRef(dispatcher_)); ON_CALL(*this, random()).WillByDefault(ReturnRef(random_)); ON_CALL(*this, runtime()).WillByDefault(ReturnRef(runtime_)); ON_CALL(*this, eventLogger_()).WillByDefault(Return(event_logger_)); diff --git a/test/mocks/server/health_checker_factory_context.h b/test/mocks/server/health_checker_factory_context.h index 35b94285948fa..15ac903feff66 100644 --- a/test/mocks/server/health_checker_factory_context.h +++ b/test/mocks/server/health_checker_factory_context.h @@ -23,7 +23,7 @@ class MockHealthCheckerFactoryContext : public virtual HealthCheckerFactoryConte ~MockHealthCheckerFactoryContext() override; MOCK_METHOD(Upstream::Cluster&, cluster, ()); - MOCK_METHOD(Event::Dispatcher&, dispatcher, ()); + MOCK_METHOD(Event::Dispatcher&, mainThreadDispatcher, ()); MOCK_METHOD(Envoy::Random::RandomGenerator&, random, ()); MOCK_METHOD(Envoy::Runtime::Loader&, runtime, ()); MOCK_METHOD(Upstream::HealthCheckEventLogger*, eventLogger_, ()); diff --git a/test/mocks/server/instance.cc b/test/mocks/server/instance.cc index f19f81dc4afcd..3ea0f9ea53ad4 100644 --- a/test/mocks/server/instance.cc +++ b/test/mocks/server/instance.cc @@ -61,7 +61,7 @@ MockServerFactoryContext::MockServerFactoryContext() : singleton_manager_(new Singleton::ManagerImpl(Thread::threadFactoryForTest())), grpc_context_(scope_.symbolTable()), router_context_(scope_.symbolTable()) { ON_CALL(*this, 
clusterManager()).WillByDefault(ReturnRef(cluster_manager_)); - ON_CALL(*this, dispatcher()).WillByDefault(ReturnRef(dispatcher_)); + ON_CALL(*this, mainThreadDispatcher()).WillByDefault(ReturnRef(dispatcher_)); ON_CALL(*this, drainDecision()).WillByDefault(ReturnRef(drain_manager_)); ON_CALL(*this, localInfo()).WillByDefault(ReturnRef(local_info_)); ON_CALL(*this, runtime()).WillByDefault(ReturnRef(runtime_loader_)); diff --git a/test/mocks/server/instance.h b/test/mocks/server/instance.h index 455e82eebfec5..51858c0ba7f17 100644 --- a/test/mocks/server/instance.h +++ b/test/mocks/server/instance.h @@ -149,7 +149,7 @@ class MockServerFactoryContext : public virtual ServerFactoryContext { ~MockServerFactoryContext() override; MOCK_METHOD(Upstream::ClusterManager&, clusterManager, ()); - MOCK_METHOD(Event::Dispatcher&, dispatcher, ()); + MOCK_METHOD(Event::Dispatcher&, mainThreadDispatcher, ()); MOCK_METHOD(const Server::Options&, options, ()); MOCK_METHOD(const Network::DrainDecision&, drainDecision, ()); MOCK_METHOD(const LocalInfo::LocalInfo&, localInfo, (), (const)); diff --git a/test/mocks/server/listener_factory_context.cc b/test/mocks/server/listener_factory_context.cc index bf2cc8992247c..fd8c0e047d130 100644 --- a/test/mocks/server/listener_factory_context.cc +++ b/test/mocks/server/listener_factory_context.cc @@ -20,7 +20,7 @@ MockListenerFactoryContext::MockListenerFactoryContext() ON_CALL(*this, getServerFactoryContext()).WillByDefault(ReturnRef(server_factory_context_)); ON_CALL(*this, accessLogManager()).WillByDefault(ReturnRef(access_log_manager_)); ON_CALL(*this, clusterManager()).WillByDefault(ReturnRef(cluster_manager_)); - ON_CALL(*this, dispatcher()).WillByDefault(ReturnRef(dispatcher_)); + ON_CALL(*this, mainThreadDispatcher()).WillByDefault(ReturnRef(dispatcher_)); ON_CALL(*this, drainDecision()).WillByDefault(ReturnRef(drain_manager_)); ON_CALL(*this, initManager()).WillByDefault(ReturnRef(init_manager_)); ON_CALL(*this, 
lifecycleNotifier()).WillByDefault(ReturnRef(lifecycle_notifier_)); diff --git a/test/mocks/server/listener_factory_context.h b/test/mocks/server/listener_factory_context.h index 095aad5931dcb..c78138e477def 100644 --- a/test/mocks/server/listener_factory_context.h +++ b/test/mocks/server/listener_factory_context.h @@ -26,7 +26,7 @@ class MockListenerFactoryContext : public ListenerFactoryContext { MOCK_METHOD(TransportSocketFactoryContext&, getTransportSocketFactoryContext, (), (const)); MOCK_METHOD(AccessLog::AccessLogManager&, accessLogManager, ()); MOCK_METHOD(Upstream::ClusterManager&, clusterManager, ()); - MOCK_METHOD(Event::Dispatcher&, dispatcher, ()); + MOCK_METHOD(Event::Dispatcher&, mainThreadDispatcher, ()); MOCK_METHOD(const Server::Options&, options, ()); MOCK_METHOD(const Network::DrainDecision&, drainDecision, ()); MOCK_METHOD(bool, healthCheckFailed, ()); diff --git a/test/mocks/server/transport_socket_factory_context.h b/test/mocks/server/transport_socket_factory_context.h index ee98720c8d428..fe31909626259 100644 --- a/test/mocks/server/transport_socket_factory_context.h +++ b/test/mocks/server/transport_socket_factory_context.h @@ -27,7 +27,7 @@ class MockTransportSocketFactoryContext : public TransportSocketFactoryContext { MOCK_METHOD(Stats::Scope&, scope, ()); MOCK_METHOD(Upstream::ClusterManager&, clusterManager, ()); MOCK_METHOD(const LocalInfo::LocalInfo&, localInfo, (), (const)); - MOCK_METHOD(Event::Dispatcher&, dispatcher, ()); + MOCK_METHOD(Event::Dispatcher&, mainThreadDispatcher, ()); MOCK_METHOD(const Server::Options&, options, ()); MOCK_METHOD(Envoy::Random::RandomGenerator&, random, ()); MOCK_METHOD(Stats::Store&, stats, ()); diff --git a/test/server/overload_manager_impl_test.cc b/test/server/overload_manager_impl_test.cc index 60b2214487c8e..0886e0181d5ff 100644 --- a/test/server/overload_manager_impl_test.cc +++ b/test/server/overload_manager_impl_test.cc @@ -99,7 +99,7 @@ class FakeResourceMonitorFactory : public 
Server::Configuration::ResourceMonitor Server::ResourceMonitorPtr createResourceMonitor(const Protobuf::Message&, Server::Configuration::ResourceMonitorFactoryContext& context) override { - auto monitor = std::make_unique(context.dispatcher()); + auto monitor = std::make_unique(context.mainThreadDispatcher()); monitor_ = monitor.get(); return monitor; } diff --git a/test/tools/router_check/router.cc b/test/tools/router_check/router.cc index eb5f1853e3736..3dedd6238e57a 100644 --- a/test/tools/router_check/router.cc +++ b/test/tools/router_check/router.cc @@ -243,9 +243,9 @@ bool RouterCheckTool::compareEntries(const std::string& expected_routes) { headers_finalized_ = false; auto connection_info_provider = std::make_shared( nullptr, Network::Utility::getCanonicalIpv4LoopbackAddress()); - Envoy::StreamInfo::StreamInfoImpl stream_info(Envoy::Http::Protocol::Http11, - factory_context_->dispatcher().timeSource(), - connection_info_provider); + Envoy::StreamInfo::StreamInfoImpl stream_info( + Envoy::Http::Protocol::Http11, factory_context_->mainThreadDispatcher().timeSource(), + connection_info_provider); ToolConfig tool_config = ToolConfig::create(check_config); tool_config.route_ = config_->route(*tool_config.request_headers_, stream_info, tool_config.random_value_); From 73643d9b270f0b7c373b09dbe015ed44987b8019 Mon Sep 17 00:00:00 2001 From: phlax Date: Fri, 17 Sep 2021 07:54:22 +0100 Subject: [PATCH 060/121] deps: Bump curl -> 7.79.0 (#18071) Signed-off-by: Ryan Northey Signed-off-by: gayang --- bazel/repository_locations.bzl | 6 +++--- test/dependencies/curl_test.cc | 8 -------- 2 files changed, 3 insertions(+), 11 deletions(-) diff --git a/bazel/repository_locations.bzl b/bazel/repository_locations.bzl index f088dfd3d05f4..d76537a00b84b 100644 --- a/bazel/repository_locations.bzl +++ b/bazel/repository_locations.bzl @@ -790,8 +790,8 @@ REPOSITORY_LOCATIONS_SPEC = dict( project_name = "curl", project_desc = "Library for transferring data with URLs", project_url = 
"https://curl.haxx.se", - version = "7.77.0", - sha256 = "b0a3428acb60fa59044c4d0baae4e4fc09ae9af1d8a3aa84b2e3fbcd99841f77", + version = "7.79.0", + sha256 = "aff0c7c4a526d7ecc429d2f96263a85fa73e709877054d593d8af3d136858074", strip_prefix = "curl-{version}", urls = ["https://github.com/curl/curl/releases/download/curl-{underscore_version}/curl-{version}.tar.gz"], use_category = ["dataplane_ext", "observability_ext"], @@ -801,7 +801,7 @@ REPOSITORY_LOCATIONS_SPEC = dict( "envoy.grpc_credentials.aws_iam", "envoy.tracers.opencensus", ], - release_date = "2021-05-26", + release_date = "2021-09-15", cpe = "cpe:2.3:a:haxx:libcurl:*", ), com_googlesource_chromium_v8 = dict( diff --git a/test/dependencies/curl_test.cc b/test/dependencies/curl_test.cc index e046db65a95d4..6218a3dea66d7 100644 --- a/test/dependencies/curl_test.cc +++ b/test/dependencies/curl_test.cc @@ -27,15 +27,7 @@ TEST(CurlTest, BuiltWithExpectedFeatures) { EXPECT_NE(0, info->features & CURL_VERSION_HTTP2); EXPECT_EQ(0, info->features & CURL_VERSION_GSSAPI); EXPECT_EQ(0, info->features & CURL_VERSION_KERBEROS5); -#ifndef WIN32 EXPECT_NE(0, info->features & CURL_VERSION_UNIX_SOCKETS); -#else - // TODO(wrowe): correct to expected, when curl 7.72 and later is patched - // or fixed upstream to include `afunix.h` in place of `sys/un.h` on recent - // Windows SDKs (it may be necessary to be more specific because older - // SDKs did not provide `afunix.h`) - EXPECT_EQ(0, info->features & CURL_VERSION_UNIX_SOCKETS); -#endif EXPECT_EQ(0, info->features & CURL_VERSION_PSL); EXPECT_EQ(0, info->features & CURL_VERSION_HTTPS_PROXY); EXPECT_EQ(0, info->features & CURL_VERSION_MULTI_SSL); From dcce89fd3369e1f4b243e11440d5036c7788ae33 Mon Sep 17 00:00:00 2001 From: Shubham Patil Date: Fri, 17 Sep 2021 13:16:13 +0530 Subject: [PATCH 061/121] jwt_authn: make from_cookies JWT removal behaviour similar to from_params (#17985) Removal of params or cookies after authentication is not implemented as of today. 
authenticator.cc calls the removeJwt(...) if forward is set to false (default) and this leads to an assertion failures caused by NOT_IMPLEMENTED_GCOVR_EXCL_LINE. Changed removeJwt(...) for JwtCookieLocation to be empty, added test coverage and updated proto doc to call-out this caveat. Signed-off-by: Shubham Patil Signed-off-by: gayang --- api/envoy/extensions/filters/http/jwt_authn/v3/config.proto | 1 + source/extensions/filters/http/jwt_authn/extractor.cc | 1 - test/extensions/filters/http/jwt_authn/extractor_test.cc | 3 +++ 3 files changed, 4 insertions(+), 1 deletion(-) diff --git a/api/envoy/extensions/filters/http/jwt_authn/v3/config.proto b/api/envoy/extensions/filters/http/jwt_authn/v3/config.proto index 9718dbe0550ab..5bb6960e1c1b1 100644 --- a/api/envoy/extensions/filters/http/jwt_authn/v3/config.proto +++ b/api/envoy/extensions/filters/http/jwt_authn/v3/config.proto @@ -137,6 +137,7 @@ message JwtProvider { // If false, the JWT is removed in the request after a success verification. If true, the JWT is // not removed in the request. Default value is false. + // caveat: only works for from_header & has no effect for JWTs extracted through from_params & from_cookies. bool forward = 5; // Two fields below define where to extract the JWT from an HTTP request. diff --git a/source/extensions/filters/http/jwt_authn/extractor.cc b/source/extensions/filters/http/jwt_authn/extractor.cc index bfa03f0ac4f05..6ae48e09c6e07 100644 --- a/source/extensions/filters/http/jwt_authn/extractor.cc +++ b/source/extensions/filters/http/jwt_authn/extractor.cc @@ -119,7 +119,6 @@ class JwtCookieLocation : public JwtLocationBase { void removeJwt(Http::HeaderMap&) const override { // TODO(theshubhamp): remove JWT from cookies. 
- NOT_IMPLEMENTED_GCOVR_EXCL_LINE; } }; diff --git a/test/extensions/filters/http/jwt_authn/extractor_test.cc b/test/extensions/filters/http/jwt_authn/extractor_test.cc index 2adaf35e7e463..0289e909c4958 100644 --- a/test/extensions/filters/http/jwt_authn/extractor_test.cc +++ b/test/extensions/filters/http/jwt_authn/extractor_test.cc @@ -286,16 +286,19 @@ TEST_F(ExtractorTest, TestCookieToken) { EXPECT_EQ(tokens[0]->token(), "token-cookie-value"); EXPECT_TRUE(tokens[0]->isIssuerAllowed("issuer9")); EXPECT_FALSE(tokens[0]->isIssuerAllowed("issuer10")); + tokens[0]->removeJwt(headers); // only issuer9 has specified "token-cookie-2" cookie location. EXPECT_EQ(tokens[1]->token(), "token-cookie-value-2"); EXPECT_TRUE(tokens[1]->isIssuerAllowed("issuer9")); EXPECT_FALSE(tokens[1]->isIssuerAllowed("issuer10")); + tokens[1]->removeJwt(headers); // only issuer10 has specified "token-cookie-3" cookie location. EXPECT_EQ(tokens[2]->token(), "token-cookie-value-3"); EXPECT_TRUE(tokens[2]->isIssuerAllowed("issuer10")); EXPECT_FALSE(tokens[2]->isIssuerAllowed("issuer9")); + tokens[2]->removeJwt(headers); } // Test extracting multiple tokens. From 1e65d4fbfadfc7f617449ea7f53b7d35cf239e79 Mon Sep 17 00:00:00 2001 From: phlax Date: Fri, 17 Sep 2021 13:28:51 +0100 Subject: [PATCH 062/121] tooling: Validation tool cleanup (#18158) Commit Message: tooling: Validation tool cleanup Additional Description: This is a minor/cleanup PR to fix the docstrings in the dependency validation tool I have separated these changes here as i have another PR to work on this file, and the cleanups were making that PR hard to review. 
Signed-off-by: Ryan Northey Signed-off-by: gayang --- tools/dependency/validate.py | 89 ++++++++++++++++++------------------ 1 file changed, 44 insertions(+), 45 deletions(-) diff --git a/tools/dependency/validate.py b/tools/dependency/validate.py index 8cf6e8f816d84..f35c0b97f0b17 100755 --- a/tools/dependency/validate.py +++ b/tools/dependency/validate.py @@ -76,12 +76,12 @@ class DependencyInfo(object): def deps_by_use_category(self, use_category): """Find the set of external dependencies in a given use_category. - Args: - use_category: string providing use_category. + Args: + use_category: string providing use_category. - Returns: - Set of dependency identifiers that match use_category. - """ + Returns: + Set of dependency identifiers that match use_category. + """ return set( name for name, metadata in REPOSITORY_LOCATIONS_SPEC.items() if use_category in metadata['use_category']) @@ -89,13 +89,13 @@ def deps_by_use_category(self, use_category): def get_metadata(self, dependency): """Obtain repository metadata for a dependency. - Args: - dependency: string providing dependency identifier. + Args: + dependency: string providing dependency identifier. - Returns: - A dictionary with the repository metadata as defined in - bazel/repository_locations.bzl. - """ + Returns: + A dictionary with the repository metadata as defined in + bazel/repository_locations.bzl. + """ return REPOSITORY_LOCATIONS_SPEC.get(dependency) @@ -116,12 +116,12 @@ def __init__( def query_external_deps(self, *targets): """Query the build graph for transitive external dependencies. - Args: - targets: Bazel targets. + Args: + targets: Bazel targets. - Returns: - A set of dependency identifiers that are reachable from targets. - """ + Returns: + A set of dependency identifiers that are reachable from targets. 
+ """ deps_query = 'deps(set({}))'.format(' '.join(targets)) try: deps = subprocess.check_output(['bazel', 'query', deps_query], @@ -149,9 +149,9 @@ def query_external_deps(self, *targets): def list_extensions(self): """List all extensions. - Returns: - Dictionary items from source/extensions/extensions_build_config.bzl. - """ + Returns: + Dictionary items from source/extensions/extensions_build_config.bzl. + """ return extensions_build_config.EXTENSIONS.items() @@ -167,9 +167,9 @@ def __init__(self, dep_info, build_graph): def validate_build_graph_structure(self): """Validate basic assumptions about dependency relationship in the build graph. - Raises: - DependencyError: on a dependency validation error. - """ + Raises: + DependencyError: on a dependency validation error. + """ print('Validating build dependency structure...') queried_core_ext_deps = self._build_graph.query_external_deps( '//source/exe:envoy_main_common_with_core_extensions_lib', '//source/extensions/...') @@ -183,9 +183,9 @@ def validate_build_graph_structure(self): def validate_test_only_deps(self): """Validate that test-only dependencies aren't included in //source/... - Raises: - DependencyError: on a dependency validation error. - """ + Raises: + DependencyError: on a dependency validation error. + """ print('Validating test-only dependencies...') # Validate that //source doesn't depend on test_only queried_source_deps = self._build_graph.query_external_deps('//source/...') @@ -208,12 +208,12 @@ def validate_test_only_deps(self): def validate_data_plane_core_deps(self): """Validate dataplane_core dependencies. - Check that we at least tag as dataplane_core dependencies that match some - well-known targets for the data-plane. + Check that we at least tag as dataplane_core dependencies that match some + well-known targets for the data-plane. - Raises: - DependencyError: on a dependency validation error. - """ + Raises: + DependencyError: on a dependency validation error. 
+ """ print('Validating data-plane dependencies...') # Necessary but not sufficient for dataplane. With some refactoring we could # probably have more precise tagging of dataplane/controlplane/other deps in @@ -238,13 +238,12 @@ def validate_data_plane_core_deps(self): def validate_control_plane_deps(self): """Validate controlplane dependencies. - Check that we at least tag as controlplane dependencies that match some - well-known targets for - the control-plane. + Check that we at least tag as controlplane dependencies that match some + well-known targets for the control-plane. - Raises: - DependencyError: on a dependency validation error. - """ + Raises: + DependencyError: on a dependency validation error. + """ print('Validating control-plane dependencies...') # Necessary but not sufficient for controlplane. With some refactoring we could # probably have more precise tagging of dataplane/controlplane/other deps in @@ -265,13 +264,13 @@ def validate_control_plane_deps(self): def validate_extension_deps(self, name, target): """Validate that extensions are correctly declared for dataplane_ext and observability_ext. - Args: - name: extension name. - target: extension Bazel target. + Args: + name: extension name. + target: extension Bazel target. - Raises: - DependencyError: on a dependency validation error. - """ + Raises: + DependencyError: on a dependency validation error. + """ print(f'Validating extension {name} dependencies...') queried_deps = self._build_graph.query_external_deps(target) marginal_deps = queried_deps.difference(self._queried_core_deps) @@ -297,9 +296,9 @@ def validate_extension_deps(self, name, target): def validate_all(self): """Collection of all validations. - Raises: - DependencyError: on a dependency validation error. - """ + Raises: + DependencyError: on a dependency validation error. 
+ """ self.validate_build_graph_structure() self.validate_test_only_deps() self.validate_data_plane_core_deps() From 1c60c16c2f923d2bef1ed6db1334f6480391d37b Mon Sep 17 00:00:00 2001 From: Yuchen Dai Date: Fri, 17 Sep 2021 05:30:16 -0700 Subject: [PATCH 063/121] doc: fix curl command in http connect (#18156) Commit Message: curl somehow accepts `--x` while specifying proxy. It attempts to use proxy once and then not using proxy. curl version: 7.74.0 (x86_64-pc-linux-gnu) libcurl/7.74.0 Additional Description: tl;dr 2 request are initiated * Rebuilt URL to: 127.0.0.1:10000/ * Rebuilt URL to: http://hey/ ``` $ curl -vvv --x 127.0.0.1:10000 http://hey * Rebuilt URL to: 127.0.0.1:10000/ * Trying 127.0.0.1... * TCP_NODELAY set * Connected to 127.0.0.1 (127.0.0.1) port 10000 (#0) > GET / HTTP/1.1 > Host: 127.0.0.1:10000 > User-Agent: curl/7.58.0 > Accept: */* > < HTTP/1.1 200 OK < Content-type: text/plan < Date: Fri, 17 Sep 2021 04:54:08 GMT < Connection: keep-alive < Transfer-Encoding: chunked < Hello Node JS Server Response * Connection #0 to host 127.0.0.1 left intact * Rebuilt URL to: http://hey/ * Could not resolve host: hey * Closing connection 1 curl: (6) Could not resolve host: hey ``` Risk Level: NONE Testing: Docs Changes: Release Notes: Platform Specific Features: Signed-off-by: Yuchen Dai Signed-off-by: gayang --- configs/encapsulate_in_http1_connect.yaml | 2 +- configs/encapsulate_in_http2_connect.yaml | 2 +- configs/encapsulate_in_http2_post.yaml | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/configs/encapsulate_in_http1_connect.yaml b/configs/encapsulate_in_http1_connect.yaml index a11a997880327..f8f9a6bc4a668 100644 --- a/configs/encapsulate_in_http1_connect.yaml +++ b/configs/encapsulate_in_http1_connect.yaml @@ -1,7 +1,7 @@ # This configuration takes incoming data on port 10000 and encapsulates it in a CONNECT # request which is sent upstream port 10001. 
# It can be used to test TCP tunneling as described in docs/root/intro/arch_overview/http/upgrades.rst -# and running `curl --x 127.0.0.1:10000 https://www.google.com` +# and running `curl -x 127.0.0.1:10000 https://www.google.com` admin: address: diff --git a/configs/encapsulate_in_http2_connect.yaml b/configs/encapsulate_in_http2_connect.yaml index abe84ecc86e29..1f985457ab2dd 100644 --- a/configs/encapsulate_in_http2_connect.yaml +++ b/configs/encapsulate_in_http2_connect.yaml @@ -1,7 +1,7 @@ # This configuration takes incoming data on port 10000 and encapsulates it in a CONNECT # request which is sent upstream port 10001. # It can be used to test TCP tunneling as described in docs/root/intro/arch_overview/http/upgrades.rst -# and running `curl --x 127.0.0.1:10000 https://www.google.com` +# and running `curl -x 127.0.0.1:10000 https://www.google.com` admin: address: diff --git a/configs/encapsulate_in_http2_post.yaml b/configs/encapsulate_in_http2_post.yaml index 61353a97a886e..d3979c393ad7f 100644 --- a/configs/encapsulate_in_http2_post.yaml +++ b/configs/encapsulate_in_http2_post.yaml @@ -1,7 +1,7 @@ # This configuration takes incoming data on port 10000 and encapsulates it in a POST # request which is sent upstream port 10001. 
# It can be used to test TCP tunneling as described in docs/root/intro/arch_overview/http/upgrades.rst -# and running `curl --x 127.0.0.1:10000 https://www.google.com` +# and running `curl -x 127.0.0.1:10000 https://www.google.com` admin: address: From f1e0ab1229c87fa230abf706adda08e07e310ea0 Mon Sep 17 00:00:00 2001 From: phlax Date: Fri, 17 Sep 2021 13:33:45 +0100 Subject: [PATCH 064/121] dependabot: Updates (#18133) Commit Message: dependabot: Updates Additional Description: Risk Level: Testing: Docs Changes: Release Notes: Platform Specific Features: Signed-off-by: dependabot[bot] Signed-off-by: Ryan Northey Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Signed-off-by: gayang --- .github/actions/pr_notifier/requirements.txt | 14 +++++------ tools/base/requirements.txt | 26 ++++++++++---------- tools/dependency/requirements.txt | 6 ++--- 3 files changed, 23 insertions(+), 23 deletions(-) diff --git a/.github/actions/pr_notifier/requirements.txt b/.github/actions/pr_notifier/requirements.txt index ed12c964aeec4..79b8d5a17e855 100644 --- a/.github/actions/pr_notifier/requirements.txt +++ b/.github/actions/pr_notifier/requirements.txt @@ -64,8 +64,8 @@ chardet==4.0.0 \ --hash=sha256:f864054d66fd9118f2e67044ac8981a54775ec5b67aed0441892edb553d21da5 # via requests deprecated==1.2.13 \ - --hash=sha256:64756e3e14c8c5eea9795d93c524551432a0be75629f8f29e67ab8caf076c76d \ - --hash=sha256:43ac5335da90c31c24ba028af536a91d41d53f9e6901ddb021bcc572ce44e38d + --hash=sha256:43ac5335da90c31c24ba028af536a91d41d53f9e6901ddb021bcc572ce44e38d \ + --hash=sha256:64756e3e14c8c5eea9795d93c524551432a0be75629f8f29e67ab8caf076c76d # via pygithub idna==2.10 \ --hash=sha256:b307872f855b18632ce0c21c5e45be78c0ea7ae4c15c828c20788b26921eb3f6 \ @@ -78,7 +78,7 @@ pycparser==2.20 \ pygithub==1.55 \ --hash=sha256:1bbfff9372047ff3f21d5cd8e07720f3dbfdaf6462fcaed9d815f528f1ba7283 \ --hash=sha256:2caf0054ea079b71e539741ae56c5a95e073b81fa472ce222e81667381b9601b - # via 
-r .github/actions/pr_notifier/requirements.txt + # via -r requirements.in pyjwt==2.1.0 \ --hash=sha256:934d73fbba91b0483d3857d1aff50e96b2a892384ee2c17417ed3203f173fca1 \ --hash=sha256:fba44e7898bbca160a2b2b501f492824fc8382485d3a6f11ba5d0c1937ce6130 @@ -111,10 +111,10 @@ six==1.16.0 \ --hash=sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926 \ --hash=sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254 # via pynacl -slack-sdk==3.10.1 \ - --hash=sha256:f17b71a578e94204d9033bffded634475f4ca0a6274c6c7a4fd8a9cb0ac7cd8b \ - --hash=sha256:2b4dde7728eb4ff5a581025d204578ccff25a5d8f0fe11ae175e3ce6e074434f - # via -r .github/actions/pr_notifier/requirements.txt +slack_sdk==3.11.0 \ + --hash=sha256:4d9854ee158c3137cfe1ba587dc4b777b6881aee58436d8071f36bad842acbf4 \ + --hash=sha256:8dc858cd106b639191ee3dc38fb957e55ab8dd28c2cc22feafa1223ab2def646 + # via -r requirements.in urllib3==1.26.6 \ --hash=sha256:39fb8672126159acb139a7718dd10806104dec1e2f0f6c88aab05d17df10c8d4 \ --hash=sha256:f57b4c16c62fa2760b7e3d97c35b255512fb6b59a259730f36ba32ce9f8e342f diff --git a/tools/base/requirements.txt b/tools/base/requirements.txt index 9e737332415a1..bbd3e3c44c61f 100644 --- a/tools/base/requirements.txt +++ b/tools/base/requirements.txt @@ -634,28 +634,28 @@ snowballstemmer==2.1.0 \ --hash=sha256:b51b447bea85f9968c13b650126a888aabd4cb4463fca868ec596826325dedc2 \ --hash=sha256:e997baa4f2e9139951b6f4c631bad912dfd3c792467e2f03d7239464af90e914 # via sphinx +sphinx==4.2.0 \ + --hash=sha256:94078db9184491e15bce0a56d9186e0aec95f16ac20b12d00e06d4e36f1058a6 \ + --hash=sha256:98a535c62a4fcfcc362528592f69b26f7caec587d32cd55688db580be0287ae0 + # via + # -r requirements.in + # sphinx-copybutton + # sphinx-rtd-theme + # sphinx-tabs + # sphinxcontrib-httpdomain + # sphinxext-rediraffe sphinx-copybutton==0.4.0 \ --hash=sha256:4340d33c169dac6dd82dce2c83333412aa786a42dd01a81a8decac3b130dc8b0 \ 
--hash=sha256:8daed13a87afd5013c3a9af3575cc4d5bec052075ccd3db243f895c07a689386 # via -r requirements.in -sphinx-rtd-theme==0.5.2 \ - --hash=sha256:32bd3b5d13dc8186d7a42fc816a23d32e83a4827d7d9882948e7b837c232da5a \ - --hash=sha256:4a05bdbe8b1446d77a01e20a23ebc6777c74f43237035e76be89699308987d6f +sphinx-rtd-theme==1.0.0 \ + --hash=sha256:4d35a56f4508cfee4c4fb604373ede6feae2a306731d533f409ef5c3496fdbd8 \ + --hash=sha256:eec6d497e4c2195fa0e8b2016b337532b8a699a68bcb22a512870e16925c6a5c # via -r requirements.in sphinx-tabs==3.2.0 \ --hash=sha256:1e1b1846c80137bd81a78e4a69b02664b98b1e1da361beb30600b939dfc75065 \ --hash=sha256:33137914ed9b276e6a686d7a337310ee77b1dae316fdcbce60476913a152e0a4 # via -r requirements.in -sphinx==4.1.2 \ - --hash=sha256:3092d929cd807926d846018f2ace47ba2f3b671b309c7a89cd3306e80c826b13 \ - --hash=sha256:46d52c6cee13fec44744b8c01ed692c18a640f6910a725cbb938bc36e8d64544 - # via - # -r requirements.in - # sphinx-copybutton - # sphinx-rtd-theme - # sphinx-tabs - # sphinxcontrib-httpdomain - # sphinxext-rediraffe sphinxcontrib-applehelp==1.0.2 \ --hash=sha256:806111e5e962be97c29ec4c1e7fe277bfd19e9652fb1a4392105b43e01af885a \ --hash=sha256:a072735ec80e7675e3f432fcae8610ecf509c5f1869d17e2eecff44389cdbc58 diff --git a/tools/dependency/requirements.txt b/tools/dependency/requirements.txt index f2e21e84a3d6e..f741cb209c9fe 100644 --- a/tools/dependency/requirements.txt +++ b/tools/dependency/requirements.txt @@ -55,9 +55,9 @@ chardet==4.0.0 \ --hash=sha256:0d6f53a15db4120f2b08c94f11e7d93d2c911ee118b6b30a04ec3ee8310179fa \ --hash=sha256:f864054d66fd9118f2e67044ac8981a54775ec5b67aed0441892edb553d21da5 # via -r tools/dependency/requirements.txt -charset-normalizer==2.0.4 \ - --hash=sha256:0c8911edd15d19223366a194a513099a302055a962bca2cec0f54b8b63175d8b \ - --hash=sha256:f23667ebe1084be45f6ae0538e4a5a865206544097e4e8bbcacf42cd02a348f3 +charset-normalizer==2.0.5 \ + --hash=sha256:fa471a601dfea0f492e4f4fca035cd82155e65dc45c9b83bf4322dfab63755dd \ + 
--hash=sha256:7098e7e862f6370a2a8d1a6398cd359815c45d12626267652c3f13dec58e2367 # via requests colorama==0.4.4 \ --hash=sha256:5941b2b48a20143d2267e95b1c2a7603ce057ee39fd88e7329b0c292aa16869b \ From 37cf18e6465923a085de25e6e948c173c0ebcf42 Mon Sep 17 00:00:00 2001 From: phlax Date: Fri, 17 Sep 2021 14:03:54 +0100 Subject: [PATCH 065/121] bazel: Shift flaky test report to bazel (#18076) Signed-off-by: Ryan Northey Signed-off-by: gayang --- .azure-pipelines/pipelines.yml | 2 +- .github/dependabot.yml | 5 -- ci/do_ci.sh | 2 +- ci/flaky_test/BUILD | 17 ++++ ci/flaky_test/process_xml.py | 36 +++++--- ci/flaky_test/requirements.txt | 138 ------------------------------- ci/flaky_test/run_process_xml.sh | 13 --- ci/windows_ci_steps.sh | 2 +- tools/base/requirements.in | 1 + tools/base/requirements.txt | 5 ++ 10 files changed, 52 insertions(+), 169 deletions(-) create mode 100644 ci/flaky_test/BUILD delete mode 100644 ci/flaky_test/requirements.txt delete mode 100755 ci/flaky_test/run_process_xml.sh diff --git a/.azure-pipelines/pipelines.yml b/.azure-pipelines/pipelines.yml index 6bc20230bafe7..ba39bb03547bb 100644 --- a/.azure-pipelines/pipelines.yml +++ b/.azure-pipelines/pipelines.yml @@ -454,7 +454,7 @@ stages: testRunTitle: "macOS" condition: always() - - script: ./ci/flaky_test/run_process_xml.sh + - script: bazel run //ci/flaky_test:process_xml displayName: "Process Test Results" env: TEST_TMPDIR: $(Build.SourcesDirectory) diff --git a/.github/dependabot.yml b/.github/dependabot.yml index bf057a3add5a2..4d2c6b1592534 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -36,11 +36,6 @@ updates: schedule: interval: "daily" -- package-ecosystem: "pip" - directory: "/ci/flaky_test" - schedule: - interval: "daily" - - package-ecosystem: "docker" directory: "/ci" schedule: diff --git a/ci/do_ci.sh b/ci/do_ci.sh index 7e1350525a603..f8654b9a7682e 100755 --- a/ci/do_ci.sh +++ b/ci/do_ci.sh @@ -150,7 +150,7 @@ function bazel_contrib_binary_build() { function 
run_process_test_result() { if [[ -z "$CI_SKIP_PROCESS_TEST_RESULTS" ]] && [[ $(find "$TEST_TMPDIR" -name "*_attempt.xml" 2> /dev/null) ]]; then echo "running flaky test reporting script" - "${ENVOY_SRCDIR}"/ci/flaky_test/run_process_xml.sh "$CI_TARGET" + bazel run "${BAZEL_BUILD_OPTIONS[@]}" //ci/flaky_test:process_xml "$CI_TARGET" else echo "no flaky test results found" fi diff --git a/ci/flaky_test/BUILD b/ci/flaky_test/BUILD new file mode 100644 index 0000000000000..7cbc182ec9e0a --- /dev/null +++ b/ci/flaky_test/BUILD @@ -0,0 +1,17 @@ +load("@rules_python//python:defs.bzl", "py_binary") +load("//bazel:envoy_build_system.bzl", "envoy_package") +load("@base_pip3//:requirements.bzl", "requirement") + +licenses(["notice"]) # Apache 2 + +envoy_package() + +py_binary( + name = "process_xml", + srcs = ["process_xml.py"], + deps = [ + "@envoy_repo", + requirement("pygithub"), + requirement("slackclient"), + ], +) diff --git a/ci/flaky_test/process_xml.py b/ci/flaky_test/process_xml.py index 9eae5129275c8..943d710c4dbb3 100755 --- a/ci/flaky_test/process_xml.py +++ b/ci/flaky_test/process_xml.py @@ -1,17 +1,26 @@ #!/usr/bin/env python3 -import subprocess import os +import ssl +import subprocess +import sys +from typing import Iterable import xml.etree.ElementTree as ET + import slack from slack.errors import SlackApiError -import sys -import ssl + +import envoy_repo well_known_timeouts = [60, 300, 900, 3600] section_delimiter = "---------------------------------------------------------------------------------------------------\n" +def run_in_repo(command: Iterable) -> str: + """Run a command in the repo root""" + return subprocess.check_output(command, encoding="utf-8", cwd=envoy_repo.PATH) + + # Returns a boolean indicating if a test passed. 
def did_test_pass(file): tree = ET.parse(file) @@ -192,7 +201,7 @@ def get_git_info(CI_TARGET): elif os.getenv('BUILD_REASON'): ret += "Build reason: {}\n".format(os.environ['BUILD_REASON']) - output = subprocess.check_output(['git', 'log', '--format=%H', '-n', '1'], encoding='utf-8') + output = run_in_repo(['git', 'log', '--format=%H', '-n', '1']) ret += "Commmit: {}/commit/{}".format(os.environ['REPO_URI'], output) build_id = os.environ['BUILD_URI'].split('/')[-1] @@ -200,23 +209,23 @@ def get_git_info(CI_TARGET): ret += "\n" - remotes = subprocess.check_output(['git', 'remote'], encoding='utf-8').splitlines() + remotes = run_in_repo(['git', 'remote']).splitlines() if ("origin" in remotes): - output = subprocess.check_output(['git', 'remote', 'get-url', 'origin'], encoding='utf-8') + output = run_in_repo(['git', 'remote', 'get-url', 'origin']) ret += "Origin: {}".format(output.replace('.git', '')) if ("upstream" in remotes): - output = subprocess.check_output(['git', 'remote', 'get-url', 'upstream'], encoding='utf-8') + output = run_in_repo(['git', 'remote', 'get-url', 'upstream']) ret += "Upstream: {}".format(output.replace('.git', '')) - output = subprocess.check_output(['git', 'describe', '--all', '--always'], encoding='utf-8') + output = run_in_repo(['git', 'describe', '--all', '--always']) ret += "Latest ref: {}".format(output) ret += "\n" ret += "Last commit:\n" - output = subprocess.check_output(['git', 'show', '-s'], encoding='utf-8') + output = run_in_repo(['git', 'show', '-s']) for line in output.splitlines(): ret += "\t" + line + "\n" @@ -225,7 +234,7 @@ def get_git_info(CI_TARGET): return ret -if __name__ == "__main__": +def main(): CI_TARGET = "" if len(sys.argv) == 2: CI_TARGET = sys.argv[1] @@ -286,3 +295,10 @@ def get_git_info(CI_TARGET): print('No flaky tests found.\n') os.remove(os.environ["TMP_OUTPUT_PROCESS_XML"]) + + +if __name__ == "__main__": + if os.getenv("ENVOY_BUILD_ARCH") == "aarch64": + os.environ["MULTIDICT_NO_EXTENSIONS"] = 1 + 
os.environ["YARL_NO_EXTENSIONS"] = 1 + main() diff --git a/ci/flaky_test/requirements.txt b/ci/flaky_test/requirements.txt deleted file mode 100644 index 1e9f11f4cfa12..0000000000000 --- a/ci/flaky_test/requirements.txt +++ /dev/null @@ -1,138 +0,0 @@ -aiohttp==3.7.4.post0 \ - --hash=sha256:3cf75f7cdc2397ed4442594b935a11ed5569961333d49b7539ea741be2cc79d5 \ - --hash=sha256:4b302b45040890cea949ad092479e01ba25911a15e648429c7c5aae9650c67a8 \ - --hash=sha256:fe60131d21b31fd1a14bd43e6bb88256f69dfc3188b3a89d736d6c71ed43ec95 \ - --hash=sha256:393f389841e8f2dfc86f774ad22f00923fdee66d238af89b70ea314c4aefd290 \ - --hash=sha256:c6e9dcb4cb338d91a73f178d866d051efe7c62a7166653a91e7d9fb18274058f \ - --hash=sha256:5df68496d19f849921f05f14f31bd6ef53ad4b00245da3195048c69934521809 \ - --hash=sha256:0563c1b3826945eecd62186f3f5c7d31abb7391fedc893b7e2b26303b5a9f3fe \ - --hash=sha256:3d78619672183be860b96ed96f533046ec97ca067fd46ac1f6a09cd9b7484287 \ - --hash=sha256:f705e12750171c0ab4ef2a3c76b9a4024a62c4103e3a55dd6f99265b9bc6fcfc \ - --hash=sha256:230a8f7e24298dea47659251abc0fd8b3c4e38a664c59d4b89cca7f6c09c9e87 \ - --hash=sha256:2e19413bf84934d651344783c9f5e22dee452e251cfd220ebadbed2d9931dbf0 \ - --hash=sha256:e4b2b334e68b18ac9817d828ba44d8fcb391f6acb398bcc5062b14b2cbeac970 \ - --hash=sha256:d012ad7911653a906425d8473a1465caa9f8dea7fcf07b6d870397b774ea7c0f \ - --hash=sha256:40eced07f07a9e60e825554a31f923e8d3997cfc7fb31dbc1328c70826e04cde \ - --hash=sha256:209b4a8ee987eccc91e2bd3ac36adee0e53a5970b8ac52c273f7f8fd4872c94c \ - --hash=sha256:14762875b22d0055f05d12abc7f7d61d5fd4fe4642ce1a249abdf8c700bf1fd8 \ - --hash=sha256:7615dab56bb07bff74bc865307aeb89a8bfd9941d2ef9d817b9436da3a0ea54f \ - --hash=sha256:d9e13b33afd39ddeb377eff2c1c4f00544e191e1d1dee5b6c51ddee8ea6f0cf5 \ - --hash=sha256:547da6cacac20666422d4882cfcd51298d45f7ccb60a04ec27424d2f36ba3eaf \ - --hash=sha256:af9aa9ef5ba1fd5b8c948bb11f44891968ab30356d65fd0cc6707d989cd521df \ - 
--hash=sha256:64322071e046020e8797117b3658b9c2f80e3267daec409b350b6a7a05041213 \ - --hash=sha256:bb437315738aa441251214dad17428cafda9cdc9729499f1d6001748e1d432f4 \ - --hash=sha256:e54962802d4b8b18b6207d4a927032826af39395a3bd9196a5af43fc4e60b009 \ - --hash=sha256:a00bb73540af068ca7390e636c01cbc4f644961896fa9363154ff43fd37af2f5 \ - --hash=sha256:79ebfc238612123a713a457d92afb4096e2148be17df6c50fb9bf7a81c2f8013 \ - --hash=sha256:515dfef7f869a0feb2afee66b957cc7bbe9ad0cdee45aec7fdc623f4ecd4fb16 \ - --hash=sha256:114b281e4d68302a324dd33abb04778e8557d88947875cbf4e842c2c01a030c5 \ - --hash=sha256:7b18b97cf8ee5452fa5f4e3af95d01d84d86d32c5e2bfa260cf041749d66360b \ - --hash=sha256:15492a6368d985b76a2a5fdd2166cddfea5d24e69eefed4630cbaae5c81d89bd \ - --hash=sha256:bdb230b4943891321e06fc7def63c7aace16095be7d9cf3b1e01be2f10fba439 \ - --hash=sha256:cffe3ab27871bc3ea47df5d8f7013945712c46a3cc5a95b6bee15887f1675c22 \ - --hash=sha256:f881853d2643a29e643609da57b96d5f9c9b93f62429dcc1cbb413c7d07f0e1a \ - --hash=sha256:a5ca29ee66f8343ed336816c553e82d6cade48a3ad702b9ffa6125d187e2dedb \ - --hash=sha256:17c073de315745a1510393a96e680d20af8e67e324f70b42accbd4cb3315c9fb \ - --hash=sha256:932bb1ea39a54e9ea27fc9232163059a0b8855256f4052e776357ad9add6f1c9 \ - --hash=sha256:02f46fc0e3c5ac58b80d4d56eb0a7c7d97fcef69ace9326289fb9f1955e65cfe \ - --hash=sha256:493d3299ebe5f5a7c66b9819eacdcfbbaaf1a8e84911ddffcdc48888497afecf -async-timeout==3.0.1 \ - --hash=sha256:0c3c816a028d47f659d6ff5c745cb2acf1f966da1fe5c19c77a70282b25f4c5f \ - --hash=sha256:4291ca197d287d274d0b6cb5d6f8f8f82d434ed288f962539ff18cc9012f9ea3 -attrs==21.2.0 \ - --hash=sha256:149e90d6d8ac20db7a955ad60cf0e6881a3f20d37096140088356da6c716b0b1 \ - --hash=sha256:ef6aaac3ca6cd92904cdd0d83f629a15f18053ec84e6432106f7a4d04ae4f5fb -chardet==4.0.0 \ - --hash=sha256:f864054d66fd9118f2e67044ac8981a54775ec5b67aed0441892edb553d21da5 \ - --hash=sha256:0d6f53a15db4120f2b08c94f11e7d93d2c911ee118b6b30a04ec3ee8310179fa -idna==3.1 \ - 
--hash=sha256:5205d03e7bcbb919cc9c19885f9920d622ca52448306f2377daede5cf3faac16 \ - --hash=sha256:c5b02147e01ea9920e6b0a3f1f7bb833612d507592c837a6c49552768f4054e1 -idna_ssl==1.1.0 \ - --hash=sha256:a933e3bb13da54383f9e8f35dc4f9cb9eb9b3b78c6b36f311254d6d0d92c6c7c -multidict==5.1.0 \ - --hash=sha256:b7993704f1a4b204e71debe6095150d43b2ee6150fa4f44d6d966ec356a8d61f \ - --hash=sha256:9dd6e9b1a913d096ac95d0399bd737e00f2af1e1594a787e00f7975778c8b2bf \ - --hash=sha256:f21756997ad8ef815d8ef3d34edd98804ab5ea337feedcd62fb52d22bf531281 \ - --hash=sha256:1ab820665e67373de5802acae069a6a05567ae234ddb129f31d290fc3d1aa56d \ - --hash=sha256:9436dc58c123f07b230383083855593550c4d301d2532045a17ccf6eca505f6d \ - --hash=sha256:830f57206cc96ed0ccf68304141fec9481a096c4d2e2831f311bde1c404401da \ - --hash=sha256:2e68965192c4ea61fff1b81c14ff712fc7dc15d2bd120602e4a3494ea6584224 \ - --hash=sha256:2f1a132f1c88724674271d636e6b7351477c27722f2ed789f719f9e3545a3d26 \ - --hash=sha256:3a4f32116f8f72ecf2a29dabfb27b23ab7cdc0ba807e8459e59a93a9be9506f6 \ - --hash=sha256:46c73e09ad374a6d876c599f2328161bcd95e280f84d2060cf57991dec5cfe76 \ - --hash=sha256:018132dbd8688c7a69ad89c4a3f39ea2f9f33302ebe567a879da8f4ca73f0d0a \ - --hash=sha256:4b186eb7d6ae7c06eb4392411189469e6a820da81447f46c0072a41c748ab73f \ - --hash=sha256:3a041b76d13706b7fff23b9fc83117c7b8fe8d5fe9e6be45eee72b9baa75f348 \ - --hash=sha256:051012ccee979b2b06be928a6150d237aec75dd6bf2d1eeeb190baf2b05abc93 \ - --hash=sha256:6a4d5ce640e37b0efcc8441caeea8f43a06addace2335bd11151bc02d2ee31f9 \ - --hash=sha256:5cf3443199b83ed9e955f511b5b241fd3ae004e3cb81c58ec10f4fe47c7dce37 \ - --hash=sha256:f200755768dc19c6f4e2b672421e0ebb3dd54c38d5a4f262b872d8cfcc9e93b5 \ - --hash=sha256:05c20b68e512166fddba59a918773ba002fdd77800cad9f55b59790030bab632 \ - --hash=sha256:54fd1e83a184e19c598d5e70ba508196fd0bbdd676ce159feb412a4a6664f952 \ - --hash=sha256:0e3c84e6c67eba89c2dbcee08504ba8644ab4284863452450520dad8f1e89b79 \ - 
--hash=sha256:dc862056f76443a0db4509116c5cd480fe1b6a2d45512a653f9a855cc0517456 \ - --hash=sha256:0e929169f9c090dae0646a011c8b058e5e5fb391466016b39d21745b48817fd7 \ - --hash=sha256:d81eddcb12d608cc08081fa88d046c78afb1bf8107e6feab5d43503fea74a635 \ - --hash=sha256:585fd452dd7782130d112f7ddf3473ffdd521414674c33876187e101b588738a \ - --hash=sha256:37e5438e1c78931df5d3c0c78ae049092877e5e9c02dd1ff5abb9cf27a5914ea \ - --hash=sha256:07b42215124aedecc6083f1ce6b7e5ec5b50047afa701f3442054373a6deb656 \ - --hash=sha256:929006d3c2d923788ba153ad0de8ed2e5ed39fdbe8e7be21e2f22ed06c6783d3 \ - --hash=sha256:b797515be8743b771aa868f83563f789bbd4b236659ba52243b735d80b29ed93 \ - --hash=sha256:d5c65bdf4484872c4af3150aeebe101ba560dcfb34488d9a8ff8dbcd21079647 \ - --hash=sha256:b47a43177a5e65b771b80db71e7be76c0ba23cc8aa73eeeb089ed5219cdbe27d \ - --hash=sha256:806068d4f86cb06af37cd65821554f98240a19ce646d3cd24e1c33587f313eb8 \ - --hash=sha256:46dd362c2f045095c920162e9307de5ffd0a1bfbba0a6e990b344366f55a30c1 \ - --hash=sha256:ace010325c787c378afd7f7c1ac66b26313b3344628652eacd149bdd23c68841 \ - --hash=sha256:ecc771ab628ea281517e24fd2c52e8f31c41e66652d07599ad8818abaad38cda \ - --hash=sha256:fc13a9524bc18b6fb6e0dbec3533ba0496bbed167c56d0aabefd965584557d80 \ - --hash=sha256:7df80d07818b385f3129180369079bd6934cf70469f99daaebfac89dca288359 \ - --hash=sha256:25b4e5f22d3a37ddf3effc0710ba692cfc792c2b9edfb9c05aefe823256e84d5 -slackclient==2.9.3 \ - --hash=sha256:2d68d668c02f4038299897e5c4723ab85dd40a3548354924b24f333a435856f8 \ - --hash=sha256:07ec8fa76f6aa64852210ae235ff9e637ba78124e06c0b07a7eeea4abb955965 -typing-extensions==3.10.0.2 \ - --hash=sha256:d8226d10bc02a29bcc81df19a26e56a9647f8b0a6d4a83924139f4a8b01f17b7 \ - --hash=sha256:f1d25edafde516b146ecd0613dabcc61409817af4766fbbcfb8d1ad4ec441a34 \ - --hash=sha256:49f75d16ff11f1cd258e1b988ccff82a3ca5570217d7ad8c5f48205dd99a677e -wheel==0.37.0 \ - --hash=sha256:21014b2bd93c6d0034b6ba5d35e4eb284340e09d63c59aef6fc14b0f346146fd \ - 
--hash=sha256:e2ef7239991699e3355d54f8e968a21bb940a1dbf34a4d226741e64462516fad -yarl==1.6.3 \ - --hash=sha256:0355a701b3998dcd832d0dc47cc5dedf3874f966ac7f870e0f3a6788d802d434 \ - --hash=sha256:bafb450deef6861815ed579c7a6113a879a6ef58aed4c3a4be54400ae8871478 \ - --hash=sha256:547f7665ad50fa8563150ed079f8e805e63dd85def6674c97efd78eed6c224a6 \ - --hash=sha256:63f90b20ca654b3ecc7a8d62c03ffa46999595f0167d6450fa8383bab252987e \ - --hash=sha256:97b5bdc450d63c3ba30a127d018b866ea94e65655efaf889ebeabc20f7d12406 \ - --hash=sha256:d8d07d102f17b68966e2de0e07bfd6e139c7c02ef06d3a0f8d2f0f055e13bb76 \ - --hash=sha256:15263c3b0b47968c1d90daa89f21fcc889bb4b1aac5555580d74565de6836366 \ - --hash=sha256:b5dfc9a40c198334f4f3f55880ecf910adebdcb2a0b9a9c23c9345faa9185721 \ - --hash=sha256:b2e9a456c121e26d13c29251f8267541bd75e6a1ccf9e859179701c36a078643 \ - --hash=sha256:ce3beb46a72d9f2190f9e1027886bfc513702d748047b548b05dab7dfb584d2e \ - --hash=sha256:2ce4c621d21326a4a5500c25031e102af589edb50c09b321049e388b3934eec3 \ - --hash=sha256:d26608cf178efb8faa5ff0f2d2e77c208f471c5a3709e577a7b3fd0445703ac8 \ - --hash=sha256:4c5bcfc3ed226bf6419f7a33982fb4b8ec2e45785a0561eb99274ebbf09fdd6a \ - --hash=sha256:4736eaee5626db8d9cda9eb5282028cc834e2aeb194e0d8b50217d707e98bb5c \ - --hash=sha256:68dc568889b1c13f1e4745c96b931cc94fdd0defe92a72c2b8ce01091b22e35f \ - --hash=sha256:7356644cbed76119d0b6bd32ffba704d30d747e0c217109d7979a7bc36c4d970 \ - --hash=sha256:00d7ad91b6583602eb9c1d085a2cf281ada267e9a197e8b7cae487dadbfa293e \ - --hash=sha256:69ee97c71fee1f63d04c945f56d5d726483c4762845400a6795a3b75d56b6c50 \ - --hash=sha256:e46fba844f4895b36f4c398c5af062a9808d1f26b2999c58909517384d5deda2 \ - --hash=sha256:31ede6e8c4329fb81c86706ba8f6bf661a924b53ba191b27aa5fcee5714d18ec \ - --hash=sha256:fcbb48a93e8699eae920f8d92f7160c03567b421bc17362a9ffbbd706a816f71 \ - --hash=sha256:72a660bdd24497e3e84f5519e57a9ee9220b6f3ac4d45056961bf22838ce20cc \ - 
--hash=sha256:324ba3d3c6fee56e2e0b0d09bf5c73824b9f08234339d2b788af65e60040c959 \ - --hash=sha256:e6b5460dc5ad42ad2b36cca524491dfcaffbfd9c8df50508bddc354e787b8dc2 \ - --hash=sha256:6d6283d8e0631b617edf0fd726353cb76630b83a089a40933043894e7f6721e2 \ - --hash=sha256:9ede61b0854e267fd565e7527e2f2eb3ef8858b301319be0604177690e1a3896 \ - --hash=sha256:f0b059678fd549c66b89bed03efcabb009075bd131c248ecdf087bdb6faba24a \ - --hash=sha256:329412812ecfc94a57cd37c9d547579510a9e83c516bc069470db5f75684629e \ - --hash=sha256:c49ff66d479d38ab863c50f7bb27dee97c6627c5fe60697de15529da9c3de724 \ - --hash=sha256:f040bcc6725c821a4c0665f3aa96a4d0805a7aaf2caf266d256b8ed71b9f041c \ - --hash=sha256:d5c32c82990e4ac4d8150fd7652b972216b204de4e83a122546dce571c1bdf25 \ - --hash=sha256:d597767fcd2c3dc49d6eea360c458b65643d1e4dbed91361cf5e36e53c1f8c96 \ - --hash=sha256:8aa3decd5e0e852dc68335abf5478a518b41bf2ab2f330fe44916399efedfae0 \ - --hash=sha256:73494d5b71099ae8cb8754f1df131c11d433b387efab7b51849e7e1e851f07a4 \ - --hash=sha256:5b883e458058f8d6099e4420f0cc2567989032b5f34b271c0827de9f1079a424 \ - --hash=sha256:4953fb0b4fdb7e08b2f3b3be80a00d28c5c8a2056bb066169de00e6501b986b6 \ - --hash=sha256:8a9066529240171b68893d60dca86a763eae2139dd42f42106b03cf4b426bf10 diff --git a/ci/flaky_test/run_process_xml.sh b/ci/flaky_test/run_process_xml.sh deleted file mode 100755 index 38496128bb913..0000000000000 --- a/ci/flaky_test/run_process_xml.sh +++ /dev/null @@ -1,13 +0,0 @@ -#!/bin/bash - -export ENVOY_SRCDIR=${ENVOY_SRCDIR:-.} - -# shellcheck source=tools/shell_utils.sh -. "${ENVOY_SRCDIR}"/tools/shell_utils.sh - -if [[ "${ENVOY_BUILD_ARCH}" == "aarch64" ]]; then - export MULTIDICT_NO_EXTENSIONS=1 - export YARL_NO_EXTENSIONS=1 -fi - -python_venv process_xml "$1" diff --git a/ci/windows_ci_steps.sh b/ci/windows_ci_steps.sh index 67ff0eb409949..eed32c1218868 100755 --- a/ci/windows_ci_steps.sh +++ b/ci/windows_ci_steps.sh @@ -103,7 +103,7 @@ if [[ $TEST_TARGETS == "//test/..." 
]]; then bazel "${BAZEL_STARTUP_OPTIONS[@]}" test "${BAZEL_BUILD_OPTIONS[@]}" $TEST_TARGETS --test_tag_filters=-skip_on_windows,-fails_on_${FAIL_GROUP} --build_tests_only echo "running flaky test reporting script" - "${ENVOY_SRCDIR}"/ci/flaky_test/run_process_xml.sh "$CI_TARGET" + bazel run "${BAZEL_BUILD_OPTIONS[@]}" //ci/flaky_test:process_xml "$CI_TARGET" # Build tests that are known flaky or failing to ensure no compilation regressions bazel "${BAZEL_STARTUP_OPTIONS[@]}" build "${BAZEL_BUILD_OPTIONS[@]}" //test/... --test_tag_filters=fails_on_${FAIL_GROUP} --build_tests_only diff --git a/tools/base/requirements.in b/tools/base/requirements.in index b12ec7e8bc1b1..7649830e6caab 100644 --- a/tools/base/requirements.in +++ b/tools/base/requirements.in @@ -21,6 +21,7 @@ pytest-cov pytest-patches pyyaml setuptools +slackclient sphinx sphinxcontrib-httpdomain sphinxcontrib-serializinghtml diff --git a/tools/base/requirements.txt b/tools/base/requirements.txt index bbd3e3c44c61f..feee128ea615b 100644 --- a/tools/base/requirements.txt +++ b/tools/base/requirements.txt @@ -86,6 +86,7 @@ aiohttp==3.7.4.post0 \ # aiodocker # envoy.github.abstract # envoy.github.release + # slackclient alabaster==0.7.12 \ --hash=sha256:446438bdcca0e05bd45ea2de1668c1d9b032e1a9154c2c259092d77031ddd359 \ --hash=sha256:a661d72d58e6ea8a57f7a86e37d86716863ee5e92788398526d58b26a4e4dc02 @@ -626,6 +627,10 @@ six==1.16.0 \ # via # pynacl # sphinxcontrib-httpdomain +slackclient==2.9.3 \ + --hash=sha256:07ec8fa76f6aa64852210ae235ff9e637ba78124e06c0b07a7eeea4abb955965 \ + --hash=sha256:2d68d668c02f4038299897e5c4723ab85dd40a3548354924b24f333a435856f8 + # via -r tools/base/requirements.in smmap==4.0.0 \ --hash=sha256:7e65386bd122d45405ddf795637b7f7d2b532e7e401d46bbe3fb49b9986d5182 \ --hash=sha256:a9a7479e4c572e2e775c404dcd3080c8dc49f39918c2cf74913d30c4c478e3c2 From 4e2445acbbaca63285da14c6ca03b6e4f007f3c9 Mon Sep 17 00:00:00 2001 From: phlax Date: Fri, 17 Sep 2021 19:01:49 +0100 Subject: [PATCH 066/121] 
tooling: Shift `api_proto_breaking_change_detector` to bazel (#17981) Signed-off-by: Ryan Northey Signed-off-by: gayang --- ci/do_ci.sh | 5 +--- .../api_proto_breaking_change_detector/BUILD | 20 +++++++++++++--- .../buf_utils.py | 24 ++++++++++++------- .../detector.py | 4 ++-- .../detector_ci.py | 14 +++++++---- .../detector_ci.sh | 9 ------- .../detector_test.py | 12 +++++----- 7 files changed, 50 insertions(+), 38 deletions(-) delete mode 100755 tools/api_proto_breaking_change_detector/detector_ci.sh diff --git a/ci/do_ci.sh b/ci/do_ci.sh index f8654b9a7682e..02f89f49c744b 100755 --- a/ci/do_ci.sh +++ b/ci/do_ci.sh @@ -375,14 +375,11 @@ elif [[ "$CI_TARGET" == "bazel.api" ]]; then bazel build "${BAZEL_BUILD_OPTIONS[@]}" -c fastbuild @envoy_api//envoy/... exit 0 elif [[ "$CI_TARGET" == "bazel.api_compat" ]]; then - echo "Building buf..." - bazel build @com_github_bufbuild_buf//:buf - BUF_PATH=$(realpath "bazel-source/external/com_github_bufbuild_buf/bin/buf") echo "Checking API for breaking changes to protobuf backwards compatibility..." 
BASE_BRANCH_REF=$("${ENVOY_SRCDIR}"/tools/git/last_github_commit.sh) COMMIT_TITLE=$(git log -n 1 --pretty='format:%C(auto)%h (%s, %ad)' "${BASE_BRANCH_REF}") echo -e "\tUsing base commit ${COMMIT_TITLE}" - "${ENVOY_SRCDIR}"/tools/api_proto_breaking_change_detector/detector_ci.sh "${BUF_PATH}" "${BASE_BRANCH_REF}" + bazel run //tools/api_proto_breaking_change_detector:detector_ci "${BASE_BRANCH_REF}" exit 0 elif [[ "$CI_TARGET" == "bazel.coverage" || "$CI_TARGET" == "bazel.fuzz_coverage" ]]; then setup_clang_toolchain diff --git a/tools/api_proto_breaking_change_detector/BUILD b/tools/api_proto_breaking_change_detector/BUILD index 4ef316b7794a3..69ce24fdb3029 100644 --- a/tools/api_proto_breaking_change_detector/BUILD +++ b/tools/api_proto_breaking_change_detector/BUILD @@ -1,4 +1,5 @@ load("@rules_python//python:defs.bzl", "py_binary", "py_library", "py_test") +load("@base_pip3//:requirements.bzl", "requirement") licenses(["notice"]) # Apache 2 @@ -16,7 +17,21 @@ py_binary( deps = [ ":buf_utils", ":detector_errors", - "//tools:run_command", + ], +) + +py_binary( + name = "detector_ci", + srcs = [ + "detector_ci.py", + ], + args = ["$(location @com_github_bufbuild_buf//:buf)"], + data = [ + "@com_github_bufbuild_buf//:buf", + ], + deps = [ + ":detector", + "@envoy_repo", ], ) @@ -27,7 +42,7 @@ py_library( ], deps = [ ":detector_errors", - "//tools/base:utils", + requirement("envoy.base.utils"), ], ) @@ -50,7 +65,6 @@ py_test( tags = ["manual"], deps = [ ":detector", - "//tools:run_command", "@rules_python//python/runfiles", ], ) diff --git a/tools/api_proto_breaking_change_detector/buf_utils.py b/tools/api_proto_breaking_change_detector/buf_utils.py index 1c279e84a49c4..1bc9a96a222e6 100644 --- a/tools/api_proto_breaking_change_detector/buf_utils.py +++ b/tools/api_proto_breaking_change_detector/buf_utils.py @@ -1,9 +1,10 @@ +import subprocess from pathlib import Path from typing import List, Union, Tuple -from detector_errors import ChangeDetectorError, 
ChangeDetectorInitializeError -from tools.base.utils import cd_and_return -from tools.run_command import run_command +from tools.api_proto_breaking_change_detector.detector_errors import ( + ChangeDetectorError, ChangeDetectorInitializeError) +from envoy.base.utils import cd_and_return def _generate_buf_args(target_path, config_file_loc, additional_args): @@ -52,7 +53,10 @@ def pull_buf_deps( with _cd_into_config_parent(config_file_loc): buf_args = _generate_buf_args(target_path, config_file_loc, additional_args) - update_code, _, update_err = run_command(f'{buf_path} mod update') + response = subprocess.run([buf_path, "mod", "update"], + encoding="utf-8", + capture_output=True) + update_code, update_err = response.returncode, response.stderr.split("\n") # for some reason buf prints out the "downloading..." lines on stderr if update_code != 0: raise ChangeDetectorInitializeError( @@ -63,7 +67,7 @@ def pull_buf_deps( "buf mod update did not generate a buf.lock file (silent error... incorrect config?)" ) - run_command(' '.join([f'{buf_path} build', *buf_args])) + subprocess.run([buf_path, "build"] + buf_args, capture_output=True) def check_breaking( @@ -87,7 +91,7 @@ def check_breaking( additional_args {List[str]} -- additional arguments passed into the buf binary invocations Returns: - Tuple[int, List[str], List[str]] -- tuple of (exit status code, stdout, stderr) as provided by run_command. Note stdout/stderr are provided as string lists + Tuple[int, List[str], List[str]] -- tuple of (exit status code, stdout, stderr). 
Note stdout/stderr are provided as string lists """ with _cd_into_config_parent(config_file_loc): if not Path(git_path).exists(): @@ -100,6 +104,8 @@ def check_breaking( if subdir: initial_state_input += f',subdir={subdir}' - final_code, final_out, final_err = run_command( - ' '.join([buf_path, f"breaking --against {initial_state_input}", *buf_args])) - return final_code, final_out, final_err + response = subprocess.run([buf_path, "breaking", "--against", initial_state_input] + + buf_args, + encoding="utf-8", + capture_output=True) + return response.returncode, response.stdout.split("\n"), response.stderr.split("\n") diff --git a/tools/api_proto_breaking_change_detector/detector.py b/tools/api_proto_breaking_change_detector/detector.py index c5a66be94214c..8a4273cb76e22 100644 --- a/tools/api_proto_breaking_change_detector/detector.py +++ b/tools/api_proto_breaking_change_detector/detector.py @@ -16,8 +16,8 @@ from pathlib import Path from typing import List -from buf_utils import check_breaking, pull_buf_deps -from detector_errors import ChangeDetectorError +from tools.api_proto_breaking_change_detector.buf_utils import check_breaking, pull_buf_deps +from tools.api_proto_breaking_change_detector.detector_errors import ChangeDetectorError class ProtoBreakingChangeDetector(object): diff --git a/tools/api_proto_breaking_change_detector/detector_ci.py b/tools/api_proto_breaking_change_detector/detector_ci.py index 84278d6d4b075..4460423c68bfa 100755 --- a/tools/api_proto_breaking_change_detector/detector_ci.py +++ b/tools/api_proto_breaking_change_detector/detector_ci.py @@ -1,13 +1,16 @@ #!/usr/bin/env python3 import argparse +import os import sys from pathlib import Path -from detector import BufWrapper +from tools.api_proto_breaking_change_detector.detector import BufWrapper -API_DIR = Path("api").resolve() -GIT_PATH = Path.cwd().joinpath(".git") +import envoy_repo + +API_DIR = Path(envoy_repo.PATH).joinpath("api") +GIT_PATH = Path(envoy_repo.PATH).joinpath(".git") 
CONFIG_FILE_LOC = Path(API_DIR, "buf.yaml") @@ -39,6 +42,7 @@ def detect_breaking_changes_git(path_to_buf, ref): parser.add_argument( 'git_ref', type=str, help='git reference to check against for breaking changes') args = parser.parse_args() - - exit_status = detect_breaking_changes_git(args.buf_path, args.git_ref) + buf_path = os.path.abspath(args.buf_path) + os.chdir(envoy_repo.PATH) + exit_status = detect_breaking_changes_git(buf_path, args.git_ref) sys.exit(exit_status) diff --git a/tools/api_proto_breaking_change_detector/detector_ci.sh b/tools/api_proto_breaking_change_detector/detector_ci.sh deleted file mode 100755 index 7f0ec3271e1e5..0000000000000 --- a/tools/api_proto_breaking_change_detector/detector_ci.sh +++ /dev/null @@ -1,9 +0,0 @@ -#!/bin/bash - -tools="$(dirname "$(dirname "$(realpath "$0")")")" -root=$(realpath "$tools/..") - -cd "$root" || exit 1 -# to satisfy dependency on run_command (as done in tools/code_format/check_format_test_helper.sh) -export PYTHONPATH="$root" -./tools/api_proto_breaking_change_detector/detector_ci.py "$@" diff --git a/tools/api_proto_breaking_change_detector/detector_test.py b/tools/api_proto_breaking_change_detector/detector_test.py index d34bd1528d4d2..0417f21c3fd97 100644 --- a/tools/api_proto_breaking_change_detector/detector_test.py +++ b/tools/api_proto_breaking_change_detector/detector_test.py @@ -7,6 +7,7 @@ and ensure that tool behavior is consistent across dependency updates. 
""" +import subprocess import tempfile import unittest from pathlib import Path @@ -14,10 +15,9 @@ from rules_python.python.runfiles import runfiles -from buf_utils import pull_buf_deps -from detector import BufWrapper -from tools.base.utils import cd_and_return -from tools.run_command import run_command +from tools.api_proto_breaking_change_detector.buf_utils import pull_buf_deps +from tools.api_proto_breaking_change_detector.detector import BufWrapper +from envoy.base.utils import cd_and_return class BreakingChangeDetectorTests(object): @@ -89,8 +89,8 @@ class BufTests(TestAllowedChanges, TestBreakingChanges, unittest.TestCase): @classmethod def _run_command_print_error(cls, cmd): - code, out, err = run_command(cmd) - out, err = '\n'.join(out), '\n'.join(err) + response = subprocess.run([cmd], shell=True, capture_output=True, encoding="utf-8") + code, out, err = response.returncode, response.stdout, response.stderr if code != 0: raise Exception( f"Error running command {cmd}\nExit code: {code} | stdout: {out} | stderr: {err}") From 3078a4b2b176f18d4cd0ca2dcc0ff1e6577df4d5 Mon Sep 17 00:00:00 2001 From: Jose Ulises Nino Rivera Date: Fri, 17 Sep 2021 12:40:45 -0700 Subject: [PATCH 067/121] dns: add v4 preferred option (#18108) Signed-off-by: Jose Nino Signed-off-by: gayang --- api/envoy/config/cluster/v3/cluster.proto | 8 ++ docs/root/version_history/current.rst | 1 + envoy/network/dns.h | 2 +- source/common/network/apple_dns_impl.cc | 51 +++++++-- source/common/network/apple_dns_impl.h | 11 +- source/common/network/dns_impl.cc | 19 +++- source/common/network/dns_impl.h | 6 +- source/common/upstream/upstream_impl.cc | 2 + test/common/network/apple_dns_impl_test.cc | 116 ++++++++++++++++++++- test/common/network/dns_impl_test.cc | 54 ++++++++++ 10 files changed, 246 insertions(+), 24 deletions(-) diff --git a/api/envoy/config/cluster/v3/cluster.proto b/api/envoy/config/cluster/v3/cluster.proto index d6213d6fe9488..495d6ce39788f 100644 --- 
a/api/envoy/config/cluster/v3/cluster.proto +++ b/api/envoy/config/cluster/v3/cluster.proto @@ -123,15 +123,23 @@ message Cluster { // only perform a lookup for addresses in the IPv6 family. If AUTO is // specified, the DNS resolver will first perform a lookup for addresses in // the IPv6 family and fallback to a lookup for addresses in the IPv4 family. + // This is semantically equivalent to a non-existent V6_PREFERRED option. + // AUTO is a legacy name that is more opaque than + // necessary and will be deprecated in favor of V6_PREFERRED in a future major version of the API. + // If V4_PREFERRED is specified, the DNS resolver will first perform a lookup for addresses in the + // IPv4 family and fallback to a lookup for addresses in the IPv6 family. i.e., the callback + // target will only get v6 addresses if there were NO v4 addresses to return. // For cluster types other than // :ref:`STRICT_DNS` and // :ref:`LOGICAL_DNS`, // this setting is // ignored. + // [#next-major-version: deprecate AUTO in favor of a V6_PREFERRED option.] enum DnsLookupFamily { AUTO = 0; V4_ONLY = 1; V6_ONLY = 2; + V4_PREFERRED = 3; } enum ClusterProtocolSelection { diff --git a/docs/root/version_history/current.rst b/docs/root/version_history/current.rst index 29a0f443fbb57..fa19b6c3ea455 100644 --- a/docs/root/version_history/current.rst +++ b/docs/root/version_history/current.rst @@ -102,6 +102,7 @@ New Features * access_log: added :ref:`METADATA` token to handle all types of metadata (DYNAMIC, CLUSTER, ROUTE). * bootstrap: added :ref:`inline_headers ` in the bootstrap to make custom inline headers bootstrap configurable. * contrib: added new :ref:`contrib images ` which contain contrib extensions. +* dns: added :ref:`V4_PREFERRED ` option to return V6 addresses only if V4 addresses are not available. * grpc reverse bridge: added a new :ref:`option ` to support streaming response bodies when withholding gRPC frames from the upstream. 
* http: added :ref:`alternate_protocols_cache_options ` for enabling HTTP/3 connections to servers which advertise HTTP/3 support via `HTTP Alternative Services `_. * http: added :ref:`string_match ` in the header matcher. diff --git a/envoy/network/dns.h b/envoy/network/dns.h index d2f7e23f5bae9..38dd39bf45760 100644 --- a/envoy/network/dns.h +++ b/envoy/network/dns.h @@ -46,7 +46,7 @@ struct DnsResponse { const std::chrono::seconds ttl_; }; -enum class DnsLookupFamily { V4Only, V6Only, Auto }; +enum class DnsLookupFamily { V4Only, V6Only, Auto, V4Preferred }; /** * An asynchronous DNS resolver. diff --git a/source/common/network/apple_dns_impl.cc b/source/common/network/apple_dns_impl.cc index 520bdada5eede..9a423621acbf3 100644 --- a/source/common/network/apple_dns_impl.cc +++ b/source/common/network/apple_dns_impl.cc @@ -73,10 +73,10 @@ AppleDnsResolverImpl::startResolution(const std::string& dns_name, } ENVOY_LOG(trace, "Performing DNS resolution via Apple APIs"); - auto pending_resolution = - std::make_unique(*this, callback, dispatcher_, dns_name); + auto pending_resolution = std::make_unique(*this, callback, dispatcher_, + dns_name, dns_lookup_family); - DNSServiceErrorType error = pending_resolution->dnsServiceGetAddrInfo(dns_lookup_family); + DNSServiceErrorType error = pending_resolution->dnsServiceGetAddrInfo(); if (error != kDNSServiceErr_NoError) { ENVOY_LOG(warn, "DNS resolver error ({}) in dnsServiceGetAddrInfo for {}", error, dns_name); chargeGetAddrInfoErrorStats(error); @@ -136,9 +136,10 @@ void AppleDnsResolverImpl::chargeGetAddrInfoErrorStats(DNSServiceErrorType error AppleDnsResolverImpl::PendingResolution::PendingResolution(AppleDnsResolverImpl& parent, ResolveCb callback, Event::Dispatcher& dispatcher, - const std::string& dns_name) + const std::string& dns_name, + DnsLookupFamily dns_lookup_family) : parent_(parent), callback_(callback), dispatcher_(dispatcher), dns_name_(dns_name), - pending_cb_({ResolutionStatus::Success, {}}) {} + 
pending_cb_({ResolutionStatus::Success, {}, {}}), dns_lookup_family_(dns_lookup_family) {} AppleDnsResolverImpl::PendingResolution::~PendingResolution() { ENVOY_LOG(debug, "Destroying PendingResolution for {}", dns_name_); @@ -185,10 +186,32 @@ void AppleDnsResolverImpl::PendingResolution::onEventCallback(uint32_t events) { } } +std::list& AppleDnsResolverImpl::PendingResolution::finalAddressList() { + switch (dns_lookup_family_) { + case DnsLookupFamily::V4Only: + return pending_cb_.v4_responses_; + case DnsLookupFamily::V6Only: + return pending_cb_.v6_responses_; + case DnsLookupFamily::Auto: + // Per API docs only give v4 if v6 is not available. + if (pending_cb_.v6_responses_.empty()) { + return pending_cb_.v4_responses_; + } + return pending_cb_.v6_responses_; + case DnsLookupFamily::V4Preferred: + // Per API docs only give v6 if v4 is not available. + if (pending_cb_.v4_responses_.empty()) { + return pending_cb_.v6_responses_; + } + return pending_cb_.v4_responses_; + } + NOT_REACHED_GCOVR_EXCL_LINE; +} + void AppleDnsResolverImpl::PendingResolution::finishResolve() { ENVOY_LOG_EVENT(debug, "apple_dns_resolution_complete", "dns resolution for {} completed with status {}", dns_name_, pending_cb_.status_); - callback_(pending_cb_.status_, std::move(pending_cb_.responses_)); + callback_(pending_cb_.status_, std::move(finalAddressList())); if (owned_) { ENVOY_LOG(debug, "Resolution for {} completed (async)", dns_name_); @@ -199,10 +222,9 @@ void AppleDnsResolverImpl::PendingResolution::finishResolve() { } } -DNSServiceErrorType -AppleDnsResolverImpl::PendingResolution::dnsServiceGetAddrInfo(DnsLookupFamily dns_lookup_family) { +DNSServiceErrorType AppleDnsResolverImpl::PendingResolution::dnsServiceGetAddrInfo() { DNSServiceProtocol protocol; - switch (dns_lookup_family) { + switch (dns_lookup_family_) { case DnsLookupFamily::V4Only: protocol = kDNSServiceProtocol_IPv4; break; @@ -210,6 +232,7 @@ 
AppleDnsResolverImpl::PendingResolution::dnsServiceGetAddrInfo(DnsLookupFamily d protocol = kDNSServiceProtocol_IPv6; break; case DnsLookupFamily::Auto: + case DnsLookupFamily::V4Preferred: protocol = kDNSServiceProtocol_IPv4 | kDNSServiceProtocol_IPv6; break; } @@ -255,7 +278,8 @@ void AppleDnsResolverImpl::PendingResolution::onDNSServiceGetAddrInfoReply( parent_.chargeGetAddrInfoErrorStats(error_code); pending_cb_.status_ = ResolutionStatus::Failure; - pending_cb_.responses_.clear(); + pending_cb_.v4_responses_.clear(); + pending_cb_.v6_responses_.clear(); finishResolve(); // Note: Nothing can follow this call to flushPendingQueries due to deletion of this @@ -271,7 +295,12 @@ void AppleDnsResolverImpl::PendingResolution::onDNSServiceGetAddrInfoReply( auto dns_response = buildDnsResponse(address, ttl); ENVOY_LOG(debug, "Address to add address={}, ttl={}", dns_response.address_->ip()->addressAsString(), ttl); - pending_cb_.responses_.push_back(dns_response); + if (dns_response.address_->ip()->ipv4()) { + pending_cb_.v4_responses_.push_back(dns_response); + } else { + ASSERT(dns_response.address_->ip()->ipv6()); + pending_cb_.v6_responses_.push_back(dns_response); + } } if (!(flags & kDNSServiceFlagsMoreComing)) { diff --git a/source/common/network/apple_dns_impl.h b/source/common/network/apple_dns_impl.h index 17328b484b3d8..3eeaba854c165 100644 --- a/source/common/network/apple_dns_impl.h +++ b/source/common/network/apple_dns_impl.h @@ -84,7 +84,8 @@ class AppleDnsResolverImpl : public DnsResolver, protected Logger::Loggable& finalAddressList(); + // Small wrapping struct to accumulate addresses from firings of the // onDNSServiceGetAddrInfoReply callback. 
struct FinalResponse { ResolutionStatus status_; - std::list responses_; + std::list v4_responses_; + std::list v6_responses_; }; AppleDnsResolverImpl& parent_; @@ -124,6 +128,7 @@ class AppleDnsResolverImpl : public DnsResolver, protected Logger::Loggable(*this, callback, dispatcher_, channel_, dns_name); - if (dns_lookup_family == DnsLookupFamily::Auto) { + auto pending_resolution = std::make_unique( + *this, callback, dispatcher_, channel_, dns_name, dns_lookup_family); + if (dns_lookup_family == DnsLookupFamily::Auto || + dns_lookup_family == DnsLookupFamily::V4Preferred) { pending_resolution->fallback_if_failed_ = true; } - if (dns_lookup_family == DnsLookupFamily::V4Only) { + if (dns_lookup_family == DnsLookupFamily::V4Only || + dns_lookup_family == DnsLookupFamily::V4Preferred) { pending_resolution->getAddrInfo(AF_INET); } else { pending_resolution->getAddrInfo(AF_INET6); diff --git a/source/common/network/dns_impl.h b/source/common/network/dns_impl.h index abcea92a4f885..059d82073f58f 100644 --- a/source/common/network/dns_impl.h +++ b/source/common/network/dns_impl.h @@ -40,9 +40,10 @@ class DnsResolverImpl : public DnsResolver, protected Logger::Loggableip()->ipv4()); - } else if (lookup_family == DnsLookupFamily::V6Only) { + } else if (lookup_family == DnsLookupFamily::V6Only || + lookup_family == DnsLookupFamily::Auto) { EXPECT_NE(nullptr, result.address_->ip()->ipv6()); } } @@ -152,6 +154,10 @@ TEST_F(AppleDnsImplTest, DnsIpAddressVersion) { DnsResolver::ResolutionStatus::Success, true)); dispatcher_->run(Event::Dispatcher::RunType::Block); + EXPECT_NE(nullptr, resolveWithExpectations("google.com", DnsLookupFamily::V4Preferred, + DnsResolver::ResolutionStatus::Success, true)); + dispatcher_->run(Event::Dispatcher::RunType::Block); + EXPECT_NE(nullptr, resolveWithExpectations("google.com", DnsLookupFamily::V4Only, DnsResolver::ResolutionStatus::Success, true)); dispatcher_->run(Event::Dispatcher::RunType::Block); @@ -189,6 +195,10 @@ 
TEST_F(AppleDnsImplTest, DnsIpAddressVersionInvalid) { DnsResolver::ResolutionStatus::Failure, false)); dispatcher_->run(Event::Dispatcher::RunType::Block); + EXPECT_NE(nullptr, resolveWithExpectations("invalidDnsName", DnsLookupFamily::V4Preferred, + DnsResolver::ResolutionStatus::Failure, false)); + dispatcher_->run(Event::Dispatcher::RunType::Block); + EXPECT_NE(nullptr, resolveWithExpectations("invalidDnsName", DnsLookupFamily::V4Only, DnsResolver::ResolutionStatus::Failure, false)); dispatcher_->run(Event::Dispatcher::RunType::Block); @@ -320,6 +330,84 @@ class AppleDnsImplFakeApiTest : public testing::Test { checkErrorStat(error_code); } + enum AddressType { V4, V6, Both }; + + void fallbackWith(DnsLookupFamily dns_lookup_family, AddressType address_type) { + const std::string hostname = "foo.com"; + sockaddr_in addr4; + addr4.sin_family = AF_INET; + EXPECT_EQ(1, inet_pton(AF_INET, "1.2.3.4", &addr4.sin_addr)); + addr4.sin_port = htons(6502); + Network::Address::Ipv4Instance address(&addr4); + + sockaddr_in6 addr6; + addr6.sin6_family = AF_INET6; + EXPECT_EQ(1, inet_pton(AF_INET6, "102:304:506:708:90a:b0c:d0e:f00", &addr6.sin6_addr)); + addr6.sin6_port = 0; + Network::Address::Ipv6Instance address_v6(addr6); + + DNSServiceGetAddrInfoReply reply_callback; + absl::Notification dns_callback_executed; + + EXPECT_CALL(dns_service_, + dnsServiceGetAddrInfo(_, kDNSServiceFlagsTimeout, 0, + kDNSServiceProtocol_IPv4 | kDNSServiceProtocol_IPv6, + StrEq(hostname.c_str()), _, _)) + .WillOnce(DoAll(SaveArg<5>(&reply_callback), Return(kDNSServiceErr_NoError))); + + EXPECT_CALL(dns_service_, dnsServiceRefSockFD(_)).WillOnce(Return(0)); + EXPECT_CALL(dispatcher_, createFileEvent_(0, _, _, _)) + .WillOnce(Return(new NiceMock)); + + auto query = resolver_->resolve( + hostname, dns_lookup_family, + [&dns_callback_executed, dns_lookup_family, address_type]( + DnsResolver::ResolutionStatus status, std::list&& response) -> void { + EXPECT_EQ(DnsResolver::ResolutionStatus::Success, 
status); + EXPECT_EQ(1, response.size()); + + if (dns_lookup_family == DnsLookupFamily::Auto) { + if (address_type == AddressType::V4) { + EXPECT_NE(nullptr, response.front().address_->ip()->ipv4()); + } else { + EXPECT_NE(nullptr, response.front().address_->ip()->ipv6()); + } + } + + if (dns_lookup_family == DnsLookupFamily::V4Preferred) { + if (address_type == AddressType::V6) { + EXPECT_NE(nullptr, response.front().address_->ip()->ipv6()); + } else { + EXPECT_NE(nullptr, response.front().address_->ip()->ipv4()); + } + } + dns_callback_executed.Notify(); + }); + ASSERT_NE(nullptr, query); + + switch (address_type) { + case V4: + reply_callback(nullptr, kDNSServiceFlagsAdd, 0, kDNSServiceErr_NoError, hostname.c_str(), + address.sockAddr(), 30, query); + break; + case V6: + reply_callback(nullptr, kDNSServiceFlagsAdd, 0, kDNSServiceErr_NoError, hostname.c_str(), + address_v6.sockAddr(), 30, query); + break; + case Both: + reply_callback(nullptr, kDNSServiceFlagsAdd | kDNSServiceFlagsMoreComing, 0, + kDNSServiceErr_NoError, hostname.c_str(), address.sockAddr(), 30, query); + + reply_callback(nullptr, kDNSServiceFlagsAdd, 0, kDNSServiceErr_NoError, hostname.c_str(), + address_v6.sockAddr(), 30, query); + break; + default: + NOT_REACHED_GCOVR_EXCL_LINE; + } + + dns_callback_executed.WaitForNotification(); + } + protected: MockDnsService dns_service_; TestThreadsafeSingletonInjector dns_service_injector_{&dns_service_}; @@ -561,6 +649,30 @@ TEST_F(AppleDnsImplFakeApiTest, MultipleAddresses) { dns_callback_executed.WaitForNotification(); } +TEST_F(AppleDnsImplFakeApiTest, AutoOnlyV6IfBothV6andV4) { + fallbackWith(DnsLookupFamily::Auto, AddressType::Both); +} + +TEST_F(AppleDnsImplFakeApiTest, AutoV6IfOnlyV6) { + fallbackWith(DnsLookupFamily::Auto, AddressType::V6); +} + +TEST_F(AppleDnsImplFakeApiTest, AutoV4IfOnlyV4) { + fallbackWith(DnsLookupFamily::Auto, AddressType::V4); +} + +TEST_F(AppleDnsImplFakeApiTest, V4PreferredOnlyV4IfBothV6andV4) { + 
fallbackWith(DnsLookupFamily::V4Preferred, AddressType::Both); +} + +TEST_F(AppleDnsImplFakeApiTest, V4PreferredV6IfOnlyV6) { + fallbackWith(DnsLookupFamily::V4Preferred, AddressType::V6); +} + +TEST_F(AppleDnsImplFakeApiTest, V4PreferredV4IfOnlyV4) { + fallbackWith(DnsLookupFamily::V4Preferred, AddressType::V4); +} + TEST_F(AppleDnsImplFakeApiTest, MultipleAddressesSecondOneFails) { const std::string hostname = "foo.com"; sockaddr_in addr4; diff --git a/test/common/network/dns_impl_test.cc b/test/common/network/dns_impl_test.cc index 20251102575f8..0d5afe756ce21 100644 --- a/test/common/network/dns_impl_test.cc +++ b/test/common/network/dns_impl_test.cc @@ -796,6 +796,60 @@ TEST_P(DnsImplTest, MultiARecordLookupWithV6) { dispatcher_->run(Event::Dispatcher::RunType::Block); } +TEST_P(DnsImplTest, AutoOnlyV6IfBothV6andV4) { + server_->addHosts("some.good.domain", {"201.134.56.7"}, RecordType::A); + server_->addHosts("some.good.domain", {"1::2"}, RecordType::AAAA); + + EXPECT_NE(nullptr, resolveWithExpectations("some.good.domain", DnsLookupFamily::Auto, + DnsResolver::ResolutionStatus::Success, {{"1::2"}}, {}, + absl::nullopt)); + dispatcher_->run(Event::Dispatcher::RunType::Block); +} + +TEST_P(DnsImplTest, AutoV6IfOnlyV6) { + server_->addHosts("some.good.domain", {"1::2"}, RecordType::AAAA); + + EXPECT_NE(nullptr, resolveWithExpectations("some.good.domain", DnsLookupFamily::Auto, + DnsResolver::ResolutionStatus::Success, {{"1::2"}}, {}, + absl::nullopt)); + dispatcher_->run(Event::Dispatcher::RunType::Block); +} + +TEST_P(DnsImplTest, AutoV4IfOnlyV4) { + server_->addHosts("some.good.domain", {"201.134.56.7"}, RecordType::A); + EXPECT_NE(nullptr, resolveWithExpectations("some.good.domain", DnsLookupFamily::Auto, + DnsResolver::ResolutionStatus::Success, + {{"201.134.56.7"}}, {}, absl::nullopt)); + dispatcher_->run(Event::Dispatcher::RunType::Block); +} + +TEST_P(DnsImplTest, V4PreferredOnlyV4IfBothV6andV4) { + server_->addHosts("some.good.domain", {"201.134.56.7"}, 
RecordType::A); + server_->addHosts("some.good.domain", {"1::2"}, RecordType::AAAA); + + EXPECT_NE(nullptr, resolveWithExpectations("some.good.domain", DnsLookupFamily::V4Preferred, + DnsResolver::ResolutionStatus::Success, + {{"201.134.56.7"}}, {}, absl::nullopt)); + dispatcher_->run(Event::Dispatcher::RunType::Block); +} + +TEST_P(DnsImplTest, V4PreferredV6IfOnlyV6) { + server_->addHosts("some.good.domain", {"1::2"}, RecordType::AAAA); + + EXPECT_NE(nullptr, resolveWithExpectations("some.good.domain", DnsLookupFamily::V4Preferred, + DnsResolver::ResolutionStatus::Success, {{"1::2"}}, {}, + absl::nullopt)); + dispatcher_->run(Event::Dispatcher::RunType::Block); +} + +TEST_P(DnsImplTest, V4PreferredV4IfOnlyV4) { + server_->addHosts("some.good.domain", {"201.134.56.7"}, RecordType::A); + EXPECT_NE(nullptr, resolveWithExpectations("some.good.domain", DnsLookupFamily::V4Preferred, + DnsResolver::ResolutionStatus::Success, + {{"201.134.56.7"}}, {}, absl::nullopt)); + dispatcher_->run(Event::Dispatcher::RunType::Block); +} + // Validate working of cancellation provided by ActiveDnsQuery return. TEST_P(DnsImplTest, Cancel) { server_->addHosts("some.good.domain", {"201.134.56.7"}, RecordType::A); From 4a433a226977f3cadcd5f7a8ce29e7e7b67ec560 Mon Sep 17 00:00:00 2001 From: Takeshi Yoneda Date: Sat, 18 Sep 2021 08:43:33 +0900 Subject: [PATCH 068/121] logger: fix lifetime issue of AccessLogConfig in tls callback. 
(#18081) Signed-off-by: Takeshi Yoneda Signed-off-by: gayang --- .../grpc/http_grpc_access_log_impl.cc | 32 +++++++++------- .../grpc/http_grpc_access_log_impl.h | 8 ++-- .../grpc/tcp_grpc_access_log_impl.cc | 26 +++++++------ .../grpc/tcp_grpc_access_log_impl.h | 8 ++-- .../grpc/http_grpc_access_log_impl_test.cc | 30 +++++++++++++++ .../access_loggers/grpc/tcp_config_test.cc | 38 +++++++++++++++++++ 6 files changed, 111 insertions(+), 31 deletions(-) diff --git a/source/extensions/access_loggers/grpc/http_grpc_access_log_impl.cc b/source/extensions/access_loggers/grpc/http_grpc_access_log_impl.cc index f35715c37ad98..c5bc8c1f2c499 100644 --- a/source/extensions/access_loggers/grpc/http_grpc_access_log_impl.cc +++ b/source/extensions/access_loggers/grpc/http_grpc_access_log_impl.cc @@ -23,28 +23,32 @@ HttpGrpcAccessLog::ThreadLocalLogger::ThreadLocalLogger( GrpcCommon::GrpcAccessLoggerSharedPtr logger) : logger_(std::move(logger)) {} -HttpGrpcAccessLog::HttpGrpcAccessLog( - AccessLog::FilterPtr&& filter, - envoy::extensions::access_loggers::grpc::v3::HttpGrpcAccessLogConfig config, - ThreadLocal::SlotAllocator& tls, GrpcCommon::GrpcAccessLoggerCacheSharedPtr access_logger_cache, - Stats::Scope& scope) - : Common::ImplBase(std::move(filter)), scope_(scope), config_(std::move(config)), +HttpGrpcAccessLog::HttpGrpcAccessLog(AccessLog::FilterPtr&& filter, + const HttpGrpcAccessLogConfig config, + ThreadLocal::SlotAllocator& tls, + GrpcCommon::GrpcAccessLoggerCacheSharedPtr access_logger_cache, + Stats::Scope& scope) + : Common::ImplBase(std::move(filter)), scope_(scope), + config_(std::make_shared(std::move(config))), tls_slot_(tls.allocateSlot()), access_logger_cache_(std::move(access_logger_cache)) { - for (const auto& header : config_.additional_request_headers_to_log()) { + for (const auto& header : config_->additional_request_headers_to_log()) { request_headers_to_log_.emplace_back(header); } - for (const auto& header : 
config_.additional_response_headers_to_log()) { + for (const auto& header : config_->additional_response_headers_to_log()) { response_headers_to_log_.emplace_back(header); } - for (const auto& header : config_.additional_response_trailers_to_log()) { + for (const auto& header : config_->additional_response_trailers_to_log()) { response_trailers_to_log_.emplace_back(header); } - Envoy::Config::Utility::checkTransportVersion(config_.common_config()); - tls_slot_->set([this](Event::Dispatcher&) { - return std::make_shared(access_logger_cache_->getOrCreateLogger( - config_.common_config(), Common::GrpcAccessLoggerType::HTTP, scope_)); + Envoy::Config::Utility::checkTransportVersion(config_->common_config()); + // Note that &scope might have died by the time when this callback is called on each thread. + // This is supposed to be fixed by https://github.com/envoyproxy/envoy/issues/18066. + tls_slot_->set([config = config_, access_logger_cache = access_logger_cache_, + &scope = scope_](Event::Dispatcher&) { + return std::make_shared(access_logger_cache->getOrCreateLogger( + config->common_config(), Common::GrpcAccessLoggerType::HTTP, scope)); }); } @@ -56,7 +60,7 @@ void HttpGrpcAccessLog::emitLog(const Http::RequestHeaderMap& request_headers, // TODO(mattklein123): Populate sample_rate field. 
envoy::data::accesslog::v3::HTTPAccessLogEntry log_entry; GrpcCommon::Utility::extractCommonAccessLogProperties(*log_entry.mutable_common_properties(), - stream_info, config_.common_config()); + stream_info, config_->common_config()); if (stream_info.protocol()) { switch (stream_info.protocol().value()) { diff --git a/source/extensions/access_loggers/grpc/http_grpc_access_log_impl.h b/source/extensions/access_loggers/grpc/http_grpc_access_log_impl.h index d403596f8decc..25da566a10355 100644 --- a/source/extensions/access_loggers/grpc/http_grpc_access_log_impl.h +++ b/source/extensions/access_loggers/grpc/http_grpc_access_log_impl.h @@ -21,13 +21,15 @@ namespace HttpGrpc { // TODO(mattklein123): Stats +using envoy::extensions::access_loggers::grpc::v3::HttpGrpcAccessLogConfig; +using HttpGrpcAccessLogConfigConstSharedPtr = std::shared_ptr; + /** * Access log Instance that streams HTTP logs over gRPC. */ class HttpGrpcAccessLog : public Common::ImplBase { public: - HttpGrpcAccessLog(AccessLog::FilterPtr&& filter, - envoy::extensions::access_loggers::grpc::v3::HttpGrpcAccessLogConfig config, + HttpGrpcAccessLog(AccessLog::FilterPtr&& filter, const HttpGrpcAccessLogConfig config, ThreadLocal::SlotAllocator& tls, GrpcCommon::GrpcAccessLoggerCacheSharedPtr access_logger_cache, Stats::Scope& scope); @@ -49,7 +51,7 @@ class HttpGrpcAccessLog : public Common::ImplBase { const StreamInfo::StreamInfo& stream_info) override; Stats::Scope& scope_; - const envoy::extensions::access_loggers::grpc::v3::HttpGrpcAccessLogConfig config_; + const HttpGrpcAccessLogConfigConstSharedPtr config_; const ThreadLocal::SlotPtr tls_slot_; const GrpcCommon::GrpcAccessLoggerCacheSharedPtr access_logger_cache_; std::vector request_headers_to_log_; diff --git a/source/extensions/access_loggers/grpc/tcp_grpc_access_log_impl.cc b/source/extensions/access_loggers/grpc/tcp_grpc_access_log_impl.cc index 7fbcee911d5bc..63eb08d4b277f 100644 --- 
a/source/extensions/access_loggers/grpc/tcp_grpc_access_log_impl.cc +++ b/source/extensions/access_loggers/grpc/tcp_grpc_access_log_impl.cc @@ -17,17 +17,21 @@ namespace TcpGrpc { TcpGrpcAccessLog::ThreadLocalLogger::ThreadLocalLogger(GrpcCommon::GrpcAccessLoggerSharedPtr logger) : logger_(std::move(logger)) {} -TcpGrpcAccessLog::TcpGrpcAccessLog( - AccessLog::FilterPtr&& filter, - envoy::extensions::access_loggers::grpc::v3::TcpGrpcAccessLogConfig config, - ThreadLocal::SlotAllocator& tls, GrpcCommon::GrpcAccessLoggerCacheSharedPtr access_logger_cache, - Stats::Scope& scope) - : Common::ImplBase(std::move(filter)), scope_(scope), config_(std::move(config)), +TcpGrpcAccessLog::TcpGrpcAccessLog(AccessLog::FilterPtr&& filter, + const TcpGrpcAccessLogConfig config, + ThreadLocal::SlotAllocator& tls, + GrpcCommon::GrpcAccessLoggerCacheSharedPtr access_logger_cache, + Stats::Scope& scope) + : Common::ImplBase(std::move(filter)), scope_(scope), + config_(std::make_shared(std::move(config))), tls_slot_(tls.allocateSlot()), access_logger_cache_(std::move(access_logger_cache)) { - Config::Utility::checkTransportVersion(config_.common_config()); - tls_slot_->set([this](Event::Dispatcher&) { - return std::make_shared(access_logger_cache_->getOrCreateLogger( - config_.common_config(), Common::GrpcAccessLoggerType::TCP, scope_)); + Config::Utility::checkTransportVersion(config_->common_config()); + // Note that &scope might have died by the time when this callback is called on each thread. + // This is supposed to be fixed by https://github.com/envoyproxy/envoy/issues/18066. + tls_slot_->set([config = config_, access_logger_cache = access_logger_cache_, + &scope = scope_](Event::Dispatcher&) { + return std::make_shared(access_logger_cache->getOrCreateLogger( + config->common_config(), Common::GrpcAccessLoggerType::TCP, scope)); }); } @@ -37,7 +41,7 @@ void TcpGrpcAccessLog::emitLog(const Http::RequestHeaderMap&, const Http::Respon // Common log properties. 
envoy::data::accesslog::v3::TCPAccessLogEntry log_entry; GrpcCommon::Utility::extractCommonAccessLogProperties(*log_entry.mutable_common_properties(), - stream_info, config_.common_config()); + stream_info, config_->common_config()); envoy::data::accesslog::v3::ConnectionProperties& connection_properties = *log_entry.mutable_connection_properties(); diff --git a/source/extensions/access_loggers/grpc/tcp_grpc_access_log_impl.h b/source/extensions/access_loggers/grpc/tcp_grpc_access_log_impl.h index fba13f16d6c6d..a0b3842a49df0 100644 --- a/source/extensions/access_loggers/grpc/tcp_grpc_access_log_impl.h +++ b/source/extensions/access_loggers/grpc/tcp_grpc_access_log_impl.h @@ -20,13 +20,15 @@ namespace TcpGrpc { // TODO(mattklein123): Stats +using envoy::extensions::access_loggers::grpc::v3::TcpGrpcAccessLogConfig; +using TcpGrpcAccessLogConfigConstSharedPtr = std::shared_ptr; + /** * Access log Instance that streams TCP logs over gRPC. */ class TcpGrpcAccessLog : public Common::ImplBase { public: - TcpGrpcAccessLog(AccessLog::FilterPtr&& filter, - envoy::extensions::access_loggers::grpc::v3::TcpGrpcAccessLogConfig config, + TcpGrpcAccessLog(AccessLog::FilterPtr&& filter, const TcpGrpcAccessLogConfig config, ThreadLocal::SlotAllocator& tls, GrpcCommon::GrpcAccessLoggerCacheSharedPtr access_logger_cache, Stats::Scope& scope); @@ -48,7 +50,7 @@ class TcpGrpcAccessLog : public Common::ImplBase { const StreamInfo::StreamInfo& stream_info) override; Stats::Scope& scope_; - const envoy::extensions::access_loggers::grpc::v3::TcpGrpcAccessLogConfig config_; + const TcpGrpcAccessLogConfigConstSharedPtr config_; const ThreadLocal::SlotPtr tls_slot_; const GrpcCommon::GrpcAccessLoggerCacheSharedPtr access_logger_cache_; }; diff --git a/test/extensions/access_loggers/grpc/http_grpc_access_log_impl_test.cc b/test/extensions/access_loggers/grpc/http_grpc_access_log_impl_test.cc index 8c78f302f5612..61946a9ed4b6c 100644 --- 
a/test/extensions/access_loggers/grpc/http_grpc_access_log_impl_test.cc +++ b/test/extensions/access_loggers/grpc/http_grpc_access_log_impl_test.cc @@ -48,6 +48,36 @@ class MockGrpcAccessLoggerCache : public GrpcCommon::GrpcAccessLoggerCache { Common::GrpcAccessLoggerType logger_type, Stats::Scope& scope)); }; +// Test for the issue described in https://github.com/envoyproxy/envoy/pull/18081 +TEST(HttpGrpcAccessLog, TlsLifetimeCheck) { + NiceMock tls; + Stats::IsolatedStoreImpl scope; + std::shared_ptr logger_cache{new MockGrpcAccessLoggerCache()}; + tls.defer_data_ = true; + { + AccessLog::MockFilter* filter{new NiceMock()}; + envoy::extensions::access_loggers::grpc::v3::HttpGrpcAccessLogConfig config; + config.mutable_common_config()->set_transport_api_version( + envoy::config::core::v3::ApiVersion::V3); + EXPECT_CALL(*logger_cache, getOrCreateLogger(_, _, _)) + .WillOnce([](const envoy::extensions::access_loggers::grpc::v3::CommonGrpcAccessLogConfig& + common_config, + Common::GrpcAccessLoggerType type, Stats::Scope&) { + // This is a part of the actual getOrCreateLogger code path and shouldn't crash. + std::make_pair(MessageUtil::hash(common_config), type); + return nullptr; + }); + // Set tls callback in the HttpGrpcAccessLog constructor, + // but it is not called yet since we have defer_data_ = true. + const auto access_log = std::make_unique(AccessLog::FilterPtr{filter}, + config, tls, logger_cache, scope); + // Intentionally make access_log die earlier in this scope to simulate the situation where the + // creator has been deleted yet the tls callback is not called yet. + } + // Verify the tls callback does not crash since it captures the env with proper lifetime. 
+ tls.call(); +} + class HttpGrpcAccessLogTest : public testing::Test { public: void init() { diff --git a/test/extensions/access_loggers/grpc/tcp_config_test.cc b/test/extensions/access_loggers/grpc/tcp_config_test.cc index 9889c337de476..7a2c5f50b2004 100644 --- a/test/extensions/access_loggers/grpc/tcp_config_test.cc +++ b/test/extensions/access_loggers/grpc/tcp_config_test.cc @@ -76,6 +76,44 @@ TEST_F(TcpGrpcAccessLogConfigTest, Ok) { run("good_cluster"); } // Wrong configuration with invalid clusters. TEST_F(TcpGrpcAccessLogConfigTest, InvalidCluster) { run("invalid"); } +class MockGrpcAccessLoggerCache : public GrpcCommon::GrpcAccessLoggerCache { +public: + // GrpcAccessLoggerCache + MOCK_METHOD(GrpcCommon::GrpcAccessLoggerSharedPtr, getOrCreateLogger, + (const envoy::extensions::access_loggers::grpc::v3::CommonGrpcAccessLogConfig& config, + Common::GrpcAccessLoggerType logger_type, Stats::Scope& scope)); +}; + +// Test for the issue described in https://github.com/envoyproxy/envoy/pull/18081 +TEST(TcpGrpcAccessLog, TlsLifetimeCheck) { + NiceMock tls; + Stats::IsolatedStoreImpl scope; + std::shared_ptr logger_cache{new MockGrpcAccessLoggerCache()}; + tls.defer_data_ = true; + { + AccessLog::MockFilter* filter{new NiceMock()}; + envoy::extensions::access_loggers::grpc::v3::TcpGrpcAccessLogConfig config; + config.mutable_common_config()->set_transport_api_version( + envoy::config::core::v3::ApiVersion::V3); + EXPECT_CALL(*logger_cache, getOrCreateLogger(_, _, _)) + .WillOnce([](const envoy::extensions::access_loggers::grpc::v3::CommonGrpcAccessLogConfig& + common_config, + Common::GrpcAccessLoggerType type, Stats::Scope&) { + // This is a part of the actual getOrCreateLogger code path and shouldn't crash. + std::make_pair(MessageUtil::hash(common_config), type); + return nullptr; + }); + // Set tls callback in the TcpGrpcAccessLog constructor, + // but it is not called yet since we have defer_data_ = true. 
+ const auto access_log = std::make_unique(AccessLog::FilterPtr{filter}, config, + tls, logger_cache, scope); + // Intentionally make access_log die earlier in this scope to simulate the situation where the + // creator has been deleted yet the tls callback is not called yet. + } + // Verify the tls callback does not crash since it captures the env with proper lifetime. + tls.call(); +} + } // namespace } // namespace TcpGrpc } // namespace AccessLoggers From 76f7c3c70903048c33c9d72183f20a0d2eb95dde Mon Sep 17 00:00:00 2001 From: Takeshi Yoneda Date: Sat, 18 Sep 2021 21:51:00 +0900 Subject: [PATCH 069/121] doc: add note about rate limit on Docker Hub. (#18169) Following the comment: #17047 (comment) Signed-off-by: Takeshi Yoneda Signed-off-by: gayang --- docs/root/start/install.rst | 3 +++ 1 file changed, 3 insertions(+) diff --git a/docs/root/start/install.rst b/docs/root/start/install.rst index 30d6a17c03503..85b039e9aa4e2 100644 --- a/docs/root/start/install.rst +++ b/docs/root/start/install.rst @@ -252,3 +252,6 @@ The following table shows the available Docker images The ``envoy-build-ubuntu`` image does not contain a working Envoy server, but can be used for building Envoy and related containers. This image requires 4-5GB of available disk space to use. + + All the docker images are available in Docker Hub, but `its rate limit policy `_ + doesn't apply to users since the "envoyproxy" namespace is whitelisted. 
From 787f8739faaa1913a4db8fd4381c72dea8c69583 Mon Sep 17 00:00:00 2001 From: phlax Date: Sun, 19 Sep 2021 01:31:16 +0100 Subject: [PATCH 070/121] tooling: Use upstream docs.sphinx-runner (#18145) Signed-off-by: Ryan Northey Signed-off-by: gayang --- tools/base/requirements.in | 1 + tools/base/requirements.txt | 20 +- tools/docs/BUILD | 30 +- tools/docs/sphinx_runner.py | 222 -------- tools/docs/tests/test_sphinx_runner.py | 672 ------------------------- 5 files changed, 33 insertions(+), 912 deletions(-) delete mode 100644 tools/docs/sphinx_runner.py delete mode 100644 tools/docs/tests/test_sphinx_runner.py diff --git a/tools/base/requirements.in b/tools/base/requirements.in index 7649830e6caab..db00988a40f69 100644 --- a/tools/base/requirements.in +++ b/tools/base/requirements.in @@ -7,6 +7,7 @@ envoy.code_format.python_check>=0.0.4 envoy.dependency.pip_check>=0.0.4 envoy.distribution.release envoy.distribution.verify +envoy.docs.sphinx-runner>=0.0.3 envoy.gpg.sign flake8 frozendict diff --git a/tools/base/requirements.txt b/tools/base/requirements.txt index feee128ea615b..570fe423f0f23 100644 --- a/tools/base/requirements.txt +++ b/tools/base/requirements.txt @@ -169,7 +169,9 @@ charset-normalizer==2.0.4 \ colorama==0.4.4 \ --hash=sha256:5941b2b48a20143d2267e95b1c2a7603ce057ee39fd88e7329b0c292aa16869b \ --hash=sha256:9f47eda37229f68eee03b24b9748937c7dc3868f906e8ba69fbcbdd3bc5dc3e2 - # via -r requirements.in + # via + # -r requirements.in + # envoy.docs.sphinx-runner coloredlogs==15.0.1 \ --hash=sha256:612ee75c546f53e92e70049c9dbfcc18c935a2b9a53b66085ce9ef6a6e5c0934 \ --hash=sha256:7c991aa71a4577af2f82600d8f8f3a89f936baeaf9b50a9c197da014e5bf16b0 @@ -259,6 +261,7 @@ docutils==0.16 \ --hash=sha256:0c5b78adfbf7762415433f5515cd5c9e762339e23369dbe8000d84a4bf4ab3af \ --hash=sha256:c2de3a60e9e7d07be26b7f2b00ca0309c207e06c100f9cc2a94931fc75a478fc # via + # envoy.docs.sphinx-runner # sphinx # sphinx-rtd-theme # sphinx-tabs @@ -279,6 +282,7 @@ envoy.base.runner==0.0.4 \ # 
via # envoy.base.checker # envoy.distribution.release + # envoy.docs.sphinx-runner # envoy.github.abstract # envoy.gpg.sign envoy.base.utils==0.0.8 \ @@ -288,6 +292,7 @@ envoy.base.utils==0.0.8 \ # envoy.code-format.python-check # envoy.dependency.pip-check # envoy.distribution.distrotest + # envoy.docs.sphinx-runner # envoy.github.release # envoy.gpg.sign envoy.code-format.python-check==0.0.4 \ @@ -308,6 +313,10 @@ envoy.distribution.verify==0.0.2 \ envoy.docker.utils==0.0.2 \ --hash=sha256:a12cb57f0b6e204d646cbf94f927b3a8f5a27ed15f60d0576176584ec16a4b76 # via envoy.distribution.distrotest +envoy.docs.sphinx-runner==0.0.3 \ + --hash=sha256:6da14a524cb1ede4c3d3f07c3bf2659405e8fe9191af9041979046c54b0ed35f \ + --hash=sha256:b497c0ed9756e91a9b5f6fbd3bef637b3b5b8597af040c9f89d8a7a414dbecec + # via -r requirements.in envoy.github.abstract==0.0.16 \ --hash=sha256:badf04104492fb6b37ba2163f2b225132ed04aba680beb218e7c7d918564f8ee # via @@ -676,7 +685,9 @@ sphinxcontrib-htmlhelp==2.0.0 \ sphinxcontrib-httpdomain==1.7.0 \ --hash=sha256:1fb5375007d70bf180cdd1c79e741082be7aa2d37ba99efe561e1c2e3f38191e \ --hash=sha256:ac40b4fba58c76b073b03931c7b8ead611066a6aebccafb34dc19694f4eb6335 - # via -r requirements.in + # via + # -r requirements.in + # envoy.docs.sphinx-runner sphinxcontrib-jsmath==1.0.1 \ --hash=sha256:2ec2eaebfb78f3f2078e73666b1415417a116cc848b72e5172e596c871103178 \ --hash=sha256:a9925e4a4587247ed2191a22df5f6970656cb8ca2bd6284309578f2153e0c4b8 @@ -690,11 +701,14 @@ sphinxcontrib-serializinghtml==1.1.5 \ --hash=sha256:aa5f6de5dfdf809ef505c4895e51ef5c9eac17d0f287933eb49ec495280b6952 # via # -r requirements.in + # envoy.docs.sphinx-runner # sphinx sphinxext-rediraffe==0.2.7 \ --hash=sha256:651dcbfae5ffda9ffd534dfb8025f36120e5efb6ea1a33f5420023862b9f725d \ --hash=sha256:9e430a52d4403847f4ffb3a8dd6dfc34a9fe43525305131f52ed899743a5fd8c - # via -r requirements.in + # via + # -r requirements.in + # envoy.docs.sphinx-runner toml==0.10.2 \ 
--hash=sha256:806143ae5bfb6a3c6e736a764057db0e6a0e05e338b5630894a5f779cabb4f9b \ --hash=sha256:b3bda1d108d5dd99f4a20d24d9c348e91c4db7ab1b749200bded2f839ccbe68f diff --git a/tools/docs/BUILD b/tools/docs/BUILD index 4f82feb9de76e..6438eeac3af41 100644 --- a/tools/docs/BUILD +++ b/tools/docs/BUILD @@ -1,6 +1,6 @@ load("@rules_python//python:defs.bzl", "py_binary") +load("@base_pip3//:requirements.bzl", "entry_point") load("//bazel:envoy_build_system.bzl", "envoy_package") -load("@base_pip3//:requirements.bzl", "requirement") load("//tools/base:envoy_python.bzl", "envoy_py_binary") licenses(["notice"]) # Apache 2 @@ -33,20 +33,20 @@ py_binary( ], ) -envoy_py_binary( - name = "tools.docs.sphinx_runner", - deps = [ - "//tools/base:runner", - "//tools/base:utils", - requirement("colorama"), - requirement("Sphinx"), - requirement("sphinx-copybutton"), - requirement("sphinx-rtd-theme"), - requirement("sphinx-tabs"), - requirement("sphinxcontrib-httpdomain"), - requirement("sphinxcontrib-serializinghtml"), - requirement("sphinxext-rediraffe"), - ], +# The upstream lib is maintained here: +# +# https://github.com/envoyproxy/pytooling/tree/main/envoy.docs.sphinx_runner +# +# Please submit issues/PRs to the pytooling repo: +# +# https://github.com/envoyproxy/pytooling + +alias( + name = "sphinx_runner", + actual = entry_point( + pkg = "envoy.docs.sphinx_runner", + script = "envoy.docs.sphinx_runner", + ), ) envoy_py_binary( diff --git a/tools/docs/sphinx_runner.py b/tools/docs/sphinx_runner.py deleted file mode 100644 index 53b330c16d2b9..0000000000000 --- a/tools/docs/sphinx_runner.py +++ /dev/null @@ -1,222 +0,0 @@ -import argparse -import os -import pathlib -import platform -import re -import sys -import tarfile -from functools import cached_property -from typing import Tuple - -from colorama import Fore, Style # type:ignore - -from sphinx.cmd.build import main as sphinx_build # type:ignore - -from tools.base import runner, utils - - -class SphinxBuildError(Exception): - 
pass - - -class SphinxEnvError(Exception): - pass - - -class SphinxRunner(runner.Runner): - _build_dir = "." - _build_sha = "UNKNOWN" - - @property - def blob_sha(self) -> str: - """Returns either the version tag or the current build sha""" - return self.docs_tag or self.build_sha - - @property - def build_dir(self) -> pathlib.Path: - """Returns current build_dir - most likely a temp directory""" - return pathlib.Path(self.tempdir.name) - - @property - def build_sha(self) -> str: - """Returns either a provided build_sha or a default""" - return self.args.build_sha or self._build_sha - - @cached_property - def colors(self) -> dict: - """Color scheme for build summary""" - return dict(chrome=Fore.LIGHTYELLOW_EX, key=Fore.LIGHTCYAN_EX, value=Fore.LIGHTMAGENTA_EX) - - @cached_property - def config_file(self) -> pathlib.Path: - """Populates a config file with self.configs and returns the file path""" - return utils.to_yaml(self.configs, self.config_file_path) - - @property - def config_file_path(self) -> pathlib.Path: - """Path to a (temporary) build config""" - return self.build_dir.joinpath("build.yaml") - - @cached_property - def configs(self) -> dict: - """Build configs derived from provided args""" - _configs = dict( - version_string=self.version_string, - release_level=self.release_level, - blob_sha=self.blob_sha, - version_number=self.version_number, - docker_image_tag_name=self.docker_image_tag_name) - if self.validator_path: - _configs["validator_path"] = str(self.validator_path) - if self.descriptor_path: - _configs["descriptor_path"] = str(self.descriptor_path) - return _configs - - @property - def descriptor_path(self) -> pathlib.Path: - """Path to a descriptor file for config validation""" - return pathlib.Path(self.args.descriptor_path) - - @property - def docker_image_tag_name(self) -> str: - """Tag name of current docker image""" - return re.sub(r"([0-9]+\.[0-9]+)\.[0-9]+.*", r"v\1-latest", self.version_number) - - @property - def docs_tag(self) -> str: 
- """Tag name - ie named version for this docs build""" - return self.args.docs_tag - - @cached_property - def html_dir(self) -> pathlib.Path: - """Path to (temporary) directory for outputting html""" - return self.build_dir.joinpath("generated", "html") - - @property - def output_filename(self) -> pathlib.Path: - """Path to tar file for saving generated html docs""" - return pathlib.Path(self.args.output_filename) - - @property - def py_compatible(self) -> bool: - """Current python version is compatible""" - return bool(sys.version_info.major == 3 and sys.version_info.minor >= 8) - - @property - def release_level(self) -> str: - """Current python version is compatible""" - return "tagged" if self.docs_tag else "pre-release" - - @cached_property - def rst_dir(self) -> pathlib.Path: - """Populates an rst directory with contents of given rst tar, - and returns the path to the directory - """ - rst_dir = self.build_dir.joinpath("generated", "rst") - if self.rst_tar: - utils.extract(rst_dir, self.rst_tar) - return rst_dir - - @property - def rst_tar(self) -> pathlib.Path: - """Path to the rst tarball""" - return pathlib.Path(self.args.rst_tar) - - @property - def sphinx_args(self) -> Tuple[str, ...]: - """Command args for sphinx""" - return ( - "-W", "--keep-going", "--color", "-b", "html", str(self.rst_dir), str(self.html_dir)) - - @property - def validator_path(self) -> pathlib.Path: - """Path to validator utility for validating snippets""" - return pathlib.Path(self.args.validator_path) - - @property - def version_file(self) -> pathlib.Path: - """Path to version files for deriving docs version""" - return pathlib.Path(self.args.version_file) - - @cached_property - def version_number(self) -> str: - """Semantic version""" - return self.version_file.read_text().strip() - - @property - def version_string(self) -> str: - """Version string derived from either docs_tag or build_sha""" - return ( - f"tag-{self.docs_tag}" - if self.docs_tag else 
f"{self.version_number}-{self.build_sha[:6]}") - - def add_arguments(self, parser: argparse.ArgumentParser) -> None: - super().add_arguments(parser) - parser.add_argument("--build_sha") - parser.add_argument("--docs_tag") - parser.add_argument("--version_file") - parser.add_argument("--validator_path") - parser.add_argument("--descriptor_path") - parser.add_argument("rst_tar") - parser.add_argument("output_filename") - - def build_html(self) -> None: - if sphinx_build(self.sphinx_args): - raise SphinxBuildError("BUILD FAILED") - - def build_summary(self) -> None: - print() - print(self._color("#### Sphinx build configs #####################")) - print(self._color("###")) - for k, v in self.configs.items(): - print(f"{self._color('###')} {self._color(k, 'key')}: {self._color(v, 'value')}") - print(self._color("###")) - print(self._color("###############################################")) - print() - - def check_env(self) -> None: - if not self.py_compatible: - raise SphinxEnvError( - f"ERROR: python version must be >= 3.8, you have {platform.python_version()}") - if not self.configs["release_level"] == "tagged": - return - if f"v{self.version_number}" != self.docs_tag: - raise SphinxEnvError( - "Given git tag does not match the VERSION file content:" - f"{self.docs_tag} vs v{self.version_number}") - # this should probs only check the first line - version_current = self.rst_dir.joinpath("version_history", "current.rst").read_text() - if not self.version_number in version_current: - raise SphinxEnvError( - f"Git tag ({self.version_number}) not found in version_history/current.rst") - - def create_tarball(self) -> None: - with tarfile.open(self.output_filename, "w") as tar: - tar.add(self.html_dir, arcname=".") - - @runner.cleansup - def run(self): - os.environ["ENVOY_DOCS_BUILD_CONFIG"] = str(self.config_file) - try: - self.check_env() - except SphinxEnvError as e: - print(e) - return 1 - self.build_summary() - try: - self.build_html() - except SphinxBuildError as e: 
- print(e) - return 1 - self.create_tarball() - - def _color(self, msg, name=None): - return f"{self.colors[name or 'chrome']}{msg}{Style.RESET_ALL}" - - -def main(*args) -> int: - return SphinxRunner(*args).run() - - -if __name__ == "__main__": - sys.exit(main(*sys.argv[1:])) diff --git a/tools/docs/tests/test_sphinx_runner.py b/tools/docs/tests/test_sphinx_runner.py deleted file mode 100644 index ad6a3fe1f06c8..0000000000000 --- a/tools/docs/tests/test_sphinx_runner.py +++ /dev/null @@ -1,672 +0,0 @@ -from unittest.mock import MagicMock, PropertyMock - -import pytest - -from tools.docs import sphinx_runner - - -def test_sphinx_runner_constructor(): - runner = sphinx_runner.SphinxRunner() - assert runner._build_sha == "UNKNOWN" - assert "blob_dir" not in runner.__dict__ - - -@pytest.mark.parametrize("docs_tag", [None, "", "SOME_DOCS_TAG"]) -def test_sphinx_runner_blob_sha(patches, docs_tag): - runner = sphinx_runner.SphinxRunner() - patched = patches( - ("SphinxRunner.build_sha", dict(new_callable=PropertyMock)), - ("SphinxRunner.docs_tag", dict(new_callable=PropertyMock)), - prefix="tools.docs.sphinx_runner") - - with patched as (m_sha, m_tag): - m_tag.return_value = docs_tag - if docs_tag: - assert runner.blob_sha == docs_tag - else: - assert runner.blob_sha == m_sha.return_value - assert "blob_sha" not in runner.__dict__ - - -def test_sphinx_runner_build_dir(patches): - runner = sphinx_runner.SphinxRunner() - patched = patches( - "pathlib", - ("SphinxRunner.tempdir", dict(new_callable=PropertyMock)), - prefix="tools.docs.sphinx_runner") - - with patched as (m_plib, m_temp): - assert runner.build_dir == m_plib.Path.return_value - - assert ( - list(m_plib.Path.call_args) - == [(m_temp.return_value.name, ), {}]) - assert "build_dir" not in runner.__dict__ - - -@pytest.mark.parametrize("build_sha", [None, "", "SOME_BUILD_SHA"]) -def test_sphinx_runner_build_sha(patches, build_sha): - runner = sphinx_runner.SphinxRunner() - patched = patches( - ("SphinxRunner.args", 
dict(new_callable=PropertyMock)), - prefix="tools.docs.sphinx_runner") - - with patched as (m_args, ): - m_args.return_value.build_sha = build_sha - if build_sha: - assert runner.build_sha == build_sha - else: - assert runner.build_sha == "UNKNOWN" - - assert "build_sha" not in runner.__dict__ - - -def test_sphinx_runner_colors(patches): - runner = sphinx_runner.SphinxRunner() - patched = patches( - "Fore", - prefix="tools.docs.sphinx_runner") - - with patched as (m_colors, ): - assert ( - runner.colors - == dict( - chrome=m_colors.LIGHTYELLOW_EX, - key=m_colors.LIGHTCYAN_EX, - value=m_colors.LIGHTMAGENTA_EX)) - - assert "colors" in runner.__dict__ - - -def test_sphinx_runner_config_file(patches): - runner = sphinx_runner.SphinxRunner() - patched = patches( - "utils", - ("SphinxRunner.config_file_path", dict(new_callable=PropertyMock)), - ("SphinxRunner.configs", dict(new_callable=PropertyMock)), - prefix="tools.docs.sphinx_runner") - - with patched as (m_utils, m_fpath, m_configs): - assert ( - runner.config_file - == m_utils.to_yaml.return_value) - - assert ( - list(m_utils.to_yaml.call_args) - == [(m_configs.return_value, m_fpath.return_value), {}]) - assert "config_file" in runner.__dict__ - - -def test_sphinx_runner_config_file_path(patches): - runner = sphinx_runner.SphinxRunner() - patched = patches( - ("SphinxRunner.build_dir", dict(new_callable=PropertyMock)), - prefix="tools.docs.sphinx_runner") - - with patched as (m_build, ): - assert runner.config_file_path == m_build.return_value.joinpath.return_value - - assert ( - list(m_build.return_value.joinpath.call_args) - == [('build.yaml',), {}]) - assert "config_file_path" not in runner.__dict__ - - -def test_sphinx_runner_configs(patches): - runner = sphinx_runner.SphinxRunner() - mapping = dict( - version_string="version_string", - release_level="release_level", - blob_sha="blob_sha", - version_number="version_number", - docker_image_tag_name="docker_image_tag_name", - validator_path="validator_path", - 
descriptor_path="descriptor_path") - - patched = patches( - *[f"SphinxRunner.{v}" for v in mapping.values()], - prefix="tools.docs.sphinx_runner") - - with patched as _mocks: - result = runner.configs - - _configs = {} - for k, v in mapping.items(): - _v = _mocks[list(mapping.values()).index(v)] - if k in ["validator_path", "descriptor_path"]: - _v = str(_v) - _configs[k] = _v - assert result == _configs - assert "configs" in runner.__dict__ - - -def test_sphinx_runner_descriptor_path(patches): - runner = sphinx_runner.SphinxRunner() - patched = patches( - "pathlib", - ("SphinxRunner.args", dict(new_callable=PropertyMock)), - prefix="tools.docs.sphinx_runner") - - with patched as (m_plib, m_args): - assert ( - runner.descriptor_path - == m_plib.Path.return_value) - - assert ( - list(m_plib.Path.call_args) - == [(m_args.return_value.descriptor_path,), {}]) - assert "descriptor_path" not in runner.__dict__ - - -def test_sphinx_runner_docker_image_tag_name(patches): - runner = sphinx_runner.SphinxRunner() - patched = patches( - "re", - ("SphinxRunner.version_number", dict(new_callable=PropertyMock)), - prefix="tools.docs.sphinx_runner") - - with patched as (m_re, m_version): - assert ( - runner.docker_image_tag_name - == m_re.sub.return_value) - - assert ( - list(m_re.sub.call_args) - == [('([0-9]+\\.[0-9]+)\\.[0-9]+.*', 'v\\1-latest', - m_version.return_value), {}]) - assert "docker_image_tag_name" not in runner.__dict__ - - -def test_sphinx_runner_docs_tag(patches): - runner = sphinx_runner.SphinxRunner() - patched = patches( - ("SphinxRunner.args", dict(new_callable=PropertyMock)), - prefix="tools.docs.sphinx_runner") - - with patched as (m_args, ): - assert runner.docs_tag == m_args.return_value.docs_tag - - assert "docs_tag" not in runner.__dict__ - - -def test_sphinx_runner_html_dir(patches): - runner = sphinx_runner.SphinxRunner() - patched = patches( - ("SphinxRunner.build_dir", dict(new_callable=PropertyMock)), - prefix="tools.docs.sphinx_runner") - - with 
patched as (m_build, ): - assert runner.html_dir == m_build.return_value.joinpath.return_value - - assert ( - list(m_build.return_value.joinpath.call_args) - == [('generated', 'html'), {}]) - assert "html_dir" in runner.__dict__ - - -def test_sphinx_runner_output_filename(patches): - runner = sphinx_runner.SphinxRunner() - patched = patches( - "pathlib", - ("SphinxRunner.args", dict(new_callable=PropertyMock)), - prefix="tools.docs.sphinx_runner") - - with patched as (m_plib, m_args): - assert runner.output_filename == m_plib.Path.return_value - - assert ( - list(m_plib.Path.call_args) - == [(m_args.return_value.output_filename, ), {}]) - assert "output_filename" not in runner.__dict__ - - -@pytest.mark.parametrize("major", [2, 3, 4]) -@pytest.mark.parametrize("minor", [5, 6, 7, 8, 9]) -def test_sphinx_runner_py_compatible(patches, major, minor): - runner = sphinx_runner.SphinxRunner() - patched = patches( - "bool", - "sys", - prefix="tools.docs.sphinx_runner") - - with patched as (m_bool, m_sys): - m_sys.version_info.major = major - m_sys.version_info.minor = minor - assert runner.py_compatible == m_bool.return_value - expected = ( - True - if major == 3 and minor >= 8 - else False) - assert ( - list(m_bool.call_args) - == [(expected,), {}]) - assert "py_compatible" not in runner.__dict__ - - -@pytest.mark.parametrize("docs_tag", [None, "", "SOME_DOCS_TAG"]) -def test_sphinx_runner_release_level(patches, docs_tag): - runner = sphinx_runner.SphinxRunner() - patched = patches( - ("SphinxRunner.docs_tag", dict(new_callable=PropertyMock)), - prefix="tools.docs.sphinx_runner") - - with patched as (m_tag, ): - m_tag.return_value = docs_tag - if docs_tag: - assert runner.release_level == "tagged" - else: - assert runner.release_level == "pre-release" - assert "release_level" not in runner.__dict__ - - -@pytest.mark.parametrize("rst_tar", [None, "", "SOME_DOCS_TAG"]) -def test_sphinx_runner_rst_dir(patches, rst_tar): - runner = sphinx_runner.SphinxRunner() - patched = 
patches( - "pathlib", - "utils", - ("SphinxRunner.build_dir", dict(new_callable=PropertyMock)), - ("SphinxRunner.rst_tar", dict(new_callable=PropertyMock)), - prefix="tools.docs.sphinx_runner") - - with patched as (m_plib, m_utils, m_dir, m_rst): - m_rst.return_value = rst_tar - assert runner.rst_dir == m_dir.return_value.joinpath.return_value - - assert ( - list(m_dir.return_value.joinpath.call_args) - == [('generated', 'rst'), {}]) - - if rst_tar: - assert ( - list(m_utils.extract.call_args) - == [(m_dir.return_value.joinpath.return_value, rst_tar), {}]) - else: - assert not m_utils.extract.called - assert "rst_dir" in runner.__dict__ - - -def test_sphinx_runner_rst_tar(patches): - runner = sphinx_runner.SphinxRunner() - patched = patches( - "pathlib", - ("SphinxRunner.args", dict(new_callable=PropertyMock)), - prefix="tools.docs.sphinx_runner") - - with patched as (m_plib, m_args): - assert runner.rst_tar == m_plib.Path.return_value - - assert ( - list(m_plib.Path.call_args) - == [(m_args.return_value.rst_tar, ), {}]) - assert "rst_tar" not in runner.__dict__ - - -def test_sphinx_runner_sphinx_args(patches): - runner = sphinx_runner.SphinxRunner() - patched = patches( - ("SphinxRunner.html_dir", dict(new_callable=PropertyMock)), - ("SphinxRunner.rst_dir", dict(new_callable=PropertyMock)), - prefix="tools.docs.sphinx_runner") - - with patched as (m_html, m_rst): - assert ( - runner.sphinx_args - == ('-W', '--keep-going', '--color', '-b', 'html', - str(m_rst.return_value), - str(m_html.return_value))) - - assert "sphinx_args" not in runner.__dict__ - - -def test_sphinx_runner_validator_path(patches): - runner = sphinx_runner.SphinxRunner() - patched = patches( - "pathlib", - ("SphinxRunner.args", dict(new_callable=PropertyMock)), - prefix="tools.docs.sphinx_runner") - - with patched as (m_plib, m_args): - assert ( - runner.validator_path - == m_plib.Path.return_value) - - assert ( - list(m_plib.Path.call_args) - == [(m_args.return_value.validator_path,), {}]) - 
assert "validator_path" not in runner.__dict__ - - -def test_sphinx_runner_version_file(patches): - runner = sphinx_runner.SphinxRunner() - patched = patches( - "pathlib", - ("SphinxRunner.args", dict(new_callable=PropertyMock)), - prefix="tools.docs.sphinx_runner") - - with patched as (m_plib, m_args): - assert runner.version_file == m_plib.Path.return_value - - assert ( - list(m_plib.Path.call_args) - == [(m_args.return_value.version_file, ), {}]) - assert "version_file" not in runner.__dict__ - - -def test_sphinx_runner_version_number(patches): - runner = sphinx_runner.SphinxRunner() - patched = patches( - ("SphinxRunner.version_file", dict(new_callable=PropertyMock)), - prefix="tools.docs.sphinx_runner") - - with patched as (m_file, ): - assert ( - runner.version_number - == m_file.return_value.read_text.return_value.strip.return_value) - - assert ( - list(m_file.return_value.read_text.call_args) - == [(), {}]) - assert ( - list(m_file.return_value.read_text.return_value.strip.call_args) - == [(), {}]) - - assert "version_number" in runner.__dict__ - - -@pytest.mark.parametrize("docs_tag", [None, "", "SOME_DOCS_TAG"]) -def test_sphinx_runner_version_string(patches, docs_tag): - runner = sphinx_runner.SphinxRunner() - patched = patches( - ("SphinxRunner.docs_tag", dict(new_callable=PropertyMock)), - ("SphinxRunner.build_sha", dict(new_callable=PropertyMock)), - ("SphinxRunner.version_number", dict(new_callable=PropertyMock)), - prefix="tools.docs.sphinx_runner") - - with patched as (m_tag, m_sha, m_version): - m_tag.return_value = docs_tag - if docs_tag: - assert runner.version_string == f"tag-{docs_tag}" - else: - assert runner.version_string == f"{m_version.return_value}-{m_sha.return_value.__getitem__.return_value}" - assert ( - list(m_sha.return_value.__getitem__.call_args) - == [(slice(None, 6, None),), {}]) - - assert "version_string" not in runner.__dict__ - - -def test_sphinx_runner_add_arguments(patches): - runner = sphinx_runner.SphinxRunner() - parser 
= MagicMock() - patched = patches( - "runner.Runner.add_arguments", - prefix="tools.docs.sphinx_runner") - - with patched as (m_super, ): - runner.add_arguments(parser) - - assert ( - list(m_super.call_args) - == [(parser, ), {}]) - assert ( - list(list(c) for c in parser.add_argument.call_args_list) - == [[('--build_sha',), {}], - [('--docs_tag',), {}], - [('--version_file',), {}], - [('--validator_path',), {}], - [('--descriptor_path',), {}], - [('rst_tar',), {}], - [('output_filename',), {}]]) - - -@pytest.mark.parametrize("fails", [True, False]) -def test_sphinx_runner_build_html(patches, fails): - runner = sphinx_runner.SphinxRunner() - patched = patches( - "sphinx_build", - ("SphinxRunner.sphinx_args", dict(new_callable=PropertyMock)), - prefix="tools.docs.sphinx_runner") - - with patched as (m_sphinx, m_args): - m_sphinx.side_effect = lambda s: fails - e = None - if fails: - with pytest.raises(sphinx_runner.SphinxBuildError) as e: - runner.build_html() - else: - runner.build_html() - - assert ( - list(m_sphinx.call_args) - == [(m_args.return_value,), {}]) - - if fails: - assert e.value.args == ('BUILD FAILED',) - else: - assert not e - - -def test_sphinx_runner_build_summary(patches): - runner = sphinx_runner.SphinxRunner() - patched = patches( - "print", - "SphinxRunner._color", - ("SphinxRunner.configs", dict(new_callable=PropertyMock)), - prefix="tools.docs.sphinx_runner") - - with patched as (m_print, m_color, m_configs): - m_configs.return_value.items.return_value = (("a", "A"), ("b", "B")) - runner.build_summary() - - assert ( - list(list(c) for c in m_print.call_args_list) - == [[(), {}], - [(m_color.return_value,), {}], - [(m_color.return_value,), {}], - [(f"{m_color.return_value} {m_color.return_value}: {m_color.return_value}",), {}], - [(f"{m_color.return_value} {m_color.return_value}: {m_color.return_value}",), {}], - [(m_color.return_value,), {}], - [(m_color.return_value,), {}], - [(), {}]]) - assert ( - list(list(c) for c in 
m_color.call_args_list) - == [[('#### Sphinx build configs #####################',), {}], - [('###',), {}], - [('###',), {}], - [('a', 'key'), {}], - [('A', 'value'), {}], - [('###',), {}], - [('b', 'key'), {}], - [('B', 'value'), {}], - [('###',), {}], - [('###############################################',), {}]]) - - -@pytest.mark.parametrize("py_compat", [True, False]) -@pytest.mark.parametrize("release_level", ["pre-release", "tagged"]) -@pytest.mark.parametrize("version_number", ["1.17", "1.23", "1.43"]) -@pytest.mark.parametrize("docs_tag", ["v1.17", "v1.23", "v1.73"]) -@pytest.mark.parametrize("current", ["XXX v1.17 ZZZ", "AAA v1.23 VVV", "BBB v1.73 EEE"]) -def test_sphinx_runner_check_env(patches, py_compat, release_level, version_number, docs_tag, current): - runner = sphinx_runner.SphinxRunner() - patched = patches( - "platform", - ("SphinxRunner.configs", dict(new_callable=PropertyMock)), - ("SphinxRunner.version_number", dict(new_callable=PropertyMock)), - ("SphinxRunner.docs_tag", dict(new_callable=PropertyMock)), - ("SphinxRunner.py_compatible", dict(new_callable=PropertyMock)), - ("SphinxRunner.rst_dir", dict(new_callable=PropertyMock)), - prefix="tools.docs.sphinx_runner") - - fails = ( - not py_compat - or (release_level == "tagged" - and (f"v{version_number}" != docs_tag - or version_number not in current))) - - with patched as (m_platform, m_configs, m_version, m_tag, m_py, m_rst): - m_py.return_value = py_compat - m_configs.return_value.__getitem__.return_value = release_level - m_version.return_value = version_number - m_tag.return_value = docs_tag - m_rst.return_value.joinpath.return_value.read_text.return_value = current - - if fails: - with pytest.raises(sphinx_runner.SphinxEnvError) as e: - runner.check_env() - else: - runner.check_env() - - if not py_compat: - assert ( - e.value.args - == ("ERROR: python version must be >= 3.8, " - f"you have {m_platform.python_version.return_value}", )) - return - - if release_level != "tagged": - return 
- - if f"v{version_number}" != docs_tag: - assert ( - e.value.args - == ("Given git tag does not match the VERSION file content:" - f"{docs_tag} vs v{version_number}", )) - return - - assert ( - list(m_rst.return_value.joinpath.call_args) - == [("version_history", "current.rst"), {}]) - - if version_number not in current: - assert ( - e.value.args - == (f"Git tag ({version_number}) not found in version_history/current.rst", )) - - -@pytest.mark.parametrize("exists", [True, False]) -def test_sphinx_runner_cleanup(patches, exists): - runner = sphinx_runner.SphinxRunner() - patched = patches( - ("SphinxRunner.tempdir", dict(new_callable=PropertyMock)), - prefix="tools.docs.sphinx_runner") - - with patched as (m_temp, ): - if exists: - runner.__dict__["tempdir"] = m_temp.return_value - assert not runner.cleanup() - - assert not "tempdir" in runner.__dict__ - if exists: - assert ( - list(m_temp.return_value.cleanup.call_args) - == [(), {}]) - else: - assert not m_temp.called - - -def test_sphinx_runner_create_tarball(patches): - runner = sphinx_runner.SphinxRunner() - patched = patches( - "tarfile", - ("SphinxRunner.output_filename", dict(new_callable=PropertyMock)), - ("SphinxRunner.html_dir", dict(new_callable=PropertyMock)), - prefix="tools.docs.sphinx_runner") - - with patched as (m_tar, m_out, m_html): - runner.create_tarball() - - assert ( - list(m_tar.open.call_args) - == [(m_out.return_value, 'w'), {}]) - assert ( - list(m_tar.open.return_value.__enter__.return_value.add.call_args) - == [(m_html.return_value,), {'arcname': '.'}]) - - -@pytest.mark.parametrize("check_fails", [True, False]) -@pytest.mark.parametrize("build_fails", [True, False]) -def test_sphinx_runner_run(patches, check_fails, build_fails): - runner = sphinx_runner.SphinxRunner() - patched = patches( - "print", - "os", - "SphinxRunner.build_summary", - "SphinxRunner.check_env", - "SphinxRunner.build_html", - "SphinxRunner.create_tarball", - ("SphinxRunner.config_file", 
dict(new_callable=PropertyMock)), - prefix="tools.docs.sphinx_runner") - - def _raise(error): - raise error - - assert runner.run.__wrapped__.__cleansup__ - - with patched as (m_print, m_os, m_summary, m_check, m_build, m_create, m_config): - if check_fails: - _check_error = sphinx_runner.SphinxEnvError("CHECK FAILED") - m_check.side_effect = lambda: _raise(_check_error) - if build_fails: - _build_error = sphinx_runner.SphinxBuildError("BUILD FAILED") - m_build.side_effect = lambda: _raise(_build_error) - assert runner.run() == (1 if (check_fails or build_fails) else None) - - assert ( - list(m_check.call_args) - == [(), {}]) - assert ( - list(m_os.environ.__setitem__.call_args) - == [('ENVOY_DOCS_BUILD_CONFIG', str(m_config.return_value)), {}]) - - if check_fails: - assert ( - list(m_print.call_args) - == [(_check_error,), {}]) - assert not m_summary.called - assert not m_build.called - assert not m_create.called - return - - assert ( - list(m_summary.call_args) - == [(), {}]) - assert ( - list(m_build.call_args) - == [(), {}]) - - if build_fails: - assert ( - list(m_print.call_args) - == [(_build_error,), {}]) - assert not m_create.called - return - - assert not m_print.called - assert ( - list(m_create.call_args) - == [(), {}]) - - -@pytest.mark.parametrize("color", [None, "COLOR"]) -def test_sphinx_runner__color(patches, color): - runner = sphinx_runner.SphinxRunner() - patched = patches( - "Style", - ("SphinxRunner.colors", dict(new_callable=PropertyMock)), - prefix="tools.docs.sphinx_runner") - - with patched as (m_style, m_colors): - assert ( - runner._color("MSG", color) - == f"{m_colors.return_value.__getitem__.return_value}MSG{m_style.RESET_ALL}") - assert ( - list(m_colors.return_value.__getitem__.call_args) - == [(color or "chrome",), {}]) - - -def test_sphinx_runner_main(command_main): - command_main( - sphinx_runner.main, - "tools.docs.sphinx_runner.SphinxRunner") From 56f4f2ede76e336d51e9a13ab115b1941d9fbdac Mon Sep 17 00:00:00 2001 From: ankatare 
Date: Sun, 19 Sep 2021 20:58:04 +0530 Subject: [PATCH 071/121] correcting urls in deployment types docs (#18176) Signed-off-by: Abhay Narayan Katare Signed-off-by: gayang --- docs/root/intro/deployment_types/double_proxy.rst | 2 +- docs/root/intro/deployment_types/front_proxy.rst | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/root/intro/deployment_types/double_proxy.rst b/docs/root/intro/deployment_types/double_proxy.rst index cbf6cef40f4b6..dc21aaf5a23a7 100644 --- a/docs/root/intro/deployment_types/double_proxy.rst +++ b/docs/root/intro/deployment_types/double_proxy.rst @@ -22,4 +22,4 @@ Configuration template ^^^^^^^^^^^^^^^^^^^^^^ The source distribution includes an example double proxy configuration. See -:ref:`here ` for more information. +:ref:`here ` for more information. diff --git a/docs/root/intro/deployment_types/front_proxy.rst b/docs/root/intro/deployment_types/front_proxy.rst index a8a11d4177c96..efd6fa300a26a 100644 --- a/docs/root/intro/deployment_types/front_proxy.rst +++ b/docs/root/intro/deployment_types/front_proxy.rst @@ -22,4 +22,4 @@ Configuration template ^^^^^^^^^^^^^^^^^^^^^^ The source distribution includes an example front proxy configuration. See -:ref:`here ` for more information. +:ref:`here ` for more information. From 925c7fd2a97ed47243ed9a1a9032cf42e651817e Mon Sep 17 00:00:00 2001 From: Takeshi Yoneda Date: Mon, 20 Sep 2021 20:59:45 +0900 Subject: [PATCH 072/121] stats: check emptiness in stripRegisteredPrefix before search. (#18127) Commit Message: This commit checks the emptiness of CustomStatNamespaces in its stripRegisteredPrefix before doing string search to improve the performance in the case where no custom stat namespace is registered. 
cc @jmarantz Additional Description: NA Risk Level: low Testing: unittest Docs Changes: NA Release Notes: NA Platform Specific Features: NA Signed-off-by: Takeshi Yoneda Signed-off-by: gayang --- envoy/stats/custom_stat_namespaces.h | 3 ++- source/common/stats/custom_stat_namespaces_impl.cc | 10 ++++++---- test/common/stats/custom_stat_namespaces_impl_test.cc | 2 ++ 3 files changed, 10 insertions(+), 5 deletions(-) diff --git a/envoy/stats/custom_stat_namespaces.h b/envoy/stats/custom_stat_namespaces.h index a966fb950a49e..175a80efe6b7a 100644 --- a/envoy/stats/custom_stat_namespaces.h +++ b/envoy/stats/custom_stat_namespaces.h @@ -2,7 +2,8 @@ #include "envoy/common/pure.h" -#include "absl/container/flat_hash_set.h" +#include "absl/strings/string_view.h" +#include "absl/types/optional.h" namespace Envoy { namespace Stats { diff --git a/source/common/stats/custom_stat_namespaces_impl.cc b/source/common/stats/custom_stat_namespaces_impl.cc index f59e52ef0078e..24c2f6d246f6c 100644 --- a/source/common/stats/custom_stat_namespaces_impl.cc +++ b/source/common/stats/custom_stat_namespaces_impl.cc @@ -19,10 +19,12 @@ void CustomStatNamespacesImpl::registerStatNamespace(const absl::string_view nam absl::optional CustomStatNamespacesImpl::stripRegisteredPrefix(const absl::string_view stat_name) const { ASSERT(Thread::MainThread::isMainThread()); - const auto pos = stat_name.find_first_of('.'); - if (pos != std::string::npos && registered(stat_name.substr(0, pos))) { - // Trim the custom namespace. - return stat_name.substr(pos + 1); + if (!namespaces_.empty()) { + const auto pos = stat_name.find_first_of('.'); + if (pos != std::string::npos && registered(stat_name.substr(0, pos))) { + // Trim the custom namespace. 
+ return stat_name.substr(pos + 1); + } } return absl::nullopt; }; diff --git a/test/common/stats/custom_stat_namespaces_impl_test.cc b/test/common/stats/custom_stat_namespaces_impl_test.cc index 0bc09bf4121b6..e0500fe5edcf7 100644 --- a/test/common/stats/custom_stat_namespaces_impl_test.cc +++ b/test/common/stats/custom_stat_namespaces_impl_test.cc @@ -16,6 +16,8 @@ TEST(CustomStatNamespacesImpl, Registration) { TEST(CustomStatNamespacesImpl, StripRegisteredPrefix) { CustomStatNamespacesImpl namespaces; + // no namespace is registered. + EXPECT_FALSE(namespaces.stripRegisteredPrefix("foo.bar").has_value()); namespaces.registerStatNamespace("foo"); // namespace is not registered. EXPECT_FALSE(namespaces.stripRegisteredPrefix("bar.my.value").has_value()); From 4bd376c2feac9eed7298d59131b44dcbbac69128 Mon Sep 17 00:00:00 2001 From: Kenjiro Nakayama Date: Mon, 20 Sep 2021 21:52:42 +0900 Subject: [PATCH 073/121] Fix wrong comment for closing brace (#18170) Signed-off-by: Kenjiro Nakayama Signed-off-by: gayang --- source/extensions/filters/http/grpc_stats/grpc_stats_filter.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/extensions/filters/http/grpc_stats/grpc_stats_filter.cc b/source/extensions/filters/http/grpc_stats/grpc_stats_filter.cc index 841e56989eeae..7280a419d21da 100644 --- a/source/extensions/filters/http/grpc_stats/grpc_stats_filter.cc +++ b/source/extensions/filters/http/grpc_stats/grpc_stats_filter.cc @@ -275,7 +275,7 @@ class GrpcStatsFilter : public Http::PassThroughFilter { Grpc::FrameInspector response_counter_; Upstream::ClusterInfoConstSharedPtr cluster_; absl::optional request_names_; -}; // namespace +}; } // namespace From df784a5523cb08e1ef89e0457a52361ecc71415e Mon Sep 17 00:00:00 2001 From: alyssawilk Date: Tue, 21 Sep 2021 00:00:33 +0800 Subject: [PATCH 074/121] upstream: more factory cleanup (#18143) Risk Level: Low Testing: n/a Docs Changes: n/a Release Notes: n/a Signed-off-by: Alyssa Wilk Signed-off-by: gayang 
--- source/common/http/BUILD | 1 + .../alternate_protocols_cache_manager_impl.cc | 8 +++---- .../alternate_protocols_cache_manager_impl.h | 21 +++++++++++++------ source/common/upstream/cluster_manager_impl.h | 3 +-- .../http/alternate_protocols_cache/config.cc | 3 +-- .../alternate_protocols_cache_manager_test.cc | 20 ++++++++++++++---- test/mocks/server/factory_context.cc | 1 + test/mocks/server/factory_context.h | 1 + 8 files changed, 40 insertions(+), 18 deletions(-) diff --git a/source/common/http/BUILD b/source/common/http/BUILD index ebc5bdcf0cf67..3f1323c8c8cd9 100644 --- a/source/common/http/BUILD +++ b/source/common/http/BUILD @@ -177,6 +177,7 @@ envoy_cc_library( "//envoy/thread_local:thread_local_interface", "//envoy/upstream:resource_manager_interface", "//source/common/common:logger_lib", + "//source/common/config:utility_lib", "@envoy_api//envoy/config/core/v3:pkg_cc_proto", ], ) diff --git a/source/common/http/alternate_protocols_cache_manager_impl.cc b/source/common/http/alternate_protocols_cache_manager_impl.cc index fc04a6d084f42..59434006dcf0d 100644 --- a/source/common/http/alternate_protocols_cache_manager_impl.cc +++ b/source/common/http/alternate_protocols_cache_manager_impl.cc @@ -11,8 +11,8 @@ namespace Http { SINGLETON_MANAGER_REGISTRATION(alternate_protocols_cache_manager); AlternateProtocolsCacheManagerImpl::AlternateProtocolsCacheManagerImpl( - TimeSource& time_source, ThreadLocal::SlotAllocator& tls) - : time_source_(time_source), slot_(tls) { + AlternateProtocolsData& data, ThreadLocal::SlotAllocator& tls) + : data_(data), slot_(tls) { slot_.set([](Event::Dispatcher& /*dispatcher*/) { return std::make_shared(); }); } @@ -31,7 +31,7 @@ AlternateProtocolsCacheSharedPtr AlternateProtocolsCacheManagerImpl::getCache( } AlternateProtocolsCacheSharedPtr new_cache = - std::make_shared(time_source_); + std::make_shared(data_.dispatcher_.timeSource()); (*slot_).caches_.emplace(options.name(), CacheWithOptions{options, new_cache}); return 
new_cache; } @@ -39,7 +39,7 @@ AlternateProtocolsCacheSharedPtr AlternateProtocolsCacheManagerImpl::getCache( AlternateProtocolsCacheManagerSharedPtr AlternateProtocolsCacheManagerFactoryImpl::get() { return singleton_manager_.getTyped( SINGLETON_MANAGER_REGISTERED_NAME(alternate_protocols_cache_manager), - [this] { return std::make_shared(time_source_, tls_); }); + [this] { return std::make_shared(data_, tls_); }); } } // namespace Http diff --git a/source/common/http/alternate_protocols_cache_manager_impl.h b/source/common/http/alternate_protocols_cache_manager_impl.h index 227569ac19a60..6524128d329f3 100644 --- a/source/common/http/alternate_protocols_cache_manager_impl.h +++ b/source/common/http/alternate_protocols_cache_manager_impl.h @@ -2,6 +2,7 @@ #include "envoy/config/core/v3/protocol.pb.h" #include "envoy/http/alternate_protocols_cache.h" +#include "envoy/server/factory_context.h" #include "envoy/singleton/instance.h" #include "envoy/singleton/manager.h" #include "envoy/thread_local/thread_local.h" @@ -11,10 +12,18 @@ namespace Envoy { namespace Http { +struct AlternateProtocolsData { + AlternateProtocolsData(Server::Configuration::FactoryContextBase& context) + : dispatcher_(context.mainThreadDispatcher()), + validation_visitor_(context.messageValidationVisitor()) {} + Event::Dispatcher& dispatcher_; + ProtobufMessage::ValidationVisitor& validation_visitor_; +}; + class AlternateProtocolsCacheManagerImpl : public AlternateProtocolsCacheManager, public Singleton::Instance { public: - AlternateProtocolsCacheManagerImpl(TimeSource& time_source, ThreadLocal::SlotAllocator& tls); + AlternateProtocolsCacheManagerImpl(AlternateProtocolsData& data, ThreadLocal::SlotAllocator& tls); // AlternateProtocolsCacheManager AlternateProtocolsCacheSharedPtr @@ -37,7 +46,7 @@ class AlternateProtocolsCacheManagerImpl : public AlternateProtocolsCacheManager absl::flat_hash_map caches_; }; - TimeSource& time_source_; + AlternateProtocolsData& data_; // Thread local state 
for the cache. ThreadLocal::TypedSlot slot_; @@ -46,16 +55,16 @@ class AlternateProtocolsCacheManagerImpl : public AlternateProtocolsCacheManager class AlternateProtocolsCacheManagerFactoryImpl : public AlternateProtocolsCacheManagerFactory { public: AlternateProtocolsCacheManagerFactoryImpl(Singleton::Manager& singleton_manager, - TimeSource& time_source, - ThreadLocal::SlotAllocator& tls) - : singleton_manager_(singleton_manager), time_source_(time_source), tls_(tls) {} + ThreadLocal::SlotAllocator& tls, + AlternateProtocolsData data) + : singleton_manager_(singleton_manager), tls_(tls), data_(data) {} AlternateProtocolsCacheManagerSharedPtr get() override; private: Singleton::Manager& singleton_manager_; - TimeSource& time_source_; ThreadLocal::SlotAllocator& tls_; + AlternateProtocolsData data_; }; } // namespace Http diff --git a/source/common/upstream/cluster_manager_impl.h b/source/common/upstream/cluster_manager_impl.h index 949d62ef5cd20..30d58cf5d8def 100644 --- a/source/common/upstream/cluster_manager_impl.h +++ b/source/common/upstream/cluster_manager_impl.h @@ -62,8 +62,7 @@ class ProdClusterManagerFactory : public ClusterManagerFactory { tls_(tls), dns_resolver_(dns_resolver), ssl_context_manager_(ssl_context_manager), local_info_(local_info), secret_manager_(secret_manager), log_manager_(log_manager), singleton_manager_(singleton_manager), quic_stat_names_(quic_stat_names), - alternate_protocols_cache_manager_factory_(singleton_manager, - main_thread_dispatcher.timeSource(), tls_), + alternate_protocols_cache_manager_factory_(singleton_manager, tls_, {context_}), alternate_protocols_cache_manager_(alternate_protocols_cache_manager_factory_.get()) {} // Upstream::ClusterManagerFactory diff --git a/source/extensions/filters/http/alternate_protocols_cache/config.cc b/source/extensions/filters/http/alternate_protocols_cache/config.cc index dea1f9904b0a5..295ea64530298 100644 --- a/source/extensions/filters/http/alternate_protocols_cache/config.cc +++ 
b/source/extensions/filters/http/alternate_protocols_cache/config.cc @@ -16,8 +16,7 @@ Http::FilterFactoryCb AlternateProtocolsCacheFilterFactory::createFilterFactoryF proto_config, const std::string&, Server::Configuration::FactoryContext& context) { Http::AlternateProtocolsCacheManagerFactoryImpl alternate_protocol_cache_manager_factory( - context.singletonManager(), context.mainThreadDispatcher().timeSource(), - context.threadLocal()); + context.singletonManager(), context.threadLocal(), {context}); FilterConfigSharedPtr filter_config( std::make_shared(proto_config, alternate_protocol_cache_manager_factory, context.mainThreadDispatcher().timeSource())); diff --git a/test/common/http/alternate_protocols_cache_manager_test.cc b/test/common/http/alternate_protocols_cache_manager_test.cc index c26570eac76fa..3763c6e9795c6 100644 --- a/test/common/http/alternate_protocols_cache_manager_test.cc +++ b/test/common/http/alternate_protocols_cache_manager_test.cc @@ -1,6 +1,7 @@ #include "source/common/http/alternate_protocols_cache_manager_impl.h" #include "source/common/singleton/manager_impl.h" +#include "test/mocks/server/factory_context.h" #include "test/mocks/thread_local/mocks.h" #include "test/test_common/simulated_time_system.h" @@ -13,18 +14,24 @@ namespace { class AlternateProtocolsCacheManagerTest : public testing::Test, public Event::TestUsingSimulatedTime { public: - AlternateProtocolsCacheManagerTest() - : factory_(singleton_manager_, simTime(), tls_), manager_(factory_.get()) { + AlternateProtocolsCacheManagerTest() { options1_.set_name(name1_); options1_.mutable_max_entries()->set_value(max_entries1_); options2_.set_name(name2_); options2_.mutable_max_entries()->set_value(max_entries2_); } + void initialize() { + AlternateProtocolsData data(context_); + factory_ = std::make_unique(singleton_manager_, + tls_, data); + manager_ = factory_->get(); + } Singleton::ManagerImpl singleton_manager_{Thread::threadFactoryForTest()}; + NiceMock context_; 
testing::NiceMock tls_; - Http::AlternateProtocolsCacheManagerFactoryImpl factory_; + std::unique_ptr factory_; AlternateProtocolsCacheManagerSharedPtr manager_; const std::string name1_ = "name1"; const std::string name2_ = "name2"; @@ -36,17 +43,21 @@ class AlternateProtocolsCacheManagerTest : public testing::Test, }; TEST_F(AlternateProtocolsCacheManagerTest, FactoryGet) { + initialize(); + EXPECT_NE(nullptr, manager_); - EXPECT_EQ(manager_, factory_.get()); + EXPECT_EQ(manager_, factory_->get()); } TEST_F(AlternateProtocolsCacheManagerTest, GetCache) { + initialize(); AlternateProtocolsCacheSharedPtr cache = manager_->getCache(options1_); EXPECT_NE(nullptr, cache); EXPECT_EQ(cache, manager_->getCache(options1_)); } TEST_F(AlternateProtocolsCacheManagerTest, GetCacheForDifferentOptions) { + initialize(); AlternateProtocolsCacheSharedPtr cache1 = manager_->getCache(options1_); AlternateProtocolsCacheSharedPtr cache2 = manager_->getCache(options2_); EXPECT_NE(nullptr, cache2); @@ -54,6 +65,7 @@ TEST_F(AlternateProtocolsCacheManagerTest, GetCacheForDifferentOptions) { } TEST_F(AlternateProtocolsCacheManagerTest, GetCacheForConflictingOptions) { + initialize(); AlternateProtocolsCacheSharedPtr cache1 = manager_->getCache(options1_); options2_.set_name(options1_.name()); EXPECT_THROW_WITH_REGEX( diff --git a/test/mocks/server/factory_context.cc b/test/mocks/server/factory_context.cc index 5e59724def957..70d440be4a102 100644 --- a/test/mocks/server/factory_context.cc +++ b/test/mocks/server/factory_context.cc @@ -40,6 +40,7 @@ MockFactoryContext::MockFactoryContext() ON_CALL(*this, messageValidationVisitor()) .WillByDefault(ReturnRef(ProtobufMessage::getStrictValidationVisitor())); ON_CALL(*this, api()).WillByDefault(ReturnRef(api_)); + ON_CALL(*this, options()).WillByDefault(ReturnRef(options_)); } MockFactoryContext::~MockFactoryContext() = default; diff --git a/test/mocks/server/factory_context.h b/test/mocks/server/factory_context.h index 
0de704bbee3c7..53092aa382b4d 100644 --- a/test/mocks/server/factory_context.h +++ b/test/mocks/server/factory_context.h @@ -64,6 +64,7 @@ class MockFactoryContext : public virtual FactoryContext { testing::NiceMock runtime_loader_; testing::NiceMock scope_; testing::NiceMock thread_local_; + testing::NiceMock options_; Singleton::ManagerPtr singleton_manager_; testing::NiceMock admin_; Stats::IsolatedStoreImpl listener_scope_; From 179fbaf742df6b7614b79986d05221176447cd3b Mon Sep 17 00:00:00 2001 From: alyssawilk Date: Tue, 21 Sep 2021 00:56:51 +0800 Subject: [PATCH 075/121] tools: exempting contrib PRs from unassigned PRs (#18182) Signed-off-by: Alyssa Wilk Signed-off-by: gayang --- .github/actions/pr_notifier/pr_notifier.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/.github/actions/pr_notifier/pr_notifier.py b/.github/actions/pr_notifier/pr_notifier.py index 9c6277ddac81d..de2aba2f6c2b3 100644 --- a/.github/actions/pr_notifier/pr_notifier.py +++ b/.github/actions/pr_notifier/pr_notifier.py @@ -69,6 +69,10 @@ def is_waiting(labels): return False +def is_contrib(labels): + return any(label.name == "contrib" for label in labels) + + # Return true if the PR has an API tag, false otherwise. def is_api(labels): for label in labels: @@ -174,7 +178,7 @@ def track_prs(): pr_info.assignees, maintainers_and_prs, message, MAINTAINERS, FIRST_PASS) # If there was no maintainer, track it as unassigned. - if not has_maintainer_assignee: + if not has_maintainer_assignee and not is_contrib(labels): maintainers_and_prs['unassigned'] = maintainers_and_prs['unassigned'] + message # Return the dict of {maintainers : PR notifications}, From 2bf93724f4e8521e3fe70ead732a4c1939b8a972 Mon Sep 17 00:00:00 2001 From: pradeepcrao <84025829+pradeepcrao@users.noreply.github.com> Date: Mon, 20 Sep 2021 17:06:34 +0000 Subject: [PATCH 076/121] Keep snapped stats in MetricSnapshotImpl. 
(#18144) Defensively keep RefCountPtrs to stats around during the flush, guarding against stat deletions on the worker thread. Stats allocated with ThreadLocalStore are always deleted on the main thread after being cleared from the TLS caches, so this is not a problem in practice, but it does make sense to hold onto a shared-ptr in the sink. Signed-off-by: Pradeep Rao Signed-off-by: gayang --- source/server/server.cc | 40 ++++++++++++++++++++++++++++------------ 1 file changed, 28 insertions(+), 12 deletions(-) diff --git a/source/server/server.cc b/source/server/server.cc index 152058301be54..658b77a5734bb 100644 --- a/source/server/server.cc +++ b/source/server/server.cc @@ -166,16 +166,26 @@ void InstanceImpl::failHealthcheck(bool fail) { } MetricSnapshotImpl::MetricSnapshotImpl(Stats::Store& store, TimeSource& time_source) { - store.forEachCounter([this](std::size_t size) mutable { counters_.reserve(size); }, - [this](Stats::Counter& counter) mutable { - counters_.push_back({counter.latch(), counter}); - }); - - store.forEachGauge([this](std::size_t size) mutable { gauges_.reserve(size); }, - [this](Stats::Gauge& gauge) mutable { - ASSERT(gauge.importMode() != Stats::Gauge::ImportMode::Uninitialized); - gauges_.push_back(gauge); - }); + store.forEachCounter( + [this](std::size_t size) mutable { + snapped_counters_.reserve(size); + counters_.reserve(size); + }, + [this](Stats::Counter& counter) mutable { + snapped_counters_.push_back(Stats::CounterSharedPtr(&counter)); + counters_.push_back({counter.latch(), counter}); + }); + + store.forEachGauge( + [this](std::size_t size) mutable { + snapped_gauges_.reserve(size); + gauges_.reserve(size); + }, + [this](Stats::Gauge& gauge) mutable { + ASSERT(gauge.importMode() != Stats::Gauge::ImportMode::Uninitialized); + snapped_gauges_.push_back(Stats::GaugeSharedPtr(&gauge)); + gauges_.push_back(gauge); + }); snapped_histograms_ = store.histograms(); histograms_.reserve(snapped_histograms_.size()); @@ -184,8 +194,14 @@ 
MetricSnapshotImpl::MetricSnapshotImpl(Stats::Store& store, TimeSource& time_sou } store.forEachTextReadout( - [this](std::size_t size) mutable { text_readouts_.reserve(size); }, - [this](Stats::TextReadout& text_readout) { text_readouts_.push_back(text_readout); }); + [this](std::size_t size) mutable { + snapped_text_readouts_.reserve(size); + text_readouts_.reserve(size); + }, + [this](Stats::TextReadout& text_readout) { + snapped_text_readouts_.push_back(Stats::TextReadoutSharedPtr(&text_readout)); + text_readouts_.push_back(text_readout); + }); snapshot_time_ = time_source.systemTime(); } From e8f5f624605f622b08b34b93e693113568e9d2c8 Mon Sep 17 00:00:00 2001 From: alyssawilk Date: Tue, 21 Sep 2021 01:22:18 +0800 Subject: [PATCH 077/121] dfp: adding failed resolution test (#18150) Signed-off-by: Alyssa Wilk Signed-off-by: gayang --- .../proxy_filter_integration_test.cc | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/test/extensions/filters/http/dynamic_forward_proxy/proxy_filter_integration_test.cc b/test/extensions/filters/http/dynamic_forward_proxy/proxy_filter_integration_test.cc index 7cf64c0bbb608..c521df204f6e6 100644 --- a/test/extensions/filters/http/dynamic_forward_proxy/proxy_filter_integration_test.cc +++ b/test/extensions/filters/http/dynamic_forward_proxy/proxy_filter_integration_test.cc @@ -173,6 +173,21 @@ TEST_P(ProxyFilterIntegrationTest, RequestWithBody) { EXPECT_EQ(1, test_server_->counter("dns_cache.foo.host_added")->value()); } +// Currently if the first DNS resolution fails, the filter will continue with +// a null address. Make sure this mode fails gracefully. 
+TEST_P(ProxyFilterIntegrationTest, RequestWithUnknownDomain) { + initializeWithArgs(); + codec_client_ = makeHttpConnection(lookupPort("http")); + const Http::TestRequestHeaderMapImpl request_headers{{":method", "GET"}, + {":path", "/test/long/url"}, + {":scheme", "http"}, + {":authority", "doesnotexist.example.com"}}; + + auto response = codec_client_->makeHeaderOnlyRequest(default_request_headers_); + ASSERT_TRUE(response->waitForEndStream()); + EXPECT_EQ("503", response->headers().getStatusValue()); +} + // Verify that after we populate the cache and reload the cluster we reattach to the cache with // its existing hosts. TEST_P(ProxyFilterIntegrationTest, ReloadClusterAndAttachToCache) { From cebcef9f632804afcbbcf7019df1436316eba0fb Mon Sep 17 00:00:00 2001 From: Matt Klein Date: Mon, 20 Sep 2021 11:59:26 -0600 Subject: [PATCH 078/121] dns cache: add force refresh API (#18165) Will allow EM to force a DNS refresh for all hosts on network changes. Signed-off-by: Matt Klein Signed-off-by: gayang --- docs/root/start/install.rst | 2 +- .../common/dynamic_forward_proxy/dns_cache.h | 6 +++ .../dynamic_forward_proxy/dns_cache_impl.cc | 21 ++++++-- .../dynamic_forward_proxy/dns_cache_impl.h | 1 + .../dns_cache_impl_test.cc | 49 +++++++++++++++++++ .../common/dynamic_forward_proxy/mocks.h | 1 + 6 files changed, 75 insertions(+), 5 deletions(-) diff --git a/docs/root/start/install.rst b/docs/root/start/install.rst index 85b039e9aa4e2..34060a73a0d69 100644 --- a/docs/root/start/install.rst +++ b/docs/root/start/install.rst @@ -254,4 +254,4 @@ The following table shows the available Docker images building Envoy and related containers. This image requires 4-5GB of available disk space to use. All the docker images are available in Docker Hub, but `its rate limit policy `_ - doesn't apply to users since the "envoyproxy" namespace is whitelisted. + doesn't apply to users since the "envoyproxy" namespace is allowlisted. 
diff --git a/source/extensions/common/dynamic_forward_proxy/dns_cache.h b/source/extensions/common/dynamic_forward_proxy/dns_cache.h index 218c1044a0793..8c178ecc8abfc 100644 --- a/source/extensions/common/dynamic_forward_proxy/dns_cache.h +++ b/source/extensions/common/dynamic_forward_proxy/dns_cache.h @@ -197,6 +197,12 @@ class DnsCache { * @return RAII handle for pending request circuit breaker if the request was allowed. */ virtual Upstream::ResourceAutoIncDecPtr canCreateDnsRequest() PURE; + + /** + * Force a DNS refresh of all known hosts, ignoring any ongoing failure or success timers. This + * can be used in response to network changes which might alter DNS responses, for example. + */ + virtual void forceRefreshHosts() PURE; }; using DnsCacheSharedPtr = std::shared_ptr; diff --git a/source/extensions/common/dynamic_forward_proxy/dns_cache_impl.cc b/source/extensions/common/dynamic_forward_proxy/dns_cache_impl.cc index 15f1fa6a9b07a..db0b51873e2fe 100644 --- a/source/extensions/common/dynamic_forward_proxy/dns_cache_impl.cc +++ b/source/extensions/common/dynamic_forward_proxy/dns_cache_impl.cc @@ -54,10 +54,7 @@ DnsCacheImpl::DnsCacheImpl( // cache to load an entry. Further if this particular resolution fails all the is lost is the // potential optimization of having the entry be preresolved the first time a true consumer of // this DNS cache asks for it. - main_thread_dispatcher_.post( - [this, host = hostname.address(), default_port = hostname.port_value()]() { - startCacheLoad(host, default_port); - }); + startCacheLoad(hostname.address(), hostname.port_value()); } } @@ -274,6 +271,22 @@ void DnsCacheImpl::onReResolve(const std::string& host) { } } +void DnsCacheImpl::forceRefreshHosts() { + absl::ReaderMutexLock reader_lock{&primary_hosts_lock_}; + for (auto& primary_host : primary_hosts_) { + // Avoid holding the lock for longer than necessary by just triggering the refresh timer for + // each host IFF the host is not already refreshing. 
+ // TODO(mattklein123): In the future we may want to cancel an ongoing refresh and start a new + // one to avoid a situation in which an older refresh races with a concurrent network change, + // for example. + if (primary_host.second->active_query_ == nullptr) { + ASSERT(!primary_host.second->timeout_timer_->enabled()); + primary_host.second->refresh_timer_->enableTimer(std::chrono::milliseconds(0), nullptr); + ENVOY_LOG(debug, "force refreshing host='{}'", primary_host.first); + } + } +} + void DnsCacheImpl::startResolve(const std::string& host, PrimaryHostInfo& host_info) { ENVOY_LOG(debug, "starting main thread resolve for host='{}' dns='{}' port='{}'", host, host_info.host_info_->resolvedHost(), host_info.port_); diff --git a/source/extensions/common/dynamic_forward_proxy/dns_cache_impl.h b/source/extensions/common/dynamic_forward_proxy/dns_cache_impl.h index c2526da13222c..d10c88bd4feb2 100644 --- a/source/extensions/common/dynamic_forward_proxy/dns_cache_impl.h +++ b/source/extensions/common/dynamic_forward_proxy/dns_cache_impl.h @@ -61,6 +61,7 @@ class DnsCacheImpl : public DnsCache, Logger::Loggable getHost(absl::string_view host_name) override; Upstream::ResourceAutoIncDecPtr canCreateDnsRequest() override; + void forceRefreshHosts() override; private: struct LoadDnsCacheEntryHandleImpl diff --git a/test/extensions/common/dynamic_forward_proxy/dns_cache_impl_test.cc b/test/extensions/common/dynamic_forward_proxy/dns_cache_impl_test.cc index 689e9deb9a688..8ae4b11eb071f 100644 --- a/test/extensions/common/dynamic_forward_proxy/dns_cache_impl_test.cc +++ b/test/extensions/common/dynamic_forward_proxy/dns_cache_impl_test.cc @@ -20,6 +20,7 @@ #include "test/test_common/test_runtime.h" #include "test/test_common/utility.h" +using testing::AtLeast; using testing::DoAll; using testing::InSequence; using testing::Return; @@ -207,6 +208,54 @@ TEST_F(DnsCacheImplTest, ResolveSuccess) { 1 /* added */, 0 /* removed */, 1 /* num hosts */); } +// Verify the force 
refresh API works as expected. +TEST_F(DnsCacheImplTest, ForceRefresh) { + initialize(); + InSequence s; + + // No hosts so should not do anything. + dns_cache_->forceRefreshHosts(); + checkStats(0 /* attempt */, 0 /* success */, 0 /* failure */, 0 /* address changed */, + 0 /* added */, 0 /* removed */, 0 /* num hosts */); + + MockLoadDnsCacheEntryCallbacks callbacks; + Network::DnsResolver::ResolveCb resolve_cb; + Event::MockTimer* resolve_timer = new Event::MockTimer(&context_.dispatcher_); + Event::MockTimer* timeout_timer = new Event::MockTimer(&context_.dispatcher_); + EXPECT_CALL(*timeout_timer, enableTimer(std::chrono::milliseconds(5000), nullptr)); + EXPECT_CALL(*resolver_, resolve("foo.com", _, _)) + .WillOnce(DoAll(SaveArg<2>(&resolve_cb), Return(&resolver_->active_query_))); + auto result = dns_cache_->loadDnsCacheEntry("foo.com", 80, callbacks); + EXPECT_EQ(DnsCache::LoadDnsCacheEntryStatus::Loading, result.status_); + EXPECT_NE(result.handle_, nullptr); + EXPECT_EQ(absl::nullopt, result.host_info_); + + checkStats(1 /* attempt */, 0 /* success */, 0 /* failure */, 0 /* address changed */, + 1 /* added */, 0 /* removed */, 1 /* num hosts */); + + // Query in progress so should do nothing. 
+ dns_cache_->forceRefreshHosts(); + checkStats(1 /* attempt */, 0 /* success */, 0 /* failure */, 0 /* address changed */, + 1 /* added */, 0 /* removed */, 1 /* num hosts */); + + EXPECT_CALL(*timeout_timer, disableTimer()); + EXPECT_CALL(update_callbacks_, + onDnsHostAddOrUpdate("foo.com", DnsHostInfoEquals("10.0.0.1:80", "foo.com", false))); + EXPECT_CALL(callbacks, + onLoadDnsCacheComplete(DnsHostInfoEquals("10.0.0.1:80", "foo.com", false))); + EXPECT_CALL(*resolve_timer, enableTimer(std::chrono::milliseconds(60000), _)); + resolve_cb(Network::DnsResolver::ResolutionStatus::Success, + TestUtility::makeDnsResponse({"10.0.0.1"})); + + checkStats(1 /* attempt */, 1 /* success */, 0 /* failure */, 1 /* address changed */, + 1 /* added */, 0 /* removed */, 1 /* num hosts */); + + // Should force a refresh. Ignore strict mock failures on the enabled() call. + EXPECT_CALL(*timeout_timer, enabled()).Times(AtLeast(0)); + EXPECT_CALL(*resolve_timer, enableTimer(std::chrono::milliseconds(0), _)); + dns_cache_->forceRefreshHosts(); +} + // Ipv4 address. 
TEST_F(DnsCacheImplTest, Ipv4Address) { initialize(); diff --git a/test/extensions/common/dynamic_forward_proxy/mocks.h b/test/extensions/common/dynamic_forward_proxy/mocks.h index d65583e21c596..6cc4acc14c3f7 100644 --- a/test/extensions/common/dynamic_forward_proxy/mocks.h +++ b/test/extensions/common/dynamic_forward_proxy/mocks.h @@ -59,6 +59,7 @@ class MockDnsCache : public DnsCache { MOCK_METHOD((void), iterateHostMap, (IterateHostMapCb)); MOCK_METHOD((absl::optional), getHost, (absl::string_view)); MOCK_METHOD(Upstream::ResourceAutoIncDec*, canCreateDnsRequest_, ()); + MOCK_METHOD(void, forceRefreshHosts, ()); }; class MockLoadDnsCacheEntryHandle : public DnsCache::LoadDnsCacheEntryHandle { From 4df9e7eaf6f408e0038d0296c318f5a88c250efc Mon Sep 17 00:00:00 2001 From: phlax Date: Tue, 21 Sep 2021 04:50:48 +0100 Subject: [PATCH 079/121] docs: Small optimization for protodoc (#18179) this is a small optimization that removes non-envoy rst files from the output depset of protodoc this makes the api tarball smaller and allows the downstream code to not have to filter these out Signed-off-by: Ryan Northey Signed-off-by: gayang --- tools/protodoc/protodoc.bzl | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/tools/protodoc/protodoc.bzl b/tools/protodoc/protodoc.bzl index 751a24bc44846..9858d190efe28 100644 --- a/tools/protodoc/protodoc.bzl +++ b/tools/protodoc/protodoc.bzl @@ -17,10 +17,15 @@ protodoc_aspect = api_proto_plugin_aspect("//tools/protodoc", _protodoc_impl) def _protodoc_rule_impl(ctx): return [ DefaultInfo( - files = depset(transitive = [ - d[OutputGroupInfo].rst - for d in ctx.attr.deps - ]), + files = depset( + transitive = [ + depset([ + x + for x in ctx.attr.deps[0][OutputGroupInfo].rst.to_list() + if x.short_path.startswith("../envoy_api") + ]), + ], + ), ), ] From c22aaf280ff796ce011e2205ad8506ca1f94b74b Mon Sep 17 00:00:00 2001 From: Dhi Aurrahman Date: Tue, 21 Sep 2021 22:25:56 +0700 Subject: [PATCH 080/121] 
jwt_authn: Add header_to_metadata (#18140) This patch adds header_to_metadata field to JwtProvider config to allow setting the extracted header of a successfully verified JWT to dynamic metadata. Signed-off-by: Dhi Aurrahman Signed-off-by: gayang --- .../filters/http/jwt_authn/v3/config.proto | 42 ++++++- .../advanced/well_known_dynamic_metadata.rst | 4 +- .../security/jwt_authn_filter.rst | 6 +- docs/root/version_history/current.rst | 1 + .../filters/http/jwt_authn/authenticator.cc | 25 +++-- .../filters/http/jwt_authn/authenticator.h | 6 +- .../filters/http/jwt_authn/filter.cc | 5 +- .../filters/http/jwt_authn/filter.h | 4 +- .../filters/http/jwt_authn/verifier.cc | 29 ++--- .../filters/http/jwt_authn/verifier.h | 2 +- .../http/jwt_authn/authenticator_test.cc | 105 +++++++++++++++--- .../filters/http/jwt_authn/filter_test.cc | 25 +++-- .../http/jwt_authn/group_verifier_test.cc | 102 +++++++++-------- test/extensions/filters/http/jwt_authn/mock.h | 11 +- .../http/jwt_authn/provider_verifier_test.cc | 45 +++++++- .../filters/http/jwt_authn/test_common.h | 22 ++++ 16 files changed, 315 insertions(+), 119 deletions(-) diff --git a/api/envoy/extensions/filters/http/jwt_authn/v3/config.proto b/api/envoy/extensions/filters/http/jwt_authn/v3/config.proto index 5bb6960e1c1b1..1f9a54ed7ff97 100644 --- a/api/envoy/extensions/filters/http/jwt_authn/v3/config.proto +++ b/api/envoy/extensions/filters/http/jwt_authn/v3/config.proto @@ -52,7 +52,7 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // cache_duration: // seconds: 300 // -// [#next-free-field: 14] +// [#next-free-field: 15] message JwtProvider { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.http.jwt_authn.v2alpha.JwtProvider"; @@ -231,6 +231,46 @@ message JwtProvider { // string payload_in_metadata = 9; + // If not empty, similar to :ref:`payload_in_metadata `, + // a successfully verified JWT header will be written to :ref:`Dynamic State ` + // as an entry 
(``protobuf::Struct``) in **envoy.filters.http.jwt_authn** *namespace* with the + // value of this field as the key. + // + // For example, if ``header_in_metadata`` is *my_header*: + // + // .. code-block:: yaml + // + // envoy.filters.http.jwt_authn: + // my_header: + // alg: JWT + // kid: EF71iSaosbC5C4tC6Syq1Gm647M + // alg: PS256 + // + // When the metadata has **envoy.filters.http.jwt_authn** entry already (for example if + // :ref:`payload_in_metadata ` + // is not empty), it will be inserted as a new entry in the same *namespace* as shown below: + // + // .. code-block:: yaml + // + // envoy.filters.http.jwt_authn: + // my_payload: + // iss: https://example.com + // sub: test@example.com + // aud: https://example.com + // exp: 1501281058 + // my_header: + // alg: JWT + // kid: EF71iSaosbC5C4tC6Syq1Gm647M + // alg: PS256 + // + // .. warning:: + // Using the same key name for :ref:`header_in_metadata ` + // and :ref:`payload_in_metadata ` + // is not suggested due to potential override of existing entry, while it is not enforced during + // config validation. + // + string header_in_metadata = 14; + // Specify the clock skew in seconds when verifying JWT time constraint, // such as `exp`, and `nbf`. If not specified, default is 60 seconds. 
uint32 clock_skew_seconds = 10; diff --git a/docs/root/configuration/advanced/well_known_dynamic_metadata.rst b/docs/root/configuration/advanced/well_known_dynamic_metadata.rst index b051545ea9253..9d89ae70c28b8 100644 --- a/docs/root/configuration/advanced/well_known_dynamic_metadata.rst +++ b/docs/root/configuration/advanced/well_known_dynamic_metadata.rst @@ -16,7 +16,9 @@ The following Envoy filters emit dynamic metadata that other filters can leverag * :ref:`External Authorization Filter ` * :ref:`External Authorization Network Filter ` * :ref:`Header-To-Metadata Filter ` -* :ref:`JWT Authentication Filter ` +* :ref:`JWT Authentication Filter ` for the extracted + :ref:`header ` + and :ref:`payload ` * :ref:`Mongo Proxy Filter ` * :ref:`MySQL Proxy Filter ` * :ref:`Postgres Proxy Filter ` diff --git a/docs/root/intro/arch_overview/security/jwt_authn_filter.rst b/docs/root/intro/arch_overview/security/jwt_authn_filter.rst index 9c53106ab146f..9e7de4ae11989 100644 --- a/docs/root/intro/arch_overview/security/jwt_authn_filter.rst +++ b/docs/root/intro/arch_overview/security/jwt_authn_filter.rst @@ -23,6 +23,6 @@ could combine multiple JWT requirements for the same request. The verification could be either specified inline in the filter config or fetched from remote server via HTTP/HTTPS. -The JWT Authentication filter also supports to write the payloads of the successfully verified JWT -to :ref:`Dynamic State ` so that later filters could use -it to make their own decisions based on the JWT payloads. +The JWT Authentication filter also supports to write the header and payload of the successfully +verified JWT to :ref:`Dynamic State ` so that later +filters could use it to make their own decisions based on the JWT payloads. 
diff --git a/docs/root/version_history/current.rst b/docs/root/version_history/current.rst index fa19b6c3ea455..ae7a634a4198c 100644 --- a/docs/root/version_history/current.rst +++ b/docs/root/version_history/current.rst @@ -112,6 +112,7 @@ New Features * http: validating outgoing HTTP/2 CONNECT requests to ensure that if ``:path`` is set that ``:protocol`` is present. This behavior can be temporarily turned off by setting runtime guard ``envoy.reloadable_features.validate_connect`` to false. * jwt_authn: added support for :ref:`Jwt Cache ` and its size can be specified by :ref:`jwt_cache_size `. * jwt_authn: added support for extracting JWTs from request cookies using :ref:`from_cookies `. +* jwt_authn: added support for setting the extracted headers from a successfully verified JWT using :ref:`header_in_metadata ` to dynamic metadata. * listener: new listener metric ``downstream_cx_transport_socket_connect_timeout`` to track transport socket timeouts. * matcher: added :ref:`invert ` for inverting the match result in the metadata matcher. * overload: add a new overload action that resets streams using a lot of memory. To enable the tracking of allocated bytes in buffers that a stream is using we need to configure the minimum threshold for tracking via:ref:`buffer_factory_config `. We have an overload action ``Envoy::Server::OverloadActionNameValues::ResetStreams`` that takes advantage of the tracking to reset the most expensive stream first. 
diff --git a/source/extensions/filters/http/jwt_authn/authenticator.cc b/source/extensions/filters/http/jwt_authn/authenticator.cc index ef17e90fb1a6a..6d7a70179346d 100644 --- a/source/extensions/filters/http/jwt_authn/authenticator.cc +++ b/source/extensions/filters/http/jwt_authn/authenticator.cc @@ -43,9 +43,10 @@ class AuthenticatorImpl : public Logger::Loggable, // Following functions are for JwksFetcher::JwksReceiver interface void onJwksSuccess(google::jwt_verify::JwksPtr&& jwks) override; void onJwksError(Failure reason) override; - // Following functions are for Authenticator interface + // Following functions are for Authenticator interface. void verify(Http::HeaderMap& headers, Tracing::Span& parent_span, - std::vector&& tokens, SetPayloadCallback set_payload_cb, + std::vector&& tokens, + SetExtractedJwtDataCallback set_extracted_jwt_data_cb, AuthenticatorCallback callback) override; void onDestroy() override; @@ -90,8 +91,8 @@ class AuthenticatorImpl : public Logger::Loggable, Http::HeaderMap* headers_{}; // The active span for the request Tracing::Span* parent_span_{&Tracing::NullSpan::instance()}; - // the callback function to set payload - SetPayloadCallback set_payload_cb_; + // The callback function called to set the extracted payload and header from a verified JWT. + SetExtractedJwtDataCallback set_extracted_jwt_data_cb_; // The on_done function. AuthenticatorCallback callback_; // check audience object. 
@@ -119,12 +120,13 @@ std::string AuthenticatorImpl::name() const { void AuthenticatorImpl::verify(Http::HeaderMap& headers, Tracing::Span& parent_span, std::vector&& tokens, - SetPayloadCallback set_payload_cb, AuthenticatorCallback callback) { + SetExtractedJwtDataCallback set_extracted_jwt_data_cb, + AuthenticatorCallback callback) { ASSERT(!callback_); headers_ = &headers; parent_span_ = &parent_span; tokens_ = std::move(tokens); - set_payload_cb_ = std::move(set_payload_cb); + set_extracted_jwt_data_cb_ = std::move(set_extracted_jwt_data_cb); callback_ = std::move(callback); ENVOY_LOG(debug, "{}: JWT authentication starts (allow_failed={}), tokens size={}", name(), @@ -291,8 +293,15 @@ void AuthenticatorImpl::handleGoodJwt(bool cache_hit) { // Remove JWT from headers. curr_token_->removeJwt(*headers_); } - if (set_payload_cb_ && !provider.payload_in_metadata().empty()) { - set_payload_cb_(provider.payload_in_metadata(), jwt_->payload_pb_); + + if (set_extracted_jwt_data_cb_) { + if (!provider.header_in_metadata().empty()) { + set_extracted_jwt_data_cb_(provider.header_in_metadata(), jwt_->header_pb_); + } + + if (!provider.payload_in_metadata().empty()) { + set_extracted_jwt_data_cb_(provider.payload_in_metadata(), jwt_->payload_pb_); + } } if (provider_ && !cache_hit) { // move the ownership of "owned_jwt_" into the function. diff --git a/source/extensions/filters/http/jwt_authn/authenticator.h b/source/extensions/filters/http/jwt_authn/authenticator.h index 62803498d6941..8e16c5044ce4e 100644 --- a/source/extensions/filters/http/jwt_authn/authenticator.h +++ b/source/extensions/filters/http/jwt_authn/authenticator.h @@ -19,7 +19,8 @@ using AuthenticatorPtr = std::unique_ptr; using AuthenticatorCallback = std::function; -using SetPayloadCallback = std::function; +using SetExtractedJwtDataCallback = + std::function; /** * Authenticator object to handle all JWT authentication flow. 
@@ -31,7 +32,8 @@ class Authenticator { // Verify if headers satisfies the JWT requirements. Can be limited to single provider with // extract_param. virtual void verify(Http::HeaderMap& headers, Tracing::Span& parent_span, - std::vector&& tokens, SetPayloadCallback set_payload_cb, + std::vector&& tokens, + SetExtractedJwtDataCallback set_extracted_jwt_data_cb, AuthenticatorCallback callback) PURE; // Called when the object is about to be destroyed. diff --git a/source/extensions/filters/http/jwt_authn/filter.cc b/source/extensions/filters/http/jwt_authn/filter.cc index c069e3a87b8d6..768e58e73d351 100644 --- a/source/extensions/filters/http/jwt_authn/filter.cc +++ b/source/extensions/filters/http/jwt_authn/filter.cc @@ -100,8 +100,9 @@ Http::FilterHeadersStatus Filter::decodeHeaders(Http::RequestHeaderMap& headers, return Http::FilterHeadersStatus::StopIteration; } -void Filter::setPayload(const ProtobufWkt::Struct& payload) { - decoder_callbacks_->streamInfo().setDynamicMetadata("envoy.filters.http.jwt_authn", payload); +void Filter::setExtractedData(const ProtobufWkt::Struct& extracted_data) { + decoder_callbacks_->streamInfo().setDynamicMetadata("envoy.filters.http.jwt_authn", + extracted_data); } void Filter::onComplete(const Status& status) { diff --git a/source/extensions/filters/http/jwt_authn/filter.h b/source/extensions/filters/http/jwt_authn/filter.h index e743324f8040a..9330d07be6126 100644 --- a/source/extensions/filters/http/jwt_authn/filter.h +++ b/source/extensions/filters/http/jwt_authn/filter.h @@ -31,8 +31,8 @@ class Filter : public Http::StreamDecoderFilter, private: // Following two functions are for Verifier::Callbacks interface. - // Pass the payload as Struct. - void setPayload(const ProtobufWkt::Struct& payload) override; + // Pass the extracted data from a verified JWT as an opaque ProtobufWkt::Struct. + void setExtractedData(const ProtobufWkt::Struct& extracted_data) override; // It will be called when its verify() call is completed. 
void onComplete(const ::google::jwt_verify::Status& status) override; diff --git a/source/extensions/filters/http/jwt_authn/verifier.cc b/source/extensions/filters/http/jwt_authn/verifier.cc index c22c446cc98e7..6c6449cced31e 100644 --- a/source/extensions/filters/http/jwt_authn/verifier.cc +++ b/source/extensions/filters/http/jwt_authn/verifier.cc @@ -55,14 +55,14 @@ class ContextImpl : public Verifier::Context { // Stores an authenticator object for this request. void storeAuth(AuthenticatorPtr&& auth) { auths_.emplace_back(std::move(auth)); } - // Add a pair of (name, payload), called by Authenticator - void addPayload(const std::string& name, const ProtobufWkt::Struct& payload) { - *(*payload_.mutable_fields())[name].mutable_struct_value() = payload; + // Add a pair of (name, payload), called by Authenticator. It can be either JWT header or payload. + void addExtractedData(const std::string& name, const ProtobufWkt::Struct& extracted_data) { + *(*extrated_data_.mutable_fields())[name].mutable_struct_value() = extracted_data; } - void setPayload() { - if (!payload_.fields().empty()) { - callback_->setPayload(payload_); + void setExtractedData() { + if (!extrated_data_.fields().empty()) { + callback_->setExtractedData(extrated_data_); } } @@ -72,7 +72,7 @@ class ContextImpl : public Verifier::Context { Verifier::Callbacks* callback_; absl::node_hash_map completion_states_; std::vector auths_; - ProtobufWkt::Struct payload_; + ProtobufWkt::Struct extrated_data_; }; // base verifier for provider_name, provider_and_audiences, and allow_missing_or_failed. @@ -88,7 +88,8 @@ class BaseVerifierImpl : public Logger::Loggable, public Verifi } if (Status::Ok == status) { - context.setPayload(); + // We only set the extracted data to context when the JWT is verified. 
+ context.setExtractedData(); } context.callback()->onComplete(status); context.cancel(); @@ -123,8 +124,8 @@ class ProviderVerifierImpl : public BaseVerifierImpl { extractor_->sanitizePayloadHeaders(ctximpl.headers()); auth->verify( ctximpl.headers(), ctximpl.parentSpan(), extractor_->extract(ctximpl.headers()), - [&ctximpl](const std::string& name, const ProtobufWkt::Struct& payload) { - ctximpl.addPayload(name, payload); + [&ctximpl](const std::string& name, const ProtobufWkt::Struct& extracted_data) { + ctximpl.addExtractedData(name, extracted_data); }, [this, context](const Status& status) { onComplete(status, static_cast(*context)); @@ -174,8 +175,8 @@ class AllowFailedVerifierImpl : public BaseVerifierImpl { extractor_->sanitizePayloadHeaders(ctximpl.headers()); auth->verify( ctximpl.headers(), ctximpl.parentSpan(), extractor_->extract(ctximpl.headers()), - [&ctximpl](const std::string& name, const ProtobufWkt::Struct& payload) { - ctximpl.addPayload(name, payload); + [&ctximpl](const std::string& name, const ProtobufWkt::Struct& extracted_data) { + ctximpl.addExtractedData(name, extracted_data); }, [this, context](const Status& status) { onComplete(status, static_cast(*context)); @@ -209,8 +210,8 @@ class AllowMissingVerifierImpl : public BaseVerifierImpl { extractor_->sanitizePayloadHeaders(ctximpl.headers()); auth->verify( ctximpl.headers(), ctximpl.parentSpan(), extractor_->extract(ctximpl.headers()), - [&ctximpl](const std::string& name, const ProtobufWkt::Struct& payload) { - ctximpl.addPayload(name, payload); + [&ctximpl](const std::string& name, const ProtobufWkt::Struct& extracted_data) { + ctximpl.addExtractedData(name, extracted_data); }, [this, context](const Status& status) { onComplete(status, static_cast(*context)); diff --git a/source/extensions/filters/http/jwt_authn/verifier.h b/source/extensions/filters/http/jwt_authn/verifier.h index 2b62ed4bcec90..7d20e709660a8 100644 --- a/source/extensions/filters/http/jwt_authn/verifier.h +++ 
b/source/extensions/filters/http/jwt_authn/verifier.h @@ -32,7 +32,7 @@ class Verifier { * This function is called before onComplete() function. * It will not be called if no payload to write. */ - virtual void setPayload(const ProtobufWkt::Struct& payload) PURE; + virtual void setExtractedData(const ProtobufWkt::Struct& payload) PURE; /** * Called on completion of request. diff --git a/test/extensions/filters/http/jwt_authn/authenticator_test.cc b/test/extensions/filters/http/jwt_authn/authenticator_test.cc index b2b92cf75cdd2..ef39bdd17a357 100644 --- a/test/extensions/filters/http/jwt_authn/authenticator_test.cc +++ b/test/extensions/filters/http/jwt_authn/authenticator_test.cc @@ -61,13 +61,13 @@ class AuthenticatorTest : public testing::Test { std::function on_complete_cb = [&expected_status](const Status& status) { ASSERT_EQ(status, expected_status); }; - auto set_payload_cb = [this](const std::string& name, const ProtobufWkt::Struct& payload) { - out_name_ = name; - out_payload_ = payload; + auto set_extracted_jwt_data_cb = [this](const std::string& name, + const ProtobufWkt::Struct& extracted_data) { + this->addExtractedData(name, extracted_data); }; initTokenExtractor(); auto tokens = extractor_->extract(headers); - auth_->verify(headers, parent_span_, std::move(tokens), std::move(set_payload_cb), + auth_->verify(headers, parent_span_, std::move(tokens), std::move(set_extracted_jwt_data_cb), std::move(on_complete_cb)); } @@ -79,6 +79,12 @@ class AuthenticatorTest : public testing::Test { extractor_ = Extractor::create(providers); } + // This is like ContextImpl::addExtractedData in + // source/extensions/filters/http/jwt_authn/verifier.cc. 
+ void addExtractedData(const std::string& name, const ProtobufWkt::Struct& extracted_data) { + *(*out_extracted_data_.mutable_fields())[name].mutable_struct_value() = extracted_data; + } + JwtAuthentication proto_config_; ExtractorConstPtr extractor_; std::shared_ptr filter_config_; @@ -87,8 +93,7 @@ class AuthenticatorTest : public testing::Test { AuthenticatorPtr auth_; ::google::jwt_verify::JwksPtr jwks_; NiceMock mock_factory_ctx_; - std::string out_name_; - ProtobufWkt::Struct out_payload_; + ProtobufWkt::Struct out_extracted_data_; NiceMock parent_span_; }; @@ -149,14 +154,14 @@ TEST_F(AuthenticatorTest, TestForwardJwt) { // Verify the token is NOT removed. EXPECT_TRUE(headers.has(Http::CustomHeaders::get().Authorization)); - // Payload not set by default - EXPECT_EQ(out_name_, ""); + // Payload is not set by default. + EXPECT_TRUE(out_extracted_data_.fields().empty()); EXPECT_EQ(1U, filter_config_->stats().jwks_fetch_success_.value()); EXPECT_EQ(0U, filter_config_->stats().jwks_fetch_failed_.value()); } -// This test verifies the Jwt payload is set. +// This test verifies the JWT payload is set. TEST_F(AuthenticatorTest, TestSetPayload) { // Config payload_in_metadata flag (*proto_config_.mutable_providers())[std::string(ProviderName)].set_payload_in_metadata( @@ -172,12 +177,74 @@ TEST_F(AuthenticatorTest, TestSetPayload) { expectVerifyStatus(Status::Ok, headers); - // Payload is set - EXPECT_EQ(out_name_, "my_payload"); + // Only one field is set. + EXPECT_EQ(1, out_extracted_data_.fields().size()); - ProtobufWkt::Struct expected_payload; + ProtobufWkt::Value expected_payload; TestUtility::loadFromJson(ExpectedPayloadJSON, expected_payload); - EXPECT_TRUE(TestUtility::protoEqual(out_payload_, expected_payload)); + EXPECT_TRUE( + TestUtility::protoEqual(expected_payload, out_extracted_data_.fields().at("my_payload"))); +} + +// This test verifies setting only the extracted header to metadata. 
+TEST_F(AuthenticatorTest, TestSetHeader) { + // Set the extracted header to metadata. + (*proto_config_.mutable_providers())[std::string(ProviderName)].set_header_in_metadata( + "my_header"); + + createAuthenticator(); + EXPECT_CALL(*raw_fetcher_, fetch(_, _)) + .WillOnce(Invoke([this](Tracing::Span&, JwksFetcher::JwksReceiver& receiver) { + receiver.onJwksSuccess(std::move(jwks_)); + })); + + // Expect to have a valid JWT. + Http::TestRequestHeaderMapImpl headers{{"Authorization", "Bearer " + std::string(GoodToken)}}; + + expectVerifyStatus(Status::Ok, headers); + + // Only one field is set. + EXPECT_EQ(1, out_extracted_data_.fields().size()); + + // We should expect empty JWT payload. + ProtobufWkt::Value expected_payload; + TestUtility::loadFromJson(ExpectedHeaderJSON, expected_payload); + EXPECT_TRUE( + TestUtility::protoEqual(expected_payload, out_extracted_data_.fields().at("my_header"))); +} + +// This test verifies setting the extracted payload and header to metadata. +TEST_F(AuthenticatorTest, TestSetPayloadAndHeader) { + // Set the extracted payload and header to metadata. + (*proto_config_.mutable_providers())[std::string(ProviderName)].set_payload_in_metadata( + "my_payload"); + (*proto_config_.mutable_providers())[std::string(ProviderName)].set_header_in_metadata( + "my_header"); + + createAuthenticator(); + EXPECT_CALL(*raw_fetcher_, fetch(_, _)) + .WillOnce(Invoke([this](Tracing::Span&, JwksFetcher::JwksReceiver& receiver) { + receiver.onJwksSuccess(std::move(jwks_)); + })); + + // Expect to have a valid JWT. + Http::TestRequestHeaderMapImpl headers{{"Authorization", "Bearer " + std::string(GoodToken)}}; + + expectVerifyStatus(Status::Ok, headers); + + // Payload and header are set. + EXPECT_EQ(2, out_extracted_data_.fields().size()); + + // We should expect both JWT payload and header are set. 
+ ProtobufWkt::Value expected_payload; + TestUtility::loadFromJson(ExpectedPayloadJSON, expected_payload); + EXPECT_TRUE( + TestUtility::protoEqual(expected_payload, out_extracted_data_.fields().at("my_payload"))); + + ProtobufWkt::Value expected_header; + TestUtility::loadFromJson(ExpectedHeaderJSON, expected_header); + EXPECT_TRUE( + TestUtility::protoEqual(expected_header, out_extracted_data_.fields().at("my_header"))); } // This test verifies the Jwt with non existing kid @@ -669,12 +736,14 @@ class AuthenticatorJwtCacheTest : public testing::Test { std::function on_complete_cb = [&expected_status](const Status& status) { ASSERT_EQ(status, expected_status); }; - auto set_payload_cb = [this](const std::string& name, const ProtobufWkt::Struct& payload) { + auto set_extracted_jwt_data_cb = [this](const std::string& name, + const ProtobufWkt::Struct& extracted_data) { out_name_ = name; - out_payload_ = payload; + out_extracted_data_ = extracted_data; }; auto tokens = extractor_->extract(headers); - auth_->verify(headers, parent_span_, std::move(tokens), set_payload_cb, on_complete_cb); + auth_->verify(headers, parent_span_, std::move(tokens), set_extracted_jwt_data_cb, + on_complete_cb); } ::google::jwt_verify::JwksPtr jwks_; @@ -686,7 +755,7 @@ class AuthenticatorJwtCacheTest : public testing::Test { ExtractorConstPtr extractor_; NiceMock parent_span_; std::string out_name_; - ProtobufWkt::Struct out_payload_; + ProtobufWkt::Struct out_extracted_data_; }; TEST_F(AuthenticatorJwtCacheTest, TestNonProvider) { @@ -751,7 +820,7 @@ TEST_F(AuthenticatorJwtCacheTest, TestCacheHit) { ProtobufWkt::Struct expected_payload; TestUtility::loadFromJson(ExpectedPayloadJSON, expected_payload); - EXPECT_TRUE(TestUtility::protoEqual(out_payload_, expected_payload)); + EXPECT_TRUE(TestUtility::protoEqual(out_extracted_data_, expected_payload)); } } // namespace diff --git a/test/extensions/filters/http/jwt_authn/filter_test.cc b/test/extensions/filters/http/jwt_authn/filter_test.cc 
index 6afa8ac3e8ec4..17e84718d573b 100644 --- a/test/extensions/filters/http/jwt_authn/filter_test.cc +++ b/test/extensions/filters/http/jwt_authn/filter_test.cc @@ -72,7 +72,6 @@ class FilterTest : public testing::Test { NiceMock filter_callbacks_; std::unique_ptr filter_; std::unique_ptr mock_verifier_; - NiceMock verifier_callback_; Http::TestRequestTrailerMapImpl trailers_; std::shared_ptr> mock_route_; std::shared_ptr per_route_config_; @@ -143,21 +142,23 @@ TEST_F(FilterTest, CorsPreflightMssingAccessControlRequestMethod) { EXPECT_EQ(0U, mock_config_->stats().denied_.value()); } -// This test verifies the setPayload call is handled correctly -TEST_F(FilterTest, TestSetPayloadCall) { +// This test verifies the setExtractedData call is handled correctly +TEST_F(FilterTest, TestSetExtractedData) { setupMockConfig(); - ProtobufWkt::Struct payload; + ProtobufWkt::Struct extracted_data; // A successful authentication completed inline: callback is called inside verify(). - EXPECT_CALL(*mock_verifier_, verify(_)).WillOnce(Invoke([&payload](ContextSharedPtr context) { - context->callback()->setPayload(payload); - context->callback()->onComplete(Status::Ok); - })); + EXPECT_CALL(*mock_verifier_, verify(_)) + .WillOnce(Invoke([&extracted_data](ContextSharedPtr context) { + context->callback()->setExtractedData(extracted_data); + context->callback()->onComplete(Status::Ok); + })); EXPECT_CALL(filter_callbacks_.stream_info_, setDynamicMetadata(_, _)) - .WillOnce(Invoke([&payload](const std::string& ns, const ProtobufWkt::Struct& out_payload) { - EXPECT_EQ(ns, "envoy.filters.http.jwt_authn"); - EXPECT_TRUE(TestUtility::protoEqual(out_payload, payload)); - })); + .WillOnce( + Invoke([&extracted_data](const std::string& ns, const ProtobufWkt::Struct& out_payload) { + EXPECT_EQ(ns, "envoy.filters.http.jwt_authn"); + EXPECT_TRUE(TestUtility::protoEqual(out_payload, extracted_data)); + })); auto headers = Http::TestRequestHeaderMapImpl{}; 
EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->decodeHeaders(headers, false)); diff --git a/test/extensions/filters/http/jwt_authn/group_verifier_test.cc b/test/extensions/filters/http/jwt_authn/group_verifier_test.cc index d68164e6ae11a..a9980189ef7fe 100644 --- a/test/extensions/filters/http/jwt_authn/group_verifier_test.cc +++ b/test/extensions/filters/http/jwt_authn/group_verifier_test.cc @@ -84,10 +84,11 @@ class GroupVerifierTest : public testing::Test { EXPECT_CALL(*mock_auth, doVerify(_, _, _, _, _)) .WillOnce(Invoke([issuer = it.first, status = it.second]( Http::HeaderMap&, Tracing::Span&, std::vector*, - SetPayloadCallback set_payload_cb, AuthenticatorCallback callback) { + SetExtractedJwtDataCallback set_extracted_jwt_data_cb, + AuthenticatorCallback callback) { if (status == Status::Ok) { ProtobufWkt::Struct empty_struct; - set_payload_cb(issuer, empty_struct); + set_extracted_jwt_data_cb(issuer, empty_struct); } callback(status); })); @@ -97,9 +98,9 @@ class GroupVerifierTest : public testing::Test { createVerifier(); } - // This expected payload is only for createSyncMockAuthsAndVerifier() function - // which set an empty payload struct for each issuer. - static ProtobufWkt::Struct getExpectedPayload(const std::vector& issuers) { + // This expected extracted data is only for createSyncMockAuthsAndVerifier() function + // which set an empty extracted data struct for each issuer. 
+ static ProtobufWkt::Struct getExpectedExtractedData(const std::vector& issuers) { ProtobufWkt::Struct struct_obj; auto* fields = struct_obj.mutable_fields(); for (const auto& issuer : issuers) { @@ -113,9 +114,9 @@ class GroupVerifierTest : public testing::Test { for (const auto& provider : providers) { auto mock_auth = std::make_unique(); EXPECT_CALL(*mock_auth, doVerify(_, _, _, _, _)) - .WillOnce(Invoke([&, iss = provider](Http::HeaderMap&, Tracing::Span&, - std::vector*, - SetPayloadCallback, AuthenticatorCallback callback) { + .WillOnce(Invoke([&, iss = provider]( + Http::HeaderMap&, Tracing::Span&, std::vector*, + SetExtractedJwtDataCallback, AuthenticatorCallback callback) { callbacks_[iss] = std::move(callback); })); EXPECT_CALL(*mock_auth, onDestroy()); @@ -167,9 +168,11 @@ TEST_F(GroupVerifierTest, DeeplyNestedAnys) { TestUtility::loadFromYaml(config, proto_config_); createSyncMockAuthsAndVerifier(StatusMap{{"example_provider", Status::Ok}}); - EXPECT_CALL(mock_cb_, setPayload(_)).WillOnce(Invoke([](const ProtobufWkt::Struct& payload) { - EXPECT_TRUE(TestUtility::protoEqual(payload, getExpectedPayload({"example_provider"}))); - })); + EXPECT_CALL(mock_cb_, setExtractedData(_)) + .WillOnce(Invoke([](const ProtobufWkt::Struct& extracted_data) { + EXPECT_TRUE(TestUtility::protoEqual(extracted_data, + getExpectedExtractedData({"example_provider"}))); + })); EXPECT_CALL(mock_cb_, onComplete(Status::Ok)); auto headers = Http::TestRequestHeaderMapImpl{ @@ -220,10 +223,11 @@ TEST_F(GroupVerifierTest, TestRequiresAll) { createSyncMockAuthsAndVerifier( StatusMap{{"example_provider", Status::Ok}, {"other_provider", Status::Ok}}); - EXPECT_CALL(mock_cb_, setPayload(_)).WillOnce(Invoke([](const ProtobufWkt::Struct& payload) { - EXPECT_TRUE(TestUtility::protoEqual( - payload, getExpectedPayload({"example_provider", "other_provider"}))); - })); + EXPECT_CALL(mock_cb_, setExtractedData(_)) + .WillOnce(Invoke([](const ProtobufWkt::Struct& extracted_data) { + 
EXPECT_TRUE(TestUtility::protoEqual( + extracted_data, getExpectedExtractedData({"example_provider", "other_provider"}))); + })); EXPECT_CALL(mock_cb_, onComplete(Status::Ok)); auto headers = Http::TestRequestHeaderMapImpl{ @@ -241,8 +245,8 @@ TEST_F(GroupVerifierTest, TestRequiresAllBadFormat) { TestUtility::loadFromYaml(RequiresAllConfig, proto_config_); createAsyncMockAuthsAndVerifier(std::vector{"example_provider", "other_provider"}); - // onComplete with failure status, not payload - EXPECT_CALL(mock_cb_, setPayload(_)).Times(0); + // onComplete with a failure status, no extracted data. + EXPECT_CALL(mock_cb_, setExtractedData(_)).Times(0); EXPECT_CALL(mock_cb_, onComplete(Status::JwtBadFormat)); auto headers = Http::TestRequestHeaderMapImpl{ {"example-auth-userinfo", ""}, @@ -264,8 +268,8 @@ TEST_F(GroupVerifierTest, TestRequiresAllMissing) { TestUtility::loadFromYaml(RequiresAllConfig, proto_config_); createAsyncMockAuthsAndVerifier(std::vector{"example_provider", "other_provider"}); - // onComplete with failure status, not payload - EXPECT_CALL(mock_cb_, setPayload(_)).Times(0); + // onComplete with a failure status, no extracted data. + EXPECT_CALL(mock_cb_, setExtractedData(_)).Times(0); EXPECT_CALL(mock_cb_, onComplete(Status::JwtMissed)); auto headers = Http::TestRequestHeaderMapImpl{ {"example-auth-userinfo", ""}, @@ -287,8 +291,8 @@ TEST_F(GroupVerifierTest, TestRequiresAllBothFailed) { TestUtility::loadFromYaml(RequiresAllConfig, proto_config_); createAsyncMockAuthsAndVerifier(std::vector{"example_provider", "other_provider"}); - // onComplete with failure status, not payload - EXPECT_CALL(mock_cb_, setPayload(_)).Times(0); + // onComplete with a failure status, no extracted data. 
+ EXPECT_CALL(mock_cb_, setExtractedData(_)).Times(0); EXPECT_CALL(mock_cb_, onComplete(Status::JwtUnknownIssuer)); auto headers = Http::TestRequestHeaderMapImpl{ {"example-auth-userinfo", ""}, @@ -307,9 +311,11 @@ TEST_F(GroupVerifierTest, TestRequiresAnyFirstAuthOK) { TestUtility::loadFromYaml(RequiresAnyConfig, proto_config_); createSyncMockAuthsAndVerifier(StatusMap{{"example_provider", Status::Ok}}); - EXPECT_CALL(mock_cb_, setPayload(_)).WillOnce(Invoke([](const ProtobufWkt::Struct& payload) { - EXPECT_TRUE(TestUtility::protoEqual(payload, getExpectedPayload({"example_provider"}))); - })); + EXPECT_CALL(mock_cb_, setExtractedData(_)) + .WillOnce(Invoke([](const ProtobufWkt::Struct& extracted_data) { + EXPECT_TRUE(TestUtility::protoEqual(extracted_data, + getExpectedExtractedData({"example_provider"}))); + })); EXPECT_CALL(mock_cb_, onComplete(Status::Ok)); auto headers = Http::TestRequestHeaderMapImpl{ @@ -328,9 +334,11 @@ TEST_F(GroupVerifierTest, TestRequiresAnyLastAuthOk) { createSyncMockAuthsAndVerifier( StatusMap{{"example_provider", Status::JwtUnknownIssuer}, {"other_provider", Status::Ok}}); - EXPECT_CALL(mock_cb_, setPayload(_)).WillOnce(Invoke([](const ProtobufWkt::Struct& payload) { - EXPECT_TRUE(TestUtility::protoEqual(payload, getExpectedPayload({"other_provider"}))); - })); + EXPECT_CALL(mock_cb_, setExtractedData(_)) + .WillOnce(Invoke([](const ProtobufWkt::Struct& extracted_data) { + EXPECT_TRUE( + TestUtility::protoEqual(extracted_data, getExpectedExtractedData({"other_provider"}))); + })); EXPECT_CALL(mock_cb_, onComplete(Status::Ok)); auto headers = Http::TestRequestHeaderMapImpl{ @@ -351,8 +359,8 @@ TEST_F(GroupVerifierTest, TestRequiresAnyAllAuthFailed) { createSyncMockAuthsAndVerifier(StatusMap{{"example_provider", Status::JwtMissed}, {"other_provider", Status::JwtHeaderBadKid}}); - // onComplete with failure status, not payload - EXPECT_CALL(mock_cb_, setPayload(_)).Times(0); + // onComplete with a failure status, no extracted data. 
+ EXPECT_CALL(mock_cb_, setExtractedData(_)).Times(0); EXPECT_CALL(mock_cb_, onComplete(Status::JwtHeaderBadKid)); auto headers = Http::TestRequestHeaderMapImpl{ {"example-auth-userinfo", ""}, @@ -375,8 +383,8 @@ TEST_F(GroupVerifierTest, TestRequiresAnyLastIsJwtMissed) { createSyncMockAuthsAndVerifier(StatusMap{{"example_provider", Status::JwtHeaderBadKid}, {"other_provider", Status::JwtMissed}}); - // onComplete with failure status, not payload - EXPECT_CALL(mock_cb_, setPayload(_)).Times(0); + // onComplete with a failure status, no extracted data. + EXPECT_CALL(mock_cb_, setExtractedData(_)).Times(0); EXPECT_CALL(mock_cb_, onComplete(Status::JwtHeaderBadKid)); auto headers = Http::TestRequestHeaderMapImpl{ {"example-auth-userinfo", ""}, @@ -396,8 +404,8 @@ TEST_F(GroupVerifierTest, TestRequiresAnyLastIsJwtUnknownIssuer) { createSyncMockAuthsAndVerifier(StatusMap{{"example_provider", Status::JwtHeaderBadKid}, {"other_provider", Status::JwtUnknownIssuer}}); - // onComplete with failure status, not payload - EXPECT_CALL(mock_cb_, setPayload(_)).Times(0); + // onComplete with a failure status, no extracted data. 
+ EXPECT_CALL(mock_cb_, setExtractedData(_)).Times(0); EXPECT_CALL(mock_cb_, onComplete(Status::JwtHeaderBadKid)); auto headers = Http::TestRequestHeaderMapImpl{ {"example-auth-userinfo", ""}, @@ -415,9 +423,11 @@ TEST_F(GroupVerifierTest, TestAnyInAllFirstAnyIsOk) { TestUtility::loadFromYaml(AllWithAny, proto_config_); createSyncMockAuthsAndVerifier(StatusMap{{"provider_1", Status::Ok}, {"provider_3", Status::Ok}}); - EXPECT_CALL(mock_cb_, setPayload(_)).WillOnce(Invoke([](const ProtobufWkt::Struct& payload) { - EXPECT_TRUE(TestUtility::protoEqual(payload, getExpectedPayload({"provider_1", "provider_3"}))); - })); + EXPECT_CALL(mock_cb_, setExtractedData(_)) + .WillOnce(Invoke([](const ProtobufWkt::Struct& extracted_data) { + EXPECT_TRUE(TestUtility::protoEqual( + extracted_data, getExpectedExtractedData({"provider_1", "provider_3"}))); + })); EXPECT_CALL(mock_cb_, onComplete(Status::Ok)); auto headers = Http::TestRequestHeaderMapImpl{}; @@ -433,9 +443,11 @@ TEST_F(GroupVerifierTest, TestAnyInAllLastAnyIsOk) { {"provider_2", Status::Ok}, {"provider_3", Status::Ok}}); - EXPECT_CALL(mock_cb_, setPayload(_)).WillOnce(Invoke([](const ProtobufWkt::Struct& payload) { - EXPECT_TRUE(TestUtility::protoEqual(payload, getExpectedPayload({"provider_2", "provider_3"}))); - })); + EXPECT_CALL(mock_cb_, setExtractedData(_)) + .WillOnce(Invoke([](const ProtobufWkt::Struct& extracted_data) { + EXPECT_TRUE(TestUtility::protoEqual( + extracted_data, getExpectedExtractedData({"provider_2", "provider_3"}))); + })); EXPECT_CALL(mock_cb_, onComplete(Status::Ok)); auto headers = Http::TestRequestHeaderMapImpl{}; @@ -450,8 +462,8 @@ TEST_F(GroupVerifierTest, TestAnyInAllBothInRequireAnyIsOk) { createAsyncMockAuthsAndVerifier( std::vector{"provider_1", "provider_2", "provider_3"}); - // AsyncMockVerifier doesn't set payload - EXPECT_CALL(mock_cb_, setPayload(_)).Times(0); + // AsyncMockVerifier doesn't set the extracted data. 
+ EXPECT_CALL(mock_cb_, setExtractedData(_)).Times(0); EXPECT_CALL(mock_cb_, onComplete(Status::Ok)); auto headers = Http::TestRequestHeaderMapImpl{}; context_ = Verifier::createContext(headers, parent_span_, &mock_cb_); @@ -468,7 +480,7 @@ TEST_F(GroupVerifierTest, TestAnyInAllBothInRequireAnyFailed) { createAsyncMockAuthsAndVerifier( std::vector{"provider_1", "provider_2", "provider_3"}); - EXPECT_CALL(mock_cb_, setPayload(_)).Times(0); + EXPECT_CALL(mock_cb_, setExtractedData(_)).Times(0); EXPECT_CALL(mock_cb_, onComplete(Status::JwksFetchFail)); auto headers = Http::TestRequestHeaderMapImpl{}; context_ = Verifier::createContext(headers, parent_span_, &mock_cb_); @@ -486,7 +498,7 @@ TEST_F(GroupVerifierTest, TestAllInAnyBothRequireAllFailed) { createSyncMockAuthsAndVerifier( StatusMap{{"provider_1", Status::JwksFetchFail}, {"provider_3", Status::JwtExpired}}); - EXPECT_CALL(mock_cb_, setPayload(_)).Times(0); + EXPECT_CALL(mock_cb_, setExtractedData(_)).Times(0); EXPECT_CALL(mock_cb_, onComplete(Status::JwtExpired)); auto headers = Http::TestRequestHeaderMapImpl{}; context_ = Verifier::createContext(headers, parent_span_, &mock_cb_); @@ -500,8 +512,8 @@ TEST_F(GroupVerifierTest, TestAllInAnyFirstAllIsOk) { createAsyncMockAuthsAndVerifier( std::vector{"provider_1", "provider_2", "provider_3", "provider_4"}); - // AsyncMockVerifier doesn't set payload - EXPECT_CALL(mock_cb_, setPayload(_)).Times(0); + // AsyncMockVerifier doesn't set the extracted data. 
+ EXPECT_CALL(mock_cb_, setExtractedData(_)).Times(0); EXPECT_CALL(mock_cb_, onComplete(Status::Ok)); auto headers = Http::TestRequestHeaderMapImpl{}; context_ = Verifier::createContext(headers, parent_span_, &mock_cb_); diff --git a/test/extensions/filters/http/jwt_authn/mock.h b/test/extensions/filters/http/jwt_authn/mock.h index ff235c5aed00d..7b12ec7ed4dde 100644 --- a/test/extensions/filters/http/jwt_authn/mock.h +++ b/test/extensions/filters/http/jwt_authn/mock.h @@ -29,13 +29,16 @@ class MockAuthenticator : public Authenticator { public: MOCK_METHOD(void, doVerify, (Http::HeaderMap & headers, Tracing::Span& parent_span, - std::vector* tokens, SetPayloadCallback set_payload_cb, + std::vector* tokens, + SetExtractedJwtDataCallback set_extracted_jwt_data_cb, AuthenticatorCallback callback)); void verify(Http::HeaderMap& headers, Tracing::Span& parent_span, - std::vector&& tokens, SetPayloadCallback set_payload_cb, + std::vector&& tokens, + SetExtractedJwtDataCallback set_extracted_jwt_data_cb, AuthenticatorCallback callback) override { - doVerify(headers, parent_span, &tokens, std::move(set_payload_cb), std::move(callback)); + doVerify(headers, parent_span, &tokens, std::move(set_extracted_jwt_data_cb), + std::move(callback)); } MOCK_METHOD(void, onDestroy, ()); @@ -43,7 +46,7 @@ class MockAuthenticator : public Authenticator { class MockVerifierCallbacks : public Verifier::Callbacks { public: - MOCK_METHOD(void, setPayload, (const ProtobufWkt::Struct& payload)); + MOCK_METHOD(void, setExtractedData, (const ProtobufWkt::Struct& payload)); MOCK_METHOD(void, onComplete, (const Status& status)); }; diff --git a/test/extensions/filters/http/jwt_authn/provider_verifier_test.cc b/test/extensions/filters/http/jwt_authn/provider_verifier_test.cc index bdf518ea2c7b8..31ac259c603a1 100644 --- a/test/extensions/filters/http/jwt_authn/provider_verifier_test.cc +++ b/test/extensions/filters/http/jwt_authn/provider_verifier_test.cc @@ -58,9 +58,41 @@ 
TEST_F(ProviderVerifierTest, TestOkJWT) { createVerifier(); MockUpstream mock_pubkey(mock_factory_ctx_.cluster_manager_, PublicKey); - EXPECT_CALL(mock_cb_, setPayload(_)).WillOnce(Invoke([](const ProtobufWkt::Struct& payload) { - EXPECT_TRUE(TestUtility::protoEqual(payload, getExpectedPayload("my_payload"))); - })); + EXPECT_CALL(mock_cb_, setExtractedData(_)) + .WillOnce(Invoke([](const ProtobufWkt::Struct& payload) { + EXPECT_TRUE(TestUtility::protoEqual(payload, getExpectedPayload("my_payload"))); + })); + + EXPECT_CALL(mock_cb_, onComplete(Status::Ok)); + + auto headers = Http::TestRequestHeaderMapImpl{ + {"Authorization", "Bearer " + std::string(GoodToken)}, + {"sec-istio-auth-userinfo", ""}, + }; + context_ = Verifier::createContext(headers, parent_span_, &mock_cb_); + verifier_->verify(context_); + EXPECT_EQ(ExpectedPayloadValue, headers.get_("sec-istio-auth-userinfo")); +} + +// Test to set the payload (hence dynamic metadata) with the header and payload extracted from the +// verified JWT. +TEST_F(ProviderVerifierTest, TestOkJWTWithExtractedHeaderAndPayload) { + TestUtility::loadFromYaml(ExampleConfig, proto_config_); + (*proto_config_.mutable_providers())[std::string(ProviderName)].set_payload_in_metadata( + "my_payload"); + (*proto_config_.mutable_providers())[std::string(ProviderName)].set_header_in_metadata( + "my_header"); + createVerifier(); + MockUpstream mock_pubkey(mock_factory_ctx_.cluster_manager_, PublicKey); + + EXPECT_CALL(mock_cb_, setExtractedData(_)) + .WillOnce(Invoke([](const ProtobufWkt::Struct& payload) { + // The expected payload is a merged struct of the extracted (from the JWT) payload and + // header data with "my_payload" and "my_header" as the keys. 
+ ProtobufWkt::Struct expected_payload; + MessageUtil::loadFromJson(ExpectedPayloadAndHeaderJSON, expected_payload); + EXPECT_TRUE(TestUtility::protoEqual(payload, expected_payload)); + })); EXPECT_CALL(mock_cb_, onComplete(Status::Ok)); @@ -80,9 +112,10 @@ TEST_F(ProviderVerifierTest, TestSpanPassedDown) { createVerifier(); MockUpstream mock_pubkey(mock_factory_ctx_.cluster_manager_, PublicKey); - EXPECT_CALL(mock_cb_, setPayload(_)).WillOnce(Invoke([](const ProtobufWkt::Struct& payload) { - EXPECT_TRUE(TestUtility::protoEqual(payload, getExpectedPayload("my_payload"))); - })); + EXPECT_CALL(mock_cb_, setExtractedData(_)) + .WillOnce(Invoke([](const ProtobufWkt::Struct& payload) { + EXPECT_TRUE(TestUtility::protoEqual(payload, getExpectedPayload("my_payload"))); + })); EXPECT_CALL(mock_cb_, onComplete(Status::Ok)); diff --git a/test/extensions/filters/http/jwt_authn/test_common.h b/test/extensions/filters/http/jwt_authn/test_common.h index 13c083163eb14..114ee7e5ac65f 100644 --- a/test/extensions/filters/http/jwt_authn/test_common.h +++ b/test/extensions/filters/http/jwt_authn/test_common.h @@ -193,6 +193,28 @@ const char ExpectedPayloadJSON[] = R"( } )"; +const char ExpectedHeaderJSON[] = R"( +{ + "alg": "RS256", + "typ": "JWT" +} +)"; + +const char ExpectedPayloadAndHeaderJSON[] = R"( +{ + "my_payload":{ + "iss":"https://example.com", + "exp":2001001001, + "sub":"test@example.com", + "aud":"example_service" + }, + "my_header":{ + "typ":"JWT", + "alg":"RS256" + } +} +)"; + // Token copied from https://github.com/google/jwt_verify_lib/blob/master/src/verify_jwk_ec_test.cc // Use jwt.io to modify payload as: // { From 676f84b9256a30fc3e9cd50ab0ca14b104eead4c Mon Sep 17 00:00:00 2001 From: Rohit Agrawal Date: Tue, 21 Sep 2021 11:26:23 -0400 Subject: [PATCH 081/121] lua: add two new methods to get the value of a header at certain index and get the value size of a given header (#18177) Signed-off-by: Rohit Agrawal Signed-off-by: gayang --- 
.../http/http_filters/lua_filter.rst | 27 ++++++++- docs/root/version_history/current.rst | 1 + .../extensions/filters/http/lua/wrappers.cc | 23 +++++++- source/extensions/filters/http/lua/wrappers.h | 17 ++++++ .../filters/http/lua/lua_integration_test.cc | 58 +++++++++++++++++-- .../filters/http/lua/wrappers_test.cc | 51 ++++++++++++++++ 6 files changed, 168 insertions(+), 9 deletions(-) diff --git a/docs/root/configuration/http/http_filters/lua_filter.rst b/docs/root/configuration/http/http_filters/lua_filter.rst index 72ec360587ca3..78777d4e9c7b6 100644 --- a/docs/root/configuration/http/http_filters/lua_filter.rst +++ b/docs/root/configuration/http/http_filters/lua_filter.rst @@ -510,8 +510,6 @@ base64Escape() Encodes the input string as base64. This can be useful for escaping binary data. -.. _config_http_filters_lua_header_wrapper: - timestamp() ^^^^^^^^^^^ @@ -523,6 +521,8 @@ High resolution timestamp function. *format* is an optional enum parameter to in *EnvoyTimestampResolution.MILLISECOND* is supported The function returns timestamp in milliseconds since epoch by default if format is not set. +.. _config_http_filters_lua_header_wrapper: + Header object API ----------------- @@ -546,6 +546,29 @@ get() Gets a header. *key* is a string that supplies the header key. Returns a string that is the header value or nil if there is no such header. +getAtIndex() +^^^^^^^^^^^^ + +.. code-block:: lua + + headers:getAtIndex(key, index) + +Gets the header value at the given index. It can be used to fetch a specific value in case the +given header has multiple values. *key* is a string that supplies the header key and index is +an integer that supplies the position. It returns a string that is the header value or nil if +there is no such header or if there is no value at the specified index. + +getNumValues() +^^^^^^^^^^^^^^ + +.. code-block:: lua + + headers:getNumValues(key) + +Gets the number of values of a given header. 
It can be used to fetch the total number of values in case
+the given header has multiple values. *key* is a string that supplies the header key. It returns
+an integer with the number of values for the given header or *0* if there is no such header.
+
 __pairs()
 ^^^^^^^^^
 
diff --git a/docs/root/version_history/current.rst b/docs/root/version_history/current.rst
index ae7a634a4198c..a25f048717d6b 100644
--- a/docs/root/version_history/current.rst
+++ b/docs/root/version_history/current.rst
@@ -114,6 +114,7 @@ New Features
 * jwt_authn: added support for extracting JWTs from request cookies using :ref:`from_cookies `.
 * jwt_authn: added support for setting the extracted headers from a successfully verified JWT using :ref:`header_in_metadata ` to dynamic metadata.
 * listener: new listener metric ``downstream_cx_transport_socket_connect_timeout`` to track transport socket timeouts.
+* lua: added ``header:getAtIndex()`` and ``header:getNumValues()`` methods to :ref:`header object ` for retrieving the value of a header at a certain index and getting the total number of values for a given header.
 * matcher: added :ref:`invert ` for inverting the match result in the metadata matcher.
 * overload: add a new overload action that resets streams using a lot of memory. To enable the tracking of allocated bytes in buffers that a stream is using we need to configure the minimum threshold for tracking via:ref:`buffer_factory_config `. We have an overload action ``Envoy::Server::OverloadActionNameValues::ResetStreams`` that takes advantage of the tracking to reset the most expensive stream first.
 * rbac: added :ref:`destination_port_range ` for matching range of destination ports.
diff --git a/source/extensions/filters/http/lua/wrappers.cc b/source/extensions/filters/http/lua/wrappers.cc index 3f2c02a2fe5da..19a6d025ed2cb 100644 --- a/source/extensions/filters/http/lua/wrappers.cc +++ b/source/extensions/filters/http/lua/wrappers.cc @@ -41,8 +41,8 @@ int HeaderMapWrapper::luaAdd(lua_State* state) { } int HeaderMapWrapper::luaGet(lua_State* state) { - const char* key = luaL_checkstring(state, 2); - const auto value = + absl::string_view key = Filters::Common::Lua::getStringViewFromLuaString(state, 2); + const Http::HeaderUtility::GetAllOfHeaderAsStringResult value = Http::HeaderUtility::getAllOfHeaderAsString(headers_, Http::LowerCaseString(key)); if (value.result().has_value()) { lua_pushlstring(state, value.result().value().data(), value.result().value().length()); @@ -52,6 +52,25 @@ int HeaderMapWrapper::luaGet(lua_State* state) { } } +int HeaderMapWrapper::luaGetAtIndex(lua_State* state) { + absl::string_view key = Filters::Common::Lua::getStringViewFromLuaString(state, 2); + const int index = luaL_checknumber(state, 3); + const Http::HeaderMap::GetResult header_value = headers_.get(Http::LowerCaseString(key)); + if (index >= 0 && header_value.size() > static_cast(index)) { + absl::string_view value = header_value[index]->value().getStringView(); + lua_pushlstring(state, value.data(), value.length()); + return 1; + } + return 0; +} + +int HeaderMapWrapper::luaGetNumValues(lua_State* state) { + absl::string_view key = Filters::Common::Lua::getStringViewFromLuaString(state, 2); + const Http::HeaderMap::GetResult header_value = headers_.get(Http::LowerCaseString(key)); + lua_pushnumber(state, header_value.size()); + return 1; +} + int HeaderMapWrapper::luaPairs(lua_State* state) { if (iterator_.get() != nullptr) { luaL_error(state, "cannot create a second iterator before completing the first"); diff --git a/source/extensions/filters/http/lua/wrappers.h b/source/extensions/filters/http/lua/wrappers.h index e04ca24f47392..dad5db7ac3c37 100644 
--- a/source/extensions/filters/http/lua/wrappers.h +++ b/source/extensions/filters/http/lua/wrappers.h @@ -47,6 +47,8 @@ class HeaderMapWrapper : public Filters::Common::Lua::BaseLuaObjectstartRequest(request_headers); Http::StreamEncoder& encoder = encoder_decoder.first; @@ -320,6 +333,41 @@ name: lua encoder.encodeData(request_data2, true); waitForNextUpstreamRequest(); + EXPECT_EQ("foo", upstream_request_->headers() + .get(Http::LowerCaseString("test_header_value_0"))[0] + ->value() + .getStringView()); + + EXPECT_EQ("bar", upstream_request_->headers() + .get(Http::LowerCaseString("test_header_value_1"))[0] + ->value() + .getStringView()); + + EXPECT_EQ("nil_value", upstream_request_->headers() + .get(Http::LowerCaseString("test_header_value_2"))[0] + ->value() + .getStringView()); + + EXPECT_EQ("2", upstream_request_->headers() + .get(Http::LowerCaseString("test_header_value_size"))[0] + ->value() + .getStringView()); + + EXPECT_EQ("foo;bar;", upstream_request_->headers() + .get(Http::LowerCaseString("cookie_0"))[0] + ->value() + .getStringView()); + + EXPECT_EQ("1,3;2,5;", upstream_request_->headers() + .get(Http::LowerCaseString("cookie_1"))[0] + ->value() + .getStringView()); + + EXPECT_EQ("2", upstream_request_->headers() + .get(Http::LowerCaseString("cookie_size"))[0] + ->value() + .getStringView()); + EXPECT_EQ("10", upstream_request_->headers() .get(Http::LowerCaseString("request_body_size"))[0] ->value() diff --git a/test/extensions/filters/http/lua/wrappers_test.cc b/test/extensions/filters/http/lua/wrappers_test.cc index 546073baa67ed..e4ffb8e8cda03 100644 --- a/test/extensions/filters/http/lua/wrappers_test.cc +++ b/test/extensions/filters/http/lua/wrappers_test.cc @@ -68,6 +68,57 @@ TEST_F(LuaHeaderMapWrapperTest, Methods) { start("callMe"); } +// Get the total number of values for a certain header with multiple values. 
+TEST_F(LuaHeaderMapWrapperTest, GetNumValues) { + const std::string SCRIPT{R"EOF( + function callMe(object) + testPrint(object:getNumValues("X-Test")) + testPrint(object:getNumValues(":path")) + testPrint(object:getNumValues("foobar")) + end + )EOF"}; + + InSequence s; + setup(SCRIPT); + + Http::TestRequestHeaderMapImpl headers{{":path", "/"}, {"x-test", "foo"}, {"x-test", "bar"}}; + HeaderMapWrapper::create(coroutine_->luaState(), headers, []() { return true; }); + EXPECT_CALL(printer_, testPrint("2")); + EXPECT_CALL(printer_, testPrint("1")); + EXPECT_CALL(printer_, testPrint("0")); + start("callMe"); +} + +// Get the value on a certain index for a header with multiple values. +TEST_F(LuaHeaderMapWrapperTest, GetAtIndex) { + const std::string SCRIPT{R"EOF( + function callMe(object) + if object:getAtIndex("x-test", -1) == nil then + testPrint("invalid_negative_index") + end + testPrint(object:getAtIndex("X-Test", 0)) + testPrint(object:getAtIndex("x-test", 1)) + testPrint(object:getAtIndex("x-test", 2)) + if object:getAtIndex("x-test", 3) == nil then + testPrint("nil_value") + end + end + )EOF"}; + + InSequence s; + setup(SCRIPT); + + Http::TestRequestHeaderMapImpl headers{ + {":path", "/"}, {"x-test", "foo"}, {"x-test", "bar"}, {"x-test", ""}}; + HeaderMapWrapper::create(coroutine_->luaState(), headers, []() { return true; }); + EXPECT_CALL(printer_, testPrint("invalid_negative_index")); + EXPECT_CALL(printer_, testPrint("foo")); + EXPECT_CALL(printer_, testPrint("bar")); + EXPECT_CALL(printer_, testPrint("")); + EXPECT_CALL(printer_, testPrint("nil_value")); + start("callMe"); +} + // Test modifiable methods. 
TEST_F(LuaHeaderMapWrapperTest, ModifiableMethods) { const std::string SCRIPT{R"EOF( From 3eb22b58853e8a3f684de918991d105decbe7b31 Mon Sep 17 00:00:00 2001 From: Rohit Agrawal Date: Tue, 21 Sep 2021 11:34:06 -0400 Subject: [PATCH 082/121] filter: set additional termination details and response flag for auth_digest_no_match in client_ssl_auth (#18180) Fixes #17847 Signed-off-by: Rohit Agrawal Signed-off-by: gayang --- docs/root/version_history/current.rst | 1 + .../filters/network/client_ssl_auth/client_ssl_auth.cc | 5 +++++ .../filters/network/client_ssl_auth/client_ssl_auth_test.cc | 4 ++++ 3 files changed, 10 insertions(+) diff --git a/docs/root/version_history/current.rst b/docs/root/version_history/current.rst index a25f048717d6b..fb71478c267c1 100644 --- a/docs/root/version_history/current.rst +++ b/docs/root/version_history/current.rst @@ -32,6 +32,7 @@ Minor Behavior Changes ---------------------- *Changes that may cause incompatibilities for some users, but should not for most* +* client_ssl_auth filter: now sets additional termination details and **UAEX** response flag when the client certificate is not in the allowed-list. * config: configuration files ending in .yml now load as YAML. * config: configuration file extensions now ignore case when deciding the file type. E.g., .JSON file load as JSON. * config: reduced log level for "Unable to establish new stream" xDS logs to debug. 
The log level diff --git a/source/extensions/filters/network/client_ssl_auth/client_ssl_auth.cc b/source/extensions/filters/network/client_ssl_auth/client_ssl_auth.cc index 669cc315ed3e0..098add77b56a4 100644 --- a/source/extensions/filters/network/client_ssl_auth/client_ssl_auth.cc +++ b/source/extensions/filters/network/client_ssl_auth/client_ssl_auth.cc @@ -22,6 +22,8 @@ namespace Extensions { namespace NetworkFilters { namespace ClientSslAuth { +constexpr absl::string_view AuthDigestNoMatch = "auth_digest_no_match"; + ClientSslAuthConfig::ClientSslAuthConfig( const envoy::extensions::filters::network::client_ssl_auth::v3::ClientSSLAuth& config, ThreadLocal::SlotAllocator& tls, Upstream::ClusterManager& cm, Event::Dispatcher& dispatcher, @@ -121,6 +123,9 @@ void ClientSslAuthFilter::onEvent(Network::ConnectionEvent event) { if (!config_->allowedPrincipals().allowed( read_callbacks_->connection().ssl()->sha256PeerCertificateDigest())) { + read_callbacks_->connection().streamInfo().setResponseFlag( + StreamInfo::ResponseFlag::UpstreamProtocolError); + read_callbacks_->connection().streamInfo().setResponseCodeDetails(AuthDigestNoMatch); config_->stats().auth_digest_no_match_.inc(); read_callbacks_->connection().close(Network::ConnectionCloseType::NoFlush); return; diff --git a/test/extensions/filters/network/client_ssl_auth/client_ssl_auth_test.cc b/test/extensions/filters/network/client_ssl_auth/client_ssl_auth_test.cc index a301eee1665e2..d58c361e40ac1 100644 --- a/test/extensions/filters/network/client_ssl_auth/client_ssl_auth_test.cc +++ b/test/extensions/filters/network/client_ssl_auth/client_ssl_auth_test.cc @@ -163,6 +163,10 @@ TEST_F(ClientSslAuthFilterTest, Ssl) { std::make_shared("192.168.1.1")); std::string expected_sha_1("digest"); EXPECT_CALL(*ssl_, sha256PeerCertificateDigest()).WillOnce(ReturnRef(expected_sha_1)); + EXPECT_CALL(filter_callbacks_.connection_.stream_info_, + setResponseFlag(StreamInfo::ResponseFlag::UpstreamProtocolError)); + 
EXPECT_CALL(filter_callbacks_.connection_.stream_info_, + setResponseCodeDetails("auth_digest_no_match")); EXPECT_CALL(filter_callbacks_.connection_, close(Network::ConnectionCloseType::NoFlush)); EXPECT_EQ(Network::FilterStatus::StopIteration, instance_->onNewConnection()); filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::Connected); From 428285c34f663ee2ab339d709fb5a520c13c87c7 Mon Sep 17 00:00:00 2001 From: tyxia <72890320+tyxia@users.noreply.github.com> Date: Tue, 21 Sep 2021 13:16:36 -0400 Subject: [PATCH 083/121] Add support to allow routing to the weighted cluster specified in the request_header (#17816) Signed-off-by: Tianyu Xia Signed-off-by: gayang --- docs/root/version_history/current.rst | 1 + source/common/router/config_impl.cc | 82 +++++++-- source/common/router/config_impl.h | 8 +- test/common/router/config_impl_test.cc | 33 ++++ .../redis/redis_cluster_integration_test.cc | 2 +- .../redis_proxy_integration_test.cc | 2 +- .../dns_filter/dns_filter_integration_test.cc | 2 +- .../udp_proxy/udp_proxy_integration_test.cc | 2 +- .../host/previous_hosts/integration_test.cc | 2 +- test/integration/BUILD | 13 ++ test/integration/base_integration_test.cc | 8 +- test/integration/base_integration_test.h | 8 +- test/integration/filters/BUILD | 19 ++ .../filters/repick_cluster_filter.cc | 48 +++++ .../filters/repick_cluster_filter.h | 15 ++ test/integration/http_integration.cc | 21 ++- test/integration/http_integration.h | 8 +- .../load_stats_integration_test.cc | 2 +- test/integration/server.cc | 33 ++-- test/integration/server.h | 14 +- .../weighted_cluster_integration_test.cc | 165 ++++++++++++++++++ test/mocks/common.cc | 5 + test/mocks/common.h | 2 + tools/spelling/spelling_dictionary.txt | 1 + 24 files changed, 435 insertions(+), 61 deletions(-) create mode 100644 test/integration/filters/repick_cluster_filter.cc create mode 100644 test/integration/filters/repick_cluster_filter.h create mode 100644 
test/integration/weighted_cluster_integration_test.cc diff --git a/docs/root/version_history/current.rst b/docs/root/version_history/current.rst index fb71478c267c1..681b5b39f2625 100644 --- a/docs/root/version_history/current.rst +++ b/docs/root/version_history/current.rst @@ -105,6 +105,7 @@ New Features * contrib: added new :ref:`contrib images ` which contain contrib extensions. * dns: added :ref:`V4_PREFERRED ` option to return V6 addresses only if V4 addresses are not available. * grpc reverse bridge: added a new :ref:`option ` to support streaming response bodies when withholding gRPC frames from the upstream. +* http: added cluster_header in :ref:`weighted_clusters ` to allow routing to the weighted cluster specified in the request_header. * http: added :ref:`alternate_protocols_cache_options ` for enabling HTTP/3 connections to servers which advertise HTTP/3 support via `HTTP Alternative Services `_. * http: added :ref:`string_match ` in the header matcher. * http: added :ref:`x-envoy-upstream-stream-duration-ms ` that allows configuring the max stream duration via a request header. diff --git a/source/common/router/config_impl.cc b/source/common/router/config_impl.cc index e0f9f4245521f..d756c607966a3 100644 --- a/source/common/router/config_impl.cc +++ b/source/common/router/config_impl.cc @@ -983,6 +983,23 @@ const RouteEntry* RouteEntryImplBase::routeEntry() const { } } +RouteConstSharedPtr +RouteEntryImplBase::pickClusterViaClusterHeader(const Http::LowerCaseString& cluster_header_name, + const Http::HeaderMap& headers) const { + const auto entry = headers.get(cluster_header_name); + std::string final_cluster_name; + if (!entry.empty()) { + // This is an implicitly untrusted header, so per the API documentation only + // the first value is used. + final_cluster_name = std::string(entry[0]->value().getStringView()); + } + + // NOTE: Though we return a shared_ptr here, the current ownership model + // assumes that the route table sticks around. 
See snapped_route_config_ in + // ConnectionManagerImpl::ActiveStream. + return std::make_shared(this, final_cluster_name); +} + RouteConstSharedPtr RouteEntryImplBase::clusterEntry(const Http::HeaderMap& headers, uint64_t random_value) const { // Gets the route object chosen from the list of weighted clusters @@ -992,23 +1009,42 @@ RouteConstSharedPtr RouteEntryImplBase::clusterEntry(const Http::HeaderMap& head return shared_from_this(); } else { ASSERT(!cluster_header_name_.get().empty()); - const auto entry = headers.get(cluster_header_name_); - std::string final_cluster_name; - if (!entry.empty()) { - // This is an implicitly untrusted header, so per the API documentation only the first - // value is used. - final_cluster_name = std::string(entry[0]->value().getStringView()); - } + return pickClusterViaClusterHeader(cluster_header_name_, headers); + } + } + return pickWeightedCluster(headers, random_value, true); +} + +RouteConstSharedPtr RouteEntryImplBase::pickWeightedCluster(const Http::HeaderMap& headers, + const uint64_t random_value, + const bool ignore_overflow) const { + const uint64_t selected_value = random_value % total_cluster_weight_; + uint64_t begin = 0; + uint64_t end = 0; + + // Find the right cluster to route to based on the interval in which + // the selected value falls. The intervals are determined as + // [0, cluster1_weight), [cluster1_weight, cluster1_weight+cluster2_weight),.. + for (const WeightedClusterEntrySharedPtr& cluster : weighted_clusters_) { + end = begin + cluster->clusterWeight(); + if (!ignore_overflow) { + // end > total_cluster_weight: This case can only occur with Runtimes, + // when the user specifies invalid weights such that + // sum(weights) > total_cluster_weight. + ASSERT(end <= total_cluster_weight_); + } - // NOTE: Though we return a shared_ptr here, the current ownership model assumes that - // the route table sticks around. See snapped_route_config_ in - // ConnectionManagerImpl::ActiveStream. 
- return std::make_shared(this, final_cluster_name); + if (selected_value >= begin && selected_value < end) { + if (!cluster->clusterHeaderName().get().empty() && + !headers.get(cluster->clusterHeaderName()).empty()) { + return pickClusterViaClusterHeader(cluster->clusterHeaderName(), headers); + } + return cluster; } + begin = end; } - return WeightedClusterUtil::pickCluster(weighted_clusters_, total_cluster_weight_, random_value, - true); + NOT_REACHED_GCOVR_EXCL_LINE; } void RouteEntryImplBase::validateClusters( @@ -1029,9 +1065,17 @@ void RouteEntryImplBase::validateClusters( } } else if (!weighted_clusters_.empty()) { for (const WeightedClusterEntrySharedPtr& cluster : weighted_clusters_) { - if (!cluster_info_maps.hasCluster(cluster->clusterName())) { - throw EnvoyException( - fmt::format("route: unknown weighted cluster '{}'", cluster->clusterName())); + if (!cluster->clusterName().empty()) { + if (!cluster_info_maps.hasCluster(cluster->clusterName())) { + throw EnvoyException( + fmt::format("route: unknown weighted cluster '{}'", cluster->clusterName())); + } + } + // For weighted clusters with `cluster_header_name`, we only verify that this field is + // not empty because the cluster name is not set yet at config time (hence the validation + // here). 
+ else if (cluster->clusterHeaderName().get().empty()) { + throw EnvoyException("route: unknown weighted cluster with no cluster_header field"); } } } @@ -1378,9 +1422,9 @@ RouteMatcher::RouteMatcher(const envoy::config::route::v3::RouteConfiguration& r validation_clusters = factory_context.clusterManager().clusters(); } for (const auto& virtual_host_config : route_config.virtual_hosts()) { - VirtualHostSharedPtr virtual_host( - new VirtualHostImpl(virtual_host_config, optional_http_filters, global_route_config, - factory_context, *vhost_scope_, validator, validation_clusters)); + VirtualHostSharedPtr virtual_host = std::make_shared( + virtual_host_config, optional_http_filters, global_route_config, factory_context, + *vhost_scope_, validator, validation_clusters); for (const std::string& domain_name : virtual_host_config.domains()) { const std::string domain = Http::LowerCaseString(domain_name).get(); bool duplicate_found = false; diff --git a/source/common/router/config_impl.h b/source/common/router/config_impl.h index 6688b14a548cd..659861f3b11c9 100644 --- a/source/common/router/config_impl.h +++ b/source/common/router/config_impl.h @@ -819,7 +819,7 @@ class RouteEntryImplBase : public RouteEntry, const std::string& filter_name, std::function cb) const override; - const Http::LowerCaseString& clusterHeaderName() { return cluster_header_name_; } + const Http::LowerCaseString& clusterHeaderName() const { return cluster_header_name_; } private: const std::string runtime_key_; @@ -863,6 +863,12 @@ class RouteEntryImplBase : public RouteEntry, ProtobufMessage::ValidationVisitor& validator, absl::string_view current_route_name) const; + RouteConstSharedPtr pickClusterViaClusterHeader(const Http::LowerCaseString& cluster_header_name, + const Http::HeaderMap& headers) const; + + RouteConstSharedPtr pickWeightedCluster(const Http::HeaderMap& headers, uint64_t random_value, + bool ignore_overflow) const; + // Default timeout is 15s if nothing is specified in the route 
config. static const uint64_t DEFAULT_ROUTE_TIMEOUT_MS = 15000; diff --git a/test/common/router/config_impl_test.cc b/test/common/router/config_impl_test.cc index 9961f532b698c..57a82d958a388 100644 --- a/test/common/router/config_impl_test.cc +++ b/test/common/router/config_impl_test.cc @@ -2891,6 +2891,39 @@ TEST_F(RouteMatcherTest, ClusterHeader) { } } +TEST_F(RouteMatcherTest, WeightedClusterHeader) { + const std::string yaml = R"EOF( + virtual_hosts: + - name: www1 + domains: ["www1.lyft.com"] + routes: + - match: { prefix: "/" } + route: + weighted_clusters: + total_weight: 100 + clusters: + - cluster_header: some_header + weight: 30 + - name: cluster1 + weight: 30 + - name: cluster2 + weight: 40 + )EOF"; + + factory_context_.cluster_manager_.initializeClusters({"some_header", "cluster1", "cluster2"}, {}); + TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true); + + Http::TestRequestHeaderMapImpl headers = genHeaders("www1.lyft.com", "/foo", "GET"); + // The configured cluster header isn't present in the request headers, therefore cluster selection + // fails and we get the empty string + EXPECT_EQ("", config.route(headers, 115)->routeEntry()->clusterName()); + // Modify the header mapping. 
+ headers.addCopy("some_header", "some_cluster"); + EXPECT_EQ("some_cluster", config.route(headers, 115)->routeEntry()->clusterName()); + EXPECT_EQ("cluster1", config.route(headers, 445)->routeEntry()->clusterName()); + EXPECT_EQ("cluster2", config.route(headers, 560)->routeEntry()->clusterName()); +} + TEST_F(RouteMatcherTest, ContentType) { const std::string yaml = R"EOF( virtual_hosts: diff --git a/test/extensions/clusters/redis/redis_cluster_integration_test.cc b/test/extensions/clusters/redis/redis_cluster_integration_test.cc index 960c8d1f9338c..346d47b0c3446 100644 --- a/test/extensions/clusters/redis/redis_cluster_integration_test.cc +++ b/test/extensions/clusters/redis/redis_cluster_integration_test.cc @@ -149,7 +149,7 @@ class RedisClusterIntegrationTest : public testing::TestWithParam 1) { - setDeterministic(); + setDeterministicValue(); setUpstreamCount(upstream_count); config_helper_.addConfigModifier( [upstream_count](envoy::config::bootstrap::v3::Bootstrap& bootstrap) { diff --git a/test/extensions/filters/udp/udp_proxy/udp_proxy_integration_test.cc b/test/extensions/filters/udp/udp_proxy/udp_proxy_integration_test.cc index a2a3231e37c0b..f2c4dd185a06d 100644 --- a/test/extensions/filters/udp/udp_proxy/udp_proxy_integration_test.cc +++ b/test/extensions/filters/udp/udp_proxy/udp_proxy_integration_test.cc @@ -20,7 +20,7 @@ class UdpProxyIntegrationTest : public testing::TestWithParam 1) { - setDeterministic(); + setDeterministicValue(); setUpstreamCount(upstream_count); config_helper_.addConfigModifier( [upstream_count](envoy::config::bootstrap::v3::Bootstrap& bootstrap) { diff --git a/test/extensions/retry/host/previous_hosts/integration_test.cc b/test/extensions/retry/host/previous_hosts/integration_test.cc index a433ad4e224b6..a05dfe82e3a7c 100644 --- a/test/extensions/retry/host/previous_hosts/integration_test.cc +++ b/test/extensions/retry/host/previous_hosts/integration_test.cc @@ -17,7 +17,7 @@ class PrevioustHostsIntegrationTest : public 
testing::Test, public HttpIntegrati : HttpIntegrationTest(Http::CodecType::HTTP2, Network::Address::IpVersion::v4) {} void initialize() override { - setDeterministic(); + setDeterministicValue(); // Add the retry configuration to a new virtual host. const auto vhost_config = R"EOF( diff --git a/test/integration/BUILD b/test/integration/BUILD index 08e2cf881426d..060cda5cdb94c 100644 --- a/test/integration/BUILD +++ b/test/integration/BUILD @@ -1836,3 +1836,16 @@ envoy_cc_test( "@envoy_api//envoy/extensions/http/original_ip_detection/custom_header/v3:pkg_cc_proto", ], ) + +envoy_cc_test( + name = "weighted_cluster_integration_test", + srcs = ["weighted_cluster_integration_test.cc"], + deps = [ + ":http_integration_lib", + ":integration_lib", + "//test/integration/filters:repick_cluster_filter_lib", + "@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto", + "@envoy_api//envoy/config/route/v3:pkg_cc_proto", + "@envoy_api//envoy/extensions/filters/network/http_connection_manager/v3:pkg_cc_proto", + ], +) diff --git a/test/integration/base_integration_test.cc b/test/integration/base_integration_test.cc index 5b870e5e5cf47..3caf9f43b3ec9 100644 --- a/test/integration/base_integration_test.cc +++ b/test/integration/base_integration_test.cc @@ -332,10 +332,12 @@ std::string getListenerDetails(Envoy::Server::Instance& server) { void BaseIntegrationTest::createGeneratedApiTestServer( const std::string& bootstrap_path, const std::vector& port_names, Server::FieldValidationConfig validator_config, bool allow_lds_rejection) { + test_server_ = IntegrationTestServer::create( - bootstrap_path, version_, on_server_ready_function_, on_server_init_function_, deterministic_, - timeSystem(), *api_, defer_listener_finalization_, process_object_, validator_config, - concurrency_, drain_time_, drain_strategy_, proxy_buffer_factory_, use_real_stats_); + bootstrap_path, version_, on_server_ready_function_, on_server_init_function_, + deterministic_value_, timeSystem(), *api_, 
defer_listener_finalization_, process_object_, + validator_config, concurrency_, drain_time_, drain_strategy_, proxy_buffer_factory_, + use_real_stats_); if (config_helper_.bootstrap().static_resources().listeners_size() > 0 && !defer_listener_finalization_) { diff --git a/test/integration/base_integration_test.h b/test/integration/base_integration_test.h index d629b0572ff35..6b7039c34da28 100644 --- a/test/integration/base_integration_test.h +++ b/test/integration/base_integration_test.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include #include @@ -69,7 +70,7 @@ class BaseIntegrationTest : protected Logger::Loggable { // configuration generated in ConfigHelper::finalize. void skipPortUsageValidation() { config_helper_.skipPortUsageValidation(); } // Make test more deterministic by using a fixed RNG value. - void setDeterministic() { deterministic_ = true; } + void setDeterministicValue(uint64_t value = 0) { deterministic_value_ = value; } Http::CodecType upstreamProtocol() const { return upstream_config_.upstream_protocol_; } @@ -415,8 +416,9 @@ class BaseIntegrationTest : protected Logger::Loggable { // This does nothing if autonomous_upstream_ is false bool autonomous_allow_incomplete_streams_{false}; - // True if test will use a fixed RNG value. - bool deterministic_{}; + // If this member is not empty, the test will use a fixed RNG value specified + // by it. + absl::optional deterministic_value_{}; // Set true when your test will itself take care of ensuring listeners are up, and registering // them in the port_map_. 
diff --git a/test/integration/filters/BUILD b/test/integration/filters/BUILD index c8e796b6c89a0..79e08f2533a4b 100644 --- a/test/integration/filters/BUILD +++ b/test/integration/filters/BUILD @@ -599,3 +599,22 @@ envoy_cc_test_library( "//test/extensions/filters/http/common:empty_http_filter_config_lib", ], ) + +envoy_cc_test_library( + name = "repick_cluster_filter_lib", + srcs = [ + "repick_cluster_filter.cc", + ], + hdrs = [ + "repick_cluster_filter.h", + ], + deps = [ + ":common_lib", + "//envoy/http:filter_interface", + "//envoy/registry", + "//envoy/server:filter_config_interface", + "//source/extensions/filters/http/common:pass_through_filter_lib", + "//test/extensions/filters/http/common:empty_http_filter_config_lib", + "@com_google_absl//absl/strings:str_format", + ], +) diff --git a/test/integration/filters/repick_cluster_filter.cc b/test/integration/filters/repick_cluster_filter.cc new file mode 100644 index 0000000000000..40b5e25bf88c6 --- /dev/null +++ b/test/integration/filters/repick_cluster_filter.cc @@ -0,0 +1,48 @@ +#include "test/integration/filters/repick_cluster_filter.h" + +#include + +#include "envoy/http/filter.h" +#include "envoy/registry/registry.h" +#include "envoy/server/filter_config.h" + +#include "source/extensions/filters/http/common/pass_through_filter.h" + +#include "test/extensions/filters/http/common/empty_http_filter_config.h" + +#include "absl/strings/str_format.h" + +namespace Envoy { +namespace RepickClusterFilter { + +// A test filter that modifies the request header (i.e. map the cluster header +// to cluster name), clear the route cache. 
+class RepickClusterFilter : public Http::PassThroughFilter { +public: + Http::FilterHeadersStatus decodeHeaders(Http::RequestHeaderMap& request_header, bool) override { + request_header.addCopy(Envoy::Http::LowerCaseString(ClusterHeaderName), ClusterName); + decoder_callbacks_->clearRouteCache(); + return Http::FilterHeadersStatus::Continue; + } +}; + +class RepickClusterFilterConfig : public Extensions::HttpFilters::Common::EmptyHttpFilterConfig { +public: + RepickClusterFilterConfig() : EmptyHttpFilterConfig("repick-cluster-filter") {} + + Http::FilterFactoryCb createFilter(const std::string&, + Server::Configuration::FactoryContext&) override { + return [](Http::FilterChainFactoryCallbacks& callbacks) -> void { + callbacks.addStreamFilter( + std::make_shared<::Envoy::RepickClusterFilter::RepickClusterFilter>()); + }; + } +}; + +// Perform static registration +static Registry::RegisterFactory + register_; + +} // namespace RepickClusterFilter +} // namespace Envoy diff --git a/test/integration/filters/repick_cluster_filter.h b/test/integration/filters/repick_cluster_filter.h new file mode 100644 index 0000000000000..e0e5741572581 --- /dev/null +++ b/test/integration/filters/repick_cluster_filter.h @@ -0,0 +1,15 @@ +#pragma once + +#include "absl/strings/string_view.h" + +namespace Envoy { +namespace RepickClusterFilter { + +// The cluster names and cluster headers here need to be correlated correctly with +// weighted_cluster_integration_test or any other end users, to make sure that header modifications +// have been done on the correct target. So they are declared and defined here. 
+inline constexpr absl::string_view ClusterName = "cluster_1"; +inline constexpr absl::string_view ClusterHeaderName = "cluster_header_1"; + +} // namespace RepickClusterFilter +} // namespace Envoy diff --git a/test/integration/http_integration.cc b/test/integration/http_integration.cc index dc63faba92e39..771e2ac186ee5 100644 --- a/test/integration/http_integration.cc +++ b/test/integration/http_integration.cc @@ -382,8 +382,8 @@ ConfigHelper::ConfigModifierFunction HttpIntegrationTest::setEnableUpstreamTrail IntegrationStreamDecoderPtr HttpIntegrationTest::sendRequestAndWaitForResponse( const Http::TestRequestHeaderMapImpl& request_headers, uint32_t request_body_size, - const Http::TestResponseHeaderMapImpl& response_headers, uint32_t response_size, - int upstream_index, std::chrono::milliseconds timeout) { + const Http::TestResponseHeaderMapImpl& response_headers, uint32_t response_body_size, + const std::vector& upstream_indices, std::chrono::milliseconds timeout) { ASSERT(codec_client_ != nullptr); // Send the request to Envoy. IntegrationStreamDecoderPtr response; @@ -392,18 +392,27 @@ IntegrationStreamDecoderPtr HttpIntegrationTest::sendRequestAndWaitForResponse( } else { response = codec_client_->makeHeaderOnlyRequest(request_headers); } - waitForNextUpstreamRequest(upstream_index, timeout); + waitForNextUpstreamRequest(upstream_indices, timeout); // Send response headers, and end_stream if there is no response body. - upstream_request_->encodeHeaders(response_headers, response_size == 0); + upstream_request_->encodeHeaders(response_headers, response_body_size == 0); // Send any response data, with end_stream true. - if (response_size) { - upstream_request_->encodeData(response_size, true); + if (response_body_size) { + upstream_request_->encodeData(response_body_size, true); } // Wait for the response to be read by the codec client. 
RELEASE_ASSERT(response->waitForEndStream(timeout), "unexpected timeout"); return response; } +IntegrationStreamDecoderPtr HttpIntegrationTest::sendRequestAndWaitForResponse( + const Http::TestRequestHeaderMapImpl& request_headers, uint32_t request_body_size, + const Http::TestResponseHeaderMapImpl& response_headers, uint32_t response_body_size, + uint64_t upstream_index, std::chrono::milliseconds timeout) { + return sendRequestAndWaitForResponse(request_headers, request_body_size, response_headers, + response_body_size, std::vector{upstream_index}, + timeout); +} + void HttpIntegrationTest::cleanupUpstreamAndDownstream() { // Close the upstream connection first. If there's an outstanding request, // closing the client may result in a FIN being sent upstream, and FakeConnectionBase::close diff --git a/test/integration/http_integration.h b/test/integration/http_integration.h index 68b61321a82bf..d6ba99bf7a02d 100644 --- a/test/integration/http_integration.h +++ b/test/integration/http_integration.h @@ -147,7 +147,13 @@ class HttpIntegrationTest : public BaseIntegrationTest { IntegrationStreamDecoderPtr sendRequestAndWaitForResponse( const Http::TestRequestHeaderMapImpl& request_headers, uint32_t request_body_size, const Http::TestResponseHeaderMapImpl& response_headers, uint32_t response_body_size, - int upstream_index = 0, std::chrono::milliseconds time = TestUtility::DefaultTimeout); + uint64_t upstream_index = 0, std::chrono::milliseconds timeout = TestUtility::DefaultTimeout); + + IntegrationStreamDecoderPtr sendRequestAndWaitForResponse( + const Http::TestRequestHeaderMapImpl& request_headers, uint32_t request_body_size, + const Http::TestResponseHeaderMapImpl& response_headers, uint32_t response_body_size, + const std::vector& upstream_indices, + std::chrono::milliseconds timeout = TestUtility::DefaultTimeout); // Wait for the end of stream on the next upstream stream on any of the provided fake upstreams. 
// Sets fake_upstream_connection_ to the connection and upstream_request_ to stream. diff --git a/test/integration/load_stats_integration_test.cc b/test/integration/load_stats_integration_test.cc index 213622cc8cf54..44af79d2321ba 100644 --- a/test/integration/load_stats_integration_test.cc +++ b/test/integration/load_stats_integration_test.cc @@ -23,7 +23,7 @@ class LoadStatsIntegrationTest : public Grpc::GrpcClientIntegrationParamTest, LoadStatsIntegrationTest() : HttpIntegrationTest(Http::CodecType::HTTP1, ipVersion()) { // We rely on some fairly specific load balancing picks in this test, so // determinize the schedule. - setDeterministic(); + setDeterministicValue(); } void addEndpoint(envoy::config::endpoint::v3::LocalityLbEndpoints& locality_lb_endpoints, diff --git a/test/integration/server.cc b/test/integration/server.cc index 451c951d20e3a..2bdeff2b63fd5 100644 --- a/test/integration/server.cc +++ b/test/integration/server.cc @@ -55,7 +55,7 @@ OptionsImpl createTestOptionsImpl(const std::string& config_path, const std::str IntegrationTestServerPtr IntegrationTestServer::create( const std::string& config_path, const Network::Address::IpVersion version, std::function server_ready_function, - std::function on_server_init_function, bool deterministic, + std::function on_server_init_function, absl::optional deterministic_value, Event::TestTimeSystem& time_system, Api::Api& api, bool defer_listener_finalization, ProcessObjectOptRef process_object, Server::FieldValidationConfig validation_config, uint32_t concurrency, std::chrono::seconds drain_time, Server::DrainStrategy drain_strategy, @@ -65,7 +65,7 @@ IntegrationTestServerPtr IntegrationTestServer::create( if (server_ready_function != nullptr) { server->setOnServerReadyCb(server_ready_function); } - server->start(version, on_server_init_function, deterministic, defer_listener_finalization, + server->start(version, on_server_init_function, deterministic_value, defer_listener_finalization, process_object, 
validation_config, concurrency, drain_time, drain_strategy, watermark_factory); return server; @@ -95,21 +95,19 @@ void IntegrationTestServer::unsetDynamicContextParam(absl::string_view resource_ }); } -void IntegrationTestServer::start(const Network::Address::IpVersion version, - std::function on_server_init_function, bool deterministic, - bool defer_listener_finalization, - ProcessObjectOptRef process_object, - Server::FieldValidationConfig validator_config, - uint32_t concurrency, std::chrono::seconds drain_time, - Server::DrainStrategy drain_strategy, - Buffer::WatermarkFactorySharedPtr watermark_factory) { +void IntegrationTestServer::start( + const Network::Address::IpVersion version, std::function on_server_init_function, + absl::optional deterministic_value, bool defer_listener_finalization, + ProcessObjectOptRef process_object, Server::FieldValidationConfig validator_config, + uint32_t concurrency, std::chrono::seconds drain_time, Server::DrainStrategy drain_strategy, + Buffer::WatermarkFactorySharedPtr watermark_factory) { ENVOY_LOG(info, "starting integration test server"); ASSERT(!thread_); - thread_ = api_.threadFactory().createThread([version, deterministic, process_object, + thread_ = api_.threadFactory().createThread([version, deterministic_value, process_object, validator_config, concurrency, drain_time, drain_strategy, watermark_factory, this]() -> void { - threadRoutine(version, deterministic, process_object, validator_config, concurrency, drain_time, - drain_strategy, watermark_factory); + threadRoutine(version, deterministic_value, process_object, validator_config, concurrency, + drain_time, drain_strategy, watermark_factory); }); // If any steps need to be done prior to workers starting, do them now. E.g., xDS pre-init. 
@@ -183,7 +181,8 @@ void IntegrationTestServer::serverReady() { } void IntegrationTestServer::threadRoutine(const Network::Address::IpVersion version, - bool deterministic, ProcessObjectOptRef process_object, + absl::optional deterministic_value, + ProcessObjectOptRef process_object, Server::FieldValidationConfig validation_config, uint32_t concurrency, std::chrono::seconds drain_time, Server::DrainStrategy drain_strategy, @@ -193,11 +192,13 @@ void IntegrationTestServer::threadRoutine(const Network::Address::IpVersion vers Thread::MutexBasicLockable lock; Random::RandomGeneratorPtr random_generator; - if (deterministic) { - random_generator = std::make_unique>(); + if (deterministic_value.has_value()) { + random_generator = std::make_unique>( + deterministic_value.value()); } else { random_generator = std::make_unique(); } + createAndRunEnvoyServer(options, time_system_, Network::Utility::getLocalAddress(version), *this, lock, *this, std::move(random_generator), process_object, watermark_factory); diff --git a/test/integration/server.h b/test/integration/server.h index 8ad4c633d6892..5144f2203646e 100644 --- a/test/integration/server.h +++ b/test/integration/server.h @@ -415,7 +415,7 @@ class IntegrationTestServer : public Logger::Loggable, static IntegrationTestServerPtr create( const std::string& config_path, const Network::Address::IpVersion version, std::function on_server_ready_function, - std::function on_server_init_function, bool deterministic, + std::function on_server_init_function, absl::optional deterministic_value, Event::TestTimeSystem& time_system, Api::Api& api, bool defer_listener_finalization = false, ProcessObjectOptRef process_object = absl::nullopt, Server::FieldValidationConfig validation_config = Server::FieldValidationConfig(), @@ -445,10 +445,11 @@ class IntegrationTestServer : public Logger::Loggable, void onWorkersStarted() override {} void start(const Network::Address::IpVersion version, - std::function on_server_init_function, bool 
deterministic, - bool defer_listener_finalization, ProcessObjectOptRef process_object, - Server::FieldValidationConfig validation_config, uint32_t concurrency, - std::chrono::seconds drain_time, Server::DrainStrategy drain_strategy, + std::function on_server_init_function, + absl::optional deterministic_value, bool defer_listener_finalization, + ProcessObjectOptRef process_object, Server::FieldValidationConfig validation_config, + uint32_t concurrency, std::chrono::seconds drain_time, + Server::DrainStrategy drain_strategy, Buffer::WatermarkFactorySharedPtr watermark_factory); void waitForCounterEq(const std::string& name, uint64_t value, @@ -553,7 +554,8 @@ class IntegrationTestServer : public Logger::Loggable, /** * Runs the real server on a thread. */ - void threadRoutine(const Network::Address::IpVersion version, bool deterministic, + void threadRoutine(const Network::Address::IpVersion version, + absl::optional deterministic_value, ProcessObjectOptRef process_object, Server::FieldValidationConfig validation_config, uint32_t concurrency, std::chrono::seconds drain_time, Server::DrainStrategy drain_strategy, diff --git a/test/integration/weighted_cluster_integration_test.cc b/test/integration/weighted_cluster_integration_test.cc new file mode 100644 index 0000000000000..e43d784419b70 --- /dev/null +++ b/test/integration/weighted_cluster_integration_test.cc @@ -0,0 +1,165 @@ +#include +#include +#include + +#include "envoy/config/bootstrap/v3/bootstrap.pb.h" +#include "envoy/config/route/v3/route_components.pb.h" +#include "envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.h" + +#include "test/integration/filters/repick_cluster_filter.h" +#include "test/integration/http_integration.h" + +#include "gtest/gtest.h" + +namespace Envoy { +namespace { + +class WeightedClusterIntegrationTest : public testing::Test, public HttpIntegrationTest { +public: + WeightedClusterIntegrationTest() + : 
HttpIntegrationTest(Http::CodecClient::Type::HTTP2, Network::Address::IpVersion::v6) {} + + void createUpstreams() override { + setUpstreamProtocol(FakeHttpConnection::Type::HTTP2); + // Add two fake upstreams + for (int i = 0; i < 2; ++i) { + addFakeUpstream(FakeHttpConnection::Type::HTTP2); + } + } + + void initializeConfig(const std::vector& weights) { + // Set the cluster configuration for `cluster_1` + config_helper_.addConfigModifier([](envoy::config::bootstrap::v3::Bootstrap& bootstrap) { + auto* cluster = bootstrap.mutable_static_resources()->add_clusters(); + cluster->MergeFrom(bootstrap.static_resources().clusters()[0]); + cluster->set_name(std::string(Envoy::RepickClusterFilter::ClusterName)); + ConfigHelper::setHttp2(*cluster); + }); + + // Add the custom filter. + config_helper_.addFilter("name: repick-cluster-filter"); + + // Modify route with weighted cluster configuration. + config_helper_.addConfigModifier( + [&weights]( + envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager& + hcm) { + auto* weighted_clusters = hcm.mutable_route_config() + ->mutable_virtual_hosts(0) + ->mutable_routes(0) + ->mutable_route() + ->mutable_weighted_clusters(); + + // Add a cluster with `name` specified. + auto* cluster = weighted_clusters->add_clusters(); + cluster->set_name("cluster_0"); + cluster->mutable_weight()->set_value(weights[0]); + + // Add a cluster with `cluster_header` specified. 
+ cluster = weighted_clusters->add_clusters(); + cluster->set_cluster_header(std::string(Envoy::RepickClusterFilter::ClusterHeaderName)); + cluster->mutable_weight()->set_value(weights[1]); + + weighted_clusters->mutable_total_weight()->set_value( + std::accumulate(weights.begin(), weights.end(), 0UL)); + }); + + HttpIntegrationTest::initialize(); + } + + const std::vector& getDefaultWeights() { return default_weights_; } + + void sendRequestAndValidateResponse(const std::vector& upstream_indices) { + // Create a client aimed at Envoy’s default HTTP port. + codec_client_ = makeHttpConnection(makeClientConnection((lookupPort("http")))); + + // Create some request headers. + Http::TestRequestHeaderMapImpl request_headers{{":method", "GET"}, + {":path", "/test/long/url"}, + {":scheme", "http"}, + {":authority", "host"}}; + + // Send the request headers from the client, wait until they are received + // upstream. When they are received, send the default response headers from + // upstream and wait until they are received at by client. + IntegrationStreamDecoderPtr response = sendRequestAndWaitForResponse( + request_headers, 0, default_response_headers_, 0, upstream_indices); + + // Verify the proxied request was received upstream, as expected. + EXPECT_TRUE(upstream_request_->complete()); + EXPECT_EQ(0U, upstream_request_->bodyLength()); + // Verify the proxied response was received downstream, as expected. + EXPECT_TRUE(response->complete()); + EXPECT_EQ("200", response->headers().getStatusValue()); + EXPECT_EQ(0U, response->body().size()); + + // Perform the clean-up. + cleanupUpstreamAndDownstream(); + } + +private: + std::vector default_weights_ = {20, 30}; +}; + +// Steer the traffic (i.e. send the request) to the weighted cluster with `name` specified. 
+TEST_F(WeightedClusterIntegrationTest, SteerTrafficToOneClusterWithName) { + setDeterministicValue(); + initializeConfig(getDefaultWeights()); + + // The expected destination cluster upstream is index 0 since the selected + // value is set to 0 indirectly via `setDeterministicValue()` above to set the weight to 0. + sendRequestAndValidateResponse({0}); + + // Check that the expected upstream cluster has incoming request. + EXPECT_EQ(test_server_->counter("cluster.cluster_0.upstream_cx_total")->value(), 1); +} + +// Steer the traffic (i.e. send the request) to the weighted cluster with `cluster_header` +// specified. +TEST_F(WeightedClusterIntegrationTest, SteerTrafficToOneClusterWithHeader) { + const std::vector& default_weights = getDefaultWeights(); + + // The index of the cluster with `cluster_header` specified is 1. + int cluster_header_index = 1; + // Set the deterministic value to the accumulation of the weights of all clusters with + // `name`, so we can route the traffic to the first cluster with `cluster_header` based on + // weighted cluster selection algorithm in `RouteEntryImplBase::pickWeightedCluster()`. + uint64_t deterministric_value = + std::accumulate(default_weights.begin(), default_weights.begin() + cluster_header_index, 0UL); + setDeterministicValue(deterministric_value); + + initializeConfig(default_weights); + + sendRequestAndValidateResponse({static_cast(cluster_header_index)}); + + // Check that the expected upstream cluster has incoming request. + std::string target_name = + absl::StrFormat("cluster.cluster_%d.upstream_cx_total", cluster_header_index); + EXPECT_EQ(test_server_->counter(target_name)->value(), 1); +} + +// Steer the traffic (i.e. send the request) to the weighted clusters randomly based on weight. 
+TEST_F(WeightedClusterIntegrationTest, SplitTrafficRandomly) { + std::vector weights = {50, 50}; + int upstream_count = weights.size(); + initializeConfig(weights); + + std::vector upstream_indices(upstream_count); + std::iota(std::begin(upstream_indices), std::end(upstream_indices), 0); + int request_num = 20; + for (int i = 0; i < request_num; ++i) { + // The expected destination cluster upstream is randomly selected based on + // weight, so all the upstreams need to be available for selection. + sendRequestAndValidateResponse(upstream_indices); + } + + std::string target_name; + // Check that all the upstream clusters have been routed to at least once. + for (int i = 0; i < upstream_count; ++i) { + target_name = absl::StrFormat("cluster.cluster_%d.upstream_cx_total", i); + EXPECT_GE(test_server_->counter(target_name)->value(), 1); + } +} + +} // namespace +} // namespace Envoy diff --git a/test/mocks/common.cc b/test/mocks/common.cc index fe64936013253..3f6c122d3671f 100644 --- a/test/mocks/common.cc +++ b/test/mocks/common.cc @@ -14,6 +14,11 @@ namespace Random { MockRandomGenerator::MockRandomGenerator() { ON_CALL(*this, uuid()).WillByDefault(Return(uuid_)); } +MockRandomGenerator::MockRandomGenerator(uint64_t value) : value_(value) { + ON_CALL(*this, random()).WillByDefault(Return(value_)); + ON_CALL(*this, uuid()).WillByDefault(Return(uuid_)); +} + MockRandomGenerator::~MockRandomGenerator() = default; } // namespace Random diff --git a/test/mocks/common.h b/test/mocks/common.h index b887865fe9bf5..1fec75bb256cb 100644 --- a/test/mocks/common.h +++ b/test/mocks/common.h @@ -110,11 +110,13 @@ namespace Random { class MockRandomGenerator : public RandomGenerator { public: MockRandomGenerator(); + MockRandomGenerator(uint64_t value); ~MockRandomGenerator() override; MOCK_METHOD(uint64_t, random, ()); MOCK_METHOD(std::string, uuid, ()); + uint64_t value_; const std::string uuid_{"a121e9e1-feae-4136-9e0e-6fac343d56c9"}; }; diff --git 
a/tools/spelling/spelling_dictionary.txt b/tools/spelling/spelling_dictionary.txt index 96dda9ee1dc88..61e431463cfe2 100644 --- a/tools/spelling/spelling_dictionary.txt +++ b/tools/spelling/spelling_dictionary.txt @@ -22,6 +22,7 @@ AWS BACKTRACE BSON BPF +Repick btree CAS CB From 2551c75cb3648ca75f64e81d3cf258a767bf581b Mon Sep 17 00:00:00 2001 From: phlax Date: Tue, 21 Sep 2021 19:33:47 +0100 Subject: [PATCH 084/121] docs: Publish rst to storage (#18173) Signed-off-by: Ryan Northey Signed-off-by: gayang --- .azure-pipelines/pipelines.yml | 10 ++++++++++ ci/upload_gcs_artifact.sh | 8 ++++++-- docs/build.sh | 32 ++++++++++++++++++++++++++++++-- docs/publish.sh | 6 +++++- 4 files changed, 51 insertions(+), 5 deletions(-) diff --git a/.azure-pipelines/pipelines.yml b/.azure-pipelines/pipelines.yml index ba39bb03547bb..0dd71f673b8fd 100644 --- a/.azure-pipelines/pipelines.yml +++ b/.azure-pipelines/pipelines.yml @@ -381,6 +381,15 @@ stages: GCP_SERVICE_ACCOUNT_KEY: $(GcpServiceAccountKey) displayName: "Generate docs" + - script: | + ci/run_envoy_docker.sh 'ci/upload_gcs_artifact.sh /source/generated/docs docs' + displayName: "Upload Docs to GCS" + env: + ENVOY_DOCKER_BUILD_DIR: $(Build.StagingDirectory) + GCP_SERVICE_ACCOUNT_KEY: $(GcpServiceAccountKey) + GCS_ARTIFACT_BUCKET: $(GcsArtifactBucket) + condition: eq(variables['Build.SourceBranch'], 'refs/heads/main') + - task: InstallSSHKey@0 inputs: hostName: "github.com ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAq2A7hRGmdnm9tUDbO9IDSwBK6TbQa+PXYPCPy6rbTrTtw7PHkccKrpp0yVhp5HdEIcKr6pLlVDBfOLX9QUsyCOV0wzfjIJNlGEYsdlLJizHhbn2mUjvSAHQqZETYP81eFzLQNnPHt4EVVUh7VfDESU84KezmD5QlWpXLmvU31/yMf+Se8xhHTvKSCZIFImWwoG6mbUoWf9nzpIoaSjB+weqqUUmpaaasXVal72J+UX2B+2RPW3RcT0eOzQgqlJL3RKrTJvdsjE3JEAvGq3lGHSZXy28G3skua2SmVi/w4yCE6gbODqnTWlg7+wC604ydGXA8VJiS5ap43JXiUFFAaQ==" @@ -393,6 +402,7 @@ stages: workingDirectory: $(Build.SourcesDirectory) env: AZP_BRANCH: $(Build.SourceBranch) + NETLIFY_TRIGGER_URL: $(NetlifyTriggerURL) - stage: verify 
dependsOn: ["docker"] diff --git a/ci/upload_gcs_artifact.sh b/ci/upload_gcs_artifact.sh index 68d8afc4d94c3..bb952610392f8 100755 --- a/ci/upload_gcs_artifact.sh +++ b/ci/upload_gcs_artifact.sh @@ -18,8 +18,12 @@ if [ ! -d "${SOURCE_DIRECTORY}" ]; then exit 1 fi -if [[ "$BUILD_REASON" == "PullRequest" ]]; then - # non-main upload to the last commit sha (first 7 chars) in the developers branch +if [[ "$BUILD_REASON" == "PullRequest" ]] || [[ "$TARGET_SUFFIX" == "docs" ]]; then + # upload to the last commit sha (first 7 chars), either + # - docs build on main + # -> https://storage.googleapis.com/envoy-postsubmit/$UPLOAD_PATH/docs/envoy-docs-rst.tar.gz + # - PR build (commit sha from the developers branch) + # -> https://storage.googleapis.com/envoy-pr/$UPLOAD_PATH/$TARGET_SUFFIX UPLOAD_PATH="$(git log --pretty=%P -n 1 | cut -d' ' -f2 | head -c7)" else UPLOAD_PATH="${SYSTEM_PULLREQUEST_PULLREQUESTNUMBER:-${BUILD_SOURCEBRANCHNAME}}" fi diff --git a/docs/build.sh b/docs/build.sh index 55e0462fc880e..9cffa9a9dd97b 100755 --- a/docs/build.sh +++ b/docs/build.sh @@ -16,14 +16,21 @@ if [[ ! $(command -v jq) ]]; then exit 1 fi +MAIN_BRANCH="refs/heads/main" RELEASE_TAG_REGEX="^refs/tags/v.*" if [[ "${AZP_BRANCH}" =~ ${RELEASE_TAG_REGEX} ]]; then DOCS_TAG="${AZP_BRANCH/refs\/tags\//}" export DOCS_TAG + # no need to build rst explicitly, just html + HTML_ONLY=true else BUILD_SHA=$(git rev-parse HEAD) export BUILD_SHA + if [[ "${AZP_BRANCH}" == "${MAIN_BRANCH}" ]]; then + # no need to build html, just rst + RST_ONLY=true + fi fi # This is for local RBE setup, should be no-op for builds without RBE setting in bazelrc files. @@ -33,9 +40,30 @@ BAZEL_BUILD_OPTIONS+=( "--action_env=BUILD_SHA" "--action_env=SPHINX_SKIP_CONFIG_VALIDATION") -bazel build "${BAZEL_BUILD_OPTIONS[@]}" //docs:html +# Building html/rst is determined by the needs of CI but can be overridden in dev. 
+if [[ -z "${RST_ONLY}" ]] || [[ -n "${DOCS_BUILD_HTML}" ]]; then + BUILD_HTML=1 +fi +if [[ -z "${HTML_ONLY}" ]] || [[ -n "${DOCS_BUILD_RST}" ]]; then + BUILD_RST=1 +fi + +# Build html/rst +if [[ -n "${BUILD_RST}" ]]; then + bazel build "${BAZEL_BUILD_OPTIONS[@]}" //docs:rst +fi +if [[ -n "${BUILD_HTML}" ]]; then + bazel build "${BAZEL_BUILD_OPTIONS[@]}" //docs:html +fi [[ -z "${DOCS_OUTPUT_DIR}" ]] && DOCS_OUTPUT_DIR=generated/docs rm -rf "${DOCS_OUTPUT_DIR}" mkdir -p "${DOCS_OUTPUT_DIR}" -tar -xf bazel-bin/docs/html.tar -C "$DOCS_OUTPUT_DIR" + +# Save html/rst to output directory +if [[ -n "${BUILD_HTML}" ]]; then + tar -xf bazel-bin/docs/html.tar -C "$DOCS_OUTPUT_DIR" +fi +if [[ -n "${BUILD_RST}" ]]; then + gzip -c bazel-bin/docs/rst.tar > "$DOCS_OUTPUT_DIR"/envoy-docs-rst.tar.gz +fi diff --git a/docs/publish.sh b/docs/publish.sh index 1c65cdecae50f..81f01416338bd 100755 --- a/docs/publish.sh +++ b/docs/publish.sh @@ -19,7 +19,11 @@ RELEASE_TAG_REGEX="^refs/tags/v.*" if [[ "${AZP_BRANCH}" =~ ${RELEASE_TAG_REGEX} ]]; then PUBLISH_DIR="${CHECKOUT_DIR}"/docs/envoy/"${AZP_BRANCH/refs\/tags\//}" elif [[ "$AZP_BRANCH" == "${MAIN_BRANCH}" ]]; then - PUBLISH_DIR="${CHECKOUT_DIR}"/docs/envoy/latest + if [[ -n "$NETLIFY_TRIGGER_URL" ]]; then + echo "Triggering netlify docs build for (${BUILD_SHA})" + curl -X POST -d "$BUILD_SHA" "$NETLIFY_TRIGGER_URL" + fi + exit 0 else echo "Ignoring docs push" exit 0 From 323c29deb1761211eb6d65ca3b4ae2c56c52f595 Mon Sep 17 00:00:00 2001 From: phlax Date: Wed, 22 Sep 2021 02:59:53 +0100 Subject: [PATCH 085/121] tooling: Use upstream base libs (#17978) Signed-off-by: Ryan Northey Signed-off-by: gayang --- tools/base/BUILD | 33 - tools/base/aio.py | 509 ------- tools/base/checker.py | 378 ----- tools/base/requirements.in | 4 + tools/base/requirements.txt | 2 + tools/base/runner.py | 300 ---- tools/base/tests/test_aio.py | 1289 ----------------- tools/base/tests/test_checker.py | 1017 ------------- tools/base/tests/test_runner.py | 710 
--------- tools/base/tests/test_utils.py | 228 --- tools/base/utils.py | 142 -- tools/docs/BUILD | 6 +- .../docs/generate_extensions_security_rst.py | 2 +- tools/docs/rst_check.py | 2 +- tools/extensions/BUILD | 5 +- tools/extensions/extensions_check.py | 2 +- tools/protodoc/BUILD | 2 +- tools/protodoc/protodoc.py | 3 +- tools/testing/BUILD | 12 +- tools/testing/all_pytests.py | 2 +- tools/testing/python_coverage.py | 2 +- tools/testing/python_pytest.py | 2 +- tools/testing/tests/test_all_pytests.py | 2 +- 23 files changed, 28 insertions(+), 4626 deletions(-) delete mode 100644 tools/base/aio.py delete mode 100644 tools/base/checker.py delete mode 100644 tools/base/runner.py delete mode 100644 tools/base/tests/test_aio.py delete mode 100644 tools/base/tests/test_checker.py delete mode 100644 tools/base/tests/test_runner.py delete mode 100644 tools/base/tests/test_utils.py delete mode 100644 tools/base/utils.py diff --git a/tools/base/BUILD b/tools/base/BUILD index 0a4488ddb4d33..1169240338686 100644 --- a/tools/base/BUILD +++ b/tools/base/BUILD @@ -1,44 +1,11 @@ load("@rules_python//python:defs.bzl", "py_binary") load("@base_pip3//:requirements.bzl", "requirement") load("//bazel:envoy_build_system.bzl", "envoy_package") -load("//tools/base:envoy_python.bzl", "envoy_py_library") licenses(["notice"]) # Apache 2 envoy_package() -envoy_py_library( - "tools.base.aio", - deps = [ - requirement("aio.functional"), - ], -) - -envoy_py_library( - "tools.base.checker", - deps = [ - ":runner", - ], -) - -envoy_py_library( - "tools.base.runner", - deps = [ - requirement("colorama"), - requirement("coloredlogs"), - requirement("frozendict"), - requirement("verboselogs"), - ], -) - -envoy_py_library( - "tools.base.utils", - deps = [ - requirement("pyyaml"), - requirement("setuptools"), - ], -) - py_binary( name = "bazel_query", srcs = ["bazel_query.py"], diff --git a/tools/base/aio.py b/tools/base/aio.py deleted file mode 100644 index a07787c31133f..0000000000000 --- 
a/tools/base/aio.py +++ /dev/null @@ -1,509 +0,0 @@ -import asyncio -import inspect -import os -import subprocess -import types -from concurrent.futures import Executor, ProcessPoolExecutor -from functools import cached_property, partial -from typing import ( - Any, AsyncGenerator, AsyncIterable, AsyncIterator, Awaitable, Iterable, Iterator, List, - Optional, Union) - -from aio.functional import async_property - - -class ConcurrentError(Exception): - """Raised when given inputs/awaitables are incorrect""" - pass - - -class ConcurrentIteratorError(ConcurrentError): - """Raised when iteration of provided awaitables fails""" - pass - - -class ConcurrentExecutionError(ConcurrentError): - """Raised when execution of a provided awaitable fails""" - pass - - -class async_subprocess: # noqa: N801 - - @classmethod - async def parallel( - cls, commands: Iterable[Iterable[str]], - **kwargs) -> AsyncGenerator[subprocess.CompletedProcess, Iterable[Iterable[str]]]: - """Run external subprocesses in parallel - - Yields `subprocess.CompletedProcess` results as they are completed. - - Example usage: - - ``` - import asyncio - - from tools.base.aio import async_subprocess - - async def run_system_commands(commands): - async for result in async_subprocess.parallel(commands, capture_output=True): - print(result.returncode) - print(result.stdout) - print(result.stderr) - - asyncio.run(run_system_commands(["whoami"] for i in range(0, 5))) - ``` - """ - # Using a `ProcessPoolExecutor` or `ThreadPoolExecutor` here is somewhat - # arbitrary as subproc will spawn a new process regardless. - # Either way - using a custom executor of either type gives considerable speedup, - # most likely due to the number of workers allocated. - # In my testing, `ProcessPoolExecutor` gave a very small speedup over a large - # number of tasks, despite any additional overhead of creating the executor. - # Without `max_workers` set `ProcessPoolExecutor` defaults to the number of cpus - # on the machine. 
- with ProcessPoolExecutor() as pool: - futures = asyncio.as_completed( - tuple( - asyncio.ensure_future(cls.run(command, executor=pool, **kwargs)) - for command in commands)) - for result in futures: - yield await result - - @classmethod - async def run( - cls, - *args, - loop: Optional[asyncio.AbstractEventLoop] = None, - executor: Optional[Executor] = None, - **kwargs) -> subprocess.CompletedProcess: - """This is an asyncio wrapper for `subprocess.run` - - It can be used in a similar way to `subprocess.run` but its non-blocking to - the main thread. - - Example usage: - - ``` - import asyncio - - from tools.base.aio import async_subprocess - - async def run_system_command(): - result = await async_subprocess.run(["whoami"], capture_output=True) - print(result.returncode) - print(result.stdout) - print(result.stderr) - - asyncio.run(run_system_command()) - - ``` - - By default it will spawn the process using the main event loop, and that loop's - default (`ThreadPool`) executor. - - You can provide the loop and/or the executor to change this behaviour. - """ - loop = loop or asyncio.get_running_loop() - return await loop.run_in_executor(executor, partial(subprocess.run, *args, **kwargs)) - - -_sentinel = object() - - -class concurrent: # noqa: N801 - """This utility provides very similar functionality to - `asyncio.as_completed` in that it runs coroutines in concurrent, yielding the - results as they are available. - - There are a couple of differences: - - - `coros` can be any `iterables` including sync/async `generators` - - `limit` can be supplied to specify the maximum number of concurrent tasks - - Setting `limit` to `-1` will make all tasks run in concurrent. - - The default is `number of cores + 4` to a maximum of `32`. - - For network tasks it might make sense to set the concurrency `limit` lower - than the default, if, for example, opening many concurrent connections will trigger - rate-limiting or soak bandwidth. 
- - If an error is raised while trying to iterate the provided coroutines, the - error is wrapped in an `ConcurrentIteratorError` and is raised immediately. - - In this case, no further handling occurs, and `yield_exceptions` has no - effect. - - Any errors raised while trying to create or run tasks are wrapped in - `ConcurrentError`. - - Any errors raised during task execution are wrapped in - `ConcurrentExecutionError`. - - If you specify `yield_exceptions` as `True` then the wrapped errors will be - yielded in the results. - - If `yield_exceptions` is False (the default), then the wrapped error will be - raised immediately. - - If you use any kind of `Generator` or `AsyncGenerator` to produce the - awaitables, and `yield_exceptions` is `False`, in the event that an error - occurs, it is your responsibility to `close` remaining awaitables that you - might have created but which have not already been fired. - - This utility is mostly useful for concurrentizing io-bound (as opposed to - cpu-bound) tasks. 
- - Example usage: - - ``` - import random - - from tools.base import aio - - async def task_to_run(i): - print(f"{i} starting") - wait = random.random() * 10 - await asyncio.sleep(wait) - return i, wait - - async def run(coros): - async for (i, wait) in aio.concurrent(coros, limit=3): - print(f"{i} waited {wait}") - - def provider(): - for i in range(0, 10): - yield task_to_run(i) - - asyncio.run(run(provider())) - ``` - """ - - def __init__( - self, - coros: Union[types.AsyncGeneratorType, AsyncIterable[Awaitable], - AsyncIterator[Awaitable], types.GeneratorType, Iterator[Awaitable], - Iterable[Awaitable]], - yield_exceptions: Optional[bool] = False, - limit: Optional[int] = None): - self._coros = coros - self._limit = limit - self._running: List[asyncio.Task] = [] - self.yield_exceptions = yield_exceptions - - def __aiter__(self) -> AsyncIterator: - """Start a coroutine task to process the submit queue, and return - an async generator to deliver results back as they arrive - """ - self.submit_task = asyncio.create_task(self.submit()) - return self.output() - - @property - def active(self) -> bool: - """Checks whether the iterator is active, either because it - hasn't finished submitting or because there are still tasks running - """ - return self.submitting or self.running - - @property - def closed(self) -> bool: - """If an unhandled error occurs, the generator is closed and no further - processing should happen - """ - return self.closing_lock.locked() - - @cached_property - def closing_lock(self) -> asyncio.Lock: - """Flag to indicate whether the generator has been closed""" - return asyncio.Lock() - - @cached_property - def consumes_async(self) -> bool: - """Provided coros iterable is some kind of async provider""" - return isinstance(self._coros, (types.AsyncGeneratorType, AsyncIterator, AsyncIterable)) - - @cached_property - def consumes_generator(self) -> bool: - """Provided coros iterable is some kind of generator""" - return isinstance(self._coros, 
(types.AsyncGeneratorType, types.GeneratorType)) - - @async_property - async def coros(self) -> AsyncIterator[Union[ConcurrentIteratorError, Awaitable]]: - """An async iterator of the provided coroutines""" - coros = self.iter_coros() - try: - async for coro in coros: - yield coro - except GeneratorExit: - # If we exit before we finish generating we land here (ie error was raised) - # In this case we need to tell the (possibly) async generating provider to - # also close. - try: - await coros.aclose() # type:ignore - finally: - # Suppress errors closing the provider generator - # This can raise a further `GeneratorExit` but it will stop providing. - return - - @property - def default_limit(self) -> int: - """Default is to use cpu+4 to a max of 32 coroutines""" - # This reflects the default for asyncio's `ThreadPoolExecutor`, this is a fairly - # arbitrary number to use, but it seems like a reasonable default. - return min(32, (os.cpu_count() or 0) + 4) - - @cached_property - def limit(self) -> int: - """The limit for concurrent coroutines""" - return self._limit or self.default_limit - - @cached_property - def nolimit(self) -> bool: - """Flag indicating no limit to concurrency""" - return self.limit == -1 - - @cached_property - def out(self) -> asyncio.Queue: - """Queue of results to yield back""" - return asyncio.Queue() - - @property - def running(self) -> bool: - """Flag to indicate whether any tasks are running""" - return not self.running_queue.empty() - - @cached_property - def running_queue(self) -> asyncio.Queue: - """Queue which is incremented/decremented as tasks begin/end - - This is for tracking when there are no longer any tasks running. - - A queue is used here as opposed to other synchronization primitives, as - it allows us to get the size and emptiness. - - The queue values are `None`. 
- """ - return asyncio.Queue() - - @cached_property - def running_tasks(self) -> List[asyncio.Task]: - """Currently running asyncio tasks""" - return self._running - - @cached_property - def sem(self) -> asyncio.Semaphore: - """A sem lock to limit the number of concurrent tasks""" - return asyncio.Semaphore(self.limit) - - @cached_property - def submission_lock(self) -> asyncio.Lock: - """Submission lock to indicate when submission is complete""" - return asyncio.Lock() - - @property - def submitting(self) -> bool: - """Flag to indicate whether we are still submitting coroutines""" - return self.submission_lock.locked() - - async def cancel(self) -> None: - """Stop the submission queue, cancel running tasks, close pending coroutines. - - This is triggered when an unhandled error occurs and the queue should - stop processing and bail. - """ - # Kitchen is closed - await self.close() - - # No more waiting - if not self.nolimit: - self.sem.release() - - # Cancel tasks - await self.cancel_tasks() - - # Close pending coroutines - await self.close_coros() - - # let the submission queue die - await self.submit_task - - async def cancel_tasks(self) -> None: - """Cancel any running tasks""" - - for running in self.running_tasks: - running.cancel() - try: - await running - finally: - # ignore errors, we are dying anyway - continue - - async def close(self) -> None: - """Close the generator, prevent any further processing""" - if not self.closed: - await self.closing_lock.acquire() - - async def close_coros(self) -> None: - """Close provided coroutines (unless the provided coros is a generator)""" - if self.consumes_generator: - # If we have a generator, dont blow/create/wait upon any more items - return - - async for coro in self.iter_coros(): - try: - # this could be an `aio.ConcurrentError` and not have a - # `close` method, but as we are asking for forgiveness anyway, - # no point in looking before we leap. 
- coro.close() # type:ignore - finally: - # ignore errors, we are dying anyway - continue - - async def create_task(self, coro: Awaitable) -> None: - """Create an asyncio task from the coroutine, and remember it""" - task = asyncio.create_task(self.task(coro)) - self.remember_task(task) - self.running_queue.put_nowait(None) - - async def exit_on_completion(self) -> None: - """Send the exit signal to the output queue""" - if not self.active and not self.closed: - await self.out.put(_sentinel) - - def forget_task(self, task: asyncio.Task) -> None: - """Task? what task?""" - if self.closed: - # If we are closing, don't remove, as this has been triggered - # by cancellation. - return - self.running_tasks.remove(task) - - async def iter_coros(self) -> AsyncIterator[Union[ConcurrentIteratorError, Awaitable]]: - """Iterate provided coros either synchronously or asynchronously, - yielding the awaitables asynchoronously. - """ - try: - if self.consumes_async: - async for coro in self._coros: # type:ignore - yield coro - else: - for coro in self._coros: # type:ignore - yield coro - except BaseException as e: - # Catch all errors iterating (other errors are caught elsewhere) - # If iterating raises, wrap the error and send it to `submit` and - # and `output` to close the queues. - yield ConcurrentIteratorError(e) - - async def on_task_complete(self, result: Any, decrement: Optional[bool] = True) -> None: - """Output the result, release the sem lock, decrement the running - count, and notify output queue if complete. - """ - if self.closed: - # Results can come back after the queue has closed as they are - # cancelled. - # In that case, nothing further to do. 
- return - - # Give result to output - await self.out.put(result) - - if not self.nolimit: - # Release the sem.lock - self.sem.release() - if decrement: - # Decrement the running_queue if it was incremented - self.running_queue.get_nowait() - # Exit if nothing left to do - await self.exit_on_completion() - - async def output(self) -> AsyncIterator: - """Asynchronously yield results as they become available""" - while True: - # Wait for some output - result = await self.out.get() - if result is _sentinel: - # All done! - await self.close() - break - elif self.should_error(result): - # Raise an error and bail! - await self.cancel() - raise result - yield result - - async def ready(self) -> bool: - """Wait for the sem.lock and indicate availability in the submission - queue - """ - if self.closed: - return False - if not self.nolimit: - await self.sem.acquire() - # We check before and after acquiring the sem.lock to see whether - # we are `closed` as these events can be separated in - # time/procedure. 
- if self.closed: - return False - return True - - def remember_task(self, task: asyncio.Task) -> None: - """Remember a scheduled asyncio task, in case it needs to be - cancelled - """ - self.running_tasks.append(task) - task.add_done_callback(self.forget_task) - - def should_error(self, result: Any) -> bool: - """Check a result type and whether it should raise an error""" - return ( - isinstance(result, ConcurrentIteratorError) - or (isinstance(result, ConcurrentError) and not self.yield_exceptions)) - - async def submit(self) -> None: - """Process the iterator of coroutines as a submission queue""" - await self.submission_lock.acquire() - async for coro in self.coros: - if isinstance(coro, ConcurrentIteratorError): - # Iteration error, exit now - await self.out.put(coro) - break - if not await self.ready(): - # Queue is closing, get out of here - try: - # Ensure the last coro to be produced/generated is closed, - # as it will not be scheduled as a task, and in the case - # of generators it wont be closed any other way. 
- coro.close() - finally: - # ignore all coro closing errors, we are dying - break - # Check the supplied coro is awaitable - try: - self.validate_coro(coro) - except ConcurrentError as e: - await self.on_task_complete(e, decrement=False) - continue - # All good, create a task - await self.create_task(coro) - self.submission_lock.release() - # If cleanup of the submission queue has taken longer than processing - # we need to manually close - await self.exit_on_completion() - - async def task(self, coro: Awaitable) -> None: - """Task wrapper to catch/wrap errors and output awaited results""" - try: - result = await coro - except BaseException as e: - result = ConcurrentExecutionError(e) - finally: - await self.on_task_complete(result) - - def validate_coro(self, coro: Awaitable) -> None: - """Validate that a provided coroutine is actually awaitable""" - if not inspect.isawaitable(coro): - raise ConcurrentError(f"Provided input was not a coroutine: {coro}") - - if inspect.getcoroutinestate(coro) != inspect.CORO_CREATED: - raise ConcurrentError(f"Provided coroutine has already been fired: {coro}") diff --git a/tools/base/checker.py b/tools/base/checker.py deleted file mode 100644 index 4feed282d2ec1..0000000000000 --- a/tools/base/checker.py +++ /dev/null @@ -1,378 +0,0 @@ -import argparse -import asyncio -import logging -import pathlib -from functools import cached_property -from typing import Any, Iterable, Optional, Sequence, Tuple, Type - -from tools.base import runner - - -class BaseChecker(runner.Runner): - """Runs check methods prefixed with `check_` and named in `self.checks` - - Check methods should call the `self.warn`, `self.error` or `self.succeed` - depending upon the outcome of the checks. - """ - _active_check = "" - checks: Tuple[str, ...] 
= () - - def __init__(self, *args): - super().__init__(*args) - self.success = {} - self.errors = {} - self.warnings = {} - - @property - def active_check(self) -> str: - return self._active_check - - @property - def diff(self) -> bool: - """Flag to determine whether the checker should print diffs to the console""" - return self.args.diff - - @property - def error_count(self) -> int: - """Count of all errors found""" - return sum(len(e) for e in self.errors.values()) - - @property - def exiting(self): - return "exiting" in self.errors - - @property - def failed(self) -> dict: - """Dictionary of errors per check""" - return dict((k, (len(v))) for k, v in self.errors.items()) - - @property - def fix(self) -> bool: - """Flag to determine whether the checker should attempt to fix found problems""" - return self.args.fix - - @property - def has_failed(self) -> bool: - """Shows whether there are any failures""" - # add logic for warn/error - return bool(self.failed or self.warned) - - @cached_property - def path(self) -> pathlib.Path: - """The "path" - usually Envoy src dir. 
This is used for finding configs for the tooling and should be a dir""" - try: - path = pathlib.Path(self.args.path or self.args.paths[0]) - except IndexError: - raise self.parser.error( - "Missing path: `path` must be set either as an arg or with --path") - if not path.is_dir(): - raise self.parser.error( - "Incorrect path: `path` must be a directory, set either as first arg or with --path" - ) - return path - - @property - def paths(self) -> list: - """List of paths to apply checks to""" - return self.args.paths or [self.path] - - @property - def show_summary(self) -> bool: - """Show a summary at the end or not""" - return bool( - not self.exiting and (self.args.summary or self.error_count or self.warning_count)) - - @property - def status(self) -> dict: - """Dictionary showing current success/warnings/errors""" - return dict( - success=self.success_count, - errors=self.error_count, - warnings=self.warning_count, - failed=self.failed, - warned=self.warned, - succeeded=self.succeeded) - - @property - def succeeded(self) -> dict: - """Dictionary of successful checks grouped by check type""" - return dict((k, (len(v))) for k, v in self.success.items()) - - @property - def success_count(self) -> int: - """Current count of successful checks""" - return sum(len(e) for e in self.success.values()) - - @cached_property - def summary(self) -> "CheckerSummary": - """Instance of the checker's summary class""" - return self.summary_class(self) - - @property - def summary_class(self) -> Type["CheckerSummary"]: - """Checker's summary class""" - return CheckerSummary - - @property - def warned(self) -> dict: - """Dictionary of warned checks grouped by check type""" - return dict((k, (len(v))) for k, v in self.warnings.items()) - - @property - def warning_count(self) -> int: - """Current count of warned checks""" - return sum(len(e) for e in self.warnings.values()) - - def add_arguments(self, parser: argparse.ArgumentParser) -> None: - """Add arguments to the arg parser""" - 
super().add_arguments(parser) - parser.add_argument( - "--fix", action="store_true", default=False, help="Attempt to fix in place") - parser.add_argument( - "--diff", - action="store_true", - default=False, - help="Display a diff in the console where available") - parser.add_argument( - "--warning", - "-w", - choices=["warn", "error"], - default="warn", - help="Handle warnings as warnings or errors") - parser.add_argument( - "--summary", action="store_true", default=False, help="Show a summary of check runs") - parser.add_argument( - "--summary-errors", - type=int, - default=5, - help="Number of errors to show in the summary, -1 shows all") - parser.add_argument( - "--summary-warnings", - type=int, - default=5, - help="Number of warnings to show in the summary, -1 shows all") - parser.add_argument( - "--check", - "-c", - choices=self.checks, - nargs="*", - help="Specify which checks to run, can be specified for multiple checks") - for check in self.checks: - parser.add_argument( - f"--config-{check}", default="", help=f"Custom configuration for the {check} check") - parser.add_argument( - "--path", - "-p", - default=None, - help= - "Path to the test root (usually Envoy source dir). If not specified the first path of paths is used" - ) - parser.add_argument( - "paths", - nargs="*", - help= - "Paths to check. 
At least one path must be specified, or the `path` argument should be provided" - ) - - def error( - self, - name: str, - errors: Optional[Iterable[str]], - log: bool = True, - log_type: str = "error") -> int: - """Record (and log) errors for a check type""" - if not errors: - return 0 - self.errors[name] = self.errors.get(name, []) - self.errors[name].extend(errors) - if not log: - return 1 - for message in errors: - getattr(self.log, log_type)(f"[{name}] {message}") - return 1 - - def exit(self) -> int: - self.log.handlers[0].setLevel(logging.FATAL) - self.stdout.handlers[0].setLevel(logging.FATAL) - return self.error("exiting", ["Keyboard exit"], log_type="fatal") - - def get_checks(self) -> Sequence[str]: - """Get list of checks for this checker class filtered according to user args""" - return ( - self.checks if not self.args.check else - [check for check in self.args.check if check in self.checks]) - - def on_check_begin(self, check: str) -> Any: - self._active_check = check - self.log.notice(f"[{check}] Running check") - - def on_check_run(self, check: str) -> Any: - """Callback hook called after each check run""" - self._active_check = "" - if self.exiting: - return - elif check in self.errors: - self.log.error(f"[{check}] Check failed") - elif check in self.warnings: - self.log.warning(f"[{check}] Check has warnings") - else: - self.log.success(f"[{check}] Check completed successfully") - - def on_checks_begin(self) -> Any: - """Callback hook called before all checks""" - pass - - def on_checks_complete(self) -> Any: - """Callback hook called after all checks have run, and returning the final outcome of a checks_run""" - if self.show_summary: - self.summary.print_summary() - return 1 if self.has_failed else 0 - - @runner.cleansup - def run(self) -> int: - """Run all configured checks and return the sum of their error counts""" - checks = self.get_checks() - try: - self.on_checks_begin() - for check in checks: - self.on_check_begin(check) - getattr(self, 
f"check_{check}")() - self.on_check_run(check) - except KeyboardInterrupt as e: - self.exit() - finally: - result = self.on_checks_complete() - return result - - def succeed(self, name: str, success: list, log: bool = True) -> None: - """Record (and log) success for a check type""" - self.success[name] = self.success.get(name, []) - self.success[name].extend(success) - if not log: - return - for message in success: - self.log.success(f"[{name}] {message}") - - def warn(self, name: str, warnings: list, log: bool = True) -> None: - """Record (and log) warnings for a check type""" - self.warnings[name] = self.warnings.get(name, []) - self.warnings[name].extend(warnings) - if not log: - return - for message in warnings: - self.log.warning(f"[{name}] {message}") - - -class Checker(BaseChecker): - - def on_check_begin(self, check: str) -> None: - super().on_check_begin(check) - - def on_check_run(self, check: str) -> None: - super().on_check_run(check) - - def on_checks_begin(self) -> None: - super().on_checks_complete() - - def on_checks_complete(self) -> int: - return super().on_checks_complete() - - -class BazelChecker(runner.BazelRunner, Checker): - pass - - -class CheckerSummary(object): - - def __init__(self, checker: BaseChecker): - self.checker = checker - - @property - def max_errors(self) -> int: - """Maximum errors to display in summary""" - return self.checker.args.summary_errors - - @property - def max_warnings(self) -> int: - """Maximum warnings to display in summary""" - return self.checker.args.summary_warnings - - def print_failed(self, problem_type): - _out = [] - _max = getattr(self, f"max_{problem_type}") - for check, problems in getattr(self.checker, problem_type).items(): - _msg = f"{self.checker.name} {check}" - _max = (min(len(problems), _max) if _max >= 0 else len(problems)) - msg = ( - f"{_msg}: (showing first {_max} of {len(problems)})" if - (len(problems) > _max and _max > 0) else (f"{_msg}:" if _max != 0 else _msg)) - 
_out.extend(self._section(msg, problems[:_max])) - if not _out: - return - output = ( - self.checker.log.warning if problem_type == "warnings" else self.checker.log.error) - output("\n".join(_out + [""])) - - def print_status(self) -> None: - """Print summary status to stderr""" - if self.checker.errors: - self.checker.log.error(f"{self.checker.status}") - elif self.checker.warnings: - self.checker.log.warning(f"{self.checker.status}") - else: - self.checker.log.info(f"{self.checker.status}") - - def print_summary(self) -> None: - """Write summary to stderr""" - self.print_failed("warnings") - self.print_failed("errors") - self.print_status() - - def _section(self, message: str, lines: list = None) -> list: - """Print a summary section""" - section = ["Summary", "-" * 80, f"{message}"] - if lines: - section += [line.split("\n")[0] for line in lines] - return section - - -class AsyncChecker(BaseChecker): - """Async version of the Checker class for use with asyncio""" - - async def _run(self) -> int: - checks = self.get_checks() - try: - await self.on_checks_begin() - for check in checks: - await self.on_check_begin(check) - await getattr(self, f"check_{check}")() - await self.on_check_run(check) - finally: - if self.exiting: - result = 1 - else: - result = await self.on_checks_complete() - return result - - @runner.cleansup - def run(self) -> int: - try: - return asyncio.get_event_loop().run_until_complete(self._run()) - except KeyboardInterrupt as e: - # This needs to be outside the loop to catch the a keyboard interrupt - # This means that a new loop has to be created to cleanup - result = self.exit() - result = asyncio.get_event_loop().run_until_complete(self.on_checks_complete()) - return result - - async def on_check_begin(self, check: str) -> None: - super().on_check_begin(check) - - async def on_check_run(self, check: str) -> None: - super().on_check_run(check) - - async def on_checks_begin(self) -> None: - super().on_checks_begin() - - async def 
on_checks_complete(self) -> int: - return super().on_checks_complete() diff --git a/tools/base/requirements.in b/tools/base/requirements.in index db00988a40f69..cec88af56453c 100644 --- a/tools/base/requirements.in +++ b/tools/base/requirements.in @@ -1,7 +1,11 @@ aio.functional +aio.subprocess +aio.tasks colorama coloredlogs coverage +envoy.base.checker +envoy.base.runner envoy.base.utils envoy.code_format.python_check>=0.0.4 envoy.dependency.pip_check>=0.0.4 diff --git a/tools/base/requirements.txt b/tools/base/requirements.txt index 570fe423f0f23..da6990801d30e 100644 --- a/tools/base/requirements.txt +++ b/tools/base/requirements.txt @@ -31,6 +31,7 @@ aio.tasks==0.0.4 \ --hash=sha256:9abd4b0881edb292c4f91a2f63b1dea7a9829a4bd4e8440225a1a412a90461fc # via # envoy.code-format.python-check + # via -r tools/base/requirements.in # envoy.github.abstract # envoy.github.release aiodocker==0.21.0 \ @@ -280,6 +281,7 @@ envoy.base.checker==0.0.2 \ envoy.base.runner==0.0.4 \ --hash=sha256:4eeb2b661f1f0c402df4425852be554a8a83ef5d338bfae69ddcb9b90755379e # via + # -r tools/base/requirements.in # envoy.base.checker # envoy.distribution.release # envoy.docs.sphinx-runner diff --git a/tools/base/runner.py b/tools/base/runner.py deleted file mode 100644 index 87d5577b370a0..0000000000000 --- a/tools/base/runner.py +++ /dev/null @@ -1,300 +0,0 @@ -# -# Generic runner class for use by cli implementations -# - -import argparse -import inspect -import logging -import pathlib -import subprocess -import sys -import tempfile -from functools import cached_property, wraps -from typing import Callable, Optional, Tuple, Type, Union - -from frozendict import frozendict - -import coloredlogs # type:ignore -import verboselogs # type:ignore - -LOG_LEVELS = (("debug", logging.DEBUG), ("info", logging.INFO), ("warn", logging.WARN), - ("error", logging.ERROR)) -LOG_FIELD_STYLES: frozendict = frozendict( - name=frozendict(color="blue"), levelname=frozendict(color="cyan", bold=True)) -LOG_FMT = 
"%(name)s %(levelname)s %(message)s" -LOG_LEVEL_STYLES: frozendict = frozendict( - critical=frozendict(bold=True, color="red"), - debug=frozendict(color="green"), - error=frozendict(color="red", bold=True), - info=frozendict(color="white", bold=True), - notice=frozendict(color="magenta", bold=True), - spam=frozendict(color="green", faint=True), - success=frozendict(bold=True, color="green"), - verbose=frozendict(color="blue"), - warning=frozendict(color="yellow", bold=True)) - - -def catches(errors: Union[Type[Exception], Tuple[Type[Exception], ...]]) -> Callable: - """Method decorator to catch specified errors - - logs and returns 1 for sys.exit if error/s are caught - - can be used as so: - - ```python - - class MyRunner(runner.Runner): - - @runner.catches((MyError, MyOtherError)) - def run(self): - self.myrun() - ``` - - Can work with `async` methods too. - """ - - def wrapper(fun: Callable) -> Callable: - - @wraps(fun) - def wrapped(self, *args, **kwargs) -> Optional[int]: - try: - return fun(self, *args, **kwargs) - except errors as e: - self.log.error(str(e) or repr(e)) - return 1 - - @wraps(fun) - async def async_wrapped(self, *args, **kwargs) -> Optional[int]: - try: - return await fun(self, *args, **kwargs) - except errors as e: - self.log.error(str(e) or repr(e)) - return 1 - - wrapped_fun = async_wrapped if inspect.iscoroutinefunction(fun) else wrapped - - # mypy doesnt trust `@wraps` to give back a `__wrapped__` object so we - # need to code defensively here - wrapping = getattr(wrapped_fun, "__wrapped__", None) - if wrapping: - setattr(wrapping, "__catches__", errors) - return wrapped_fun - - return wrapper - - -def cleansup(fun) -> Callable: - """Method decorator to call `.cleanup()` after run. - - Can work with `sync` and `async` methods. 
- """ - - @wraps(fun) - def wrapped(self, *args, **kwargs) -> Optional[int]: - try: - return fun(self, *args, **kwargs) - finally: - self.cleanup() - - @wraps(fun) - async def async_wrapped(self, *args, **kwargs) -> Optional[int]: - try: - return await fun(self, *args, **kwargs) - finally: - await self.cleanup() - - # mypy doesnt trust `@wraps` to give back a `__wrapped__` object so we - # need to code defensively here - wrapped_fun = async_wrapped if inspect.iscoroutinefunction(fun) else wrapped - wrapping = getattr(wrapped_fun, "__wrapped__", None) - if wrapping: - setattr(wrapping, "__cleansup__", True) - return wrapped_fun - - -class BazelRunError(Exception): - pass - - -class LogFilter(logging.Filter): - - def filter(self, rec): - return rec.levelno in (logging.DEBUG, logging.INFO) - - -class BaseRunner: - - def __init__(self, *args): - self._args = args - - @cached_property - def args(self) -> argparse.Namespace: - """Parsed args""" - return self.parser.parse_known_args(self._args)[0] - - @cached_property - def extra_args(self) -> list: - """Unparsed args""" - return self.parser.parse_known_args(self._args)[1] - - @property - def log_field_styles(self): - return LOG_FIELD_STYLES - - @property - def log_fmt(self): - return LOG_FMT - - @property - def log_level_styles(self): - return LOG_LEVEL_STYLES - - @cached_property - def log(self) -> verboselogs.VerboseLogger: - """Instantiated logger""" - verboselogs.install() - logger = logging.getLogger(self.name) - logger.setLevel(self.log_level) - coloredlogs.install( - field_styles=self.log_field_styles, - level_styles=self.log_level_styles, - fmt=self.log_fmt, - level='DEBUG', - logger=logger, - isatty=True) - return logger - - @cached_property - def log_level(self) -> int: - """Log level parsed from args""" - return dict(LOG_LEVELS)[self.args.log_level] - - @property - def name(self) -> str: - """Name of the runner""" - return self.__class__.__name__ - - @cached_property - def parser(self) -> 
argparse.ArgumentParser: - """Argparse parser""" - parser = argparse.ArgumentParser(allow_abbrev=False) - self.add_arguments(parser) - return parser - - @cached_property - def path(self) -> pathlib.Path: - return pathlib.Path(".") - - @cached_property - def stdout(self) -> logging.Logger: - """Log to stdout""" - logger = logging.getLogger("stdout") - logger.setLevel(self.log_level) - handler = logging.StreamHandler(sys.stdout) - handler.setFormatter(logging.Formatter("%(message)s")) - logger.addHandler(handler) - return logger - - @cached_property - def tempdir(self) -> tempfile.TemporaryDirectory: - """If you call this property, remember to call `.cleanup` - - For `run` methods this should be done by decorating the method with - `@runner.cleansup` - """ - if self._missing_cleanup: - self.log.warning( - "Tempdir created but instance has a `run` method which is not decorated with `@runner.cleansup`" - ) - return tempfile.TemporaryDirectory() - - def add_arguments(self, parser: argparse.ArgumentParser) -> None: - """Override this method to add custom arguments to the arg parser""" - parser.add_argument( - "--log-level", - "-l", - choices=[level[0] for level in LOG_LEVELS], - default="info", - help="Log level to display") - - @property - def _missing_cleanup(self) -> bool: - run_fun = getattr(self, "run", None) - return bool( - run_fun - and not getattr(getattr(run_fun, "__wrapped__", object()), "__cleansup__", False)) - - def _cleanup_tempdir(self) -> None: - if "tempdir" in self.__dict__: - self.tempdir.cleanup() - del self.__dict__["tempdir"] - - -class Runner(BaseRunner): - - def cleanup(self) -> None: - self._cleanup_tempdir() - - -class AsyncRunner(BaseRunner): - - async def cleanup(self) -> None: - self._cleanup_tempdir() - - -class ForkingAdapter: - - def __init__(self, context: Runner): - self.context = context - - def __call__(self, *args, **kwargs) -> subprocess.CompletedProcess: - return self.subproc_run(*args, **kwargs) - - def subproc_run( - self, *args, 
capture_output: bool = True, **kwargs) -> subprocess.CompletedProcess: - """Fork a subprocess, using self.context.path as the cwd by default""" - kwargs["cwd"] = kwargs.get("cwd", self.context.path) - return subprocess.run(*args, capture_output=capture_output, **kwargs) - - -class BazelAdapter: - - def __init__(self, context: "ForkingRunner"): - self.context = context - - def query(self, query: str) -> list: - """Run a bazel query and return stdout as list of lines""" - resp = self.context.subproc_run(["bazel", "query", f"'{query}'"]) - if resp.returncode: - raise BazelRunError(f"Bazel query failed: {resp}") - return resp.stdout.decode("utf-8").split("\n") - - def run( - self, - target: str, - *args, - capture_output: bool = False, - cwd: str = "", - raises: bool = True) -> subprocess.CompletedProcess: - """Run a bazel target and return the subprocess response""" - args = (("--",) + args) if args else args - bazel_args = ("bazel", "run", target) + args - resp = self.context.subproc_run( - bazel_args, capture_output=capture_output, cwd=cwd or self.context.path) - if resp.returncode and raises: - raise BazelRunError(f"Bazel run failed: {resp}") - return resp - - -class ForkingRunner(Runner): - - @cached_property - def subproc_run(self) -> ForkingAdapter: - return ForkingAdapter(self) - - -class BazelRunner(ForkingRunner): - - @cached_property - def bazel(self) -> BazelAdapter: - return BazelAdapter(self) diff --git a/tools/base/tests/test_aio.py b/tools/base/tests/test_aio.py deleted file mode 100644 index e80747f5d4a8a..0000000000000 --- a/tools/base/tests/test_aio.py +++ /dev/null @@ -1,1289 +0,0 @@ - -import asyncio -import gc -import inspect -import types -from typing import AsyncIterator, AsyncIterable -from unittest.mock import AsyncMock, MagicMock, PropertyMock - -import pytest - -from tools.base import aio - - -@pytest.mark.asyncio -async def test_async_subprocess_parallel(patches): - patched = patches( - "asyncio", - "ProcessPoolExecutor", - 
"async_subprocess.run", - prefix="tools.base.aio") - procs = [f"PROC{i}" for i in range(0, 3)] - kwargs = {f"KEY{i}": f"VALUE{i}" for i in range(0, 3)} - - async def async_result(result): - return result - - with patched as (m_asyncio, m_future, m_run): - returned = [f"RESULT{i}" for i in range(0, 5)] - m_asyncio.as_completed.return_value = [ - async_result(result) for result in returned] - - results = [] - async for result in aio.async_subprocess.parallel(procs, **kwargs): - results.append(result) - - assert results == returned - assert ( - list(m_future.call_args) - == [(), {}]) - assert ( - list(m_asyncio.as_completed.call_args) - == [(tuple(m_asyncio.ensure_future.return_value for i in range(0, len(procs))), ), {}]) - kwargs["executor"] = m_future.return_value.__enter__.return_value - assert ( - list(list(c) for c in m_run.call_args_list) - == [[(proc,), kwargs] for proc in procs]) - assert ( - list(list(c) for c in m_asyncio.ensure_future.call_args_list) - == [[(m_run.return_value,), {}] for proc in procs]) - - -@pytest.mark.asyncio -@pytest.mark.parametrize("loop", [True, False]) -@pytest.mark.parametrize("executor", [None, "EXECUTOR"]) -async def test_async_subprocess_run(patches, loop, executor): - patched = patches( - "asyncio", - "partial", - "subprocess", - prefix="tools.base.aio") - args = [f"ARG{i}" for i in range(0, 3)] - kwargs = {f"KEY{i}": f"VALUE{i}" for i in range(0, 3)} - - if loop: - kwargs["loop"] = AsyncMock() - - if executor: - kwargs["executor"] = executor - - with patched as (m_asyncio, m_partial, m_subproc): - m_asyncio.get_running_loop.return_value = AsyncMock() - if loop: - m_loop = kwargs["loop"] - else: - m_loop = m_asyncio.get_running_loop.return_value - - assert ( - await aio.async_subprocess.run(*args, **kwargs) - == m_loop.run_in_executor.return_value) - - if loop: - assert not m_asyncio.get_running_loop.called - - kwargs.pop("executor", None) - kwargs.pop("loop", None) - - assert ( - list(m_partial.call_args) - == 
[(m_subproc.run, ) + tuple(args), kwargs]) - assert ( - list(m_loop.run_in_executor.call_args) - == [(executor, m_partial.return_value), {}]) - - -@pytest.mark.parametrize("limit", ["XX", None, "", 0, -1, 73]) -@pytest.mark.parametrize("yield_exceptions", [None, True, False]) -def test_aio_concurrent_constructor(limit, yield_exceptions): - kwargs = {} - if limit == "XX": - limit = None - else: - kwargs["limit"] = limit - if yield_exceptions is not None: - kwargs["yield_exceptions"] = yield_exceptions - - concurrent = aio.concurrent(["CORO"], **kwargs) - assert concurrent._coros == ["CORO"] - assert concurrent._limit == limit - assert ( - concurrent.yield_exceptions - == (False - if yield_exceptions is None - else yield_exceptions)) - assert concurrent._running == [] - - assert concurrent.running_tasks is concurrent._running - assert "running_tasks" in concurrent.__dict__ - - -def test_aio_concurrent_dunder_aiter(patches): - concurrent = aio.concurrent(["CORO"]) - patched = patches( - "asyncio", - "concurrent.output", - ("concurrent.submit", dict(new_callable=MagicMock)), - prefix="tools.base.aio") - - with patched as (m_asyncio, m_output, m_submit): - assert concurrent.__aiter__() == m_output.return_value - - assert concurrent.submit_task == m_asyncio.create_task.return_value - assert ( - list(m_submit.call_args) - == [(), {}]) - assert ( - list(m_asyncio.create_task.call_args) - == [(m_submit.return_value, ), {}]) - - -@pytest.mark.parametrize("running", [True, False]) -@pytest.mark.parametrize("submitting", [True, False]) -def test_aio_concurrent_active(patches, running, submitting): - concurrent = aio.concurrent(["CORO"]) - patched = patches( - "asyncio", - ("concurrent.submitting", dict(new_callable=PropertyMock)), - ("concurrent.running", dict(new_callable=PropertyMock)), - prefix="tools.base.aio") - - with patched as (m_asyncio, m_submit, m_run): - m_submit.return_value = submitting - m_run.return_value = running - assert concurrent.active == (submitting or 
running) - - assert "active" not in concurrent.__dict__ - - -def test_aio_concurrent_closing_lock(patches): - concurrent = aio.concurrent(["CORO"]) - patched = patches( - "asyncio", - prefix="tools.base.aio") - - with patched as (m_asyncio, ): - assert concurrent.closing_lock == m_asyncio.Lock.return_value - - assert ( - list(m_asyncio.Lock.call_args) - == [(), {}]) - assert "closing_lock" in concurrent.__dict__ - - - -@pytest.mark.parametrize("locked", [True, False]) -def test_aio_concurrent_closed(patches, locked): - concurrent = aio.concurrent(["CORO"]) - patched = patches( - ("concurrent.closing_lock", dict(new_callable=PropertyMock)), - prefix="tools.base.aio") - - with patched as (m_closing_lock, ): - m_closing_lock.return_value.locked.return_value = locked - assert concurrent.closed == locked - - assert "closed" not in concurrent.__dict__ - - -@pytest.mark.asyncio -@pytest.mark.parametrize("raises", [None, BaseException, GeneratorExit]) -@pytest.mark.parametrize("close_raises", [None, BaseException]) -async def test_aio_concurrent_coros(patches, raises, close_raises): - concurrent = aio.concurrent(["CORO"]) - patched = patches( - ("concurrent.iter_coros", dict(new_callable=PropertyMock)), - prefix="tools.base.aio") - results = [] - return_coros = [f"CORO{i}" for i in range(0, 3)] - m_aclose = AsyncMock() - if close_raises: - m_aclose.side_effect = close_raises() - - class Coros: - aclose = m_aclose - - def __call__(self): - return self - - async def __aiter__(self): - if raises: - raise raises("AN ERROR OCCURRED") - for coro in return_coros: - yield coro - - with patched as (m_coros, ): - coros = Coros() - m_coros.return_value = coros - if raises == BaseException: - with pytest.raises(BaseException): - async for coro in concurrent.coros: - pass - else: - async for coro in concurrent.coros: - results.append(coro) - - if raises == GeneratorExit: - assert ( - list(coros.aclose.call_args) - == [(), {}]) - return - - assert not coros.aclose.called - assert 
"coros" not in concurrent.__dict__ - - if raises: - return - assert results == return_coros - - -def test_aio_concurrent_running_queue(patches): - concurrent = aio.concurrent(["CORO"]) - patched = patches( - "asyncio", - prefix="tools.base.aio") - - with patched as (m_asyncio, ): - assert concurrent.running_queue == m_asyncio.Queue.return_value - - assert ( - list(m_asyncio.Queue.call_args) - == [(), {}]) - assert "running_queue" in concurrent.__dict__ - - -@pytest.mark.parametrize("cpus", [None, "", 0, 4, 73]) -def test_aio_concurrent_default_limit(patches, cpus): - concurrent = aio.concurrent(["CORO"]) - patched = patches( - "min", - "os", - prefix="tools.base.aio") - - with patched as (m_min, m_os): - m_os.cpu_count.return_value = cpus - assert concurrent.default_limit == m_min.return_value - - assert ( - list(m_min.call_args) - == [(32, (cpus or 0) + 4), {}]) - assert "default_limit" not in concurrent.__dict__ - - -def test_aio_concurrent_consumes_async(patches): - concurrent = aio.concurrent(["CORO"]) - patched = patches( - "isinstance", - prefix="tools.base.aio") - - with patched as (m_inst, ): - assert concurrent.consumes_async == m_inst.return_value - - assert ( - list(m_inst.call_args) - == [(["CORO"], (types.AsyncGeneratorType, AsyncIterator, AsyncIterable)), {}]) - assert "consumes_async" in concurrent.__dict__ - - -def test_aio_concurrent_consumes_generator(patches): - concurrent = aio.concurrent(["CORO"]) - patched = patches( - "isinstance", - prefix="tools.base.aio") - - with patched as (m_inst, ): - assert concurrent.consumes_generator == m_inst.return_value - - assert ( - list(m_inst.call_args) - == [(["CORO"], (types.AsyncGeneratorType, types.GeneratorType)), {}]) - assert "consumes_generator" in concurrent.__dict__ - - -@pytest.mark.parametrize("limit", [None, "", 0, -1, 73]) -def test_aio_concurrent_limit(patches, limit): - concurrent = aio.concurrent(["CORO"]) - patched = patches( - ("concurrent.default_limit", dict(new_callable=PropertyMock)), 
- prefix="tools.base.aio") - concurrent._limit = limit - - with patched as (m_limit, ): - assert concurrent.limit == (limit or m_limit.return_value) - - if limit: - assert not m_limit.called - - assert "limit" in concurrent.__dict__ - - -@pytest.mark.parametrize("limit", [None, "", 0, -1, 73]) -def test_aio_concurrent_nolimit(patches, limit): - concurrent = aio.concurrent(["CORO"]) - patched = patches( - ("concurrent.limit", dict(new_callable=PropertyMock)), - prefix="tools.base.aio") - - with patched as (m_limit, ): - m_limit.return_value = limit - assert concurrent.nolimit == (limit == -1) - - assert "nolimit" in concurrent.__dict__ - - -def test_aio_concurrent_out(patches): - concurrent = aio.concurrent(["CORO"]) - patched = patches( - "asyncio", - prefix="tools.base.aio") - - with patched as (m_asyncio, ): - assert concurrent.out == m_asyncio.Queue.return_value - - assert ( - list(m_asyncio.Queue.call_args) - == [(), {}]) - assert "out" in concurrent.__dict__ - - -@pytest.mark.parametrize("empty", [True, False]) -def test_aio_concurrent_running(patches, empty): - concurrent = aio.concurrent(["CORO"]) - patched = patches( - ("concurrent.running_queue", dict(new_callable=PropertyMock)), - prefix="tools.base.aio") - - with patched as (m_running_queue, ): - m_running_queue.return_value.empty.return_value = empty - assert concurrent.running == (not empty) - - assert "running" not in concurrent.__dict__ - - -def test_aio_concurrent_sem(patches): - concurrent = aio.concurrent(["CORO"]) - patched = patches( - "asyncio", - ("concurrent.limit", dict(new_callable=PropertyMock)), - prefix="tools.base.aio") - - with patched as (m_asyncio, m_limit): - assert concurrent.sem == m_asyncio.Semaphore.return_value - - assert ( - list(m_asyncio.Semaphore.call_args) - == [(m_limit.return_value, ), {}]) - assert "sem" in concurrent.__dict__ - - -def test_aio_concurrent_submission_lock(patches): - concurrent = aio.concurrent(["CORO"]) - patched = patches( - "asyncio", - 
prefix="tools.base.aio") - - with patched as (m_asyncio, ): - assert concurrent.submission_lock == m_asyncio.Lock.return_value - - assert ( - list(m_asyncio.Lock.call_args) - == [(), {}]) - assert "submission_lock" in concurrent.__dict__ - - -@pytest.mark.parametrize("locked", [True, False]) -def test_aio_concurrent_submitting(patches, locked): - concurrent = aio.concurrent(["CORO"]) - patched = patches( - ("concurrent.submission_lock", dict(new_callable=PropertyMock)), - prefix="tools.base.aio") - - with patched as (m_submission_lock, ): - m_submission_lock.return_value.locked.return_value = locked - assert concurrent.submitting == locked - - assert "submitting" not in concurrent.__dict__ - - -@pytest.mark.asyncio -async def test_aio_concurrent_cancel(patches): - concurrent = aio.concurrent(["CORO"]) - patched = patches( - ("concurrent.cancel_tasks", dict(new_callable=AsyncMock)), - ("concurrent.close", dict(new_callable=AsyncMock)), - ("concurrent.close_coros", dict(new_callable=AsyncMock)), - ("concurrent.sem", dict(new_callable=PropertyMock)), - prefix="tools.base.aio") - - waiter = MagicMock() - - class SubmitTask: - def __init__(self): - self.cancel = MagicMock() - - def __await__(self): - waiter() - yield - - concurrent.submit_task = SubmitTask() - - with patched as (m_cancel, m_close, m_coros, m_sem): - assert not await concurrent.cancel() - - assert ( - list(m_close.call_args) - == [(), {}]) - assert ( - list(m_sem.return_value.release.call_args) - == [(), {}]) - assert ( - list(m_cancel.call_args) - == [(), {}]) - assert ( - list(m_coros.call_args) - == [(), {}]) - assert ( - list(waiter.call_args) - == [(), {}]) - - -@pytest.mark.asyncio -@pytest.mark.parametrize("bad", range(0, 8)) -async def test_aio_concurrent_cancel_tasks(patches, bad): - concurrent = aio.concurrent(["CORO"]) - patched = patches( - ("concurrent.running_tasks", dict(new_callable=PropertyMock)), - prefix="tools.base.aio") - - tasks = [] - waiter = MagicMock() - - class Task: - def 
__init__(self, i): - self.i = i - self.cancel = MagicMock() - - def __await__(self): - waiter() - if self.i == bad: - raise BaseException("AN ERROR OCCURRED") - - for i in range(0, 7): - tasks.append(Task(i)) - - with patched as (m_running, ): - m_running.return_value = tasks - assert not await concurrent.cancel_tasks() - - assert ( - list(list(c) for c in waiter.call_args_list) - == [[(), {}]] * 7) - for task in tasks: - assert ( - list(task.cancel.call_args) - == [(), {}]) - - -@pytest.mark.asyncio -@pytest.mark.parametrize("closed", [True, False]) -async def test_aio_concurrent_close(patches, closed): - concurrent = aio.concurrent(["CORO"]) - patched = patches( - ("concurrent.closed", dict(new_callable=PropertyMock)), - ("concurrent.closing_lock", dict(new_callable=PropertyMock)), - prefix="tools.base.aio") - - with patched as (m_closed, m_lock): - m_closed.return_value = closed - m_lock.return_value.acquire = AsyncMock() - assert not await concurrent.close() - - if closed: - assert not m_lock.called - else: - assert ( - list(m_lock.return_value.acquire.call_args) - == [(), {}]) - - -@pytest.mark.asyncio -@pytest.mark.parametrize("consumes_generator", [True, False]) -@pytest.mark.parametrize("bad", range(0, 8)) -async def test_aio_concurrent_close_coros(patches, consumes_generator, bad): - concurrent = aio.concurrent(["CORO"]) - patched = patches( - "concurrent.close", - ("concurrent.iter_coros", dict(new_callable=PropertyMock)), - ("concurrent.consumes_generator", dict(new_callable=PropertyMock)), - prefix="tools.base.aio") - - coros = [] - for i in range(0, 7): - coro = MagicMock() - if i == bad: - coro.close.side_effect = BaseException("AN ERROR OCCURRED") - coros.append(coro) - - async def iter_coros(): - for coro in coros: - yield coro - - with patched as (m_close, m_iter, m_isgen): - m_isgen.return_value = consumes_generator - m_iter.return_value = iter_coros - assert not await concurrent.close_coros() - - if consumes_generator: - assert not m_iter.called 
- return - assert ( - list(m_iter.call_args) - == [(), {}]) - for coro in coros: - assert ( - list(coro.close.call_args) - == [(), {}]) - - -@pytest.mark.asyncio -async def test_aio_concurrent_create_task(patches): - concurrent = aio.concurrent(["CORO"]) - patched = patches( - "asyncio", - "concurrent.remember_task", - ("concurrent.task", dict(new_callable=MagicMock)), - ("concurrent.running_queue", dict(new_callable=PropertyMock)), - prefix="tools.base.aio") - - with patched as (m_asyncio, m_rem, m_task, m_running_queue): - assert not await concurrent.create_task("CORO") - - assert ( - list(m_running_queue.return_value.put_nowait.call_args) - == [(None, ), {}]) - assert ( - list(m_task.call_args) - == [("CORO", ), {}]) - assert ( - list(m_asyncio.create_task.call_args) - == [(m_task.return_value, ), {}]) - assert ( - list(m_rem.call_args) - == [(m_asyncio.create_task.return_value, ), {}]) - - -@pytest.mark.asyncio -@pytest.mark.parametrize("closed", [True, False]) -@pytest.mark.parametrize("active", [True, False]) -async def test_aio_concurrent_exit_on_completion(patches, active, closed): - concurrent = aio.concurrent(["CORO"]) - patched = patches( - ("concurrent.active", dict(new_callable=PropertyMock)), - ("concurrent.closed", dict(new_callable=PropertyMock)), - ("concurrent.out", dict(new_callable=PropertyMock)), - prefix="tools.base.aio") - - with patched as (m_active, m_closed, m_out): - m_out.return_value.put = AsyncMock() - m_active.return_value = active - m_closed.return_value = closed - assert not await concurrent.exit_on_completion() - - if closed or active: - assert not m_out.called - return - assert ( - list(m_out.return_value.put.call_args) - == [(aio._sentinel, ), {}]) - - -@pytest.mark.parametrize("closed", [True, False]) -def test_aio_concurrent_forget_task(patches, closed): - concurrent = aio.concurrent(["CORO"]) - patched = patches( - ("concurrent.closed", dict(new_callable=PropertyMock)), - prefix="tools.base.aio") - concurrent._running = 
MagicMock() - - with patched as (m_closed, ): - m_closed.return_value = closed - assert not concurrent.forget_task("TASK") - - if closed: - assert not concurrent._running.remove.called - return - assert ( - list(concurrent._running.remove.call_args) - == [("TASK", ), {}]) - - -@pytest.mark.asyncio -@pytest.mark.parametrize("raises", [True, False]) -@pytest.mark.parametrize("consumes_async", [True, False]) -async def test_aio_concurrent_iter_coros(patches, raises, consumes_async): - concurrent = aio.concurrent(["CORO"]) - patched = patches( - ("concurrent.consumes_async", dict(new_callable=PropertyMock)), - prefix="tools.base.aio") - - coros = [f"CORO{i}" for i in range(0, 7)] - exception = BaseException("AN RAISES OCCURRED") - - def iter_coros(): - if raises: - raise exception - for coro in coros: - yield coro - - async def async_iter_coros(): - if raises: - raise exception - for coro in coros: - yield coro - - concurrent._coros = ( - async_iter_coros() - if consumes_async - else iter_coros()) - results = [] - - with patched as (m_async, ): - m_async.return_value = consumes_async - - async for result in concurrent.iter_coros(): - results.append(result) - - if raises: - error = results[0] - assert isinstance(error, aio.ConcurrentIteratorError) - assert error.args[0] is exception - assert results == [error] - return - assert results == coros - - -@pytest.mark.asyncio -@pytest.mark.parametrize("closed", [True, False]) -@pytest.mark.parametrize("nolimit", [True, False]) -@pytest.mark.parametrize("decrement", [None, True, False]) -async def test_aio_concurrent_on_task_complete(patches, closed, nolimit, decrement): - concurrent = aio.concurrent(["CORO"]) - patched = patches( - ("concurrent.exit_on_completion", dict(new_callable=AsyncMock)), - ("concurrent.closed", dict(new_callable=PropertyMock)), - ("concurrent.out", dict(new_callable=PropertyMock)), - ("concurrent.running_queue", dict(new_callable=PropertyMock)), - ("concurrent.nolimit", 
dict(new_callable=PropertyMock)), - ("concurrent.sem", dict(new_callable=PropertyMock)), - prefix="tools.base.aio") - kwargs = {} - if decrement is not None: - kwargs["decrement"] = decrement - - with patched as (m_complete, m_closed, m_out, m_running_queue, m_nolimit, m_sem): - m_nolimit.return_value = nolimit - m_closed.return_value = closed - m_out.return_value.put = AsyncMock() - assert not await concurrent.on_task_complete("RESULT", **kwargs) - - if closed: - assert not m_complete.called - assert not m_nolimit.called - assert not m_sem.called - assert not m_running_queue.called - assert not m_out.return_value.put.called - return - - assert ( - list(m_out.return_value.put.call_args) - == [("RESULT", ), {}]) - if nolimit: - assert not m_sem.return_value.release.called - else: - assert ( - list(m_sem.return_value.release.call_args) - == [(), {}]) - if decrement or decrement is None: - assert ( - list(m_running_queue.return_value.get_nowait.call_args) - == [(), {}]) - else: - assert not m_running_queue.return_value.get_nowait.called - assert ( - list(m_complete.call_args) - == [(), {}]) - - -@pytest.mark.asyncio -@pytest.mark.parametrize("result_count", range(0, 7)) -@pytest.mark.parametrize("error", [True, False]) -@pytest.mark.parametrize("should_error", [True, False]) -async def test_aio_concurrent_output(patches, result_count, error, should_error): - concurrent = aio.concurrent(["CORO"]) - patched = patches( - "concurrent.should_error", - ("concurrent.cancel", dict(new_callable=AsyncMock)), - ("concurrent.close", dict(new_callable=AsyncMock)), - ("concurrent.out", dict(new_callable=PropertyMock)), - prefix="tools.base.aio") - - exception = Exception() - - class DummyQueue: - _running_queue = 0 - - async def get(self): - if result_count == 0: - return aio._sentinel - if result_count > self._running_queue: - self._running_queue += 1 - if error and result_count == self._running_queue: - return exception - return f"RESULT {self._running_queue}" - return 
aio._sentinel - - def should_error(self, result): - return error and should_error and (result_count == self._running_queue) - - q = DummyQueue() - results = [] - - with patched as (m_error, m_cancel, m_close, m_out): - m_out.return_value.get.side_effect = q.get - m_error.side_effect = q.should_error - if result_count and error and should_error: - with pytest.raises(Exception): - async for result in concurrent.output(): - results.append(result) - else: - async for result in concurrent.output(): - results.append(result) - - if result_count and error and should_error: - # last one errored - assert results == [f"RESULT {i}" for i in range(1, result_count)] - assert ( - list(list(c) for c in m_error.call_args_list) - == [[(result,), {}] for result in results] + [[(exception,), {}]]) - assert ( - list(m_cancel.call_args) - == [(), {}]) - assert not m_close.called - return - - assert ( - list(list(c) for c in m_close.call_args_list) - == [[(), {}]]) - assert not m_cancel.called - - if not result_count: - assert results == [] - return - - if error: - assert ( - results - == [f"RESULT {i}" for i in range(1, result_count)] + [exception]) - return - # all results returned correctly - assert results == [f"RESULT {i}" for i in range(1, result_count + 1)] - - -@pytest.mark.asyncio -@pytest.mark.parametrize("closed_before", [True, False]) -@pytest.mark.parametrize("closed_after", [True, False]) -@pytest.mark.parametrize("nolimit", [True, False]) -async def test_aio_concurrent_ready(patches, closed_before, closed_after, nolimit): - concurrent = aio.concurrent(["CORO"]) - patched = patches( - ("concurrent.closed", dict(new_callable=PropertyMock)), - ("concurrent.nolimit", dict(new_callable=PropertyMock)), - ("concurrent.sem", dict(new_callable=PropertyMock)), - prefix="tools.base.aio") - - class DummyCloser: - order_mock = MagicMock() - close_calls = 0 - - async def _acquire(self): - self.order_mock("ACQUIRE") - - def _nolimit(self): - self.order_mock("NOLIMIT") - return nolimit - 
- def _closed(self): - self.order_mock("CLOSED") - self.close_calls += 1 - if self.close_calls == 1: - return closed_before - if self.close_calls == 2: - return closed_after - - closer = DummyCloser() - - with patched as (m_closed, m_nolimit, m_sem): - m_nolimit.side_effect = closer._nolimit - m_closed.side_effect = closer._closed - m_sem.return_value.acquire = closer._acquire - assert ( - await concurrent.ready() - == ((not closed_before and not closed_after) - if not nolimit else not closed_before)) - - if closed_before: - assert not m_nolimit.called - assert not m_sem.called - assert ( - list(list(c) for c in closer.order_mock.call_args_list) - == [[('CLOSED',), {}]]) - return - if nolimit: - assert not m_sem.called - assert ( - list(list(c) for c in closer.order_mock.call_args_list) - == [[('CLOSED',), {}], - [('NOLIMIT',), {}]]) - return - assert ( - list(list(c) for c in closer.order_mock.call_args_list) - == [[('CLOSED',), {}], - [('NOLIMIT',), {}], - [('ACQUIRE',), {}], - [('CLOSED',), {}]]) - - -def test_aio_concurrent_remember_task(): - concurrent = aio.concurrent(["CORO"]) - concurrent._running = MagicMock() - task = MagicMock() - assert not concurrent.remember_task(task) - assert ( - list(concurrent._running.append.call_args) - == [(task, ), {}]) - assert ( - list(task.add_done_callback.call_args) - == [(concurrent.forget_task, ), {}]) - - -@pytest.mark.parametrize("result", [None, "RESULT", aio.ConcurrentError, aio.ConcurrentExecutionError, aio.ConcurrentIteratorError]) -@pytest.mark.parametrize("yield_exceptions", [True, False]) -def test_aio_concurrent_should_error(result, yield_exceptions): - concurrent = aio.concurrent(["CORO"]) - concurrent.yield_exceptions = yield_exceptions - - if isinstance(result, type) and issubclass(result, BaseException): - result = result() - - assert ( - concurrent.should_error(result) - == ((isinstance(result, aio.ConcurrentIteratorError) - or isinstance(result, aio.ConcurrentError) and not yield_exceptions))) - - 
-@pytest.mark.asyncio -@pytest.mark.parametrize("coros", range(0, 7)) -@pytest.mark.parametrize("unready", range(0, 8)) -@pytest.mark.parametrize("valid_raises", [None, Exception, aio.ConcurrentError]) -@pytest.mark.parametrize("iter_errors", [True, False]) -async def test_aio_concurrent_submit(patches, coros, unready, valid_raises, iter_errors): - concurrent = aio.concurrent(["CORO"]) - patched = patches( - "isinstance", - "concurrent.validate_coro", - ("concurrent.exit_on_completion", dict(new_callable=AsyncMock)), - ("concurrent.create_task", dict(new_callable=AsyncMock)), - ("concurrent.on_task_complete", dict(new_callable=AsyncMock)), - ("concurrent.ready", dict(new_callable=AsyncMock)), - ("concurrent.coros", dict(new_callable=PropertyMock)), - ("concurrent.out", dict(new_callable=PropertyMock)), - ("concurrent.submission_lock", dict(new_callable=PropertyMock)), - prefix="tools.base.aio") - m_order = MagicMock() - - class DummyReady: - counter = 0 - - def ready(self): - if self.counter >= unready: - self.counter += 1 - return False - self.counter += 1 - return True - - ready = DummyReady() - - async def acquire(): - m_order("ACQUIRE") - - def release(): - m_order("RELEASE") - - corolist = [MagicMock() for coro in range(1, coros)] - - async def iter_coros(): - for coro in corolist: - m_order(coro) - yield coro - - valid_errors = ( - (valid_raises == Exception) - and coros > 1 - and not unready == 0 - and not iter_errors) - - with patched as (m_inst, m_valid, m_exit, m_create, m_complete, m_ready, m_coros, m_out, m_lock): - m_out.return_value.put = AsyncMock() - m_inst.return_value = iter_errors - m_valid.side_effect = valid_raises - m_ready.side_effect = ready.ready - m_coros.return_value = iter_coros() - m_lock.return_value.acquire.side_effect = acquire - m_lock.return_value.release.side_effect = release - - if valid_errors: - with pytest.raises(Exception): - await concurrent.submit() - else: - assert not await concurrent.submit() - - if valid_errors: - 
assert not m_lock.return_value.called - assert not m_exit.called - else: - assert ( - list(m_lock.return_value.release.call_args) - == [(), {}]) - assert ( - list(m_exit.call_args) - == [(), {}]) - - if coros < 2: - assert not m_valid.called - assert not m_inst.called - assert not m_complete.called - assert not m_create.called - assert not m_ready.called - assert not m_out.return_value.put.called - return - - should_close_coro = ( - not iter_errors - and not valid_errors - and (len(corolist) > unready)) - - if should_close_coro: - assert corolist[unready].close.called - else: - assert not any(coro.close.called for coro in corolist) - - if iter_errors: - assert ( - list(list(c) for c in m_out.return_value.put.call_args_list) - == [[(corolist[0], ), {}]]) - assert ( - list(list(c) for c in m_inst.call_args_list) - == [[(corolist[0], aio.ConcurrentIteratorError), {}]]) - assert not m_ready.called - assert not m_valid.called - assert not m_complete.called - assert not m_create.called - return - - if valid_errors: - assert ( - list(list(c) for c in m_inst.call_args_list) - == [[(corolist[0], aio.ConcurrentIteratorError), {}]]) - assert ( - list(list(c) for c in m_ready.call_args_list) - == [[(), {}]]) - assert ( - list(list(c) for c in m_valid.call_args_list) - == [[(corolist[0], ), {}]]) - assert not m_complete.called - assert not m_create.called - assert ( - list(list(c) for c in m_order.call_args_list) - == ([[('ACQUIRE',), {}], - [(corolist[0],), {}]])) - return - - assert not m_out.return_value.put.called - assert ( - list(list(c) for c in m_ready.call_args_list) - == [[(), {}]] * min(coros - 1, unready + 1 or 1)) - assert ( - list(list(c) for c in m_valid.call_args_list) - == [[(corolist[i - 1], ), {}] for i in range(1, min(coros, unready + 1))]) - assert ( - list(list(c) for c in m_order.call_args_list) - == ([[('ACQUIRE',), {}]] - + [[(corolist[i - 1],), {}] for i in range(1, min(coros, unready + 2))] - + [[('RELEASE',), {}]])) - if valid_raises: - assert 
len(m_complete.call_args_list) == max(min(coros - 1, unready), 0) - for c in m_complete.call_args_list: - error = list(c)[0][0] - assert isinstance(error, aio.ConcurrentError) - assert ( - list(c) - == [(error,), {'decrement': False}]) - assert not m_create.called - return - assert not m_complete.called - assert ( - list(list(c) for c in m_create.call_args_list) - == [[(corolist[i - 1],), {}] for i in range(1, min(coros, unready + 1))]) - - -class OtherException(BaseException): - pass - - -@pytest.mark.asyncio -@pytest.mark.parametrize("raises", [None, Exception, OtherException]) -async def test_aio_concurrent_task(patches, raises): - concurrent = aio.concurrent(["CORO"]) - patched = patches( - "concurrent.on_task_complete", - prefix="tools.base.aio") - - if raises: - exception = raises("AN ERROR OCCURRED") - - async def coro(): - if raises: - raise exception - return 23 - - with patched as (m_complete, ): - assert not await concurrent.task(coro()) - - result = m_complete.call_args[0][0] - - if not raises: - assert result == 23 - else: - assert isinstance(result, aio.ConcurrentExecutionError) - assert result.args[0] is exception - assert ( - list(m_complete.call_args) - == [(result, ), {}]) - - -@pytest.mark.parametrize("awaitable", [True, False]) -@pytest.mark.parametrize( - "state", - [inspect.CORO_CLOSED, - inspect.CORO_CREATED, - inspect.CORO_RUNNING, - inspect.CORO_SUSPENDED]) -def test_aio_concurrent_validate_coro(patches, awaitable, state): - concurrent = aio.concurrent(["CORO"]) - patched = patches( - "inspect.getcoroutinestate", - prefix="tools.base.aio") - - # we cant patch inspect.isawaitable without fooing unittest - def unawaitable(): - pass - - async def coro(): - pass - - awaits = ( - coro() - if awaitable - else unawaitable) - - with patched as (m_state, ): - m_state.return_value = state - - if awaitable and state == inspect.CORO_CREATED: - assert not concurrent.validate_coro(awaits) - else: - with pytest.raises(aio.ConcurrentError) as e: - 
concurrent.validate_coro(awaits) - - if not awaitable: - assert ( - e.value.args[0] - == f'Provided input was not a coroutine: {awaits}') - assert not m_state.called - return - - awaits.close() - assert ( - list(m_state.call_args) - == [(awaits, ), {}]) - - if state != inspect.CORO_CREATED: - assert ( - e.value.args[0] - == f'Provided coroutine has already been fired: {awaits}') - - -async def aiter(items): - for item in items: - yield item - - -@pytest.mark.asyncio -@pytest.mark.parametrize("limit", list(range(0, 4)) + [-1]) -@pytest.mark.parametrize("yield_exceptions", [None, True, False]) -@pytest.mark.parametrize("iter_type", [list, tuple, set, iter, aiter]) -@pytest.mark.parametrize( - "coros", - [["HAPPY"], - ["HAPPY"] * 2 + ["SAD"] + ["HAPPY"] * 3, - ["HAPPY"] * 7, - ["HAPPY"] * 2 + ["RAISE"] + ["HAPPY"] * 3, - ["SAD"] * 2 + ["HAPPY"] * 3, - ["HAPPY"] * 2 + ["CABBAGE"] + ["HAPPY"] * 3, - ["HAPPY"] * 2 + ["FIRED"] + ["HAPPY"] * 3]) -async def test_aio_concurrent_integration(limit, yield_exceptions, iter_type, coros): - # This is an integration/black-box test that only measures inputs/outputs and the - # effect of using the utility with them on them - - # `HAPPY` - a happy coroutine ready to be fired - # `SAD` - a sad coroutine that will raise a `SadError` when fired - # `FIRED` - a coroutine that has already been fired - # `RAISE` - raise an error in the iterator - # `CABBAGE` - leafy vegetable of the brassica family - - tasks_at_the_beginning = len(asyncio.all_tasks()) - - kwargs = {} - - if yield_exceptions is not None: - kwargs["yield_exceptions"] = yield_exceptions - - if limit: - kwargs["limit"] = limit - - class SadError(Exception): - pass - - class LoopError(Exception): - pass - - async def happy(): - # this makes happy return after sad (ie errors) and tests the ordering of responses - # and the handling of pending tasks when errors occur - await asyncio.sleep(.01) - return "HAPPY" - - fired = happy() - await fired - - async def sad(): - raise SadError 
- - def coro_gen(): - for coro in coros: - if coro == "RAISE": - raise LoopError() - if coro == "HAPPY": - yield happy() - elif coro == "SAD": - yield sad() - elif coro == "FIRED": - yield fired - else: - yield coro - - all_good = all(coro == "HAPPY" for coro in coros) - iter_raises = any(coro == "RAISE" for coro in coros) - - if iter_raises: - # we can only test the generator types for errors - # during iteration - ie if `list`, `tuple` etc contain - # errors, they would raise now. - if not iter_type in [iter, aiter]: - return - generated_coros = coro_gen() - else: - generated_coros = list(coro_gen()) - expected_err_index = next((i for i, x in enumerate(coros) if x != 'HAPPY'), None) - - results = [] - concurrent = aio.concurrent(iter_type(generated_coros), **kwargs) - - if (not all_good and not yield_exceptions) or iter_raises: - if iter_raises: - with pytest.raises(aio.ConcurrentIteratorError) as e: - async for result in concurrent: - results.append(result) - assert isinstance(e.value.args[0], LoopError) - return - else: - coro_fail = ( - any(not inspect.isawaitable(coro) for coro in generated_coros) - or any(coro == "FIRED" for coro in coros)) - if coro_fail: - with pytest.raises(aio.ConcurrentError): - async for result in concurrent: - results.append(result) - else: - with pytest.raises(aio.ConcurrentExecutionError): - async for result in concurrent: - results.append(result) - - # for iterators there is no way of knowing that more awaitables were - # on the way when failure happened, so these need to be closed here - if iter_type in (iter, aiter): - for coro in generated_coros[expected_err_index:]: - if not isinstance(coro, str): - coro.close() - - if limit < 1 and iter_type != set: - # as all jobs are submitted concurrently (the default is higher than - # tne number of test jobs, and -1 forces no limit) and as sad is - # faster than happy, we get no results - assert results == [] - elif iter_type != set: - # because the ordering on sets is indeterminate the 
results are unpredictable - # therefore the easiest thing is to just exclude them from this test - assert results == coros[:expected_err_index - (expected_err_index % limit)] - - # this can probs be removed, i think it was caused by unhandled GeneratorExit - await asyncio.sleep(.001) - gc.collect() - assert len(asyncio.all_tasks()) == tasks_at_the_beginning - return - - async for result in concurrent: - results.append(result) - - assert len(asyncio.all_tasks()) == tasks_at_the_beginning - - def mangled_results(): - # replace the errors with the test strings - for result in results: - if isinstance(result, aio.ConcurrentExecutionError): - yield "SAD" - elif isinstance(result, aio.ConcurrentError): - if "CABBAGE" in result.args[0]: - yield "CABBAGE" - else: - yield "FIRED" - else: - yield result - - if expected_err_index: - err_index = ( - expected_err_index - if limit == 0 - else expected_err_index - (expected_err_index % limit)) - - if expected_err_index and err_index >= limit and limit not in [0, -1]: - # the error is at the beginning of whichever batch its in - expected = ["HAPPY"] * 6 - expected[err_index] = coros[err_index] - else: - # the error is in the first batch so its at the beginning - expected = [x for x in list(coros) if x != "HAPPY"] + [x for x in list(coros) if x == "HAPPY"] - - if iter_type == set: - assert set(expected) == set(mangled_results()) - else: - assert expected == list(mangled_results()) diff --git a/tools/base/tests/test_checker.py b/tools/base/tests/test_checker.py deleted file mode 100644 index e3c7d3c155e8c..0000000000000 --- a/tools/base/tests/test_checker.py +++ /dev/null @@ -1,1017 +0,0 @@ -import logging -from unittest.mock import MagicMock, patch, PropertyMock - -import pytest - -from tools.base.checker import ( - AsyncChecker, BaseChecker, BazelChecker, Checker, CheckerSummary) -from tools.base.runner import BazelRunner - - -class DummyChecker(Checker): - - def __init__(self): - self.args = PropertyMock() - - -class 
DummyBazelChecker(BazelChecker): - - def __init__(self): - self.args = PropertyMock() - - -class DummyCheckerWithChecks(Checker): - checks = ("check1", "check2") - - def __init__(self, *args): - self.check1 = MagicMock() - self.check2 = MagicMock() - - def check_check1(self): - self.check1() - - def check_check2(self): - self.check2() - - -def test_checker_constructor(): - super_mock = patch("tools.base.checker.runner.Runner.__init__") - - with super_mock as m_super: - checker = Checker("path1", "path2", "path3") - - assert ( - list(m_super.call_args) - == [('path1', 'path2', 'path3'), {}]) - assert checker.summary_class == CheckerSummary - - assert checker.active_check == "" - assert "active_check" not in checker.__dict__ - - -def test_checker_diff(): - checker = Checker("path1", "path2", "path3") - args_mock = patch( - "tools.base.checker.Checker.args", - new_callable=PropertyMock) - - with args_mock as m_args: - assert checker.diff == m_args.return_value.diff - assert "diff" not in checker.__dict__ - - -@pytest.mark.parametrize( - "errors", - [{}, dict(exiting="EEK"), dict(notexiting="OK")]) -def test_checker_exiting(errors): - checker = Checker("path1", "path2", "path3") - checker.errors = errors - assert checker.exiting == bool("exiting" in errors) - assert "exiting" not in checker.__dict__ - - -def test_checker_error_count(): - checker = Checker("path1", "path2", "path3") - checker.errors = dict(foo=["err"] * 3, bar=["err"] * 5, baz=["err"] * 7) - assert checker.error_count == 15 - assert "error_count" not in checker.__dict__ - - -def test_checker_failed(): - checker = Checker("path1", "path2", "path3") - checker.errors = dict(foo=["err"] * 3, bar=["err"] * 5, baz=["err"] * 7) - assert checker.failed == {'foo': 3, 'bar': 5, 'baz': 7} - assert "failed" not in checker.__dict__ - - -def test_checker_fix(): - checker = Checker("path1", "path2", "path3") - args_mock = patch( - "tools.base.checker.Checker.args", - new_callable=PropertyMock) - - with args_mock as 
m_args: - assert checker.fix == m_args.return_value.fix - assert "fix" not in checker.__dict__ - - -@pytest.mark.parametrize("failed", [True, False]) -@pytest.mark.parametrize("warned", [True, False]) -def test_checker_has_failed(patches, failed, warned): - checker = Checker("path1", "path2", "path3") - patched = patches( - ("Checker.failed", dict(new_callable=PropertyMock)), - ("Checker.warned", dict(new_callable=PropertyMock)), - prefix="tools.base.checker") - - with patched as (m_failed, m_warned): - m_failed.return_value = failed - m_warned.return_value = warned - result = checker.has_failed - - if failed or warned: - assert result is True - else: - assert result is False - assert "has_failed" not in checker.__dict__ - - -@pytest.mark.parametrize("path", [None, "PATH"]) -@pytest.mark.parametrize("paths", [[], ["PATH0"]]) -@pytest.mark.parametrize("isdir", [True, False]) -def test_checker_path(patches, path, paths, isdir): - class DummyError(Exception): - pass - checker = Checker("path1", "path2", "path3") - patched = patches( - "pathlib", - ("Checker.args", dict(new_callable=PropertyMock)), - ("Checker.parser", dict(new_callable=PropertyMock)), - prefix="tools.base.checker") - - with patched as (m_plib, m_args, m_parser): - m_parser.return_value.error = DummyError - m_args.return_value.path = path - m_args.return_value.paths = paths - m_plib.Path.return_value.is_dir.return_value = isdir - if not path and not paths: - with pytest.raises(DummyError) as e: - checker.path - assert ( - e.value.args - == ('Missing path: `path` must be set either as an arg or with --path',)) - elif not isdir: - with pytest.raises(DummyError) as e: - checker.path - assert ( - e.value.args - == ('Incorrect path: `path` must be a directory, set either as first arg or with --path',)) - else: - assert checker.path == m_plib.Path.return_value - assert ( - list(m_plib.Path.call_args) - == [(path or paths[0],), {}]) - assert "path" in checker.__dict__ - if path or paths: - assert ( - 
list(m_plib.Path.return_value.is_dir.call_args) - == [(), {}]) - - -@pytest.mark.parametrize("paths", [[], ["path1", "path2"]]) -def test_checker_paths(patches, paths): - checker = Checker("path1", "path2", "path3") - patched = patches( - ("Checker.args", dict(new_callable=PropertyMock)), - ("Checker.path", dict(new_callable=PropertyMock)), - prefix="tools.base.checker") - - with patched as (m_args, m_path): - m_args.return_value.paths = paths - result = checker.paths - - if paths: - assert result == paths - else: - assert result == [m_path.return_value] - assert "paths" not in checker.__dict__ - - -@pytest.mark.parametrize("summary", [True, False]) -@pytest.mark.parametrize("error_count", [0, 1]) -@pytest.mark.parametrize("warning_count", [0, 1]) -@pytest.mark.parametrize("exiting", [True, False]) -def test_checker_show_summary(patches, summary, error_count, warning_count, exiting): - checker = Checker("path1", "path2", "path3") - patched = patches( - ("Checker.args", dict(new_callable=PropertyMock)), - ("Checker.exiting", dict(new_callable=PropertyMock)), - ("Checker.error_count", dict(new_callable=PropertyMock)), - ("Checker.warning_count", dict(new_callable=PropertyMock)), - prefix="tools.base.checker") - - with patched as (m_args, m_exit, m_errors, m_warnings): - m_args.return_value.summary = summary - m_errors.return_value = error_count - m_warnings.return_value = warning_count - m_exit.return_value = exiting - result = checker.show_summary - - if exiting: - assert result is False - elif summary or error_count or warning_count: - assert result is True - else: - assert result is False - assert "show_summary" not in checker.__dict__ - - -def test_checker_status(patches): - checker = Checker("path1", "path2", "path3") - patched = patches( - ("Checker.success_count", dict(new_callable=PropertyMock)), - ("Checker.error_count", dict(new_callable=PropertyMock)), - ("Checker.warning_count", dict(new_callable=PropertyMock)), - ("Checker.failed", 
dict(new_callable=PropertyMock)), - ("Checker.warned", dict(new_callable=PropertyMock)), - ("Checker.succeeded", dict(new_callable=PropertyMock)), - prefix="tools.base.checker") - - with patched as args: - (m_success_count, m_error_count, m_warning_count, - m_failed, m_warned, m_succeeded) = args - assert ( - checker.status - == dict( - success=m_success_count.return_value, - errors=m_error_count.return_value, - warnings=m_warning_count.return_value, - failed=m_failed.return_value, - warned=m_warned.return_value, - succeeded=m_succeeded.return_value)) - assert "status" not in checker.__dict__ - - -def test_checker_succeeded(): - checker = Checker("path1", "path2", "path3") - checker.success = dict( - foo=["check"] * 3, - bar=["check"] * 5, - baz=["check"] * 7) - assert ( - checker.succeeded - == dict(foo=3, bar=5, baz=7)) - assert "succeeded" not in checker.__dict__ - - -def test_checker_success_count(): - checker = Checker("path1", "path2", "path3") - checker.success = dict(foo=["err"] * 3, bar=["err"] * 5, baz=["err"] * 7) - assert checker.success_count == 15 - assert "success_count" not in checker.__dict__ - - -def test_checker_summary(): - checker = Checker("path1", "path2", "path3") - summary_mock = patch( - "tools.base.checker.Checker.summary_class", - new_callable=PropertyMock) - - with summary_mock as m_summary: - assert checker.summary == m_summary.return_value.return_value - - assert ( - list(m_summary.return_value.call_args) - == [(checker,), {}]) - assert "summary" in checker.__dict__ - - -def test_checker_warned(): - checker = Checker("path1", "path2", "path3") - checker.warnings = dict( - foo=["check"] * 3, - bar=["check"] * 5, - baz=["check"] * 7) - assert ( - checker.warned - == dict(foo=3, bar=5, baz=7)) - assert "warned" not in checker.__dict__ - - -def test_checker_warning_count(): - checker = Checker("path1", "path2", "path3") - checker.warnings = dict(foo=["warn"] * 3, bar=["warn"] * 5, baz=["warn"] * 7) - assert checker.warning_count == 15 - 
assert "warning_count" not in checker.__dict__ - - -def test_checker_add_arguments(patches): - checker = DummyCheckerWithChecks("path1", "path2", "path3") - parser = MagicMock() - patched = patches( - "runner.Runner.add_arguments", - prefix="tools.base.checker") - - with patched as (m_super, ): - assert checker.add_arguments(parser) is None - - assert ( - list(m_super.call_args) - == [(parser,), {}]) - - assert ( - list(list(c) for c in parser.add_argument.call_args_list) - == [[('--fix',), - {'action': 'store_true', - 'default': False, - 'help': 'Attempt to fix in place'}], - [('--diff',), - {'action': 'store_true', - 'default': False, - 'help': 'Display a diff in the console where available'}], - [('--warning', '-w'), - {'choices': ['warn', 'error'], - 'default': 'warn', - 'help': 'Handle warnings as warnings or errors'}], - [('--summary',), - {'action': 'store_true', - 'default': False, - 'help': 'Show a summary of check runs'}], - [('--summary-errors',), - {'type': int, - 'default': 5, - 'help': 'Number of errors to show in the summary, -1 shows all'}], - [('--summary-warnings',), - {'type': int, - 'default': 5, - 'help': 'Number of warnings to show in the summary, -1 shows all'}], - [('--check', '-c'), - {'choices': ("check1", "check2"), - 'nargs': '*', - 'help': 'Specify which checks to run, can be specified for multiple checks'}], - [('--config-check1',), - {'default': '', - 'help': 'Custom configuration for the check1 check'}], - [('--config-check2',), - {'default': '', - 'help': 'Custom configuration for the check2 check'}], - [('--path', '-p'), - {'default': None, - 'help': 'Path to the test root (usually Envoy source dir). If not specified the first path of paths is used'}], - [('paths',), - {'nargs': '*', - 'help': 'Paths to check. 
At least one path must be specified, or the `path` argument should be provided'}]]) - - -TEST_ERRORS: tuple = ( - {}, - dict(myerror=[]), - dict(myerror=["a", "b", "c"]), - dict(othererror=["other1", "other2", "other3"]), - dict(othererror=["other1", "other2", "other3"], myerror=["a", "b", "c"])) - - -@pytest.mark.parametrize("log", [True, False]) -@pytest.mark.parametrize("log_type", [None, "fatal"]) -@pytest.mark.parametrize("errors", TEST_ERRORS) -@pytest.mark.parametrize("newerrors", [[], ["err1", "err2", "err3"]]) -def test_checker_error(log, log_type, errors, newerrors): - checker = Checker("path1", "path2", "path3") - log_mock = patch( - "tools.base.checker.Checker.log", - new_callable=PropertyMock) - checker.errors = errors.copy() - result = 1 if newerrors else 0 - - with log_mock as m_log: - if log_type: - assert checker.error("mycheck", newerrors, log, log_type=log_type) == result - else: - assert checker.error("mycheck", newerrors, log) == result - - if not newerrors: - assert not m_log.called - assert "mycheck" not in checker.errors - return - - assert checker.errors["mycheck"] == errors.get("mycheck", []) + newerrors - for k, v in errors.items(): - if k != "mycheck": - assert checker.errors[k] == v - if log: - assert ( - list(list(c) for c in getattr(m_log.return_value, log_type or "error").call_args_list) - == [[(f'[mycheck] err{i}',), {}] for i in range(1, 4)]) - else: - assert not getattr(m_log.return_value, log_type or "error").called - - -def test_checker_exit(patches): - checker = Checker("path1", "path2", "path3") - patched = patches( - "Checker.error", - ("Checker.log", dict(new_callable=PropertyMock)), - ("Checker.stdout", dict(new_callable=PropertyMock)), - prefix="tools.base.checker") - - with patched as (m_error, m_log, m_stdout): - assert checker.exit() == m_error.return_value - - assert ( - list(m_log.return_value.handlers.__getitem__.call_args) - == [(0,), {}]) - assert ( - 
list(m_log.return_value.handlers.__getitem__.return_value.setLevel.call_args) - == [(logging.FATAL,), {}]) - assert ( - list(m_stdout.return_value.handlers.__getitem__.call_args) - == [(0,), {}]) - assert ( - list(m_stdout.return_value.handlers.__getitem__.return_value.setLevel.call_args) - == [(logging.FATAL,), {}]) - assert ( - list(m_error.call_args) - == [('exiting', ['Keyboard exit']), {'log_type': 'fatal'}]) - - -TEST_CHECKS: tuple = ( - None, - (), - ("check1", ), - ("check1", "check2", "check3"), - ("check3", "check4", "check5"), - ("check4", "check5")) - - -@pytest.mark.parametrize("checks", TEST_CHECKS) -def test_checker_get_checks(checks): - checker = Checker("path1", "path2", "path3") - checker.checks = ("check1", "check2", "check3") - args_mock = patch( - "tools.base.checker.Checker.args", - new_callable=PropertyMock) - - with args_mock as m_args: - m_args.return_value.check = checks - if checks: - assert ( - checker.get_checks() - == [check for check in checker.checks if check in checks or []]) - else: - assert checker.get_checks() == checker.checks - - -def test_checker_on_check_begin(patches): - checker = Checker("path1", "path2", "path3") - patched = patches( - ("Checker.log", dict(new_callable=PropertyMock)), - prefix="tools.base.checker") - - with patched as (m_log, ): - assert not checker.on_check_begin("checkname") - - assert checker.active_check == "checkname" - assert ( - list(m_log.return_value.notice.call_args) - == [('[checkname] Running check',), {}]) - - -@pytest.mark.parametrize("errors", [[], ["CHECK1", "CHECK2", "CHECK3"], ["CHECK2", "CHECK3"]]) -@pytest.mark.parametrize("warnings", [[], ["CHECK1", "CHECK2", "CHECK3"], ["CHECK2", "CHECK3"]]) -@pytest.mark.parametrize("exiting", [True, False]) -def test_checker_on_check_run(patches, errors, warnings, exiting): - checker = Checker("path1", "path2", "path3") - patched = patches( - ("Checker.exiting", dict(new_callable=PropertyMock)), - ("Checker.log", dict(new_callable=PropertyMock)), - 
prefix="tools.base.checker") - - check = "CHECK1" - checker.errors = errors - checker.warnings = warnings - checker._active_check = check - - with patched as (m_exit, m_log): - m_exit.return_value = exiting - assert not checker.on_check_run(check) - - assert checker.active_check == "" - - if exiting: - assert not m_log.called - return - - if check in errors: - assert ( - list(m_log.return_value.error.call_args) - == [('[CHECK1] Check failed',), {}]) - assert not m_log.return_value.warning.called - assert not m_log.return_value.success.called - return - - if check in warnings: - assert ( - list(m_log.return_value.warning.call_args) - == [('[CHECK1] Check has warnings',), {}]) - assert not m_log.return_value.error.called - assert not m_log.return_value.info.called - return - - assert ( - list(m_log.return_value.success.call_args) - == [(f'[{check}] Check completed successfully',), {}]) - assert not m_log.return_value.warning.called - assert not m_log.return_value.error.called - - -def test_checker_on_checks_begin(): - checker = Checker("path1", "path2", "path3") - assert checker.on_checks_begin() is None - - -@pytest.mark.parametrize("failed", [True, False]) -@pytest.mark.parametrize("show_summary", [True, False]) -def test_checker_on_checks_complete(patches, failed, show_summary): - checker = Checker("path1", "path2", "path3") - patched = patches( - ("Checker.has_failed", dict(new_callable=PropertyMock)), - ("Checker.show_summary", dict(new_callable=PropertyMock)), - ("Checker.summary", dict(new_callable=PropertyMock)), - prefix="tools.base.checker") - - with patched as (m_failed, m_show_summary, m_summary): - m_failed.return_value = failed - m_show_summary.return_value = show_summary - assert checker.on_checks_complete() is (1 if failed else 0) - - if show_summary: - assert ( - list(m_summary.return_value.print_summary.call_args) - == [(), {}]) - else: - assert not m_summary.return_value.print_summary.called - - -@pytest.mark.parametrize("raises", [None, 
KeyboardInterrupt, Exception]) -def test_checker_run(patches, raises): - checker = DummyCheckerWithChecks("path1", "path2", "path3") - patched = patches( - "Checker.exit", - "Checker.get_checks", - "Checker.on_check_begin", - "Checker.on_check_run", - "Checker.on_checks_begin", - "Checker.on_checks_complete", - ("Checker.log", dict(new_callable=PropertyMock)), - ("Checker.name", dict(new_callable=PropertyMock)), - prefix="tools.base.checker") - - with patched as (m_exit, m_get, m_check, m_run, m_begin, m_complete, m_log, m_name): - m_get.return_value = ("check1", "check2") - - if raises: - m_begin.side_effect = raises() - - if raises == KeyboardInterrupt: - result = checker.run() - - else: - with pytest.raises(raises): - checker.run() - else: - assert checker.run() == m_complete.return_value - - assert ( - list(m_begin.call_args) - == [(), {}]) - assert ( - list(m_complete.call_args) - == [(), {}]) - - if raises == KeyboardInterrupt: - assert ( - list(m_exit.call_args) - == [(), {}]) - return - - assert not m_exit.called - - if raises: - return - - assert ( - list(m_get.call_args) - == [(), {}]) - assert ( - list(list(c) for c in m_check.call_args_list) - == [[(f'check{i}',), {}] for i in range(1, 3)]) - assert ( - list(list(c) for c in m_run.call_args_list) - == [[(f'check{i}',), {}] for i in range(1, 3)]) - assert ( - list(checker.check1.call_args) - == [(), {}]) - assert ( - list(checker.check2.call_args) - == [(), {}]) - - -TEST_WARNS: tuple = ( - {}, - dict(mywarn=[]), - dict(mywarn=["a", "b", "c"]), - dict(otherwarn=["other1", "other2", "other3"]), - dict(otherwarn=["other1", "other2", "other3"], mywarn=["a", "b", "c"])) - - -@pytest.mark.parametrize("log", [True, False]) -@pytest.mark.parametrize("warns", TEST_WARNS) -def test_checker_warn(patches, log, warns): - checker = Checker("path1", "path2", "path3") - log_mock = patch( - "tools.base.checker.Checker.log", - new_callable=PropertyMock) - checker.warnings = warns.copy() - - with log_mock as m_log: - 
checker.warn("mycheck", ["warn1", "warn2", "warn3"], log) - - assert checker.warnings["mycheck"] == warns.get("mycheck", []) + ["warn1", "warn2", "warn3"] - for k, v in warns.items(): - if k != "mycheck": - assert checker.warnings[k] == v - if log: - assert ( - list(list(c) for c in m_log.return_value.warning.call_args_list) - == [[(f'[mycheck] warn{i}',), {}] for i in range(1, 4)]) - else: - assert not m_log.return_value.warn.called - - -TEST_SUCCESS: tuple = ( - {}, - dict(mysuccess=[]), - dict(mysuccess=["a", "b", "c"]), - dict(othersuccess=["other1", "other2", "other3"]), - dict(othersuccess=["other1", "other2", "other3"], mysuccess=["a", "b", "c"])) - - -@pytest.mark.parametrize("log", [True, False]) -@pytest.mark.parametrize("success", TEST_SUCCESS) -def test_checker_succeed(patches, log, success): - checker = Checker("path1", "path2", "path3") - log_mock = patch( - "tools.base.checker.Checker.log", - new_callable=PropertyMock) - checker.success = success.copy() - - with log_mock as m_log: - checker.succeed("mycheck", ["success1", "success2", "success3"], log) - - assert checker.success["mycheck"] == success.get("mycheck", []) + ["success1", "success2", "success3"] - for k, v in success.items(): - if k != "mycheck": - assert checker.success[k] == v - if log: - assert ( - list(list(c) for c in m_log.return_value.success.call_args_list) - == [[(f'[mycheck] success{i}',), {}] for i in range(1, 4)]) - else: - assert not m_log.return_value.success.called - - -# CheckerSummary tests - -def test_checker_summary_constructor(): - checker = DummyChecker() - summary = CheckerSummary(checker) - assert summary.checker == checker - - -@pytest.mark.parametrize("max_errors", [-1, 0, 1, 23]) -def test_checker_summary_max_errors(max_errors): - checker = DummyChecker() - summary = CheckerSummary(checker) - checker.args.summary_errors = max_errors - assert summary.max_errors == max_errors - - -@pytest.mark.parametrize("max_warnings", [-1, 0, 1, 23]) -def 
test_checker_summary_max_warnings(max_warnings): - checker = DummyChecker() - summary = CheckerSummary(checker) - checker.args.summary_warnings = max_warnings - assert summary.max_warnings == max_warnings - - -def test_checker_summary_print_summary(patches): - checker = DummyChecker() - summary = CheckerSummary(checker) - patched = patches( - "CheckerSummary.print_failed", - "CheckerSummary.print_status", - prefix="tools.base.checker") - - with patched as (m_failed, m_status): - summary.print_summary() - assert ( - list(list(c) for c in m_failed.call_args_list) - == [[('warnings',), {}], [('errors',), {}]]) - assert m_status.called - - -TEST_SECTIONS: tuple = ( - ("MSG1", ["a", "b", "c"]), - ("MSG2", []), - ("MSG3", None)) - - -@pytest.mark.parametrize("section", TEST_SECTIONS) -def test_checker_summary_section(section): - checker = DummyChecker() - summary = CheckerSummary(checker) - message, lines = section - expected = [ - "Summary", - "-" * 80, - f"{message}"] - if lines: - expected += lines - assert summary._section(message, lines) == expected - - -@pytest.mark.parametrize("errors", (True, False)) -@pytest.mark.parametrize("warnings", (True, False)) -def test_checker_summary_print_status(patches, errors, warnings): - checker = DummyChecker() - summary = CheckerSummary(checker) - summary.checker = MagicMock() - summary.checker.errors = errors - summary.checker.warnings = warnings - - assert not summary.print_status() - - if errors: - assert ( - list(summary.checker.log.error.call_args) - == [(f"{summary.checker.status}",), {}]) - assert not summary.checker.log.warning.called - assert not summary.checker.log.info.called - return - - if warnings: - assert ( - list(summary.checker.log.warning.call_args) - == [(f"{summary.checker.status}",), {}]) - assert not summary.checker.log.error.called - assert not summary.checker.log.info.called - return - - assert ( - list(summary.checker.log.info.call_args) - == [(f"{summary.checker.status}",), {}]) - assert not 
summary.checker.log.error.called - assert not summary.checker.log.warning.called - - -@pytest.mark.parametrize("problem_type", ("errors", "warnings")) -@pytest.mark.parametrize("max_display", (-1, 0, 1, 23)) -@pytest.mark.parametrize("problems", ({}, dict(foo=["problem1"]), dict(foo=["problem1", "problem2"], bar=["problem3", "problem4"]))) -def test_checker_summary_print_failed(patches, problem_type, max_display, problems): - checker = DummyChecker() - summary = CheckerSummary(checker) - patched = patches( - "CheckerSummary._section", - (f"CheckerSummary.max_{problem_type}", dict(new_callable=PropertyMock)), - prefix="tools.base.checker") - - with patched as (m_section, m_max): - summary.checker = MagicMock() - setattr(summary.checker, f"{problem_type}", problems) - m_max.return_value = max_display - m_section.return_value = ["A", "B", "C"] - summary.print_failed(problem_type) - - if not problems: - assert not summary.checker.log.error.called - assert not m_section.called - return - - output = ( - summary.checker.log.warning if problem_type == "warnings" else summary.checker.log.error) - - assert ( - list(output.call_args) - == [("".join(['A\nB\nC\n'] * len(problems)),), {}]) - - if max_display == 0: - expected = [ - [(f"{summary.checker.name} {prob}", []), {}] - for prob in problems] - else: - def _problems(prob): - return ( - problems[prob][:max_display] - if max_display > 0 - else problems[prob]) - def _extra(prob): - return ( - f": (showing first {max_display} of {len(problems)})" - if len(problems[prob]) > max_display and max_display >= 0 - else (":" - if max_display != 0 - else "")) - expected = [ - [(f"{summary.checker.name} {prob}{_extra(prob)}", _problems(prob)), {}] - for prob in problems] - assert ( - list(list(c) for c in m_section.call_args_list) - == expected) - - -# BazelChecker test - -def test_bazelchecker_constructor(): - checker = DummyBazelChecker() - assert isinstance(checker, BazelRunner) - assert isinstance(checker, Checker) - - -# 
AsyncChecker tests - -def test_asynchecker_constructor(): - checker = AsyncChecker() - assert isinstance(checker, BaseChecker) - - -@pytest.mark.parametrize("raises", [None, KeyboardInterrupt, Exception]) -def test_asynchecker_run(patches, raises): - checker = AsyncChecker() - - patched = patches( - "asyncio", - "BaseChecker.exit", - ("AsyncChecker._run", dict(new_callable=MagicMock)), - ("AsyncChecker.on_checks_complete", dict(new_callable=MagicMock)), - prefix="tools.base.checker") - - with patched as (m_async, m_exit, m_run, m_complete): - if raises: - m_run.side_effect = raises - - if raises == KeyboardInterrupt: - result = checker.run() - else: - with pytest.raises(raises): - checker.run() - return - else: - assert ( - checker.run() - == m_async.get_event_loop.return_value.run_until_complete.return_value) - - if raises == KeyboardInterrupt: - assert ( - list(m_exit.call_args) - == [(), {}]) - assert ( - list(m_async.get_event_loop.call_args_list[1]) - == [(), {}]) - assert ( - list(m_async.get_event_loop.return_value.run_until_complete.call_args) - == [(m_complete.return_value,), {}]) - assert ( - list(m_complete.call_args) - == [(), {}]) - assert result == m_async.get_event_loop.return_value.run_until_complete.return_value - return - - assert not m_exit.called - assert ( - list(m_async.get_event_loop.call_args) - == [(), {}]) - assert ( - list(m_async.get_event_loop.return_value.run_until_complete.call_args) - == [(m_run.return_value,), {}]) - assert ( - list(m_run.call_args) - == [(), {}]) - - -@pytest.mark.asyncio -async def test_asynchecker_on_check_begin(patches): - checker = AsyncChecker() - patched = patches( - "BaseChecker.on_check_begin", - prefix="tools.base.checker") - - with patched as (m_super, ): - assert not await checker.on_check_begin("CHECKNAME") - - assert ( - list(m_super.call_args) - == [('CHECKNAME',), {}]) - - -@pytest.mark.asyncio -async def test_asynchecker_on_check_run(patches): - checker = AsyncChecker() - patched = patches( - 
"BaseChecker.on_check_run", - prefix="tools.base.checker") - - with patched as (m_super, ): - assert not await checker.on_check_run("CHECKNAME") - - assert ( - list(m_super.call_args) - == [('CHECKNAME',), {}]) - - -@pytest.mark.asyncio -async def test_asynchecker_on_checks_begin(patches): - checker = AsyncChecker() - patched = patches( - "BaseChecker.on_checks_begin", - prefix="tools.base.checker") - - with patched as (m_super, ): - assert not await checker.on_checks_begin() - - assert ( - list(m_super.call_args) - == [(), {}]) - - -@pytest.mark.asyncio -async def test_asynchecker_on_checks_complete(patches): - checker = AsyncChecker() - - patched = patches( - "BaseChecker.on_checks_complete", - prefix="tools.base.checker") - - with patched as (m_complete, ): - assert ( - await checker.on_checks_complete() - == m_complete.return_value) - - assert ( - list(m_complete.call_args) - == [(), {}]) - - -@pytest.mark.asyncio -@pytest.mark.parametrize("raises", [True, False]) -@pytest.mark.parametrize("exiting", [True, False]) -async def test_asynchecker__run(patches, raises, exiting): - _check1 = MagicMock() - _check2 = MagicMock() - _check3 = MagicMock() - - class AsyncCheckerWithChecks(AsyncChecker): - - async def check_check1(self): - return _check1() - - async def check_check2(self): - return _check2() - - async def check_check3(self): - return _check3() - - class SomeError(Exception): - pass - - checker = AsyncCheckerWithChecks() - - patched = patches( - "BaseChecker.log", - "BaseChecker.get_checks", - "AsyncChecker.on_checks_begin", - "AsyncChecker.on_check_begin", - "AsyncChecker.on_check_run", - "AsyncChecker.on_checks_complete", - ("AsyncChecker.exiting", dict(new_callable=PropertyMock)), - prefix="tools.base.checker") - - with patched as (m_log, m_checks, m_begin, m_check, m_run, m_complete, m_exit): - m_checks.return_value = ["check1", "check2", "check3"] - m_exit.return_value = exiting - if raises: - m_begin.side_effect = SomeError("AN ERROR OCCURRED") - - 
with pytest.raises(SomeError): - await checker._run() - elif exiting: - assert await checker._run() == 1 - else: - assert await checker._run() == m_complete.return_value - - assert ( - list(m_begin.call_args) - == [(), {}]) - - if exiting: - return - - assert ( - list(m_complete.call_args) - == [(), {}]) - - if raises: - return - - assert ( - list(m_checks.call_args) - == [(), {}]) - assert ( - list(list(c) for c in m_check.call_args_list) - == [[(f'check{i}',), {}] for i in range(1, 4)]) - for check in [_check1, _check2, _check3]: - assert ( - list(check.call_args) - == [(), {}]) - assert ( - list(list(c) for c in m_run.call_args_list) - == [[('check1',), {}], [('check2',), {}], [('check3',), {}]]) diff --git a/tools/base/tests/test_runner.py b/tools/base/tests/test_runner.py deleted file mode 100644 index 4b88cda46c080..0000000000000 --- a/tools/base/tests/test_runner.py +++ /dev/null @@ -1,710 +0,0 @@ -import importlib -import logging -import sys -from unittest.mock import AsyncMock, MagicMock, patch, PropertyMock - -import pytest - -from tools.base import runner - - -# this is necessary to fix coverage as these libs are imported before pytest -# is invoked -importlib.reload(runner) - - -class DummyRunner(runner.BaseRunner): - - def __init__(self): - self.args = PropertyMock() - - -class DummyForkingRunner(runner.ForkingRunner): - - def __init__(self): - self.args = PropertyMock() - - -class OneError(Exception): - - def __str__(self): - return "" - - pass - - -class TwoError(Exception): - pass - - -def _failing_runner(errors): - - class DummyFailingRunner: - # this dummy runner calls the _runner mock - # when its run/run_async methods are called - # and optionally raises some type of error - # to ensure they are caught as expected - - log = PropertyMock() - _runner = MagicMock() - - def __init__(self, raises=None): - self.raises = raises - - @runner.catches(errors) - def run(self, *args, **kwargs): - result = self._runner(*args, **kwargs) - if self.raises: - 
raise self.raises("AN ERROR OCCURRED") - return result - - @runner.catches(errors) - async def run_async(self, *args, **kwargs): - result = self._runner(*args, **kwargs) - if self.raises: - raise self.raises("AN ERROR OCCURRED") - return result - - return DummyFailingRunner - - -@pytest.mark.asyncio -@pytest.mark.parametrize("async_fun", [True, False]) -@pytest.mark.parametrize( - "errors", - [OneError, (OneError, TwoError)]) -@pytest.mark.parametrize( - "raises", - [None, OneError, TwoError]) -@pytest.mark.parametrize( - "args", - [(), ("ARG1", "ARG2")]) -@pytest.mark.parametrize( - "kwargs", - [{}, dict(key1="VAL1", key2="VAL2")]) -async def test_catches(errors, async_fun, raises, args, kwargs): - run = _failing_runner(errors)(raises) - should_fail = ( - raises - and not ( - raises == errors - or (isinstance(errors, tuple) - and raises in errors))) - - assert run.run.__wrapped__.__catches__ == errors - assert run.run_async.__wrapped__.__catches__ == errors - - if should_fail: - result = 1 - with pytest.raises(raises): - run.run(*args, **kwargs) if not async_fun else await run.run_async(*args, **kwargs) - else: - result = run.run(*args, **kwargs) if not async_fun else await run.run_async(*args, **kwargs) - - assert ( - list(run._runner.call_args) - == [args, kwargs]) - - if not should_fail and raises: - assert result == 1 - error = run.log.error.call_args[0][0] - _error = raises("AN ERROR OCCURRED") - assert ( - error - == (str(_error) or repr(_error))) - assert ( - list(run.log.error.call_args) - == [(error,), {}]) - else: - assert not run.log.error.called - - if raises: - assert result == 1 - else: - assert result == run._runner.return_value - - -def _cleanup_runner(async_fun, raises): - - class DummyCleanupRunner: - # this dummy runner calls the _runner mock - # when its run/async_fun methods are called - # and optionally raises some type of error - # to ensure they are caught as expected - - log = PropertyMock() - _runner = MagicMock() - - @runner.cleansup - 
def run(self, *args, **kwargs): - result = self._runner(*args, **kwargs) - if raises: - raise Exception("AN ERROR OCCURRED") - return result - - @runner.cleansup - async def run_async(self, *args, **kwargs): - result = self._runner(*args, **kwargs) - if raises: - raise Exception("AN ERROR OCCURRED") - return result - - return DummyCleanupRunner() - - -@pytest.mark.asyncio -@pytest.mark.parametrize("async_fun", [True, False]) -@pytest.mark.parametrize("raises", [True, False]) -async def test_cleansup(async_fun, raises): - run = _cleanup_runner(async_fun, raises) - args = [f"ARG{i}" for i in range(0, 3)] - kwargs = {f"K{i}": f"V{i}" for i in range(0, 3)} - - assert run.run.__wrapped__.__cleansup__ is True - assert run.run_async.__wrapped__.__cleansup__ is True - - if async_fun: - run.cleanup = AsyncMock() - if raises: - with pytest.raises(Exception): - await run.run_async(*args, **kwargs) - else: - assert ( - await run.run_async(*args, **kwargs) - == run._runner.return_value) - else: - run.cleanup = MagicMock() - if raises: - with pytest.raises(Exception): - run.run(*args, **kwargs) - else: - assert ( - run.run(*args, **kwargs) - == run._runner.return_value) - - assert ( - list(run._runner.call_args) - == [tuple(args), kwargs]) - assert ( - list(run.cleanup.call_args) - == [(), {}]) - - -def test_base_runner_constructor(): - run = runner.BaseRunner("path1", "path2", "path3") - assert run._args == ("path1", "path2", "path3") - assert run.log_field_styles == runner.LOG_FIELD_STYLES - assert run.log_level_styles == runner.LOG_LEVEL_STYLES - assert run.log_fmt == runner.LOG_FMT - - -def test_base_runner_args(): - run = runner.BaseRunner("path1", "path2", "path3") - parser_mock = patch( - "tools.base.runner.BaseRunner.parser", - new_callable=PropertyMock) - - with parser_mock as m_parser: - assert run.args == m_parser.return_value.parse_known_args.return_value.__getitem__.return_value - - assert ( - list(m_parser.return_value.parse_known_args.call_args) - == [(('path1', 
'path2', 'path3'),), {}]) - assert ( - list(m_parser.return_value.parse_known_args.return_value.__getitem__.call_args) - == [(0,), {}]) - assert "args" in run.__dict__ - - -def test_base_runner_extra_args(): - run = runner.BaseRunner("path1", "path2", "path3") - parser_mock = patch( - "tools.base.runner.BaseRunner.parser", - new_callable=PropertyMock) - - with parser_mock as m_parser: - assert run.extra_args == m_parser.return_value.parse_known_args.return_value.__getitem__.return_value - - assert ( - list(m_parser.return_value.parse_known_args.call_args) - == [(('path1', 'path2', 'path3'),), {}]) - assert ( - list(m_parser.return_value.parse_known_args.return_value.__getitem__.call_args) - == [(1,), {}]) - assert "extra_args" in run.__dict__ - - -def test_base_runner_log(patches): - run = runner.BaseRunner("path1", "path2", "path3") - patched = patches( - "logging.getLogger", - "LogFilter", - "coloredlogs", - "verboselogs", - ("BaseRunner.log_level", dict(new_callable=PropertyMock)), - ("BaseRunner.log_level_styles", dict(new_callable=PropertyMock)), - ("BaseRunner.log_field_styles", dict(new_callable=PropertyMock)), - ("BaseRunner.log_fmt", dict(new_callable=PropertyMock)), - ("BaseRunner.name", dict(new_callable=PropertyMock)), - prefix="tools.base.runner") - - with patched as patchy: - (m_logger, m_filter, m_color, m_verb, - m_level, m_lstyle, m_fstyle, m_fmt, m_name) = patchy - assert run.log == m_logger.return_value - - assert ( - list(m_verb.install.call_args) - == [(), {}]) - assert ( - list(m_logger.return_value.setLevel.call_args) - == [(m_level.return_value,), {}]) - assert ( - list(m_logger.return_value.setLevel.call_args) - == [(m_level.return_value,), {}]) - assert ( - list(m_color.install.call_args) - == [(), - {'fmt': m_fmt.return_value, - 'isatty': True, - 'field_styles': m_fstyle.return_value, - 'level': 'DEBUG', - 'level_styles': m_lstyle.return_value, - 'logger': m_logger.return_value}]) - assert "log" in run.__dict__ - - -def 
test_base_runner_log_level(patches): - run = runner.BaseRunner("path1", "path2", "path3") - patched = patches( - "dict", - ("BaseRunner.args", dict(new_callable=PropertyMock)), - prefix="tools.base.runner") - with patched as (m_dict, m_args): - assert run.log_level == m_dict.return_value.__getitem__.return_value - - assert ( - list(m_dict.call_args) - == [(runner.LOG_LEVELS, ), {}]) - assert ( - list(m_dict.return_value.__getitem__.call_args) - == [(m_args.return_value.log_level,), {}]) - assert "log_level" in run.__dict__ - - -def test_base_runner_name(): - run = DummyRunner() - assert run.name == run.__class__.__name__ - assert "name" not in run.__dict__ - - -def test_base_runner_parser(patches): - run = runner.BaseRunner("path1", "path2", "path3") - patched = patches( - "argparse.ArgumentParser", - "BaseRunner.add_arguments", - prefix="tools.base.runner") - with patched as (m_parser, m_add_args): - assert run.parser == m_parser.return_value - - assert ( - list(m_parser.call_args) - == [(), {"allow_abbrev": False}]) - assert ( - list(m_add_args.call_args) - == [(m_parser.return_value,), {}]) - assert "parser" in run.__dict__ - - -def test_base_runner_path(patches): - run = runner.BaseRunner("path1", "path2", "path3") - patched = patches( - "pathlib", - prefix="tools.base.runner") - - with patched as (m_plib, ): - assert run.path == m_plib.Path.return_value - - assert ( - list(m_plib.Path.call_args) - == [(".", ), {}]) - - -def test_base_runner_stdout(patches): - run = runner.BaseRunner("path1", "path2", "path3") - - patched = patches( - "logging", - ("BaseRunner.log_level", dict(new_callable=PropertyMock)), - prefix="tools.base.runner") - - with patched as (m_log, m_level): - assert run.stdout == m_log.getLogger.return_value - - assert ( - list(m_log.getLogger.call_args) - == [('stdout',), {}]) - assert ( - list(m_log.getLogger.return_value.setLevel.call_args) - == [(m_level.return_value,), {}]) - assert ( - list(m_log.StreamHandler.call_args) - == 
[(sys.stdout,), {}]) - assert ( - list(m_log.Formatter.call_args) - == [('%(message)s',), {}]) - assert ( - list(m_log.StreamHandler.return_value.setFormatter.call_args) - == [(m_log.Formatter.return_value,), {}]) - assert ( - list(m_log.getLogger.return_value.addHandler.call_args) - == [(m_log.StreamHandler.return_value,), {}]) - - -@pytest.mark.parametrize("missing", [True, False]) -def test_base_runner_tempdir(patches, missing): - run = runner.BaseRunner() - patched = patches( - "tempfile", - ("BaseRunner.log", dict(new_callable=PropertyMock)), - ("BaseRunner._missing_cleanup", dict(new_callable=PropertyMock)), - prefix="tools.base.runner") - - with patched as (m_tmp, m_log, m_missing): - m_missing.return_value = missing - assert run.tempdir == m_tmp.TemporaryDirectory.return_value - - if missing: - assert ( - list(m_log.return_value.warning.call_args) - == [("Tempdir created but instance has a `run` method which is not decorated with `@runner.cleansup`", ), {}]) - else: - assert not m_log.called - - assert ( - list(m_tmp.TemporaryDirectory.call_args) - == [(), {}]) - assert "tempdir" in run.__dict__ - - -def test_base_runner_add_arguments(): - run = runner.BaseRunner("path1", "path2", "path3") - parser = MagicMock() - - assert run.add_arguments(parser) is None - - assert ( - list(list(c) for c in parser.add_argument.call_args_list) - == [[('--log-level', '-l'), - {'choices': ['debug', 'info', 'warn', 'error'], - 'default': 'info', 'help': 'Log level to display'}], - ]) - - -@pytest.mark.parametrize("has_fun", [True, False]) -@pytest.mark.parametrize("is_wrapped", [True, False]) -@pytest.mark.parametrize("cleansup", [True, False]) -def test_base_runner__missing_cleanup(has_fun, is_wrapped, cleansup): - - def _runner_factory(): - if not has_fun: - return runner.BaseRunner() - - class _Wrap: - if cleansup: - __cleansup__ = True - - class _Wrapper: - if is_wrapped: - __wrapped__ = _Wrap() - - class DummyRunner(runner.BaseRunner): - run = _Wrapper() - - return 
DummyRunner() - - run = _runner_factory() - - assert ( - run._missing_cleanup - == (has_fun - and not (is_wrapped and cleansup))) - assert "_missing_cleanup" not in run.__dict__ - - -@pytest.mark.parametrize("cached", [True, False]) -def test_base_runner__cleanup_tempdir(patches, cached): - run = runner.BaseRunner() - patched = patches( - ("BaseRunner.tempdir", dict(new_callable=PropertyMock)), - prefix="tools.base.runner") - if cached: - run.__dict__["tempdir"] = "TEMPDIR" - - with patched as (m_temp, ): - assert not run._cleanup_tempdir() - - if cached: - assert ( - list(m_temp.return_value.cleanup.call_args) - == [(), {}]) - else: - assert not m_temp.called - assert "tempdir" not in run.__dict__ - - -# LogFilter tests -@pytest.mark.parametrize("level", [logging.DEBUG, logging.INFO, logging.WARN, logging.ERROR, None, "giraffe"]) -def test_base_runner_log_filter(level): - logfilter = runner.LogFilter() - - class DummyRecord: - levelno = level - - if level in [logging.DEBUG, logging.INFO]: - assert logfilter.filter(DummyRecord()) - else: - assert not logfilter.filter(DummyRecord()) - - -def test_runner_constructor(patches): - patched = patches( - "BaseRunner.__init__", - prefix="tools.base.runner") - args = [f"ARG{i}" for i in range(0, 3)] - kwargs = {f"K{i}": f"V{i}" for i in range(0, 3)} - - with patched as (m_super, ): - m_super.return_value = None - run = runner.Runner(*args, **kwargs) - - assert isinstance(run, runner.BaseRunner) - assert ( - list(m_super.call_args) - == [tuple(args), kwargs]) - - -def test_runner_cleanup(patches): - run = runner.Runner() - patched = patches( - "Runner._cleanup_tempdir", - prefix="tools.base.runner") - - with patched as (m_temp, ): - assert not run.cleanup() - - assert ( - list(m_temp.call_args) - == [(), {}]) - - -def test_async_runner_constructor(patches): - patched = patches( - "BaseRunner.__init__", - prefix="tools.base.runner") - args = [f"ARG{i}" for i in range(0, 3)] - kwargs = {f"K{i}": f"V{i}" for i in range(0, 3)} - 
- with patched as (m_super, ): - m_super.return_value = None - run = runner.AsyncRunner(*args, **kwargs) - - assert isinstance(run, runner.BaseRunner) - assert ( - list(m_super.call_args) - == [tuple(args), kwargs]) - - -@pytest.mark.asyncio -async def test_async_runner_cleanup(patches): - run = runner.AsyncRunner() - patched = patches( - "AsyncRunner._cleanup_tempdir", - prefix="tools.base.runner") - - with patched as (m_temp, ): - assert not await run.cleanup() - - assert ( - list(m_temp.call_args) - == [(), {}]) - - -# BazelAdapter tests - -def test_bazeladapter_constructor(): - run = DummyRunner() - adapter = runner.BazelAdapter(run) - assert adapter.context == run - - -@pytest.mark.parametrize("query_returns", [0, 1]) -def test_bazeladapter_query(query_returns): - run = DummyForkingRunner() - adapter = runner.BazelAdapter(run) - fork_mock = patch("tools.base.runner.ForkingAdapter.subproc_run") - - with fork_mock as m_fork: - m_fork.return_value.returncode = query_returns - if query_returns: - with pytest.raises(runner.BazelRunError) as result: - adapter.query("BAZEL QUERY") - else: - result = adapter.query("BAZEL QUERY") - - assert ( - list(m_fork.call_args) - == [(['bazel', 'query', "'BAZEL QUERY'"],), {}]) - - if query_returns: - assert result.errisinstance(runner.BazelRunError) - assert ( - result.value.args - == (f"Bazel query failed: {m_fork.return_value}",)) - assert not m_fork.return_value.stdout.decode.called - else: - assert ( - result - == m_fork.return_value.stdout.decode.return_value.split.return_value) - assert ( - list(m_fork.return_value.stdout.decode.call_args) - == [('utf-8',), {}]) - assert ( - list(m_fork.return_value.stdout.decode.return_value.split.call_args) - == [('\n',), {}]) - - -@pytest.mark.parametrize("cwd", [None, "", "SOMEPATH"]) -@pytest.mark.parametrize("raises", [None, True, False]) -@pytest.mark.parametrize("capture_output", [None, True, False]) -@pytest.mark.parametrize("run_returns", [0, 1]) -@pytest.mark.parametrize("args", 
[(), ("foo",), ("foo", "bar")]) -def test_bazeladapter_run(patches, run_returns, cwd, raises, args, capture_output): - run = DummyForkingRunner() - adapter = runner.BazelAdapter(run) - patched = patches( - "ForkingAdapter.subproc_run", - ("ForkingRunner.path", dict(new_callable=PropertyMock)), - prefix="tools.base.runner") - - adapter_args = ("BAZEL RUN",) + args - kwargs = {} - if raises is not None: - kwargs["raises"] = raises - if cwd is not None: - kwargs["cwd"] = cwd - if capture_output is not None: - kwargs["capture_output"] = capture_output - - with patched as (m_fork, m_path): - m_fork.return_value.returncode = run_returns - if run_returns and (raises is not False): - with pytest.raises(runner.BazelRunError) as result: - adapter.run(*adapter_args, **kwargs) - else: - result = adapter.run(*adapter_args, **kwargs) - - call_args = (("--",) + args) if args else args - bazel_args = ("bazel", "run", "BAZEL RUN") + call_args - bazel_kwargs = {} - bazel_kwargs["capture_output"] = ( - True - if capture_output is True - else False) - bazel_kwargs["cwd"] = ( - cwd - if cwd - else m_path.return_value) - assert ( - list(m_fork.call_args) - == [(bazel_args,), bazel_kwargs]) - if run_returns and (raises is not False): - assert result.errisinstance(runner.BazelRunError) - assert ( - result.value.args - == (f"Bazel run failed: {m_fork.return_value}",)) - else: - assert result == m_fork.return_value - - -# ForkingAdapter tests - -def test_forkingadapter_constructor(): - run = DummyRunner() - adapter = runner.ForkingAdapter(run) - assert adapter.context == run - - -def test_forkingadapter_call(): - run = DummyRunner() - adapter = runner.ForkingAdapter(run) - fork_mock = patch("tools.base.runner.ForkingAdapter.subproc_run") - - with fork_mock as m_fork: - assert ( - adapter( - "arg1", "arg2", "arg3", - kwa1="foo", - kwa2="bar", - kwa3="baz") - == m_fork.return_value) - assert ( - list(m_fork.call_args) - == [('arg1', 'arg2', 'arg3'), - {'kwa1': 'foo', 'kwa2': 'bar', 'kwa3': 
'baz'}]) - - -@pytest.mark.parametrize("args", [(), ("a", "b")]) -@pytest.mark.parametrize("cwd", [None, "NONE", "PATH"]) -@pytest.mark.parametrize("capture_output", ["NONE", True, False]) -def test_forkingadapter_subproc_run(patches, args, cwd, capture_output): - adapter = runner.ForkingAdapter(DummyRunner()) - patched = patches( - "subprocess.run", - ("BaseRunner.path", dict(new_callable=PropertyMock)), - prefix="tools.base.runner") - - with patched as (m_run, m_path): - kwargs = {} - if cwd != "NONE": - kwargs["cwd"] = cwd - if capture_output != "NONE": - kwargs["capture_output"] = capture_output - assert adapter.subproc_run(*args, **kwargs) == m_run.return_value - - expected = {'capture_output': True, 'cwd': cwd} - if capture_output is False: - expected["capture_output"] = False - if cwd == "NONE": - expected["cwd"] = m_path.return_value - assert ( - list(m_run.call_args) - == [args, expected]) - - -# ForkingRunner tests - -def test_forkingrunner_fork(): - run = runner.ForkingRunner("path1", "path2", "path3") - forking_mock = patch("tools.base.runner.ForkingAdapter") - - with forking_mock as m_fork: - assert run.subproc_run == m_fork.return_value - assert ( - list(m_fork.call_args) - == [(run,), {}]) - assert "subproc_run" in run.__dict__ - - -# BazelRunner tests - -def test_bazelrunner_bazel(): - run = runner.BazelRunner("path1", "path2", "path3") - bazel_mock = patch("tools.base.runner.BazelAdapter") - - with bazel_mock as m_bazel: - assert run.bazel == m_bazel.return_value - assert ( - list(m_bazel.call_args) - == [(run,), {}]) - assert "bazel" in run.__dict__ diff --git a/tools/base/tests/test_utils.py b/tools/base/tests/test_utils.py deleted file mode 100644 index 5ea95efb4ac3a..0000000000000 --- a/tools/base/tests/test_utils.py +++ /dev/null @@ -1,228 +0,0 @@ -import importlib -import sys -from contextlib import contextmanager -from unittest.mock import MagicMock - -import pytest - -from tools.base import utils - - -# this is necessary to fix coverage as 
these libs are imported before pytest -# is invoked -importlib.reload(utils) - - -def test_util_buffered_stdout(): - stdout = [] - - with utils.buffered(stdout=stdout): - print("test1") - print("test2") - sys.stdout.write("test3\n") - sys.stderr.write("error0\n") - - assert stdout == ["test1", "test2", "test3"] - - -def test_util_buffered_stderr(): - stderr = [] - - with utils.buffered(stderr=stderr): - print("test1") - print("test2") - sys.stdout.write("test3\n") - sys.stderr.write("error0\n") - sys.stderr.write("error1\n") - - assert stderr == ["error0", "error1"] - - -def test_util_buffered_stdout_stderr(): - stdout = [] - stderr = [] - - with utils.buffered(stdout=stdout, stderr=stderr): - print("test1") - print("test2") - sys.stdout.write("test3\n") - sys.stderr.write("error0\n") - sys.stderr.write("error1\n") - - assert stdout == ["test1", "test2", "test3"] - assert stderr == ["error0", "error1"] - - -def test_util_buffered_no_stdout_stderr(): - with pytest.raises(utils.BufferUtilError): - with utils.buffered(): - pass - - -def test_util_nested(): - - fun1_args = [] - fun2_args = [] - - @contextmanager - def fun1(arg): - fun1_args.append(arg) - yield "FUN1" - - @contextmanager - def fun2(arg): - fun2_args.append(arg) - yield "FUN2" - - with utils.nested(fun1("A"), fun2("B")) as (fun1_yield, fun2_yield): - assert fun1_yield == "FUN1" - assert fun2_yield == "FUN2" - - assert fun1_args == ["A"] - assert fun2_args == ["B"] - - -def test_util_coverage_with_data_file(patches): - patched = patches( - "ConfigParser", - "tempfile.TemporaryDirectory", - "os.path.join", - "open", - prefix="tools.base.utils") - - with patched as (m_config, m_tmp, m_join, m_open): - with utils.coverage_with_data_file("PATH") as tmprc: - assert tmprc == m_join.return_value - assert ( - list(m_config.call_args) - == [(), {}]) - assert ( - list(m_config.return_value.read.call_args) - == [('.coveragerc',), {}]) - assert ( - list(m_config.return_value.__getitem__.call_args) - == [('run',), 
{}]) - assert ( - list(m_config.return_value.__getitem__.return_value.__setitem__.call_args) - == [('data_file', 'PATH'), {}]) - assert ( - list(m_tmp.call_args) - == [(), {}]) - assert ( - list(m_join.call_args) - == [(m_tmp.return_value.__enter__.return_value, '.coveragerc'), {}]) - assert ( - list(m_open.call_args) - == [(m_join.return_value, 'w'), {}]) - assert ( - list(m_config.return_value.write.call_args) - == [(m_open.return_value.__enter__.return_value,), {}]) - - - -@pytest.mark.parametrize( - "tarballs", - [(), tuple("TARB{i}" for i in range(0, 3))]) -def test_util_extract(patches, tarballs): - patched = patches( - "nested", - "pathlib", - "tarfile.open", - prefix="tools.base.utils") - - with patched as (m_nested, m_plib, m_open): - _extractions = [MagicMock(), MagicMock()] - m_nested.return_value.__enter__.return_value = _extractions - - if tarballs: - assert utils.extract("PATH", *tarballs) == m_plib.Path.return_value - else: - with pytest.raises(utils.ExtractError) as e: - utils.extract("PATH", *tarballs) - - if not tarballs: - assert ( - e.value.args[0] - == 'No tarballs specified for extraction to PATH') - assert not m_nested.called - assert not m_open.called - for _extract in _extractions: - assert not _extract.extractall.called - return - - assert ( - list(m_plib.Path.call_args) - == [("PATH", ), {}]) - - for _extract in _extractions: - assert ( - list(_extract.extractall.call_args) - == [(), dict(path="PATH")]) - - assert ( - list(m_open.call_args_list) - == [[(tarb, ), {}] for tarb in tarballs]) - assert ( - list(m_nested.call_args) - == [tuple(m_open.return_value for x in tarballs), {}]) - - -@pytest.mark.parametrize( - "tarballs", - [(), tuple("TARB{i}" for i in range(0, 3))]) -def test_util_untar(patches, tarballs): - patched = patches( - "tempfile.TemporaryDirectory", - "extract", - prefix="tools.base.utils") - - with patched as (m_tmp, m_extract): - with utils.untar(*tarballs) as tmpdir: - assert tmpdir == m_extract.return_value - - assert 
( - list(m_tmp.call_args) - == [(), {}]) - assert ( - list(m_extract.call_args) - == [(m_tmp.return_value.__enter__.return_value, ) + tarballs, {}]) - - -def test_util_from_yaml(patches): - patched = patches( - "pathlib", - "yaml", - prefix="tools.base.utils") - - with patched as (m_plib, m_yaml): - assert utils.from_yaml("PATH") == m_yaml.safe_load.return_value - - assert ( - list(m_plib.Path.call_args) - == [("PATH", ), {}]) - assert ( - list(m_yaml.safe_load.call_args) - == [(m_plib.Path.return_value.read_text.return_value, ), {}]) - assert ( - list(m_plib.Path.return_value.read_text.call_args) - == [(), {}]) - - -def test_util_to_yaml(patches): - patched = patches( - "pathlib", - "yaml", - prefix="tools.base.utils") - - with patched as (m_plib, m_yaml): - assert utils.to_yaml("DATA", "PATH") == m_plib.Path.return_value - - assert ( - list(m_yaml.dump.call_args) - == [("DATA", ), {}]) - assert ( - list(m_plib.Path.return_value.write_text.call_args) - == [(m_yaml.dump.return_value, ), {}]) - assert ( - list(m_plib.Path.call_args) - == [("PATH", ), {}]) diff --git a/tools/base/utils.py b/tools/base/utils.py deleted file mode 100644 index ca92cdc4e78be..0000000000000 --- a/tools/base/utils.py +++ /dev/null @@ -1,142 +0,0 @@ -# -# Provides shared utils used by other python modules -# - -import io -import os -import pathlib -import tarfile -import tempfile -from configparser import ConfigParser -from contextlib import ExitStack, contextmanager, redirect_stderr, redirect_stdout -from pathlib import Path -from typing import Callable, ContextManager, Iterator, List, Optional, Union - -import yaml - - -class ExtractError(Exception): - pass - - -# this is testing specific - consider moving to tools.testing.utils -@contextmanager -def coverage_with_data_file(data_file: str) -> Iterator[str]: - """This context manager takes the path of a data file - and creates a custom coveragerc with the data file path included. - - The context is yielded the path to the custom rc file. 
- """ - parser = ConfigParser() - parser.read(".coveragerc") - parser["run"]["data_file"] = data_file - # use a temporary .coveragerc - with tempfile.TemporaryDirectory() as tmpdir: - tmprc = os.path.join(tmpdir, ".coveragerc") - with open(tmprc, "w") as f: - parser.write(f) - yield tmprc - - -class BufferUtilError(Exception): - pass - - -@contextmanager -def nested(*contexts): - with ExitStack() as stack: - yield [stack.enter_context(context) for context in contexts] - - -@contextmanager -def buffered( - stdout: list = None, - stderr: list = None, - mangle: Optional[Callable[[list], list]] = None) -> Iterator[None]: - """Captures stdout and stderr and feeds lines to supplied lists""" - - mangle = mangle or (lambda lines: lines) - - if stdout is None and stderr is None: - raise BufferUtilError("You must specify stdout and/or stderr") - - contexts: List[Union[redirect_stderr[io.StringIO], redirect_stdout[io.StringIO]]] = [] - - if stdout is not None: - _stdout = io.StringIO() - contexts.append(redirect_stdout(_stdout)) - if stderr is not None: - _stderr = io.StringIO() - contexts.append(redirect_stderr(_stderr)) - - with nested(*contexts): - yield - - if stdout is not None: - _stdout.seek(0) - stdout.extend(mangle(_stdout.read().strip().split("\n"))) - if stderr is not None: - _stderr.seek(0) - stderr.extend(mangle(_stderr.read().strip().split("\n"))) - - -def extract(path: Union[pathlib.Path, str], *tarballs: Union[pathlib.Path, str]) -> pathlib.Path: - if not tarballs: - raise ExtractError(f"No tarballs specified for extraction to {path}") - openers = nested(*tuple(tarfile.open(tarball) for tarball in tarballs)) - - with openers as tarfiles: - for tar in tarfiles: - tar.extractall(path=path) - return pathlib.Path(path) - - -@contextmanager -def untar(*tarballs: Union[pathlib.Path, str]) -> Iterator[pathlib.Path]: - """Untar a tarball into a temporary directory - - for example to list the contents of a tarball: - - ``` - import os - - from tooling.base.utils import 
untar - - - with untar("path/to.tar") as tmpdir: - print(os.listdir(tmpdir)) - - ``` - - the created temp directory will be cleaned up on - exiting the contextmanager - - """ - with tempfile.TemporaryDirectory() as tmpdir: - yield extract(tmpdir, *tarballs) - - -def from_yaml(path: Union[pathlib.Path, str]) -> Union[dict, list, str, int]: - """Returns the loaded python object from a yaml file given by `path`""" - return yaml.safe_load(pathlib.Path(path).read_text()) - - -def to_yaml(data: Union[dict, list, str, int], path: Union[pathlib.Path, str]) -> pathlib.Path: - """For given `data` dumps as yaml to provided `path`. - - Returns `path` - """ - path = pathlib.Path(path) - path.write_text(yaml.dump(data)) - return path - - -@contextmanager -def cd_and_return(path: Union[pathlib.Path, str]) -> ContextManager[None]: - """Changes working directory to given path and returns to previous working directory on exit""" - prev_cwd = Path.cwd() - try: - os.chdir(path) - yield - finally: - os.chdir(prev_cwd) diff --git a/tools/docs/BUILD b/tools/docs/BUILD index 6438eeac3af41..5a53db4b7e654 100644 --- a/tools/docs/BUILD +++ b/tools/docs/BUILD @@ -1,5 +1,5 @@ load("@rules_python//python:defs.bzl", "py_binary") -load("@base_pip3//:requirements.bzl", "entry_point") +load("@base_pip3//:requirements.bzl", "entry_point", "requirement") load("//bazel:envoy_build_system.bzl", "envoy_package") load("//tools/base:envoy_python.bzl", "envoy_py_binary") @@ -11,7 +11,7 @@ py_binary( name = "generate_extensions_security_rst", srcs = ["generate_extensions_security_rst.py"], deps = [ - "//tools/base:utils", + requirement("envoy.base.utils"), ], ) @@ -52,5 +52,5 @@ alias( envoy_py_binary( name = "tools.docs.rst_check", data = ["//docs:root/version_history/current.rst"], - deps = ["//tools/base:checker"], + deps = [requirement("envoy.base.checker")], ) diff --git a/tools/docs/generate_extensions_security_rst.py b/tools/docs/generate_extensions_security_rst.py index 180e2eb247f69..a1f6b7a49bfd1 
100644 --- a/tools/docs/generate_extensions_security_rst.py +++ b/tools/docs/generate_extensions_security_rst.py @@ -8,7 +8,7 @@ import sys import tarfile -from tools.base import utils +from envoy.base import utils def format_item(extension, metadata): diff --git a/tools/docs/rst_check.py b/tools/docs/rst_check.py index 763f2dbff932e..b064393806a20 100644 --- a/tools/docs/rst_check.py +++ b/tools/docs/rst_check.py @@ -4,7 +4,7 @@ from functools import cached_property from typing import Iterator, List, Pattern -from tools.base import checker +from envoy.base import checker INVALID_REFLINK = r".* ref:.*" REF_WITH_PUNCTUATION_REGEX = r".*\. <[^<]*>`\s*" diff --git a/tools/extensions/BUILD b/tools/extensions/BUILD index 52147a0e6b446..96d5c41d46a2c 100644 --- a/tools/extensions/BUILD +++ b/tools/extensions/BUILD @@ -1,6 +1,7 @@ load("//bazel:envoy_build_system.bzl", "envoy_package") load("//source/extensions:all_extensions.bzl", "envoy_all_extensions") load("//tools/base:envoy_python.bzl", "envoy_py_binary") +load("@base_pip3//:requirements.bzl", "requirement") licenses(["notice"]) # Apache 2 @@ -16,7 +17,7 @@ envoy_py_binary( "//test/extensions/filters/network/common/fuzz:uber_per_readfilter.cc", ] + envoy_all_extensions(), deps = [ - "//tools/base:checker", - "//tools/base:utils", + requirement("envoy.base.checker"), + requirement("envoy.base.utils"), ], ) diff --git a/tools/extensions/extensions_check.py b/tools/extensions/extensions_check.py index 981b05b514280..b97d47ad6542f 100644 --- a/tools/extensions/extensions_check.py +++ b/tools/extensions/extensions_check.py @@ -11,7 +11,7 @@ from importlib.machinery import ModuleSpec, SourceFileLoader from typing import Iterator -from tools.base import checker, utils +from envoy.base import checker, utils BUILD_CONFIG_PATH = "source/extensions/extensions_build_config.bzl" CONTRIB_BUILD_CONFIG_PATH = "contrib/contrib_build_config.bzl" diff --git a/tools/protodoc/BUILD b/tools/protodoc/BUILD index 
747a1a8c330ec..0de758aa01684 100644 --- a/tools/protodoc/BUILD +++ b/tools/protodoc/BUILD @@ -32,11 +32,11 @@ py_binary( deps = [ ":manifest_proto_py_proto", "//tools/api_proto_plugin", - "//tools/base:utils", "//tools/config_validation:validate_fragment", "@com_envoyproxy_protoc_gen_validate//validate:validate_py", "@com_github_cncf_udpa//udpa/annotations:pkg_py_proto", "@com_google_protobuf//:protobuf_python", + requirement("envoy.base.utils"), requirement("Jinja2"), ], ) diff --git a/tools/protodoc/protodoc.py b/tools/protodoc/protodoc.py index a8d45c7ccd1e5..b61ca3fcd7d4a 100755 --- a/tools/protodoc/protodoc.py +++ b/tools/protodoc/protodoc.py @@ -21,10 +21,11 @@ # just remove it from the sys.path. sys.path = [p for p in sys.path if not p.endswith('bazel_tools')] +from envoy.base import utils + from tools.api_proto_plugin import annotations from tools.api_proto_plugin import plugin from tools.api_proto_plugin import visitor -from tools.base import utils from tools.config_validation import validate_fragment from tools.protodoc import manifest_pb2 diff --git a/tools/testing/BUILD b/tools/testing/BUILD index 35b9cf843a286..2ddf8fe5b1435 100644 --- a/tools/testing/BUILD +++ b/tools/testing/BUILD @@ -25,8 +25,8 @@ envoy_py_binary( requirement("pytest-asyncio"), requirement("pytest-cov"), requirement("pytest-patches"), - "//tools/base:runner", - "//tools/base:utils", + requirement("envoy.base.runner"), + requirement("envoy.base.utils"), ], ) @@ -35,18 +35,18 @@ envoy_py_binary( data = [ ":plugin", "//:.coveragerc", - "//tools/base:runner", - "//tools/base:utils", ], deps = [ requirement("coverage"), + requirement("envoy.base.runner"), + requirement("envoy.base.utils"), ], ) envoy_py_binary( name = "tools.testing.all_pytests", deps = [ - "//tools/base:checker", - "//tools/base:utils", + requirement("envoy.base.checker"), + requirement("envoy.base.utils"), ], ) diff --git a/tools/testing/all_pytests.py b/tools/testing/all_pytests.py index 4225add80659a..ea4749763b48d 
100644 --- a/tools/testing/all_pytests.py +++ b/tools/testing/all_pytests.py @@ -12,7 +12,7 @@ from functools import cached_property from typing import Optional -from tools.base import checker, runner +from envoy.base import checker, runner class PytestChecker(checker.BazelChecker): diff --git a/tools/testing/python_coverage.py b/tools/testing/python_coverage.py index 6d1a6c9aa00e5..109be0183cc17 100755 --- a/tools/testing/python_coverage.py +++ b/tools/testing/python_coverage.py @@ -17,7 +17,7 @@ from coverage import cmdline # type:ignore -from tools.base import runner, utils +from envoy.base import runner, utils class CoverageRunner(runner.Runner): diff --git a/tools/testing/python_pytest.py b/tools/testing/python_pytest.py index 37cbe96eeb521..de9fd1ad41561 100755 --- a/tools/testing/python_pytest.py +++ b/tools/testing/python_pytest.py @@ -16,7 +16,7 @@ import pytest -from tools.base import runner, utils +from envoy.base import runner, utils class PytestRunner(runner.Runner): diff --git a/tools/testing/tests/test_all_pytests.py b/tools/testing/tests/test_all_pytests.py index 453d80247af2a..bdb36872782cc 100644 --- a/tools/testing/tests/test_all_pytests.py +++ b/tools/testing/tests/test_all_pytests.py @@ -3,7 +3,7 @@ import pytest -from tools.base.runner import BazelRunError +from envoy.base.runner import BazelRunError from tools.testing import all_pytests From 37ae32290b7703d34feb481aaeaa014c6cebc9f3 Mon Sep 17 00:00:00 2001 From: Dhi Aurrahman Date: Wed, 22 Sep 2021 09:08:42 +0700 Subject: [PATCH 086/121] ext_authz: Honor append field of OkHttpResponse.response_headers_to_add (#18002) This patch makes sure the filter honors the OkHttpResponse.response_headers_to_add.append field set by a gRPC-based ext_authz server. Risk Level: Low Testing: Added Docs Changes: Fix: HeaderValueOption.append field default value is true. 
Release Notes: N/A Platform-Specific Features: N/A Fixes #17494 Signed-off-by: Dhi Aurrahman Signed-off-by: gayang --- docs/root/version_history/current.rst | 1 + .../filters/common/ext_authz/ext_authz.h | 3 ++ .../common/ext_authz/ext_authz_grpc_impl.cc | 11 ++++- .../common/ext_authz/ext_authz_http_impl.cc | 8 +++- .../filters/http/ext_authz/ext_authz.cc | 17 +++++++- .../filters/http/ext_authz/ext_authz.h | 1 + .../ext_authz/ext_authz_http_impl_test.cc | 3 +- .../filters/common/ext_authz/test_common.cc | 9 +++- .../ext_authz/ext_authz_integration_test.cc | 42 +++++++++++++++---- .../filters/http/ext_authz/ext_authz_test.cc | 19 +++++++-- 10 files changed, 95 insertions(+), 19 deletions(-) diff --git a/docs/root/version_history/current.rst b/docs/root/version_history/current.rst index 681b5b39f2625..b8fa93fa3613b 100644 --- a/docs/root/version_history/current.rst +++ b/docs/root/version_history/current.rst @@ -79,6 +79,7 @@ Bug Fixes * compressor: fix a bug where if trailers were added and a subsequent filter paused the filter chain, the request could be stalled. This behavior can be reverted by setting ``envoy.reloadable_features.fix_added_trailers`` to false. * dynamic forward proxy: fixing a validation bug where san and sni checks were not applied setting :ref:`http_protocol_options ` via :ref:`typed_extension_protocol_options `. * ext_authz: fix the ext_authz filter to correctly merge multiple same headers using the ',' as separator in the check request to the external authorization service. +* ext_authz: fix the use of ``append`` field of :ref:`response_headers_to_add ` to set or append encoded response headers from a gRPC auth server. * ext_authz: fix the HTTP ext_authz filter to respond with ``403 Forbidden`` when a gRPC auth server sends a denied check response with an empty HTTP status code. * ext_authz: the network ext_authz filter now correctly sets dynamic metadata returned by the authorization service for non-OK responses. 
This behavior now matches the http ext_authz filter. * hcm: remove deprecation for :ref:`xff_num_trusted_hops ` and forbid mixing ip detection extensions with old related knobs. diff --git a/source/extensions/filters/common/ext_authz/ext_authz.h b/source/extensions/filters/common/ext_authz/ext_authz.h index 3d06846ed1123..96545dd83a959 100644 --- a/source/extensions/filters/common/ext_authz/ext_authz.h +++ b/source/extensions/filters/common/ext_authz/ext_authz.h @@ -78,6 +78,9 @@ struct Response { // (using "addCopy") to the response sent back to the downstream client on OK auth // responses. Http::HeaderVector response_headers_to_add; + // A set of HTTP headers returned by the authorization server, will be optionally set (using + // "setCopy") to the response sent back to the downstream client on OK auth responses. + Http::HeaderVector response_headers_to_set; // A set of HTTP headers consumed by the authorization server, will be removed // from the request to the upstream server. std::vector headers_to_remove; diff --git a/source/extensions/filters/common/ext_authz/ext_authz_grpc_impl.cc b/source/extensions/filters/common/ext_authz/ext_authz_grpc_impl.cc index a1155ac0528dd..dc730cdcb6dc8 100644 --- a/source/extensions/filters/common/ext_authz/ext_authz_grpc_impl.cc +++ b/source/extensions/filters/common/ext_authz/ext_authz_grpc_impl.cc @@ -57,10 +57,17 @@ void GrpcClientImpl::onSuccess(std::unique_ptrheaders_to_remove.push_back(Http::LowerCaseString(header)); } } + + // These two vectors hold header overrides of encoded response headers. 
if (response->ok_response().response_headers_to_add_size() > 0) { for (const auto& header : response->ok_response().response_headers_to_add()) { - authz_response->response_headers_to_add.emplace_back( - Http::LowerCaseString(header.header().key()), header.header().value()); + if (header.append().value()) { + authz_response->response_headers_to_add.emplace_back( + Http::LowerCaseString(header.header().key()), header.header().value()); + } else { + authz_response->response_headers_to_set.emplace_back( + Http::LowerCaseString(header.header().key()), header.header().value()); + } } } } diff --git a/source/extensions/filters/common/ext_authz/ext_authz_http_impl.cc b/source/extensions/filters/common/ext_authz/ext_authz_http_impl.cc index 4dba952fede7d..388936a823e43 100644 --- a/source/extensions/filters/common/ext_authz/ext_authz_http_impl.cc +++ b/source/extensions/filters/common/ext_authz/ext_authz_http_impl.cc @@ -36,6 +36,7 @@ const Response& errorResponse() { Http::HeaderVector{}, Http::HeaderVector{}, Http::HeaderVector{}, + Http::HeaderVector{}, {{}}, EMPTY_STRING, Http::Code::Forbidden, @@ -67,6 +68,8 @@ struct SuccessResponse { std::string(header.value().getStringView())); } if (response_matchers_->matches(header.key().getStringView())) { + // For HTTP implementation, the response headers from the auth server will, by default, be + // appended (using addCopy) to the encoded response headers. 
response_->response_headers_to_add.emplace_back( Http::LowerCaseString{std::string(header.key().getStringView())}, std::string(header.value().getStringView())); @@ -328,8 +331,8 @@ ResponsePtr RawHttpClientImpl::toResponse(Http::ResponseMessagePtr message) { message->headers(), config_->upstreamHeaderMatchers(), config_->upstreamHeaderToAppendMatchers(), config_->clientHeaderOnSuccessMatchers(), Response{CheckStatus::OK, Http::HeaderVector{}, Http::HeaderVector{}, Http::HeaderVector{}, - Http::HeaderVector{}, std::move(headers_to_remove), EMPTY_STRING, Http::Code::OK, - ProtobufWkt::Struct{}}}; + Http::HeaderVector{}, Http::HeaderVector{}, std::move(headers_to_remove), + EMPTY_STRING, Http::Code::OK, ProtobufWkt::Struct{}}}; return std::move(ok.response_); } @@ -342,6 +345,7 @@ ResponsePtr RawHttpClientImpl::toResponse(Http::ResponseMessagePtr message) { Http::HeaderVector{}, Http::HeaderVector{}, Http::HeaderVector{}, + Http::HeaderVector{}, {{}}, message->bodyAsString(), static_cast(status_code), diff --git a/source/extensions/filters/http/ext_authz/ext_authz.cc b/source/extensions/filters/http/ext_authz/ext_authz.cc index 29a6ca8be8437..90df75ee279dd 100644 --- a/source/extensions/filters/http/ext_authz/ext_authz.cc +++ b/source/extensions/filters/http/ext_authz/ext_authz.cc @@ -160,7 +160,8 @@ Http::FilterHeadersStatus Filter::encode100ContinueHeaders(Http::ResponseHeaderM Http::FilterHeadersStatus Filter::encodeHeaders(Http::ResponseHeaderMap& headers, bool) { ENVOY_STREAM_LOG(trace, - "ext_authz filter has {} response header(s) to add to the encoded response:", + "ext_authz filter has {} response header(s) to add and {} response header(s) to " + "set to the encoded response:", *encoder_callbacks_, response_headers_to_add_.size()); if (!response_headers_to_add_.empty()) { ENVOY_STREAM_LOG( @@ -171,6 +172,14 @@ Http::FilterHeadersStatus Filter::encodeHeaders(Http::ResponseHeaderMap& headers } } + if (!response_headers_to_set_.empty()) { + ENVOY_STREAM_LOG( + 
trace, "ext_authz filter set header(s) to the encoded response:", *encoder_callbacks_); + for (const auto& header : response_headers_to_set_) { + ENVOY_STREAM_LOG(trace, "'{}':'{}'", *encoder_callbacks_, header.first.get(), header.second); + headers.setCopy(header.first, header.second); + } + } return Http::FilterHeadersStatus::Continue; } @@ -271,6 +280,12 @@ void Filter::onComplete(Filters::Common::ExtAuthz::ResponsePtr&& response) { response_headers_to_add_ = std::move(response->response_headers_to_add); } + if (!response->response_headers_to_set.empty()) { + ENVOY_STREAM_LOG(trace, "ext_authz filter saving {} header(s) to set to the response:", + *decoder_callbacks_, response->response_headers_to_set.size()); + response_headers_to_set_ = std::move(response->response_headers_to_set); + } + if (cluster_) { config_->incCounter(cluster_->statsScope(), config_->ext_authz_ok_); } diff --git a/source/extensions/filters/http/ext_authz/ext_authz.h b/source/extensions/filters/http/ext_authz/ext_authz.h index 32563b627203b..cd75def13e928 100644 --- a/source/extensions/filters/http/ext_authz/ext_authz.h +++ b/source/extensions/filters/http/ext_authz/ext_authz.h @@ -299,6 +299,7 @@ class Filter : public Logger::Loggable, Http::StreamEncoderFilterCallbacks* encoder_callbacks_{}; Http::RequestHeaderMap* request_headers_; Http::HeaderVector response_headers_to_add_{}; + Http::HeaderVector response_headers_to_set_{}; State state_{State::NotStarted}; FilterReturn filter_return_{FilterReturn::ContinueDecoding}; Upstream::ClusterInfoConstSharedPtr cluster_; diff --git a/test/extensions/filters/common/ext_authz/ext_authz_http_impl_test.cc b/test/extensions/filters/common/ext_authz/ext_authz_http_impl_test.cc index 96d0be2f7b171..46bc2bfd7da28 100644 --- a/test/extensions/filters/common/ext_authz/ext_authz_http_impl_test.cc +++ b/test/extensions/filters/common/ext_authz/ext_authz_http_impl_test.cc @@ -303,7 +303,8 @@ TEST_F(ExtAuthzHttpClientTest, 
AuthorizationOkWithAddedAuthzHeaders) { {{":status", "200", false}, {"x-downstream-ok", "1", false}, {"x-upstream-ok", "1", false}}); const auto authz_response = TestCommon::makeAuthzResponse( CheckStatus::OK, Http::Code::OK, EMPTY_STRING, TestCommon::makeHeaderValueOption({}), - TestCommon::makeHeaderValueOption({{"x-downstream-ok", "1", false}})); + // By default, the value of envoy.config.core.v3.HeaderValueOption.append is true. + TestCommon::makeHeaderValueOption({{"x-downstream-ok", "1", true}})); auto check_response = TestCommon::makeMessageResponse(expected_headers); envoy::service::auth::v3::CheckRequest request; auto mutable_headers = diff --git a/test/extensions/filters/common/ext_authz/test_common.cc b/test/extensions/filters/common/ext_authz/test_common.cc index 67429f891e2ae..66be0140ea7c0 100644 --- a/test/extensions/filters/common/ext_authz/test_common.cc +++ b/test/extensions/filters/common/ext_authz/test_common.cc @@ -82,8 +82,13 @@ Response TestCommon::makeAuthzResponse(CheckStatus status, Http::Code status_cod } if (!downstream_headers.empty()) { for (auto& header : downstream_headers) { - authz_response.response_headers_to_add.emplace_back( - Http::LowerCaseString(header.header().key()), header.header().value()); + if (header.append().value()) { + authz_response.response_headers_to_add.emplace_back( + Http::LowerCaseString(header.header().key()), header.header().value()); + } else { + authz_response.response_headers_to_set.emplace_back( + Http::LowerCaseString(header.header().key()), header.header().value()); + } } } return authz_response; diff --git a/test/extensions/filters/http/ext_authz/ext_authz_integration_test.cc b/test/extensions/filters/http/ext_authz/ext_authz_integration_test.cc index 50600d0e7bccb..2f27c478f61bb 100644 --- a/test/extensions/filters/http/ext_authz/ext_authz_integration_test.cc +++ b/test/extensions/filters/http/ext_authz/ext_authz_integration_test.cc @@ -188,7 +188,11 @@ class ExtAuthzGrpcIntegrationTest : public 
Grpc::GrpcClientIntegrationParamTest, result = upstream_request_->waitForEndStream(*dispatcher_); RELEASE_ASSERT(result, result.message()); - upstream_request_->encodeHeaders(Http::TestResponseHeaderMapImpl{{":status", "200"}}, false); + upstream_request_->encodeHeaders( + Http::TestResponseHeaderMapImpl{{":status", "200"}, + {"replaceable", "set-by-upstream"}, + {"set-cookie", "cookie1=snickerdoodle"}}, + false); upstream_request_->encodeData(response_size_, true); for (const auto& header_to_add : headers_to_add) { @@ -256,7 +260,8 @@ class ExtAuthzGrpcIntegrationTest : public Grpc::GrpcClientIntegrationParamTest, const Headers& headers_to_remove, const Http::TestRequestHeaderMapImpl& new_headers_from_upstream, const Http::TestRequestHeaderMapImpl& headers_to_append_multiple, - const Headers& response_headers_to_add) { + const Headers& response_headers_to_append, + const Headers& response_headers_to_set = {}) { ext_authz_request_->startGrpcStream(); envoy::service::auth::v3::CheckResponse check_response; check_response.mutable_status()->set_code(Grpc::Status::WellKnownGrpcStatus::Ok); @@ -306,17 +311,29 @@ class ExtAuthzGrpcIntegrationTest : public Grpc::GrpcClientIntegrationParamTest, return Http::HeaderMap::Iterate::Continue; }); - for (const auto& response_header_to_add : response_headers_to_add) { + for (const auto& response_header_to_add : response_headers_to_append) { auto* entry = check_response.mutable_ok_response()->mutable_response_headers_to_add()->Add(); const auto key = std::string(response_header_to_add.first); const auto value = std::string(response_header_to_add.second); - entry->mutable_append()->set_value(false); + entry->mutable_append()->set_value(true); entry->mutable_header()->set_key(key); entry->mutable_header()->set_value(value); ENVOY_LOG_MISC(trace, "sendExtAuthzResponse: set response_header_to_add {}={}", key, value); } + for (const auto& response_header_to_set : response_headers_to_set) { + auto* entry = 
check_response.mutable_ok_response()->mutable_response_headers_to_add()->Add(); + const auto key = std::string(response_header_to_set.first); + const auto value = std::string(response_header_to_set.second); + + // Replaces the one sent by the upstream. + entry->mutable_append()->set_value(false); + entry->mutable_header()->set_key(key); + entry->mutable_header()->set_value(value); + ENVOY_LOG_MISC(trace, "sendExtAuthzResponse: set response_header_to_set {}={}", key, value); + } + ext_authz_request_->sendGrpcMessage(check_response); ext_authz_request_->finishGrpcStream(Grpc::Status::Ok); } @@ -674,18 +691,27 @@ TEST_P(ExtAuthzGrpcIntegrationTest, DownstreamHeadersOnSuccess) { waitForExtAuthzRequest(expectedCheckRequest(Http::CodecType::HTTP1)); // Send back an ext_authz response with response_headers_to_add set. - sendExtAuthzResponse(Headers{}, Headers{}, Headers{}, Http::TestRequestHeaderMapImpl{}, - Http::TestRequestHeaderMapImpl{}, - Headers{{"downstream2", "downstream-should-see-me"}}); + sendExtAuthzResponse( + Headers{}, Headers{}, Headers{}, Http::TestRequestHeaderMapImpl{}, + Http::TestRequestHeaderMapImpl{}, + Headers{{"downstream2", "downstream-should-see-me"}, {"set-cookie", "cookie2=gingerbread"}}, + Headers{{"replaceable", "by-ext-authz"}}); // Wait for the upstream response. waitForSuccessfulUpstreamResponse("200"); + EXPECT_EQ(Http::HeaderUtility::getAllOfHeaderAsString(response_->headers(), + Http::LowerCaseString("set-cookie")) + .result() + .value(), + "cookie1=snickerdoodle,cookie2=gingerbread"); + // Verify the response is HTTP 200 with the header from `response_headers_to_add` above. 
const std::string expected_body(response_size_, 'a'); verifyResponse(std::move(response_), "200", Http::TestResponseHeaderMapImpl{{":status", "200"}, - {"downstream2", "downstream-should-see-me"}}, + {"downstream2", "downstream-should-see-me"}, + {"replaceable", "by-ext-authz"}}, expected_body); cleanup(); } diff --git a/test/extensions/filters/http/ext_authz/ext_authz_test.cc b/test/extensions/filters/http/ext_authz/ext_authz_test.cc index 18283f359bc95..bd8affbc05893 100644 --- a/test/extensions/filters/http/ext_authz/ext_authz_test.cc +++ b/test/extensions/filters/http/ext_authz/ext_authz_test.cc @@ -1744,8 +1744,13 @@ TEST_P(HttpFilterTestParam, ImmediateOkResponseWithHttpAttributes) { response.headers_to_append = Http::HeaderVector{{request_header_key, "bar"}}; response.headers_to_set = Http::HeaderVector{{key_to_add, "foo"}, {key_to_override, "bar"}}; response.headers_to_remove = std::vector{key_to_remove}; + // This cookie will be appended to the encoded headers. response.response_headers_to_add = - Http::HeaderVector{{Http::LowerCaseString{"cookie"}, "flavor=gingerbread"}}; + Http::HeaderVector{{Http::LowerCaseString{"set-cookie"}, "cookie2=gingerbread"}}; + // This "should-be-overridden" header value from the auth server will override the + // "should-be-overridden" entry from the upstream server. 
+ response.response_headers_to_set = Http::HeaderVector{ + {Http::LowerCaseString{"should-be-overridden"}, "finally-set-by-auth-server"}}; auto response_ptr = std::make_unique(response); @@ -1766,14 +1771,22 @@ TEST_P(HttpFilterTestParam, ImmediateOkResponseWithHttpAttributes) { EXPECT_EQ(request_headers_.has(key_to_remove), false); Buffer::OwnedImpl response_data{}; - Http::TestResponseHeaderMapImpl response_headers{{":status", "200"}}; + Http::TestResponseHeaderMapImpl response_headers{ + {":status", "200"}, + {"set-cookie", "cookie1=snickerdoodle"}, + {"should-be-overridden", "originally-set-by-upstream"}}; Http::TestResponseTrailerMapImpl response_trailers{}; Http::MetadataMap response_metadata{}; EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->encodeHeaders(response_headers, false)); EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->encodeData(response_data, false)); EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->encodeTrailers(response_trailers)); EXPECT_EQ(Http::FilterMetadataStatus::Continue, filter_->encodeMetadata(response_metadata)); - EXPECT_EQ(response_headers.get_("cookie"), "flavor=gingerbread"); + EXPECT_EQ(Http::HeaderUtility::getAllOfHeaderAsString(response_headers, + Http::LowerCaseString("set-cookie")) + .result() + .value(), + "cookie1=snickerdoodle,cookie2=gingerbread"); + EXPECT_EQ(response_headers.get_("should-be-overridden"), "finally-set-by-auth-server"); } // Test that an synchronous denied response from the authorization service, on the call stack, From 6121a9a57a36fa8a27ca7039483343d3ab5ec8ff Mon Sep 17 00:00:00 2001 From: phlax Date: Wed, 22 Sep 2021 11:47:20 +0100 Subject: [PATCH 087/121] docs: Fix build rst/html script (#18204) Signed-off-by: Ryan Northey Signed-off-by: gayang --- ci/upload_gcs_artifact.sh | 2 +- docs/build.sh | 11 ++++++----- 2 files changed, 7 insertions(+), 6 deletions(-) diff --git a/ci/upload_gcs_artifact.sh b/ci/upload_gcs_artifact.sh index bb952610392f8..e72082a9cf0b5 100755 --- 
a/ci/upload_gcs_artifact.sh +++ b/ci/upload_gcs_artifact.sh @@ -24,7 +24,7 @@ if [[ "$BUILD_REASON" == "PullRequest" ]] || [[ "$TARGET_SUFFIX" == "docs" ]]; t # -> https://storage.googleapis.com/envoy-postsubmit/$UPLOAD_PATH/docs/envoy-docs-rst.tar.gz # - PR build (commit sha from the developers branch) # -> https://storage.googleapis.com/envoy-pr/$UPLOAD_PATH/$TARGET_SUFFIX - UPLOAD_PATH="$(git log --pretty=%P -n 1 | cut -d' ' -f2 | head -c7)" + UPLOAD_PATH="$(git rev-parse HEAD | head -c7)" else UPLOAD_PATH="${SYSTEM_PULLREQUEST_PULLREQUESTNUMBER:-${BUILD_SOURCEBRANCHNAME}}" fi diff --git a/docs/build.sh b/docs/build.sh index 9cffa9a9dd97b..dbcea9d91254f 100755 --- a/docs/build.sh +++ b/docs/build.sh @@ -19,17 +19,18 @@ fi MAIN_BRANCH="refs/heads/main" RELEASE_TAG_REGEX="^refs/tags/v.*" +# default is to build html only +BUILD_TYPE=html + if [[ "${AZP_BRANCH}" =~ ${RELEASE_TAG_REGEX} ]]; then DOCS_TAG="${AZP_BRANCH/refs\/tags\//}" export DOCS_TAG - # no need to build rst explicitly, just html - HTML_ONLY=true else BUILD_SHA=$(git rev-parse HEAD) export BUILD_SHA if [[ "${AZP_BRANCH}" == "${MAIN_BRANCH}" ]]; then # no need to build html, just rst - RST_ONLY=true + BUILD_TYPE=rst fi fi @@ -41,10 +42,10 @@ BAZEL_BUILD_OPTIONS+=( "--action_env=SPHINX_SKIP_CONFIG_VALIDATION") # Building html/rst is determined by then needs of CI but can be overridden in dev. 
-if [[ -z "${RST_ONLY}" ]] || [[ -n "${DOCS_BUILD_HTML}" ]]; then +if [[ "${BUILD_TYPE}" == "html" ]] || [[ -n "${DOCS_BUILD_HTML}" ]]; then BUILD_HTML=1 fi -if [[ -z "${HTML_ONLY}" ]] || [[ -n "${DOCS_BUILD_RST}" ]]; then +if [[ "${BUILD_TYPE}" == "rst" ]] || [[ -n "${DOCS_BUILD_RST}" ]]; then BUILD_RST=1 fi From cbc76a4e9b930ddea4f03b7ec277da03c4531283 Mon Sep 17 00:00:00 2001 From: "Adi (Suissa) Peleg" Date: Wed, 22 Sep 2021 13:05:15 -0400 Subject: [PATCH 088/121] ci: fixing api_compat post-job cache issue (#18216) Signed-off-by: Adi Suissa-Peleg Signed-off-by: gayang --- ci/do_ci.sh | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/ci/do_ci.sh b/ci/do_ci.sh index 02f89f49c744b..70a9f483d4abf 100755 --- a/ci/do_ci.sh +++ b/ci/do_ci.sh @@ -379,7 +379,8 @@ elif [[ "$CI_TARGET" == "bazel.api_compat" ]]; then BASE_BRANCH_REF=$("${ENVOY_SRCDIR}"/tools/git/last_github_commit.sh) COMMIT_TITLE=$(git log -n 1 --pretty='format:%C(auto)%h (%s, %ad)' "${BASE_BRANCH_REF}") echo -e "\tUsing base commit ${COMMIT_TITLE}" - bazel run //tools/api_proto_breaking_change_detector:detector_ci "${BASE_BRANCH_REF}" + # BAZEL_BUILD_OPTIONS needed for setting the repository_cache param. + bazel run "${BAZEL_BUILD_OPTIONS[@]}" //tools/api_proto_breaking_change_detector:detector_ci "${BASE_BRANCH_REF}" exit 0 elif [[ "$CI_TARGET" == "bazel.coverage" || "$CI_TARGET" == "bazel.fuzz_coverage" ]]; then setup_clang_toolchain From 4ea2a3ddf2a3e1382b082ff7d178a91ec7219c0f Mon Sep 17 00:00:00 2001 From: alyssawilk Date: Thu, 23 Sep 2021 02:09:50 +0800 Subject: [PATCH 089/121] network: remove an assert (#18201) #16122 added asserts to utilities to make sure they were called from the correct thread. Over on the Envoy mobile side when we do QUIC we're doing it from the main thread, and failing some of the assert checks. I think the right thing to do here is just remove the assert This is the only use of isWorkerThread() so I'm inclined to just remove it. 
cc @goaway Risk Level: n/a Testing: n/a Docs Changes: n/a Release Notes: n/a Signed-off-by: Alyssa Wilk Signed-off-by: gayang --- source/common/common/thread.cc | 10 ---------- source/common/common/thread.h | 1 - source/common/network/address_impl.cc | 1 - test/common/thread_local/thread_local_impl_test.cc | 2 -- 4 files changed, 14 deletions(-) diff --git a/source/common/common/thread.cc b/source/common/common/thread.cc index 282858399000b..e6ee78c8e5208 100644 --- a/source/common/common/thread.cc +++ b/source/common/common/thread.cc @@ -13,16 +13,6 @@ bool MainThread::isMainThread() { return main_thread_singleton->inMainThread() || main_thread_singleton->inTestThread(); } -bool MainThread::isWorkerThread() { - auto main_thread_singleton = MainThreadSingleton::getExisting(); - // Allow worker thread code to be executed in test thread. - if (main_thread_singleton == nullptr) { - return true; - } - // When threading is on, compare thread id with main thread id. - return !main_thread_singleton->inMainThread(); -} - void MainThread::clear() { delete MainThreadSingleton::getExisting(); MainThreadSingleton::clear(); diff --git a/source/common/common/thread.h b/source/common/common/thread.h index 347df89c9fab1..1ade49a3d1b93 100644 --- a/source/common/common/thread.h +++ b/source/common/common/thread.h @@ -194,7 +194,6 @@ struct MainThread { */ static void clear(); static bool isMainThread(); - static bool isWorkerThread(); private: std::thread::id main_thread_id_; diff --git a/source/common/network/address_impl.cc b/source/common/network/address_impl.cc index e612505d83369..5954724186bce 100644 --- a/source/common/network/address_impl.cc +++ b/source/common/network/address_impl.cc @@ -110,7 +110,6 @@ addressFromSockAddrOrDie(const sockaddr_storage& ss, socklen_t ss_len, os_fd_t f // address and the socket is actually v6 only, the returned address will be // regarded as a v6 address from dual stack socket. 
However, this address is not going to be // used to create socket. Wrong knowledge of dual stack support won't hurt. - ASSERT(Thread::MainThread::isWorkerThread()); StatusOr address = Address::addressFromSockAddr(ss, ss_len, v6only); if (!address.ok()) { diff --git a/test/common/thread_local/thread_local_impl_test.cc b/test/common/thread_local/thread_local_impl_test.cc index 6e2586db580ac..f6937240803d4 100644 --- a/test/common/thread_local/thread_local_impl_test.cc +++ b/test/common/thread_local/thread_local_impl_test.cc @@ -18,13 +18,11 @@ namespace ThreadLocal { TEST(MainThreadVerificationTest, All) { // Before threading is on, assertion on main thread should be true. EXPECT_TRUE(Thread::MainThread::isMainThread()); - EXPECT_TRUE(Thread::MainThread::isWorkerThread()); { InstanceImpl tls; // Tls instance has been initialized. // Call to main thread verification should succeed in main thread. EXPECT_TRUE(Thread::MainThread::isMainThread()); - EXPECT_FALSE(Thread::MainThread::isWorkerThread()); tls.shutdownGlobalThreading(); tls.shutdownThread(); } From 06c44b8e2be3ff5164f6090fcea1a7846ebe5015 Mon Sep 17 00:00:00 2001 From: Ryan Hamilton Date: Wed, 22 Sep 2021 11:21:24 -0700 Subject: [PATCH 090/121] upstream: Make the Alt-Svc cache configuration required if HTTP/3 is enabled with AutoHttpConfig. (#18153) upstream: Make the Alt-Svc cache configuration required if HTTP/3 is enabled with AutoHttpConfig. 
Risk Level: Low Testing: Unit tests Docs Changes: N/A Release Notes: N/A Platform Specific Features: N/A Signed-off-by: Ryan Hamilton Signed-off-by: gayang --- .../http/v3/http_protocol_options.proto | 4 +- source/common/http/conn_pool_grid.cc | 6 +- .../common/upstream/cluster_manager_impl.cc | 9 +-- source/extensions/upstreams/http/config.cc | 6 +- test/common/http/conn_pool_grid_test.cc | 59 ++++++++----------- test/common/upstream/upstream_impl_test.cc | 2 + test/extensions/upstreams/http/config_test.cc | 28 +++++++++ .../multiplexed_upstream_integration_test.cc | 20 ++++++- .../multiplexed_upstream_integration_test.h | 4 +- 9 files changed, 88 insertions(+), 50 deletions(-) diff --git a/api/envoy/extensions/upstreams/http/v3/http_protocol_options.proto b/api/envoy/extensions/upstreams/http/v3/http_protocol_options.proto index 217d343f47d0a..1267488d98c6a 100644 --- a/api/envoy/extensions/upstreams/http/v3/http_protocol_options.proto +++ b/api/envoy/extensions/upstreams/http/v3/http_protocol_options.proto @@ -122,7 +122,9 @@ message HttpProtocolOptions { // alternate protocols cache, which is responsible for parsing and caching // HTTP Alt-Svc headers. This enables the use of HTTP/3 for origins that // advertise supporting it. - // TODO(RyanTheOptimist): Make this field required when HTTP/3 is enabled. + // + // .. note:: + // This is required when HTTP/3 is enabled. config.core.v3.AlternateProtocolsCacheOptions alternate_protocols_cache_options = 4; } diff --git a/source/common/http/conn_pool_grid.cc b/source/common/http/conn_pool_grid.cc index bb70fad9978c9..8a7ff29aefb6d 100644 --- a/source/common/http/conn_pool_grid.cc +++ b/source/common/http/conn_pool_grid.cc @@ -204,6 +204,7 @@ ConnectivityGrid::ConnectivityGrid( // HTTP/3. // TODO(#15649) support v6/v4, WiFi/cellular. 
ASSERT(connectivity_options.protocols_.size() == 3); + ASSERT(alternate_protocols); } ConnectivityGrid::~ConnectivityGrid() { @@ -364,11 +365,6 @@ bool ConnectivityGrid::shouldAttemptHttp3() { ENVOY_LOG(trace, "HTTP/3 is broken to host '{}', skipping.", host_->hostname()); return false; } - if (!alternate_protocols_) { - ENVOY_LOG(trace, "No alternate protocols cache. Attempting HTTP/3 to host '{}'.", - host_->hostname()); - return true; - } if (host_->address()->type() != Network::Address::Type::Ip) { ENVOY_LOG(error, "Address is not an IP address"); ASSERT(false); diff --git a/source/common/upstream/cluster_manager_impl.cc b/source/common/upstream/cluster_manager_impl.cc index e41451f8658d9..f7a11ba8b27c1 100644 --- a/source/common/upstream/cluster_manager_impl.cc +++ b/source/common/upstream/cluster_manager_impl.cc @@ -1660,13 +1660,10 @@ Http::ConnectionPool::InstancePtr ProdClusterManagerFactory::allocateConnPool( context_.runtime().snapshot().featureEnabled("upstream.use_http3", 100)) { ASSERT(contains(protocols, {Http::Protocol::Http11, Http::Protocol::Http2, Http::Protocol::Http3})); - Http::AlternateProtocolsCacheSharedPtr alternate_protocols_cache; - if (alternate_protocol_options.has_value()) { - alternate_protocols_cache = - alternate_protocols_cache_manager_->getCache(alternate_protocol_options.value()); - } + ASSERT(alternate_protocol_options.has_value()); #ifdef ENVOY_ENABLE_QUIC - // TODO(RyanTheOptimist): Plumb an actual alternate protocols cache. 
+ Http::AlternateProtocolsCacheSharedPtr alternate_protocols_cache = + alternate_protocols_cache_manager_->getCache(alternate_protocol_options.value()); Envoy::Http::ConnectivityGrid::ConnectivityOptions coptions{protocols}; return std::make_unique( dispatcher, context_.api().randomGenerator(), host, priority, options, diff --git a/source/extensions/upstreams/http/config.cc b/source/extensions/upstreams/http/config.cc index 29cde124642b0..52e78f1e7c3c5 100644 --- a/source/extensions/upstreams/http/config.cc +++ b/source/extensions/upstreams/http/config.cc @@ -111,7 +111,11 @@ ProtocolOptionsConfigImpl::ProtocolOptionsConfigImpl( use_http2_ = true; use_alpn_ = true; use_http3_ = options.auto_config().has_http3_protocol_options(); - if (options.auto_config().has_alternate_protocols_cache_options()) { + if (use_http3_) { + if (!options.auto_config().has_alternate_protocols_cache_options()) { + throw EnvoyException(fmt::format("alternate protocols cache must be configured when HTTP/3 " + "is enabled with auto_config")); + } alternate_protocol_cache_options_ = options.auto_config().alternate_protocols_cache_options(); } } diff --git a/test/common/http/conn_pool_grid_test.cc b/test/common/http/conn_pool_grid_test.cc index 0e03bc9cda652..f38d056688b1a 100644 --- a/test/common/http/conn_pool_grid_test.cc +++ b/test/common/http/conn_pool_grid_test.cc @@ -98,11 +98,11 @@ class ConnectivityGridForTest : public ConnectivityGrid { }; namespace { -class ConnectivityGridTestBase : public Event::TestUsingSimulatedTime, public testing::Test { +class ConnectivityGridTest : public Event::TestUsingSimulatedTime, public testing::Test { public: - ConnectivityGridTestBase(bool use_alternate_protocols) + ConnectivityGridTest() : options_({Http::Protocol::Http11, Http::Protocol::Http2, Http::Protocol::Http3}), - alternate_protocols_(maybeCreateAlternateProtocolsCacheImpl(use_alternate_protocols)), + alternate_protocols_(std::make_shared(simTime())), quic_stat_names_(store_.symbolTable()), 
grid_(dispatcher_, random_, Upstream::makeTestHost(cluster_, "hostname", "tcp://127.0.0.1:9000", simTime()), @@ -114,15 +114,6 @@ class ConnectivityGridTestBase : public Event::TestUsingSimulatedTime, public te grid_.encoder_ = &encoder_; } - AlternateProtocolsCacheSharedPtr - maybeCreateAlternateProtocolsCacheImpl(bool use_alternate_protocols) { - AlternateProtocolsCacheSharedPtr cache; - if (!use_alternate_protocols) { - return nullptr; - } - return std::make_shared(simTime()); - } - void addHttp3AlternateProtocol() { AlternateProtocolsCacheImpl::Origin origin("https", "hostname", 9000); const std::vector protocols = { @@ -150,25 +141,12 @@ class ConnectivityGridTestBase : public Event::TestUsingSimulatedTime, public te NiceMock encoder_; }; -// Tests of the Grid in which no alternate protocols cache is configured. -class ConnectivityGridTest : public ConnectivityGridTestBase { -public: - ConnectivityGridTest() : ConnectivityGridTestBase(false) {} -}; - -// Tests of the Grid in which an alternate protocols cache is configured. -class ConnectivityGridWithAlternateProtocolsCacheImplTest : public ConnectivityGridTestBase { -public: - ConnectivityGridWithAlternateProtocolsCacheImplTest() : ConnectivityGridTestBase(true) {} -}; - // Test the first pool successfully connecting. TEST_F(ConnectivityGridTest, Success) { + addHttp3AlternateProtocol(); EXPECT_EQ(grid_.first(), nullptr); - EXPECT_LOG_CONTAINS("trace", - "No alternate protocols cache. Attempting HTTP/3 to host 'hostname'.", - EXPECT_NE(grid_.newStream(decoder_, callbacks_), nullptr)); + EXPECT_NE(grid_.newStream(decoder_, callbacks_), nullptr); EXPECT_NE(grid_.first(), nullptr); EXPECT_EQ(grid_.second(), nullptr); @@ -181,6 +159,7 @@ TEST_F(ConnectivityGridTest, Success) { // Test the first pool successfully connecting under the stack of newStream. 
TEST_F(ConnectivityGridTest, ImmediateSuccess) { + addHttp3AlternateProtocol(); grid_.immediate_success_ = true; EXPECT_CALL(callbacks_.pool_ready_, ready()); @@ -191,6 +170,7 @@ TEST_F(ConnectivityGridTest, ImmediateSuccess) { // Test the first pool failing and the second connecting. TEST_F(ConnectivityGridTest, FailureThenSuccessSerial) { + addHttp3AlternateProtocol(); EXPECT_EQ(grid_.first(), nullptr); EXPECT_LOG_CONTAINS("trace", "first pool attempting to create a new stream to host 'hostname'", @@ -220,6 +200,7 @@ TEST_F(ConnectivityGridTest, FailureThenSuccessSerial) { // Test both connections happening in parallel and the second connecting. TEST_F(ConnectivityGridTest, TimeoutThenSuccessParallelSecondConnects) { + addHttp3AlternateProtocol(); EXPECT_EQ(grid_.first(), nullptr); // This timer will be returned and armed as the grid creates the wrapper's failover timer. @@ -250,6 +231,7 @@ TEST_F(ConnectivityGridTest, TimeoutThenSuccessParallelSecondConnects) { // Test both connections happening in parallel and the first connecting. TEST_F(ConnectivityGridTest, TimeoutThenSuccessParallelFirstConnects) { + addHttp3AlternateProtocol(); EXPECT_EQ(grid_.first(), nullptr); // This timer will be returned and armed as the grid creates the wrapper's failover timer. @@ -279,6 +261,7 @@ TEST_F(ConnectivityGridTest, TimeoutThenSuccessParallelFirstConnects) { // Test both connections happening in parallel and the second connecting before // the first eventually fails. TEST_F(ConnectivityGridTest, TimeoutThenSuccessParallelSecondConnectsFirstFail) { + addHttp3AlternateProtocol(); EXPECT_EQ(grid_.first(), nullptr); // This timer will be returned and armed as the grid creates the wrapper's failover timer. 
@@ -310,6 +293,7 @@ TEST_F(ConnectivityGridTest, TimeoutThenSuccessParallelSecondConnectsFirstFail) // Test that after the first pool fails, subsequent connections will // successfully fail over to the second pool (the iterators work as intended) TEST_F(ConnectivityGridTest, FailureThenSuccessForMultipleConnectionsSerial) { + addHttp3AlternateProtocol(); NiceMock callbacks2; NiceMock decoder2; // Kick off two new streams. @@ -335,6 +319,7 @@ TEST_F(ConnectivityGridTest, FailureThenSuccessForMultipleConnectionsSerial) { // Test double failure under the stack of newStream. TEST_F(ConnectivityGridTest, ImmediateDoubleFailure) { + addHttp3AlternateProtocol(); grid_.immediate_failure_ = true; EXPECT_CALL(callbacks_.pool_failure_, ready()); EXPECT_EQ(grid_.newStream(decoder_, callbacks_), nullptr); @@ -343,6 +328,7 @@ TEST_F(ConnectivityGridTest, ImmediateDoubleFailure) { // Test both connections happening in parallel and both failing. TEST_F(ConnectivityGridTest, TimeoutDoubleFailureParallel) { + addHttp3AlternateProtocol(); EXPECT_EQ(grid_.first(), nullptr); // This timer will be returned and armed as the grid creates the wrapper's failover timer. @@ -371,6 +357,7 @@ TEST_F(ConnectivityGridTest, TimeoutDoubleFailureParallel) { // Test cancellation TEST_F(ConnectivityGridTest, TestCancel) { + addHttp3AlternateProtocol(); EXPECT_EQ(grid_.first(), nullptr); auto cancel = grid_.newStream(decoder_, callbacks_); @@ -383,6 +370,7 @@ TEST_F(ConnectivityGridTest, TestCancel) { // Make sure drains get sent to all active pools. TEST_F(ConnectivityGridTest, Drain) { + addHttp3AlternateProtocol(); grid_.drainConnections(Envoy::ConnectionPool::DrainBehavior::DrainExistingConnections); // Synthetically create a pool. @@ -405,6 +393,7 @@ TEST_F(ConnectivityGridTest, Drain) { // Make sure drain callbacks work as expected. TEST_F(ConnectivityGridTest, DrainCallbacks) { + addHttp3AlternateProtocol(); // Synthetically create both pools. 
grid_.createNextPool(); grid_.createNextPool(); @@ -453,6 +442,7 @@ TEST_F(ConnectivityGridTest, DrainCallbacks) { // Make sure idle callbacks work as expected. TEST_F(ConnectivityGridTest, IdleCallbacks) { + addHttp3AlternateProtocol(); // Synthetically create both pools. grid_.createNextPool(); grid_.createNextPool(); @@ -485,6 +475,7 @@ TEST_F(ConnectivityGridTest, IdleCallbacks) { // Ensure drain callbacks aren't called during grid teardown. TEST_F(ConnectivityGridTest, NoDrainOnTeardown) { + addHttp3AlternateProtocol(); grid_.createNextPool(); bool drain_received = false; @@ -500,7 +491,7 @@ TEST_F(ConnectivityGridTest, NoDrainOnTeardown) { } // Test that when HTTP/3 is broken then the HTTP/3 pool is skipped. -TEST_F(ConnectivityGridWithAlternateProtocolsCacheImplTest, SuccessAfterBroken) { +TEST_F(ConnectivityGridTest, SuccessAfterBroken) { addHttp3AlternateProtocol(); grid_.markHttp3Broken(); EXPECT_EQ(grid_.first(), nullptr); @@ -518,7 +509,7 @@ TEST_F(ConnectivityGridWithAlternateProtocolsCacheImplTest, SuccessAfterBroken) } // Test the HTTP/3 pool successfully connecting when HTTP/3 is available. -TEST_F(ConnectivityGridWithAlternateProtocolsCacheImplTest, Success) { +TEST_F(ConnectivityGridTest, SuccessWithAltSvc) { addHttp3AlternateProtocol(); EXPECT_EQ(grid_.first(), nullptr); @@ -534,7 +525,7 @@ TEST_F(ConnectivityGridWithAlternateProtocolsCacheImplTest, Success) { } // Test that when HTTP/3 is not available then the HTTP/3 pool is skipped. -TEST_F(ConnectivityGridWithAlternateProtocolsCacheImplTest, SuccessWithoutHttp3) { +TEST_F(ConnectivityGridTest, SuccessWithoutHttp3) { EXPECT_EQ(grid_.first(), nullptr); EXPECT_LOG_CONTAINS("trace", @@ -550,7 +541,7 @@ TEST_F(ConnectivityGridWithAlternateProtocolsCacheImplTest, SuccessWithoutHttp3) } // Test that when HTTP/3 is not available then the HTTP/3 pool is skipped. 
-TEST_F(ConnectivityGridWithAlternateProtocolsCacheImplTest, SuccessWithExpiredHttp3) { +TEST_F(ConnectivityGridTest, SuccessWithExpiredHttp3) { AlternateProtocolsCacheImpl::Origin origin("https", "hostname", 9000); const std::vector protocols = { {"h3-29", "", origin.port_, simTime().monotonicTime() + Seconds(5)}}; @@ -573,7 +564,7 @@ TEST_F(ConnectivityGridWithAlternateProtocolsCacheImplTest, SuccessWithExpiredHt // Test that when the alternate protocol specifies a different host, then the HTTP/3 pool is // skipped. -TEST_F(ConnectivityGridWithAlternateProtocolsCacheImplTest, SuccessWithoutHttp3NoMatchingHostname) { +TEST_F(ConnectivityGridTest, SuccessWithoutHttp3NoMatchingHostname) { AlternateProtocolsCacheImpl::Origin origin("https", "hostname", 9000); const std::vector protocols = { {"h3-29", "otherhostname", origin.port_, simTime().monotonicTime() + Seconds(5)}}; @@ -594,7 +585,7 @@ TEST_F(ConnectivityGridWithAlternateProtocolsCacheImplTest, SuccessWithoutHttp3N // Test that when the alternate protocol specifies a different port, then the HTTP/3 pool is // skipped. -TEST_F(ConnectivityGridWithAlternateProtocolsCacheImplTest, SuccessWithoutHttp3NoMatchingPort) { +TEST_F(ConnectivityGridTest, SuccessWithoutHttp3NoMatchingPort) { AlternateProtocolsCacheImpl::Origin origin("https", "hostname", 9000); const std::vector protocols = { {"h3-29", "", origin.port_ + 1, simTime().monotonicTime() + Seconds(5)}}; @@ -614,7 +605,7 @@ TEST_F(ConnectivityGridWithAlternateProtocolsCacheImplTest, SuccessWithoutHttp3N } // Test that when the alternate protocol specifies an invalid ALPN, then the HTTP/3 pool is skipped. 
-TEST_F(ConnectivityGridWithAlternateProtocolsCacheImplTest, SuccessWithoutHttp3NoMatchingAlpn) { +TEST_F(ConnectivityGridTest, SuccessWithoutHttp3NoMatchingAlpn) { AlternateProtocolsCacheImpl::Origin origin("https", "hostname", 9000); const std::vector protocols = { {"http/2", "", origin.port_, simTime().monotonicTime() + Seconds(5)}}; diff --git a/test/common/upstream/upstream_impl_test.cc b/test/common/upstream/upstream_impl_test.cc index 029b12bc33d8b..98b36b891f1df 100644 --- a/test/common/upstream/upstream_impl_test.cc +++ b/test/common/upstream/upstream_impl_test.cc @@ -3503,6 +3503,8 @@ TEST_F(ClusterInfoImplTest, Http3Auto) { http3_protocol_options: quic_protocol_options: max_concurrent_streams: 2 + alternate_protocols_cache_options: + name: default common_http_protocol_options: idle_timeout: 1s )EOF"; diff --git a/test/extensions/upstreams/http/config_test.cc b/test/extensions/upstreams/http/config_test.cc index cd91d8c55f675..13f16e02be5a0 100644 --- a/test/extensions/upstreams/http/config_test.cc +++ b/test/extensions/upstreams/http/config_test.cc @@ -1,6 +1,7 @@ #include "source/extensions/upstreams/http/config.h" #include "test/mocks/protobuf/mocks.h" +#include "test/test_common/utility.h" #include "gmock/gmock.h" #include "gtest/gtest.h" @@ -45,6 +46,33 @@ TEST(FactoryTest, EmptyProto) { EXPECT_TRUE(factory.createEmptyConfigProto() != nullptr); } +TEST_F(ConfigTest, Auto) { + options_.mutable_auto_config(); + ProtocolOptionsConfigImpl config(options_, validation_visitor_); + EXPECT_FALSE(config.use_downstream_protocol_); + EXPECT_TRUE(config.use_http2_); + EXPECT_FALSE(config.use_http3_); + EXPECT_TRUE(config.use_alpn_); +} + +TEST_F(ConfigTest, AutoHttp3) { + options_.mutable_auto_config(); + options_.mutable_auto_config()->mutable_http3_protocol_options(); + options_.mutable_auto_config()->mutable_alternate_protocols_cache_options(); + ProtocolOptionsConfigImpl config(options_, validation_visitor_); + EXPECT_TRUE(config.use_http2_); + 
EXPECT_TRUE(config.use_http3_); + EXPECT_TRUE(config.use_alpn_); +} + +TEST_F(ConfigTest, AutoHttp3NoCache) { + options_.mutable_auto_config(); + options_.mutable_auto_config()->mutable_http3_protocol_options(); + EXPECT_THROW_WITH_MESSAGE( + ProtocolOptionsConfigImpl config(options_, validation_visitor_), EnvoyException, + "alternate protocols cache must be configured when HTTP/3 is enabled with auto_config"); +} + } // namespace Http } // namespace Upstreams } // namespace Extensions diff --git a/test/integration/multiplexed_upstream_integration_test.cc b/test/integration/multiplexed_upstream_integration_test.cc index 6dedddbdea612..c55f1b22ff6c3 100644 --- a/test/integration/multiplexed_upstream_integration_test.cc +++ b/test/integration/multiplexed_upstream_integration_test.cc @@ -262,11 +262,23 @@ TEST_P(Http2UpstreamIntegrationTest, LargeSimultaneousRequestWithBufferLimits) { } TEST_P(Http2UpstreamIntegrationTest, SimultaneousRequestAlpn) { + if (upstreamProtocol() == Http::CodecType::HTTP3) { + // TODO(alyssawilk) In order to use HTTP/3, and alt-svc entry must exist in the alternate + // protocols cache, but currently there is no easy way to initialize the test with this state. + return; + } + use_alpn_ = true; simultaneousRequest(1024, 512, 1023, 513); } TEST_P(Http2UpstreamIntegrationTest, LargeSimultaneousRequestWithBufferLimitsAlpn) { + if (upstreamProtocol() == Http::CodecType::HTTP3) { + // TODO(alyssawilk) In order to use HTTP/3, and alt-svc entry must exist in the alternate + // protocols cache, but currently there is no easy way to initialize the test with this state. + return; + } + use_alpn_ = true; config_helper_.setBufferLimits(1024, 1024); // Set buffer limits upstream and downstream. 
simultaneousRequest(1024 * 20, 1024 * 14 + 2, 1024 * 10 + 5, 1024 * 16); @@ -666,11 +678,15 @@ class MixedUpstreamIntegrationTest : public Http2UpstreamIntegrationTest { bool use_http2_{false}; }; -TEST_P(MixedUpstreamIntegrationTest, SimultaneousRequestAutoWithHttp3) { +// TODO(alyssawilk) In order to use HTTP/3, and alt-svc entry must exist in the alternate +// protocols cache, but currently there is no easy way to initialize the test with this state. +TEST_P(MixedUpstreamIntegrationTest, DISABLED_SimultaneousRequestAutoWithHttp3) { + use_alternate_protocols_cache_ = true; testRouterRequestAndResponseWithBody(0, 0, false); } -TEST_P(MixedUpstreamIntegrationTest, SimultaneousRequestAutoWithHttp2) { +TEST_P(MixedUpstreamIntegrationTest, DISABLED_SimultaneousRequestAutoWithHttp2) { + use_alternate_protocols_cache_ = true; use_http2_ = true; testRouterRequestAndResponseWithBody(0, 0, false); } diff --git a/test/integration/multiplexed_upstream_integration_test.h b/test/integration/multiplexed_upstream_integration_test.h index a0903c717d2ec..6abcc79460242 100644 --- a/test/integration/multiplexed_upstream_integration_test.h +++ b/test/integration/multiplexed_upstream_integration_test.h @@ -9,7 +9,8 @@ class Http2UpstreamIntegrationTest : public HttpProtocolIntegrationTest { public: void initialize() override { upstream_tls_ = true; - config_helper_.configureUpstreamTls(use_alpn_, upstreamProtocol() == Http::CodecType::HTTP3); + config_helper_.configureUpstreamTls(use_alpn_, upstreamProtocol() == Http::CodecType::HTTP3, + use_alternate_protocols_cache_); HttpProtocolIntegrationTest::initialize(); } @@ -19,6 +20,7 @@ class Http2UpstreamIntegrationTest : public HttpProtocolIntegrationTest { void manySimultaneousRequests(uint32_t request_bytes, uint32_t response_bytes); bool use_alpn_{false}; + bool use_alternate_protocols_cache_{false}; uint64_t upstreamRxResetCounterValue(); uint64_t upstreamTxResetCounterValue(); From 764f4d2a149dd81f9cf4d6917660501998b7ac69 Mon Sep 17 
00:00:00 2001 From: alyssawilk Date: Thu, 23 Sep 2021 02:44:02 +0800 Subject: [PATCH 091/121] windows: fixing caching test on windows (#18197) apparently it didn't like 0.0.0.0 for loopback. Risk Level: n/a Testing: yes Docs Changes: n/a Release Notes: n/a Signed-off-by: Alyssa Wilk Signed-off-by: gayang --- .../dynamic_forward_proxy/proxy_filter_integration_test.cc | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/test/extensions/filters/http/dynamic_forward_proxy/proxy_filter_integration_test.cc b/test/extensions/filters/http/dynamic_forward_proxy/proxy_filter_integration_test.cc index c521df204f6e6..570fc3061359c 100644 --- a/test/extensions/filters/http/dynamic_forward_proxy/proxy_filter_integration_test.cc +++ b/test/extensions/filters/http/dynamic_forward_proxy/proxy_filter_integration_test.cc @@ -131,7 +131,8 @@ name: envoy.clusters.dynamic_forward_proxy std::string host = fmt::format("localhost:{}", fake_upstreams_[0]->localAddress()->ip()->port()); std::string value = - absl::StrCat(fake_upstreams_[0]->localAddress()->asString(), "|1000000|0"); + absl::StrCat(Network::Test::getLoopbackAddressUrlString(version_), ":", + fake_upstreams_[0]->localAddress()->ip()->port(), "|1000000|0"); TestEnvironment::writeStringToFileForTest( "dns_cache.txt", absl::StrCat(host.length(), "\n", host, value.length(), "\n", value)); } @@ -396,8 +397,6 @@ TEST_P(ProxyFilterIntegrationTest, DnsCacheCircuitBreakersInvoked) { EXPECT_EQ("503", response->headers().Status()->value().getStringView()); } -#ifndef WIN32 -// TODO(alyssawilk) figure out why this test doesn't pass on windows. 
TEST_P(ProxyFilterIntegrationTest, UseCacheFile) { write_cache_file_ = true; @@ -413,7 +412,6 @@ TEST_P(ProxyFilterIntegrationTest, UseCacheFile) { EXPECT_EQ(1, test_server_->counter("dns_cache.foo.cache_load")->value()); EXPECT_EQ(1, test_server_->counter("dns_cache.foo.host_added")->value()); } -#endif } // namespace } // namespace Envoy From 4850d32b7747947747d3fef7ddb5a22c5e607f89 Mon Sep 17 00:00:00 2001 From: phlax Date: Wed, 22 Sep 2021 20:58:35 +0100 Subject: [PATCH 092/121] bazel: Add build options for su-exec (#18217) Signed-off-by: Ryan Northey Signed-off-by: gayang --- ci/do_ci.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ci/do_ci.sh b/ci/do_ci.sh index 70a9f483d4abf..4334c2304b34c 100755 --- a/ci/do_ci.sh +++ b/ci/do_ci.sh @@ -135,7 +135,7 @@ function bazel_binary_build() { fi # Build su-exec utility - bazel build external:su-exec + bazel build "${BAZEL_BUILD_OPTIONS[@]}" external:su-exec cp_binary_for_image_build "${BINARY_TYPE}" "${COMPILE_TYPE}" "${EXE_NAME}" } From 0c72fbc5ebc07fa9c7faf524f9a3fa6553fd4bea Mon Sep 17 00:00:00 2001 From: Sotiris Nanopoulos Date: Wed, 22 Sep 2021 13:13:46 -0700 Subject: [PATCH 093/121] Fix issue where EnvoyUsers password expires (#18163) Windows sets an expiration date on password of users even when the password is empty. This prevents running the container as EnvoyUser after the expiration. 
Signed-off-by: Sotiris Nanopoulos Signed-off-by: gayang --- ci/Dockerfile-envoy-windows | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/ci/Dockerfile-envoy-windows b/ci/Dockerfile-envoy-windows index 6f9514569c1c4..edeff92dd4ebd 100644 --- a/ci/Dockerfile-envoy-windows +++ b/ci/Dockerfile-envoy-windows @@ -4,7 +4,8 @@ ARG BUILD_TAG=ltsc2019 FROM $BUILD_OS:$BUILD_TAG USER ContainerAdministrator -RUN net user /add "EnvoyUser" +RUN net accounts /MaxPWAge:unlimited +RUN net user /add "EnvoyUser" /expires:never RUN net localgroup "Network Configuration Operators" "EnvoyUser" /add RUN mkdir "C:\\Program\ Files\\envoy" From ee2cb6d8cde97b2f67c92e34373d5f2a59e5ebc0 Mon Sep 17 00:00:00 2001 From: James Heppenstall Date: Wed, 22 Sep 2021 17:41:56 -0400 Subject: [PATCH 094/121] proxy_protocol: fix IOCallResult error checks (#18221) Signed-off-by: James Heppenstall Signed-off-by: gayang --- .../filters/listener/proxy_protocol/proxy_protocol.cc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/source/extensions/filters/listener/proxy_protocol/proxy_protocol.cc b/source/extensions/filters/listener/proxy_protocol/proxy_protocol.cc index 0d8082a079fe2..d19be984e0fda 100644 --- a/source/extensions/filters/listener/proxy_protocol/proxy_protocol.cc +++ b/source/extensions/filters/listener/proxy_protocol/proxy_protocol.cc @@ -456,7 +456,7 @@ ReadOrParseState Filter::readProxyHeader(Network::IoHandle& io_handle) { if (buf_off_ < PROXY_PROTO_V2_HEADER_LEN) { ssize_t exp = PROXY_PROTO_V2_HEADER_LEN - buf_off_; const auto read_result = io_handle.recv(buf_ + buf_off_, exp, 0); - if (!result.ok() || read_result.return_value_ != uint64_t(exp)) { + if (!read_result.ok() || read_result.return_value_ != uint64_t(exp)) { ENVOY_LOG(debug, "failed to read proxy protocol (remote closed)"); return ReadOrParseState::Error; } @@ -478,7 +478,7 @@ ReadOrParseState Filter::readProxyHeader(Network::IoHandle& io_handle) { if (ssize_t(buf_off_) + nread >= 
PROXY_PROTO_V2_HEADER_LEN + addr_len) { ssize_t missing = (PROXY_PROTO_V2_HEADER_LEN + addr_len) - buf_off_; const auto read_result = io_handle.recv(buf_ + buf_off_, missing, 0); - if (!result.ok() || read_result.return_value_ != uint64_t(missing)) { + if (!read_result.ok() || read_result.return_value_ != uint64_t(missing)) { ENVOY_LOG(debug, "failed to read proxy protocol (remote closed)"); return ReadOrParseState::Error; } From 5daa011ebe4b236f3d534e0c4447ff254294d79b Mon Sep 17 00:00:00 2001 From: Jose Ulises Nino Rivera Date: Wed, 22 Sep 2021 17:57:29 -0700 Subject: [PATCH 095/121] dns cache manager: lookup cache by name (#18223) Commit Message: dns cache manager - lookup cache by name Risk Level: low - new API Testing: new unit tests Signed-off-by: Jose Nino Co-Authored-By: Mike Schore Signed-off-by: gayang --- .../common/dynamic_forward_proxy/dns_cache.h | 8 ++++++++ .../dns_cache_manager_impl.cc | 10 ++++++++++ .../dns_cache_manager_impl.h | 1 + .../dns_cache_impl_test.cc | 17 +++++++++++++++++ .../common/dynamic_forward_proxy/mocks.h | 1 + 5 files changed, 37 insertions(+) diff --git a/source/extensions/common/dynamic_forward_proxy/dns_cache.h b/source/extensions/common/dynamic_forward_proxy/dns_cache.h index 8c178ecc8abfc..4500341592e71 100644 --- a/source/extensions/common/dynamic_forward_proxy/dns_cache.h +++ b/source/extensions/common/dynamic_forward_proxy/dns_cache.h @@ -221,6 +221,14 @@ class DnsCacheManager { */ virtual DnsCacheSharedPtr getCache(const envoy::extensions::common::dynamic_forward_proxy::v3::DnsCacheConfig& config) PURE; + + /** + * Look up an existing DNS cache by name. + * @param name supplies the cache name to look up. If a cache exists with the same name it + * will be returned. + * @return pointer to the cache if it exists, nullptr otherwise. 
+ */ + virtual DnsCacheSharedPtr lookUpCacheByName(absl::string_view cache_name) PURE; }; using DnsCacheManagerSharedPtr = std::shared_ptr; diff --git a/source/extensions/common/dynamic_forward_proxy/dns_cache_manager_impl.cc b/source/extensions/common/dynamic_forward_proxy/dns_cache_manager_impl.cc index 7dee0887fb44c..3fea4fcec98a5 100644 --- a/source/extensions/common/dynamic_forward_proxy/dns_cache_manager_impl.cc +++ b/source/extensions/common/dynamic_forward_proxy/dns_cache_manager_impl.cc @@ -31,6 +31,16 @@ DnsCacheSharedPtr DnsCacheManagerImpl::getCache( return new_cache; } +DnsCacheSharedPtr DnsCacheManagerImpl::lookUpCacheByName(absl::string_view cache_name) { + ASSERT(context_.mainThreadDispatcher().isThreadSafe()); + const auto& existing_cache = caches_.find(cache_name); + if (existing_cache != caches_.end()) { + return existing_cache->second.cache_; + } + + return nullptr; +} + DnsCacheManagerSharedPtr DnsCacheManagerFactoryImpl::get() { return context_.singletonManager().getTyped( SINGLETON_MANAGER_REGISTERED_NAME(dns_cache_manager), diff --git a/source/extensions/common/dynamic_forward_proxy/dns_cache_manager_impl.h b/source/extensions/common/dynamic_forward_proxy/dns_cache_manager_impl.h index 657279c2323ce..582c8ea4f7017 100644 --- a/source/extensions/common/dynamic_forward_proxy/dns_cache_manager_impl.h +++ b/source/extensions/common/dynamic_forward_proxy/dns_cache_manager_impl.h @@ -19,6 +19,7 @@ class DnsCacheManagerImpl : public DnsCacheManager, public Singleton::Instance { // DnsCacheManager DnsCacheSharedPtr getCache( const envoy::extensions::common::dynamic_forward_proxy::v3::DnsCacheConfig& config) override; + DnsCacheSharedPtr lookUpCacheByName(absl::string_view cache_name) override; private: struct ActiveCache { diff --git a/test/extensions/common/dynamic_forward_proxy/dns_cache_impl_test.cc b/test/extensions/common/dynamic_forward_proxy/dns_cache_impl_test.cc index 8ae4b11eb071f..d29bb933d01c8 100644 --- 
a/test/extensions/common/dynamic_forward_proxy/dns_cache_impl_test.cc +++ b/test/extensions/common/dynamic_forward_proxy/dns_cache_impl_test.cc @@ -975,6 +975,23 @@ TEST(DnsCacheManagerImplTest, LoadViaConfig) { "config specified DNS cache 'foo' with different settings"); } +TEST(DnsCacheManagerImplTest, LookupByName) { + NiceMock context; + DnsCacheManagerImpl cache_manager(context); + + EXPECT_EQ(cache_manager.lookUpCacheByName("foo"), nullptr); + + envoy::extensions::common::dynamic_forward_proxy::v3::DnsCacheConfig config1; + config1.set_name("foo"); + + auto cache1 = cache_manager.getCache(config1); + EXPECT_NE(cache1, nullptr); + + auto cache2 = cache_manager.lookUpCacheByName("foo"); + EXPECT_NE(cache2, nullptr); + EXPECT_EQ(cache1, cache2); +} + TEST(DnsCacheConfigOptionsTest, EmtpyDnsResolutionConfig) { NiceMock context; envoy::extensions::common::dynamic_forward_proxy::v3::DnsCacheConfig config; diff --git a/test/extensions/common/dynamic_forward_proxy/mocks.h b/test/extensions/common/dynamic_forward_proxy/mocks.h index 6cc4acc14c3f7..3765190ab29f7 100644 --- a/test/extensions/common/dynamic_forward_proxy/mocks.h +++ b/test/extensions/common/dynamic_forward_proxy/mocks.h @@ -77,6 +77,7 @@ class MockDnsCacheManager : public DnsCacheManager { MOCK_METHOD(DnsCacheSharedPtr, getCache, (const envoy::extensions::common::dynamic_forward_proxy::v3::DnsCacheConfig& config)); + MOCK_METHOD(DnsCacheSharedPtr, lookUpCacheByName, (absl::string_view cache_name)); std::shared_ptr> dns_cache_{new NiceMock()}; }; From 0c05c2c1748a5fb4e958af10d201678d77bc415f Mon Sep 17 00:00:00 2001 From: John Esmet Date: Wed, 22 Sep 2021 23:08:02 -0400 Subject: [PATCH 096/121] tools: fix protoprint to respect CLANG_FORMAT env var (#18202) On OSX with clang-format 12.0.1 and clang-format-11 installed elsewhere with CLANG_FORMAT=/usr/local/opt/llvm@11/bin/clang-format, I noticed that tools/proto_format/proto_format.sh fix produced an odd diff on 
api/envoy/extensions/filters/http/jwt_authn/v3/config.proto. After some digging, I found that protoprint.py was invoking clang-format directly instead of consulting CLANG_FORMAT. This PR fixes that behavior, taking precedent from tools/code_format/check_format.py Signed-off-by: John Esmet Signed-off-by: gayang --- tools/protoxform/protoprint.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tools/protoxform/protoprint.py b/tools/protoxform/protoprint.py index b30058b37a68e..45dcfea7ce612 100755 --- a/tools/protoxform/protoprint.py +++ b/tools/protoxform/protoprint.py @@ -82,8 +82,9 @@ def clang_format(contents): Returns: clang-formatted string """ + clang_format_path = os.getenv("CLANG_FORMAT", "clang-format-11") return subprocess.run( - ['clang-format', + [clang_format_path, '--style=%s' % CLANG_FORMAT_STYLE, '--assume-filename=.proto'], input=contents.encode('utf-8'), stdout=subprocess.PIPE).stdout From 36f71778889169ebe2b2fc6bbeb206b914b98394 Mon Sep 17 00:00:00 2001 From: xuhj Date: Thu, 23 Sep 2021 20:28:36 +0800 Subject: [PATCH 097/121] Remove useless comment for ConnectionInfoProvider (#18210) Commit Message: Remove useless comment for ConnectionInfoProvider Additional Description: Already finished the rename task, so remove that comment. Risk Level: low Testing: N/A Signed-off-by: He Jie Xu hejie.xu@intel.com Signed-off-by: gayang --- envoy/network/socket.h | 2 -- 1 file changed, 2 deletions(-) diff --git a/envoy/network/socket.h b/envoy/network/socket.h index e7200f8ace026..225b76297a06c 100644 --- a/envoy/network/socket.h +++ b/envoy/network/socket.h @@ -47,8 +47,6 @@ struct SocketOptionName { * Interfaces for providing a socket's various addresses. This is split into a getters interface * and a getters + setters interface. This is so that only the getters portion can be overridden * in certain cases. 
- * TODO(soulxu): Since there are more than address information inside the provider, this will be - * renamed as ConnectionInfoProvider. Ref https://github.com/envoyproxy/envoy/issues/17168 */ class ConnectionInfoProvider { public: From 8c5f5fee82e25ade83ed2649c6cb09d23756bb34 Mon Sep 17 00:00:00 2001 From: Keith Smiley Date: Thu, 23 Sep 2021 05:31:04 -0700 Subject: [PATCH 098/121] bazel: add root `envoy` alias (#18225) This is a minor quality of life improvement to just be able to run bazel build envoy Signed-off-by: Keith Smiley keithbsmiley@gmail.com Signed-off-by: gayang --- BUILD | 5 +++++ bazel/README.md | 24 ++++++++++++------------ 2 files changed, 17 insertions(+), 12 deletions(-) diff --git a/BUILD b/BUILD index 9e35562c085fb..747d512e7e9f4 100644 --- a/BUILD +++ b/BUILD @@ -8,6 +8,11 @@ exports_files([ ".coveragerc", ]) +alias( + name = "envoy", + actual = "//source/exe:envoy", +) + # These two definitions exist to help reduce Envoy upstream core code depending on extensions. # To avoid visibility problems, see notes in source/extensions/extensions_build_config.bzl # diff --git a/bazel/README.md b/bazel/README.md index 3828e675a0b37..7b57d8d0d1ac3 100644 --- a/bazel/README.md +++ b/bazel/README.md @@ -30,7 +30,7 @@ dependencies](https://www.envoyproxy.io/docs/envoy/latest/start/building#require independently sourced, the following steps should be followed: 1. Configure, build and/or install the [Envoy dependencies](https://www.envoyproxy.io/docs/envoy/latest/start/building#requirements). -1. `bazel build -c opt //source/exe:envoy-static` from the repository root. +1. `bazel build -c opt envoy` from the repository root. ## Quick start Bazel build for developers @@ -236,7 +236,7 @@ for how to update or override dependencies. in your shell for buildifier to work. 1. `go get -u github.com/bazelbuild/buildtools/buildozer` to install buildozer. You may need to set `BUILDOZER_BIN` to `$GOPATH/bin/buildozer` in your shell for buildozer to work. -1. 
`bazel build //source/exe:envoy-static` from the Envoy source directory. Add `-c opt` for an optimized release build or +1. `bazel build envoy` from the Envoy source directory. Add `-c opt` for an optimized release build or `-c dbg` for an unoptimized, fully instrumented debugging build. ## Building Envoy with the CI Docker image @@ -270,7 +270,7 @@ To build Envoy with a remote build services, run Bazel with your remote build se For example the following command runs build with the GCP RBE service used in CI: ``` -bazel build //source/exe:envoy-static --config=remote-clang \ +bazel build envoy --config=remote-clang \ --remote_cache=grpcs://remotebuildexecution.googleapis.com \ --remote_executor=grpcs://remotebuildexecution.googleapis.com \ --remote_instance_name=projects/envoy-ci/instances/default_instance @@ -289,7 +289,7 @@ Building Envoy with Docker sandbox uses the same Docker image used in CI with fi output which is not depending on your local C++ toolchain. It can also help debugging issues with RBE. To build Envoy with Docker sandbox: ``` -bazel build //source/exe:envoy-static --config=docker-clang +bazel build envoy --config=docker-clang ``` Tests can be run in docker sandbox too. Note that the network environment, such as IPv6, may be different in the docker sandbox so you may want @@ -299,7 +299,7 @@ set different options. See below to configure test IP versions. To link Envoy against libc++, follow the [quick start](#quick-start-bazel-build-for-developers) to setup Clang+LLVM and run: ``` -bazel build --config=libc++ //source/exe:envoy-static +bazel build --config=libc++ envoy ``` Or use our configuration with Remote Execution or Docker sandbox, pass `--config=remote-clang-libc++` or @@ -522,14 +522,14 @@ that Bazel supports: You can use the `-c ` flag to control this, e.g. 
``` -bazel build -c opt //source/exe:envoy-static +bazel build -c opt envoy ``` To override the compilation mode and optimize the build for binary size, you can use the `sizeopt` configuration: ``` -bazel build //source/exe:envoy-static --config=sizeopt +bazel build envoy --config=sizeopt ``` ## Sanitizers @@ -751,7 +751,7 @@ They should also ignore any local `.bazelrc` for reproducibility. This can be achieved with: ``` -bazel --bazelrc=/dev/null build -c opt //source/exe:envoy-static.stripped +bazel --bazelrc=/dev/null build -c opt envoy.stripped ``` One caveat to note is that the Git SHA1 is truncated to 16 bytes today as a @@ -818,7 +818,7 @@ resources, you can override Bazel's default job parallelism determination with `--jobs=N` to restrict the build to at most `N` simultaneous jobs, e.g.: ``` -bazel build --jobs=2 //source/exe:envoy-static +bazel build --jobs=2 envoy ``` # Debugging the Bazel build @@ -827,19 +827,19 @@ When trying to understand what Bazel is doing, the `-s` and `--explain` options are useful. 
To have Bazel provide verbose output on which commands it is executing: ``` -bazel build -s //source/exe:envoy-static +bazel build -s envoy ``` To have Bazel emit to a text file the rationale for rebuilding a target: ``` -bazel build --explain=file.txt //source/exe:envoy-static +bazel build --explain=file.txt envoy ``` To get more verbose explanations: ``` -bazel build --explain=file.txt --verbose_explanations //source/exe:envoy-static +bazel build --explain=file.txt --verbose_explanations envoy ``` # Resolving paths in bazel build output From 97b413f7b21f84641b7ef778ab97c12d679efef3 Mon Sep 17 00:00:00 2001 From: alyssawilk Date: Thu, 23 Sep 2021 21:32:32 +0800 Subject: [PATCH 099/121] tap: removing tap code from core test paths (#18230) Commit Message: n/a Additional Description: n/a Risk Level: n/a Testing: n/a Part of #9953 Signed-off-by: Alyssa Wilk Signed-off-by: gayang --- source/extensions/transport_sockets/tap/BUILD | 5 -- test/config/BUILD | 2 - test/config/utility.cc | 56 ------------------- test/config/utility.h | 4 -- 4 files changed, 67 deletions(-) diff --git a/source/extensions/transport_sockets/tap/BUILD b/source/extensions/transport_sockets/tap/BUILD index 47437466ea2c4..03bd2a581454a 100644 --- a/source/extensions/transport_sockets/tap/BUILD +++ b/source/extensions/transport_sockets/tap/BUILD @@ -51,11 +51,6 @@ envoy_cc_extension( name = "config", srcs = ["config.cc"], hdrs = ["config.h"], - # TODO(#9953) clean up. 
- extra_visibility = [ - "//test/common/access_log:__subpackages__", - "//test/extensions/transport_sockets/tls/integration:__subpackages__", - ], deps = [ ":tap_config_impl", ":tap_lib", diff --git a/test/config/BUILD b/test/config/BUILD index 9e5807cad1712..8c230a6adf076 100644 --- a/test/config/BUILD +++ b/test/config/BUILD @@ -34,11 +34,9 @@ envoy_cc_test_library( "@envoy_api//envoy/config/endpoint/v3:pkg_cc_proto", "@envoy_api//envoy/config/listener/v3:pkg_cc_proto", "@envoy_api//envoy/config/route/v3:pkg_cc_proto", - "@envoy_api//envoy/config/tap/v3:pkg_cc_proto", "@envoy_api//envoy/extensions/access_loggers/file/v3:pkg_cc_proto", "@envoy_api//envoy/extensions/filters/network/http_connection_manager/v3:pkg_cc_proto", "@envoy_api//envoy/extensions/transport_sockets/quic/v3:pkg_cc_proto", - "@envoy_api//envoy/extensions/transport_sockets/tap/v3:pkg_cc_proto", "@envoy_api//envoy/extensions/transport_sockets/tls/v3:pkg_cc_proto", "@envoy_api//envoy/extensions/upstreams/http/v3:pkg_cc_proto", "@envoy_api//envoy/service/discovery/v3:pkg_cc_proto", diff --git a/test/config/utility.cc b/test/config/utility.cc index 4cbe9cdf3a4f9..a5aca16f6c773 100644 --- a/test/config/utility.cc +++ b/test/config/utility.cc @@ -6,11 +6,9 @@ #include "envoy/config/endpoint/v3/endpoint.pb.h" #include "envoy/config/listener/v3/listener_components.pb.h" #include "envoy/config/route/v3/route_components.pb.h" -#include "envoy/config/tap/v3/common.pb.h" #include "envoy/extensions/access_loggers/file/v3/file.pb.h" #include "envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.h" #include "envoy/extensions/transport_sockets/quic/v3/quic_transport.pb.h" -#include "envoy/extensions/transport_sockets/tap/v3/tap.pb.h" #include "envoy/extensions/transport_sockets/tls/v3/cert.pb.h" #include "envoy/http/codec.h" #include "envoy/service/discovery/v3/discovery.pb.h" @@ -786,22 +784,6 @@ void ConfigHelper::finalize(const std::vector& ports) { bool custom_cluster = 
false; bool original_dst_cluster = false; auto* static_resources = bootstrap_.mutable_static_resources(); - const auto tap_path = TestEnvironment::getOptionalEnvVar("TAP_PATH"); - if (tap_path) { - ENVOY_LOG_MISC(debug, "Test tap path set to {}", tap_path.value()); - } else { - ENVOY_LOG_MISC(debug, "No tap path set for tests"); - } - for (int i = 0; i < bootstrap_.mutable_static_resources()->listeners_size(); ++i) { - auto* listener = static_resources->mutable_listeners(i); - for (int j = 0; j < listener->filter_chains_size(); ++j) { - if (tap_path) { - auto* filter_chain = listener->mutable_filter_chains(j); - setTapTransportSocket(tap_path.value(), fmt::format("listener_{}_{}", i, j), - *filter_chain->mutable_transport_socket()); - } - } - } for (int i = 0; i < bootstrap_.mutable_static_resources()->clusters_size(); ++i) { auto* cluster = static_resources->mutable_clusters(i); if (cluster->type() == envoy::config::cluster::v3::Cluster::EDS) { @@ -831,11 +813,6 @@ void ConfigHelper::finalize(const std::vector& ports) { } } } - - if (tap_path) { - setTapTransportSocket(tap_path.value(), absl::StrCat("cluster_", i), - *cluster->mutable_transport_socket()); - } } ASSERT(skip_port_usage_validation_ || port_idx == ports.size() || eds_hosts || original_dst_cluster || custom_cluster || bootstrap_.dynamic_resources().has_cds_config()); @@ -854,39 +831,6 @@ void ConfigHelper::finalize(const std::vector& ports) { finalized_ = true; } -void ConfigHelper::setTapTransportSocket( - const std::string& tap_path, const std::string& type, - envoy::config::core::v3::TransportSocket& transport_socket) { - // Determine inner transport socket. - envoy::config::core::v3::TransportSocket inner_transport_socket; - if (!transport_socket.name().empty()) { - inner_transport_socket.MergeFrom(transport_socket); - } else { - inner_transport_socket.set_name("envoy.transport_sockets.raw_buffer"); - } - // Configure outer tap transport socket. 
- transport_socket.set_name("envoy.transport_sockets.tap"); - envoy::extensions::transport_sockets::tap::v3::Tap tap_config; - tap_config.mutable_common_config() - ->mutable_static_config() - ->mutable_match_config() - ->set_any_match(true); - auto* output_sink = tap_config.mutable_common_config() - ->mutable_static_config() - ->mutable_output_config() - ->mutable_sinks() - ->Add(); - output_sink->set_format(envoy::config::tap::v3::OutputSink::PROTO_TEXT); - const ::testing::TestInfo* const test_info = - ::testing::UnitTest::GetInstance()->current_test_info(); - const std::string test_id = - std::string(test_info->name()) + "_" + std::string(test_info->test_case_name()) + "_" + type; - output_sink->mutable_file_per_tap()->set_path_prefix(tap_path + "_" + - absl::StrReplaceAll(test_id, {{"/", "_"}})); - tap_config.mutable_transport_socket()->MergeFrom(inner_transport_socket); - transport_socket.mutable_typed_config()->PackFrom(tap_config); -} - void ConfigHelper::setSourceAddress(const std::string& address_string) { RELEASE_ASSERT(!finalized_, ""); bootstrap_.mutable_cluster_manager() diff --git a/test/config/utility.h b/test/config/utility.h index 501d08f0e969a..7d94c4b8dbd0e 100644 --- a/test/config/utility.h +++ b/test/config/utility.h @@ -365,10 +365,6 @@ class ConfigHelper { // Finds the filter named 'name' from the first filter chain from the first listener. envoy::config::listener::v3::Filter* getFilterFromListener(const std::string& name); - // Configure a tap transport socket for a cluster/filter chain. - void setTapTransportSocket(const std::string& tap_path, const std::string& type, - envoy::config::core::v3::TransportSocket& transport_socket); - // The bootstrap proto Envoy will start up with. 
envoy::config::bootstrap::v3::Bootstrap bootstrap_; From 4687a5946eb2be62dcac40ecd9dce17d335a4ec7 Mon Sep 17 00:00:00 2001 From: alyssawilk Date: Thu, 23 Sep 2021 23:00:12 +0800 Subject: [PATCH 100/121] alt_svc_cache: adding flush capabilities (#18189) Risk Level: low Testing: new unit tests. integration test TODO in a follow up Docs Changes: n/a Release Notes: will land with integration test. Fixes #18034 Signed-off-by: Alyssa Wilk Signed-off-by: gayang --- api/envoy/config/core/v3/protocol.proto | 6 ++ envoy/http/alternate_protocols_cache.h | 5 +- source/common/common/key_value_store_base.h | 1 - source/common/http/BUILD | 3 + .../http/alternate_protocols_cache_impl.cc | 82 +++++++++++++++-- .../http/alternate_protocols_cache_impl.h | 26 +++++- .../alternate_protocols_cache_manager_impl.cc | 26 +++++- .../alternate_protocols_cache_manager_impl.h | 5 +- .../http/alternate_protocols_cache/filter.cc | 22 +++-- .../alternate_protocols_cache_impl_test.cc | 88 +++++++++++++++---- .../alternate_protocols_cache_manager_test.cc | 10 +++ test/common/http/conn_pool_grid_test.cc | 21 +++-- test/extensions/key_value/file_based/BUILD | 17 ++++ .../alternate_protocols_cache_impl_test.cc | 49 +++++++++++ test/mocks/http/alternate_protocols_cache.h | 2 +- .../config_validation/cluster_manager_test.cc | 2 +- 16 files changed, 314 insertions(+), 51 deletions(-) create mode 100644 test/extensions/key_value/file_based/alternate_protocols_cache_impl_test.cc diff --git a/api/envoy/config/core/v3/protocol.proto b/api/envoy/config/core/v3/protocol.proto index 74b778b6d30a7..a1560318e8388 100644 --- a/api/envoy/config/core/v3/protocol.proto +++ b/api/envoy/config/core/v3/protocol.proto @@ -102,6 +102,12 @@ message AlternateProtocolsCacheOptions { // it is possible for the maximum entries in the cache to go slightly above the configured // value depending on timing. This is similar to how other circuit breakers work. 
google.protobuf.UInt32Value max_entries = 2 [(validate.rules).uint32 = {gt: 0}]; + + // Allows configuring a persistent + // :ref:`key value store ` to flush + // alternate protocols entries to disk. + // This function is currently only supported if concurrency is 1 + TypedExtensionConfig key_value_store_config = 3; } // [#next-free-field: 7] diff --git a/envoy/http/alternate_protocols_cache.h b/envoy/http/alternate_protocols_cache.h index 5dbbff8c29096..e688fb4417fd0 100644 --- a/envoy/http/alternate_protocols_cache.h +++ b/envoy/http/alternate_protocols_cache.h @@ -92,10 +92,11 @@ class AlternateProtocolsCache { * Sets the possible alternative protocols which can be used to connect to the * specified origin. Expires after the specified expiration time. * @param origin The origin to set alternate protocols for. - * @param protocols A list of alternate protocols. + * @param protocols A list of alternate protocols. This list may be truncated + * by the cache. */ virtual void setAlternatives(const Origin& origin, - const std::vector& protocols) PURE; + std::vector& protocols) PURE; /** * Returns the possible alternative protocols which can be used to connect to the diff --git a/source/common/common/key_value_store_base.h b/source/common/common/key_value_store_base.h index c445e9f47bdde..518cc9cdb3d00 100644 --- a/source/common/common/key_value_store_base.h +++ b/source/common/common/key_value_store_base.h @@ -8,7 +8,6 @@ #include "absl/container/flat_hash_map.h" -// TODO(alyssawilk) move to a common extension dir. namespace Envoy { // This is the base implementation of the KeyValueStore. 
It handles the various diff --git a/source/common/http/BUILD b/source/common/http/BUILD index 3f1323c8c8cd9..60d3cb0cf17eb 100644 --- a/source/common/http/BUILD +++ b/source/common/http/BUILD @@ -176,8 +176,11 @@ envoy_cc_library( "//envoy/singleton:manager_interface", "//envoy/thread_local:thread_local_interface", "//envoy/upstream:resource_manager_interface", + "//source/common/common:key_value_store_lib", "//source/common/common:logger_lib", "//source/common/config:utility_lib", + "@com_github_google_quiche//:spdy_core_alt_svc_wire_format_lib", + "@envoy_api//envoy/config/common/key_value/v3:pkg_cc_proto", "@envoy_api//envoy/config/core/v3:pkg_cc_proto", ], ) diff --git a/source/common/http/alternate_protocols_cache_impl.cc b/source/common/http/alternate_protocols_cache_impl.cc index f4a176fb405df..851209796c9c5 100644 --- a/source/common/http/alternate_protocols_cache_impl.cc +++ b/source/common/http/alternate_protocols_cache_impl.cc @@ -2,22 +2,84 @@ #include "source/common/common/logger.h" +#include "quiche/spdy/core/spdy_alt_svc_wire_format.h" + namespace Envoy { namespace Http { +namespace { +std::string originToString(const AlternateProtocolsCache::Origin& origin) { + return absl::StrCat(origin.scheme_, "://", origin.hostname_, ":", origin.port_); +} +} // namespace + +std::string AlternateProtocolsCacheImpl::protocolsToStringForCache( + const std::vector& protocols, TimeSource& /*time_source*/) { + if (protocols.empty()) { + return std::string("clear"); + } + std::string value; + for (auto& protocol : protocols) { + if (!value.empty()) { + value.push_back(','); + } + absl::StrAppend(&value, protocol.alpn_, "=\"", protocol.hostname_, ":", protocol.port_, "\""); + + // Note this is _not_ actually the max age, but the absolute time at which + // this entry will expire. protocolsFromString will convert back to ma. 
+ absl::StrAppend( + &value, "; ma=", + std::chrono::duration_cast(protocol.expiration_.time_since_epoch()) + .count()); + } + return value; +} -AlternateProtocolsCacheImpl::AlternateProtocolsCacheImpl(TimeSource& time_source) - : time_source_(time_source) {} +absl::optional> +AlternateProtocolsCacheImpl::protocolsFromString(absl::string_view alt_svc_string, + TimeSource& time_source, bool from_cache) { + std::vector protocols; + spdy::SpdyAltSvcWireFormat::AlternativeServiceVector altsvc_vector; + if (!spdy::SpdyAltSvcWireFormat::ParseHeaderFieldValue(alt_svc_string, &altsvc_vector)) { + return {}; + } + for (const auto& alt_svc : altsvc_vector) { + MonotonicTime expiration; + if (from_cache) { + auto expire_time_from_epoch = std::chrono::seconds(alt_svc.max_age); + auto time_since_epoch = std::chrono::duration_cast( + time_source.monotonicTime().time_since_epoch()); + if (expire_time_from_epoch < time_since_epoch) { + expiration = time_source.monotonicTime(); + } else { + expiration = time_source.monotonicTime() + (expire_time_from_epoch - time_since_epoch); + } + } else { + expiration = time_source.monotonicTime() + std::chrono::seconds(alt_svc.max_age); + } + Http::AlternateProtocolsCache::AlternateProtocol protocol(alt_svc.protocol_id, alt_svc.host, + alt_svc.port, expiration); + protocols.push_back(protocol); + } + return protocols; +} + +AlternateProtocolsCacheImpl::AlternateProtocolsCacheImpl( + TimeSource& time_source, std::unique_ptr&& key_value_store) + : time_source_(time_source), key_value_store_(std::move(key_value_store)) {} AlternateProtocolsCacheImpl::~AlternateProtocolsCacheImpl() = default; void AlternateProtocolsCacheImpl::setAlternatives(const Origin& origin, - const std::vector& protocols) { - protocols_[origin] = protocols; + std::vector& protocols) { static const size_t max_protocols = 10; if (protocols.size() > max_protocols) { ENVOY_LOG_MISC(trace, "Too many alternate protocols: {}, truncating", protocols.size()); - std::vector& p = 
protocols_[origin]; - p.erase(p.begin() + max_protocols, p.end()); + protocols.erase(protocols.begin() + max_protocols, protocols.end()); + } + protocols_[origin] = protocols; + if (key_value_store_) { + key_value_store_->addOrUpdate(originToString(origin), + protocolsToStringForCache(protocols, time_source_)); } } @@ -30,6 +92,7 @@ AlternateProtocolsCacheImpl::findAlternatives(const Origin& origin) { std::vector& protocols = entry_it->second; + auto original_size = protocols.size(); const MonotonicTime now = time_source_.monotonicTime(); protocols.erase(std::remove_if(protocols.begin(), protocols.end(), [now](const AlternateProtocol& protocol) { @@ -39,8 +102,15 @@ AlternateProtocolsCacheImpl::findAlternatives(const Origin& origin) { if (protocols.empty()) { protocols_.erase(entry_it); + if (key_value_store_) { + key_value_store_->remove(originToString(origin)); + } return makeOptRefFromPtr>(nullptr); } + if (key_value_store_ && original_size != protocols.size()) { + key_value_store_->addOrUpdate(originToString(origin), + protocolsToStringForCache(protocols, time_source_)); + } return makeOptRef(const_cast&>(protocols)); } diff --git a/source/common/http/alternate_protocols_cache_impl.h b/source/common/http/alternate_protocols_cache_impl.h index a029a970c763f..df216bf0407c3 100644 --- a/source/common/http/alternate_protocols_cache_impl.h +++ b/source/common/http/alternate_protocols_cache_impl.h @@ -5,6 +5,7 @@ #include #include +#include "envoy/common/key_value_store.h" #include "envoy/common/optref.h" #include "envoy/common/time.h" #include "envoy/http/alternate_protocols_cache.h" @@ -18,12 +19,28 @@ namespace Http { // See: source/docs/http3_upstream.md class AlternateProtocolsCacheImpl : public AlternateProtocolsCache { public: - explicit AlternateProtocolsCacheImpl(TimeSource& time_source); + AlternateProtocolsCacheImpl(TimeSource& time_source, std::unique_ptr&& store); ~AlternateProtocolsCacheImpl() override; + // Convert an AlternateProtocol vector to a 
string to cache to the key value + // store. Note that in order to determine the lifetime of entries, this + // function will serialize ma= as absolute time from the epoch rather than + // relative time. + // This function also does not do standards-required normalization. Entries requiring + // normalization will simply not be read from cache. + static std::string protocolsToStringForCache(const std::vector& protocols, + TimeSource& time_source); + // Parse an alternate protocols string into structured data, or absl::nullopt + // if it is empty or invalid. + // If from_cache is true, it is assumed the string was serialized using + // protocolsToStringForCache and the the ma fields will be parsed as absolute times + // rather than relative time. + static absl::optional> + protocolsFromString(absl::string_view protocols, TimeSource& time_source, + bool from_cache = false); + // AlternateProtocolsCache - void setAlternatives(const Origin& origin, - const std::vector& protocols) override; + void setAlternatives(const Origin& origin, std::vector& protocols) override; OptRef> findAlternatives(const Origin& origin) override; size_t size() const override; @@ -34,6 +51,9 @@ class AlternateProtocolsCacheImpl : public AlternateProtocolsCache { // Map from hostname to list of alternate protocols. // TODO(RyanTheOptimist): Add a limit to the size of this map and evict based on usage. std::map> protocols_; + + // The key value store, if flushing to persistent storage. 
+ std::unique_ptr key_value_store_; }; } // namespace Http diff --git a/source/common/http/alternate_protocols_cache_manager_impl.cc b/source/common/http/alternate_protocols_cache_manager_impl.cc index 59434006dcf0d..9496a5ea1c8b2 100644 --- a/source/common/http/alternate_protocols_cache_manager_impl.cc +++ b/source/common/http/alternate_protocols_cache_manager_impl.cc @@ -1,5 +1,10 @@ #include "source/common/http/alternate_protocols_cache_manager_impl.h" +#include "envoy/common/key_value_store.h" +#include "envoy/config/common/key_value/v3/config.pb.h" +#include "envoy/config/common/key_value/v3/config.pb.validate.h" + +#include "source/common/config/utility.h" #include "source/common/http/alternate_protocols_cache_impl.h" #include "source/common/protobuf/protobuf.h" @@ -18,6 +23,12 @@ AlternateProtocolsCacheManagerImpl::AlternateProtocolsCacheManagerImpl( AlternateProtocolsCacheSharedPtr AlternateProtocolsCacheManagerImpl::getCache( const envoy::config::core::v3::AlternateProtocolsCacheOptions& options) { + if (options.has_key_value_store_config() && data_.concurrency_ != 1) { + throw EnvoyException( + fmt::format("options has key value store but Envoy has concurrency = {} : {}", + data_.concurrency_, options.DebugString())); + } + const auto& existing_cache = (*slot_).caches_.find(options.name()); if (existing_cache != (*slot_).caches_.end()) { if (!Protobuf::util::MessageDifferencer::Equivalent(options, existing_cache->second.options_)) { @@ -26,12 +37,21 @@ AlternateProtocolsCacheSharedPtr AlternateProtocolsCacheManagerImpl::getCache( " first '{}' second '{}'", options.name(), existing_cache->second.options_.DebugString(), options.DebugString())); } - return existing_cache->second.cache_; } - AlternateProtocolsCacheSharedPtr new_cache = - std::make_shared(data_.dispatcher_.timeSource()); + std::unique_ptr store; + if (options.has_key_value_store_config()) { + envoy::config::common::key_value::v3::KeyValueStoreConfig kv_config; + 
MessageUtil::anyConvertAndValidate(options.key_value_store_config().typed_config(), kv_config, + data_.validation_visitor_); + auto& factory = Config::Utility::getAndCheckFactory(kv_config.config()); + store = factory.createStore(kv_config, data_.validation_visitor_, data_.dispatcher_, + data_.file_system_); + } + + AlternateProtocolsCacheSharedPtr new_cache = std::make_shared( + data_.dispatcher_.timeSource(), std::move(store)); (*slot_).caches_.emplace(options.name(), CacheWithOptions{options, new_cache}); return new_cache; } diff --git a/source/common/http/alternate_protocols_cache_manager_impl.h b/source/common/http/alternate_protocols_cache_manager_impl.h index 6524128d329f3..11746c935c1cb 100644 --- a/source/common/http/alternate_protocols_cache_manager_impl.h +++ b/source/common/http/alternate_protocols_cache_manager_impl.h @@ -15,9 +15,12 @@ namespace Http { struct AlternateProtocolsData { AlternateProtocolsData(Server::Configuration::FactoryContextBase& context) : dispatcher_(context.mainThreadDispatcher()), - validation_visitor_(context.messageValidationVisitor()) {} + validation_visitor_(context.messageValidationVisitor()), + file_system_(context.api().fileSystem()), concurrency_(context.options().concurrency()) {} Event::Dispatcher& dispatcher_; ProtobufMessage::ValidationVisitor& validation_visitor_; + Filesystem::Instance& file_system_; + uint32_t concurrency_; }; class AlternateProtocolsCacheManagerImpl : public AlternateProtocolsCacheManager, diff --git a/source/extensions/filters/http/alternate_protocols_cache/filter.cc b/source/extensions/filters/http/alternate_protocols_cache/filter.cc index 75cc99c9fb325..c1f8e3c9eaddd 100644 --- a/source/extensions/filters/http/alternate_protocols_cache/filter.cc +++ b/source/extensions/filters/http/alternate_protocols_cache/filter.cc @@ -5,10 +5,10 @@ #include "envoy/config/core/v3/base.pb.h" #include "envoy/extensions/filters/http/alternate_protocols_cache/v3/alternate_protocols_cache.pb.h" +#include 
"source/common/http/alternate_protocols_cache_impl.h" +#include "source/common/http/alternate_protocols_cache_manager_impl.h" #include "source/common/http/headers.h" -#include "quiche/spdy/core/spdy_alt_svc_wire_format.h" - namespace Envoy { namespace Extensions { namespace HttpFilters { @@ -44,23 +44,21 @@ Http::FilterHeadersStatus Filter::encodeHeaders(Http::ResponseHeaderMap& headers if (alt_svc.empty()) { return Http::FilterHeadersStatus::Continue; } + std::vector protocols; for (size_t i = 0; i < alt_svc.size(); ++i) { - spdy::SpdyAltSvcWireFormat::AlternativeServiceVector altsvc_vector; - if (!spdy::SpdyAltSvcWireFormat::ParseHeaderFieldValue(alt_svc[i]->value().getStringView(), - &altsvc_vector)) { + absl::optional> + potential_protocols = Http::AlternateProtocolsCacheImpl::protocolsFromString( + alt_svc[i]->value().getStringView(), time_source_); + if (!potential_protocols.has_value()) { ENVOY_LOG(trace, "Invalid Alt-Svc header received: '{}'", alt_svc[i]->value().getStringView()); return Http::FilterHeadersStatus::Continue; } - for (const auto& alt_svc : altsvc_vector) { - MonotonicTime expiration = - time_source_.monotonicTime() + std::chrono::seconds(alt_svc.max_age); - Http::AlternateProtocolsCache::AlternateProtocol protocol(alt_svc.protocol_id, alt_svc.host, - alt_svc.port, expiration); - protocols.push_back(protocol); - } + protocols.insert(protocols.end(), std::make_move_iterator(potential_protocols.value().begin()), + std::make_move_iterator(potential_protocols.value().end())); } + // The upstream host is used here, instead of the :authority request header because // Envoy routes request to upstream hosts not to origin servers directly. 
This choice would // allow HTTP/3 to be used on a per-upstream host basis, even for origins which are load diff --git a/test/common/http/alternate_protocols_cache_impl_test.cc b/test/common/http/alternate_protocols_cache_impl_test.cc index 4abc98ddb5be4..9aa0dc5f0016e 100644 --- a/test/common/http/alternate_protocols_cache_impl_test.cc +++ b/test/common/http/alternate_protocols_cache_impl_test.cc @@ -1,17 +1,23 @@ #include "source/common/http/alternate_protocols_cache_impl.h" +#include "test/mocks/common.h" #include "test/test_common/simulated_time_system.h" #include "gtest/gtest.h" +using testing::NiceMock; + namespace Envoy { namespace Http { namespace { class AlternateProtocolsCacheImplTest : public testing::Test, public Event::TestUsingSimulatedTime { public: - AlternateProtocolsCacheImplTest() : protocols_(simTime()) {} + AlternateProtocolsCacheImplTest() + : store_(new NiceMock()), + protocols_(simTime(), std::unique_ptr(store_)) {} + MockKeyValueStore* store_; AlternateProtocolsCacheImpl protocols_; const std::string hostname1_ = "hostname1"; const std::string hostname2_ = "hostname2"; @@ -29,24 +35,26 @@ class AlternateProtocolsCacheImplTest : public testing::Test, public Event::Test const AlternateProtocolsCacheImpl::Origin origin1_ = {https_, hostname1_, port1_}; const AlternateProtocolsCacheImpl::Origin origin2_ = {https_, hostname2_, port2_}; - const AlternateProtocolsCacheImpl::AlternateProtocol protocol1_ = {alpn1_, hostname1_, port1_, - expiration1_}; - const AlternateProtocolsCacheImpl::AlternateProtocol protocol2_ = {alpn2_, hostname2_, port2_, - expiration2_}; + AlternateProtocolsCacheImpl::AlternateProtocol protocol1_ = {alpn1_, hostname1_, port1_, + expiration1_}; + AlternateProtocolsCacheImpl::AlternateProtocol protocol2_ = {alpn2_, hostname2_, port2_, + expiration2_}; - const std::vector protocols1_ = {protocol1_}; - const std::vector protocols2_ = {protocol2_}; + std::vector protocols1_ = {protocol1_}; + std::vector protocols2_ = 
{protocol2_}; }; TEST_F(AlternateProtocolsCacheImplTest, Init) { EXPECT_EQ(0, protocols_.size()); } TEST_F(AlternateProtocolsCacheImplTest, SetAlternatives) { EXPECT_EQ(0, protocols_.size()); + EXPECT_CALL(*store_, addOrUpdate("https://hostname1:1", "alpn1=\"hostname1:1\"; ma=5")); protocols_.setAlternatives(origin1_, protocols1_); EXPECT_EQ(1, protocols_.size()); } TEST_F(AlternateProtocolsCacheImplTest, FindAlternatives) { + EXPECT_CALL(*store_, addOrUpdate("https://hostname1:1", "alpn1=\"hostname1:1\"; ma=5")); protocols_.setAlternatives(origin1_, protocols1_); OptRef> protocols = protocols_.findAlternatives(origin1_); @@ -55,7 +63,9 @@ TEST_F(AlternateProtocolsCacheImplTest, FindAlternatives) { } TEST_F(AlternateProtocolsCacheImplTest, FindAlternativesAfterReplacement) { + EXPECT_CALL(*store_, addOrUpdate("https://hostname1:1", "alpn1=\"hostname1:1\"; ma=5")); protocols_.setAlternatives(origin1_, protocols1_); + EXPECT_CALL(*store_, addOrUpdate("https://hostname1:1", "alpn2=\"hostname2:2\"; ma=10")); protocols_.setAlternatives(origin1_, protocols2_); OptRef> protocols = protocols_.findAlternatives(origin1_); @@ -65,7 +75,9 @@ TEST_F(AlternateProtocolsCacheImplTest, FindAlternativesAfterReplacement) { } TEST_F(AlternateProtocolsCacheImplTest, FindAlternativesForMultipleOrigins) { + EXPECT_CALL(*store_, addOrUpdate("https://hostname1:1", "alpn1=\"hostname1:1\"; ma=5")); protocols_.setAlternatives(origin1_, protocols1_); + EXPECT_CALL(*store_, addOrUpdate("https://hostname2:2", "alpn2=\"hostname2:2\"; ma=10")); protocols_.setAlternatives(origin2_, protocols2_); OptRef> protocols = protocols_.findAlternatives(origin1_); @@ -77,8 +89,10 @@ TEST_F(AlternateProtocolsCacheImplTest, FindAlternativesForMultipleOrigins) { } TEST_F(AlternateProtocolsCacheImplTest, FindAlternativesAfterExpiration) { + EXPECT_CALL(*store_, addOrUpdate("https://hostname1:1", "alpn1=\"hostname1:1\"; ma=5")); protocols_.setAlternatives(origin1_, protocols1_); 
simTime().setMonotonicTime(expiration1_ + Seconds(1)); + EXPECT_CALL(*store_, remove("https://hostname1:1")); OptRef> protocols = protocols_.findAlternatives(origin1_); ASSERT_FALSE(protocols.has_value()); @@ -86,8 +100,12 @@ TEST_F(AlternateProtocolsCacheImplTest, FindAlternativesAfterExpiration) { } TEST_F(AlternateProtocolsCacheImplTest, FindAlternativesAfterPartialExpiration) { - protocols_.setAlternatives(origin1_, {protocol1_, protocol2_}); + EXPECT_CALL(*store_, addOrUpdate("https://hostname1:1", + "alpn1=\"hostname1:1\"; ma=5,alpn2=\"hostname2:2\"; ma=10")); + std::vector both = {protocol1_, protocol2_}; + protocols_.setAlternatives(origin1_, both); simTime().setMonotonicTime(expiration1_ + Seconds(1)); + EXPECT_CALL(*store_, addOrUpdate("https://hostname1:1", "alpn2=\"hostname2:2\"; ma=10")); OptRef> protocols = protocols_.findAlternatives(origin1_); ASSERT_TRUE(protocols.has_value()); @@ -96,17 +114,15 @@ TEST_F(AlternateProtocolsCacheImplTest, FindAlternativesAfterPartialExpiration) } TEST_F(AlternateProtocolsCacheImplTest, FindAlternativesAfterTruncation) { - AlternateProtocolsCacheImpl::AlternateProtocol protocol = protocol1_; - std::vector expected_protocols; for (size_t i = 0; i < 10; ++i) { - protocol.port_++; - expected_protocols.push_back(protocol); + protocol1_.port_++; + expected_protocols.push_back(protocol1_); } std::vector full_protocols = expected_protocols; - protocol.port_++; - full_protocols.push_back(protocol); - full_protocols.push_back(protocol); + protocol1_.port_++; + full_protocols.push_back(protocol1_); + full_protocols.push_back(protocol1_); protocols_.setAlternatives(origin1_, full_protocols); OptRef> protocols = @@ -116,6 +132,48 @@ TEST_F(AlternateProtocolsCacheImplTest, FindAlternativesAfterTruncation) { EXPECT_EQ(expected_protocols, protocols.ref()); } +TEST_F(AlternateProtocolsCacheImplTest, ToAndFromString) { + auto testAltSvc = [&](const std::string& original_alt_svc, + const std::string& expected_alt_svc) -> void { + 
absl::optional> protocols = + AlternateProtocolsCacheImpl::protocolsFromString(original_alt_svc, simTime(), true); + ASSERT(protocols.has_value()); + ASSERT_GE(protocols.value().size(), 1); + + AlternateProtocolsCache::AlternateProtocol& protocol = protocols.value()[0]; + EXPECT_EQ("h3-29", protocol.alpn_); + EXPECT_EQ("", protocol.hostname_); + EXPECT_EQ(443, protocol.port_); + auto duration = std::chrono::duration_cast(protocol.expiration_ - + simTime().monotonicTime()); + EXPECT_EQ(86400, duration.count()); + + if (protocols.value().size() == 2) { + AlternateProtocolsCache::AlternateProtocol& protocol2 = protocols.value()[1]; + EXPECT_EQ("h3", protocol2.alpn_); + EXPECT_EQ("", protocol2.hostname_); + EXPECT_EQ(443, protocol2.port_); + duration = std::chrono::duration_cast(protocol2.expiration_ - + simTime().monotonicTime()); + EXPECT_EQ(60, duration.count()); + } + + std::string alt_svc = + AlternateProtocolsCacheImpl::protocolsToStringForCache(protocols.value(), simTime()); + EXPECT_EQ(expected_alt_svc, alt_svc); + }; + + testAltSvc("h3-29=\":443\"; ma=86400", "h3-29=\":443\"; ma=86400"); + testAltSvc("h3-29=\":443\"; ma=86400,h3=\":443\"; ma=60", + "h3-29=\":443\"; ma=86400,h3=\":443\"; ma=60"); + + // Test once more to make sure we handle time advancing correctly. + // the absolute expiration time in testAltSvc is expected to be 86400 so add + // 60s to the default max age. 
+ simTime().setMonotonicTime(simTime().monotonicTime() + std::chrono::seconds(60)); + testAltSvc("h3-29=\":443\"; ma=86460", "h3-29=\":443\"; ma=86460"); +} + } // namespace } // namespace Http } // namespace Envoy diff --git a/test/common/http/alternate_protocols_cache_manager_test.cc b/test/common/http/alternate_protocols_cache_manager_test.cc index 3763c6e9795c6..c24c78594717b 100644 --- a/test/common/http/alternate_protocols_cache_manager_test.cc +++ b/test/common/http/alternate_protocols_cache_manager_test.cc @@ -7,6 +7,8 @@ #include "gtest/gtest.h" +using testing::Return; + namespace Envoy { namespace Http { @@ -56,6 +58,14 @@ TEST_F(AlternateProtocolsCacheManagerTest, GetCache) { EXPECT_EQ(cache, manager_->getCache(options1_)); } +TEST_F(AlternateProtocolsCacheManagerTest, GetCacheWithFlushingAndConcurrency) { + EXPECT_CALL(context_.options_, concurrency()).WillOnce(Return(5)); + options1_.mutable_key_value_store_config(); + initialize(); + EXPECT_THROW_WITH_REGEX(manager_->getCache(options1_), EnvoyException, + "options has key value store but Envoy has concurrency = 5"); +} + TEST_F(AlternateProtocolsCacheManagerTest, GetCacheForDifferentOptions) { initialize(); AlternateProtocolsCacheSharedPtr cache1 = manager_->getCache(options1_); diff --git a/test/common/http/conn_pool_grid_test.cc b/test/common/http/conn_pool_grid_test.cc index f38d056688b1a..ebe3accff1774 100644 --- a/test/common/http/conn_pool_grid_test.cc +++ b/test/common/http/conn_pool_grid_test.cc @@ -102,7 +102,7 @@ class ConnectivityGridTest : public Event::TestUsingSimulatedTime, public testin public: ConnectivityGridTest() : options_({Http::Protocol::Http11, Http::Protocol::Http2, Http::Protocol::Http3}), - alternate_protocols_(std::make_shared(simTime())), + alternate_protocols_(std::make_shared(simTime(), nullptr)), quic_stat_names_(store_.symbolTable()), grid_(dispatcher_, random_, Upstream::makeTestHost(cluster_, "hostname", "tcp://127.0.0.1:9000", simTime()), @@ -114,9 +114,18 @@ class 
ConnectivityGridTest : public Event::TestUsingSimulatedTime, public testin grid_.encoder_ = &encoder_; } + AlternateProtocolsCacheSharedPtr + maybeCreateAlternateProtocolsCacheImpl(bool use_alternate_protocols) { + AlternateProtocolsCacheSharedPtr cache; + if (!use_alternate_protocols) { + return nullptr; + } + return std::make_shared(simTime(), nullptr); + } + void addHttp3AlternateProtocol() { AlternateProtocolsCacheImpl::Origin origin("https", "hostname", 9000); - const std::vector protocols = { + std::vector protocols = { {"h3", "", origin.port_, simTime().monotonicTime() + Seconds(5)}}; alternate_protocols_->setAlternatives(origin, protocols); } @@ -543,7 +552,7 @@ TEST_F(ConnectivityGridTest, SuccessWithoutHttp3) { // Test that when HTTP/3 is not available then the HTTP/3 pool is skipped. TEST_F(ConnectivityGridTest, SuccessWithExpiredHttp3) { AlternateProtocolsCacheImpl::Origin origin("https", "hostname", 9000); - const std::vector protocols = { + std::vector protocols = { {"h3-29", "", origin.port_, simTime().monotonicTime() + Seconds(5)}}; alternate_protocols_->setAlternatives(origin, protocols); simTime().setMonotonicTime(simTime().monotonicTime() + Seconds(10)); @@ -566,7 +575,7 @@ TEST_F(ConnectivityGridTest, SuccessWithExpiredHttp3) { // skipped. TEST_F(ConnectivityGridTest, SuccessWithoutHttp3NoMatchingHostname) { AlternateProtocolsCacheImpl::Origin origin("https", "hostname", 9000); - const std::vector protocols = { + std::vector protocols = { {"h3-29", "otherhostname", origin.port_, simTime().monotonicTime() + Seconds(5)}}; alternate_protocols_->setAlternatives(origin, protocols); @@ -587,7 +596,7 @@ TEST_F(ConnectivityGridTest, SuccessWithoutHttp3NoMatchingHostname) { // skipped. 
TEST_F(ConnectivityGridTest, SuccessWithoutHttp3NoMatchingPort) { AlternateProtocolsCacheImpl::Origin origin("https", "hostname", 9000); - const std::vector protocols = { + std::vector protocols = { {"h3-29", "", origin.port_ + 1, simTime().monotonicTime() + Seconds(5)}}; alternate_protocols_->setAlternatives(origin, protocols); @@ -607,7 +616,7 @@ TEST_F(ConnectivityGridTest, SuccessWithoutHttp3NoMatchingPort) { // Test that when the alternate protocol specifies an invalid ALPN, then the HTTP/3 pool is skipped. TEST_F(ConnectivityGridTest, SuccessWithoutHttp3NoMatchingAlpn) { AlternateProtocolsCacheImpl::Origin origin("https", "hostname", 9000); - const std::vector protocols = { + std::vector protocols = { {"http/2", "", origin.port_, simTime().monotonicTime() + Seconds(5)}}; alternate_protocols_->setAlternatives(origin, protocols); diff --git a/test/extensions/key_value/file_based/BUILD b/test/extensions/key_value/file_based/BUILD index d55e4a2866e35..c25efa119cac8 100644 --- a/test/extensions/key_value/file_based/BUILD +++ b/test/extensions/key_value/file_based/BUILD @@ -8,6 +8,23 @@ licenses(["notice"]) # Apache 2 envoy_package() +envoy_cc_test( + name = "alternate_protocols_cache_impl_test", + srcs = ["alternate_protocols_cache_impl_test.cc"], + deps = [ + "//source/common/common:key_value_store_lib", + "//source/common/http:alternate_protocols_cache", + "//source/common/singleton:manager_impl_lib", + "//source/extensions/key_value/file_based:config_lib", + "//test/common/http:common_lib", + "//test/mocks:common_lib", + "//test/mocks/thread_local:thread_local_mocks", + "//test/test_common:simulated_time_system_lib", + "@envoy_api//envoy/config/common/key_value/v3:pkg_cc_proto", + "@envoy_api//envoy/extensions/key_value/file_based/v3:pkg_cc_proto", + ], +) + envoy_cc_test( name = "key_value_store_test", srcs = ["key_value_store_test.cc"], diff --git a/test/extensions/key_value/file_based/alternate_protocols_cache_impl_test.cc 
b/test/extensions/key_value/file_based/alternate_protocols_cache_impl_test.cc new file mode 100644 index 0000000000000..76f1a0651af25 --- /dev/null +++ b/test/extensions/key_value/file_based/alternate_protocols_cache_impl_test.cc @@ -0,0 +1,49 @@ +#include "envoy/config/common/key_value/v3/config.pb.validate.h" +#include "envoy/extensions/key_value/file_based/v3/config.pb.h" + +#include "source/common/http/alternate_protocols_cache_manager_impl.h" +#include "source/common/singleton/manager_impl.h" + +#include "test/mocks/server/factory_context.h" +#include "test/mocks/thread_local/mocks.h" +#include "test/test_common/simulated_time_system.h" + +#include "gtest/gtest.h" + +namespace Envoy { +namespace { +class AlternateProtocolsCacheManagerTest : public testing::Test, + public Event::TestUsingSimulatedTime { +public: + AlternateProtocolsCacheManagerTest() { + options_.set_name("name"); + options_.mutable_max_entries()->set_value(10); + } + void initialize() { + Http::AlternateProtocolsData data = {context_}; + factory_ = std::make_unique(singleton_manager_, + tls_, data); + manager_ = factory_->get(); + } + Singleton::ManagerImpl singleton_manager_{Thread::threadFactoryForTest()}; + NiceMock context_; + testing::NiceMock tls_; + std::unique_ptr factory_; + Http::AlternateProtocolsCacheManagerSharedPtr manager_; + envoy::config::core::v3::AlternateProtocolsCacheOptions options_; +}; + +TEST_F(AlternateProtocolsCacheManagerTest, GetCacheWithFlushingAndConcurrency) { + envoy::extensions::key_value::file_based::v3::FileBasedKeyValueStoreConfig config; + config.set_filename("foo"); + envoy::config::common::key_value::v3::KeyValueStoreConfig kv_config; + kv_config.mutable_config()->set_name("envoy.key_value.file_based"); + kv_config.mutable_config()->mutable_typed_config()->PackFrom(config); + options_.mutable_key_value_store_config()->set_name("envoy.common.key_value"); + options_.mutable_key_value_store_config()->mutable_typed_config()->PackFrom(kv_config); + 
initialize(); + manager_->getCache(options_); +} + +} // namespace +} // namespace Envoy diff --git a/test/mocks/http/alternate_protocols_cache.h b/test/mocks/http/alternate_protocols_cache.h index 2f1f287685730..e1e0a4558c830 100644 --- a/test/mocks/http/alternate_protocols_cache.h +++ b/test/mocks/http/alternate_protocols_cache.h @@ -11,7 +11,7 @@ class MockAlternateProtocolsCache : public AlternateProtocolsCache { ~MockAlternateProtocolsCache() override; MOCK_METHOD(void, setAlternatives, - (const Origin& origin, const std::vector& protocols)); + (const Origin& origin, std::vector& protocols)); MOCK_METHOD(OptRef>, findAlternatives, (const Origin& origin)); MOCK_METHOD(size_t, size, (), (const)); diff --git a/test/server/config_validation/cluster_manager_test.cc b/test/server/config_validation/cluster_manager_test.cc index d0762c4425df7..5265850259702 100644 --- a/test/server/config_validation/cluster_manager_test.cc +++ b/test/server/config_validation/cluster_manager_test.cc @@ -32,7 +32,7 @@ TEST(ValidationClusterManagerTest, MockedMethods) { Event::SimulatedTimeSystem time_system; NiceMock validation_context; Api::ApiPtr api(Api::createApiForTest(stats_store, time_system)); - Server::MockOptions options; + NiceMock options; NiceMock runtime; NiceMock tls; NiceMock random; From 846109fb7d85fd56cb29419ac44888cea9cf92ed Mon Sep 17 00:00:00 2001 From: Rohit Agrawal Date: Thu, 23 Sep 2021 11:00:41 -0400 Subject: [PATCH 101/121] docs: add more description for max_connection_duration in timeout faqs (#18193) This PR adds some more description around max_connection_duration in the Timeout FAQs. Commit Message: add more description for max_connection_duration in timeout faqs. Additional Description: - Risk Level: N/A Testing: N/A Docs Changes: Added some more description around max_connection_duration in the Timeout FAQs. 
Release Notes: N/A Platform Specific Features: N/A Signed-off-by: Rohit Agrawal Signed-off-by: gayang --- api/envoy/config/core/v3/protocol.proto | 7 ++++--- docs/root/faq/configuration/timeouts.rst | 15 +++++++++++++++ 2 files changed, 19 insertions(+), 3 deletions(-) diff --git a/api/envoy/config/core/v3/protocol.proto b/api/envoy/config/core/v3/protocol.proto index a1560318e8388..f65f893772602 100644 --- a/api/envoy/config/core/v3/protocol.proto +++ b/api/envoy/config/core/v3/protocol.proto @@ -155,10 +155,11 @@ message HttpProtocolOptions { // The maximum duration of a connection. The duration is defined as a period since a connection // was established. If not set, there is no max duration. When max_connection_duration is reached - // the connection will be closed. Drain sequence will occur prior to closing the connection if - // if's applicable. See :ref:`drain_timeout + // and if there are no active streams, the connection will be closed. If there are any active streams, + // the drain sequence will kick-in, and the connection will be force-closed after the drain period. + // See :ref:`drain_timeout // `. - // Note: not implemented for upstream connections. + // Note: This feature is not yet implemented for the upstream connections. google.protobuf.Duration max_connection_duration = 3; // The maximum number of headers. 
If unconfigured, the default diff --git a/docs/root/faq/configuration/timeouts.rst b/docs/root/faq/configuration/timeouts.rst index a68963752447d..89177db38b7f8 100644 --- a/docs/root/faq/configuration/timeouts.rst +++ b/docs/root/faq/configuration/timeouts.rst @@ -30,6 +30,21 @@ Connection timeouts apply to the entire HTTP connection and all streams the conn connections use the :ref:`common_http_protocol_options ` field in the Cluster's :ref:`extension_protocol_options`, keyed by `envoy.extensions.upstreams.http.v3.HttpProtocolOptions` +* The HTTP protocol :ref:`max_connection_duration ` + is defined in a generic message used by both the HTTP connection manager as well as upstream cluster + HTTP connections but is currently only implemented for the downstream connections. The maximum + connection duration is the time after which a downstream connection will be drained and/or closed, + starting from when it first got established. If there are no active streams, the connection will be + closed. If there are any active streams, the drain sequence will kick-in, and the connection will be + force-closed after the drain period. The default value of max connection duration is *0* or unlimited, + which means that the connections will never be closed due to aging. It could be helpful in scenarios + when you are running a pool of Envoy edge-proxies and would want to close a downstream connection after + some time to prevent sticky-ness. It could also help to better load balance the overall traffic among + this pool, especially if the size of this pool is dynamically changing. To modify the max connection + duration for downstream connections use the + :ref:`common_http_protocol_options ` + field in the HTTP connection manager configuration. + See :ref:`below ` for other connection timeouts. 
Stream timeouts From a664023ffd5169b27ae68f92a53e29b4676d242e Mon Sep 17 00:00:00 2001 From: Adam Kotwasinski Date: Thu, 23 Sep 2021 10:52:26 -0700 Subject: [PATCH 102/121] kafka: upgrade kafka source code and server dependencies to 2.8.1 (#18074) Signed-off-by: Adam Kotwasinski Signed-off-by: gayang --- bazel/external/kafka_int32.patch | 27 --- bazel/repositories.bzl | 1 - bazel/repository_locations.bzl | 16 +- .../filters/network/source/kafka_response.h | 2 +- .../filters/network/source/kafka_types.h | 14 ++ .../mesh/command_handlers/api_versions.cc | 8 +- .../source/mesh/command_handlers/metadata.cc | 10 +- .../source/mesh/command_handlers/produce.cc | 2 +- .../produce_record_extractor.cc | 6 +- .../network/source/protocol/generator.py | 57 ++++-- .../filters/network/source/serialization.cc | 15 ++ .../filters/network/source/serialization.h | 179 ++++++++++++++++-- .../filters/network/source/tagged_fields.h | 2 +- .../test/broker/filter_protocol_test.cc | 2 +- .../command_handlers/metadata_unit_test.cc | 10 +- .../command_handlers/produce_unit_test.cc | 16 +- .../test/mesh/request_processor_unit_test.cc | 6 +- .../filters/network/test/message_utilities.h | 5 +- .../network/test/metrics_integration_test.cc | 4 +- .../test/protocol/request_utilities_cc.j2 | 15 +- .../test/protocol/response_utilities_cc.j2 | 7 +- .../network/test/serialization_test.cc | 38 +++- .../network_filters/kafka_broker_filter.rst | 2 +- .../network_filters/kafka_mesh_filter.rst | 2 +- 24 files changed, 338 insertions(+), 108 deletions(-) delete mode 100644 bazel/external/kafka_int32.patch diff --git a/bazel/external/kafka_int32.patch b/bazel/external/kafka_int32.patch deleted file mode 100644 index 8b88fe3358211..0000000000000 --- a/bazel/external/kafka_int32.patch +++ /dev/null @@ -1,27 +0,0 @@ ---- DescribeGroupsResponse.json 2020-03-25 16:12:16.373302600 -0400 -+++ DescribeGroupsResponse.json 2020-03-25 16:11:16.184156200 -0400 -@@ -63,7 +63,7 @@ - { "name": "MemberAssignment", "type": 
"bytes", "versions": "0+", - "about": "The current assignment provided by the group leader." } - ]}, -- { "name": "AuthorizedOperations", "type": "int32", "versions": "3+", "default": "-2147483648", -+ { "name": "AuthorizedOperations", "type": "int32", "versions": "3+", "default": "INT32_MIN", - "about": "32-bit bitfield to represent authorized operations for this group." } - ]} - ] - ---- MetadataResponse.json 2020-03-25 15:53:36.319161000 -0400 -+++ MetadataResponse.json 2020-03-25 15:54:11.510400000 -0400 -@@ -81,10 +81,10 @@ - { "name": "OfflineReplicas", "type": "[]int32", "versions": "5+", "ignorable": true, - "about": "The set of offline replicas of this partition." } - ]}, -- { "name": "TopicAuthorizedOperations", "type": "int32", "versions": "8+", "default": "-2147483648", -+ { "name": "TopicAuthorizedOperations", "type": "int32", "versions": "8+", "default": "INT32_MIN", - "about": "32-bit bitfield to represent authorized operations for this topic." } - ]}, -- { "name": "ClusterAuthorizedOperations", "type": "int32", "versions": "8+", "default": "-2147483648", -+ { "name": "ClusterAuthorizedOperations", "type": "int32", "versions": "8+", "default": "INT32_MIN", - "about": "32-bit bitfield to represent authorized operations for this cluster." 
} - ] - } diff --git a/bazel/repositories.bzl b/bazel/repositories.bzl index cb8967cd6faee..2547bfbec54f4 100644 --- a/bazel/repositories.bzl +++ b/bazel/repositories.bzl @@ -1065,7 +1065,6 @@ filegroup( external_http_archive( name = "kafka_source", build_file_content = KAFKASOURCE_BUILD_CONTENT, - patches = ["@envoy//bazel/external:kafka_int32.patch"], ) # This archive provides Kafka C/CPP client used by mesh filter to communicate with upstream diff --git a/bazel/repository_locations.bzl b/bazel/repository_locations.bzl index d76537a00b84b..f1f6d073f1ceb 100644 --- a/bazel/repository_locations.bzl +++ b/bazel/repository_locations.bzl @@ -930,13 +930,13 @@ REPOSITORY_LOCATIONS_SPEC = dict( project_name = "Kafka (source)", project_desc = "Open-source distributed event streaming platform", project_url = "https://kafka.apache.org", - version = "2.4.1", - sha256 = "740236f44d66e33ea83382383b4fb7eabdab7093a644b525dd5ec90207f933bd", + version = "2.8.1", + sha256 = "c3fd89257e056e11b5e1b09d4bbd8332ce5abfdfa7c7a5bb6a5cfe9860fcc688", strip_prefix = "kafka-{version}/clients/src/main/resources/common/message", urls = ["https://github.com/apache/kafka/archive/{version}.zip"], use_category = ["dataplane_ext"], extensions = ["envoy.filters.network.kafka_broker", "envoy.filters.network.kafka_mesh"], - release_date = "2020-03-03", + release_date = "2021-09-14", cpe = "cpe:2.3:a:apache:kafka:*", ), edenhill_librdkafka = dict( @@ -956,11 +956,11 @@ REPOSITORY_LOCATIONS_SPEC = dict( project_name = "Kafka (server binary)", project_desc = "Open-source distributed event streaming platform", project_url = "https://kafka.apache.org", - version = "2.4.1", - sha256 = "2177cbd14118999e1d76fec628ca78ace7e6f841219dbc6035027c796bbe1a2a", - strip_prefix = "kafka_2.12-{version}", - urls = ["https://archive.apache.org/dist/kafka/{version}/kafka_2.12-{version}.tgz"], - release_date = "2020-03-12", + version = "2.8.1", + sha256 = "4888b03e3b27dd94f2d830ce3bae9d7d98b0ccee3a5d30c919ccb60e0fa1f139", + 
strip_prefix = "kafka_2.13-{version}", + urls = ["https://archive.apache.org/dist/kafka/{version}/kafka_2.13-{version}.tgz"], + release_date = "2021-09-14", use_category = ["test_only"], ), kafka_python_client = dict( diff --git a/contrib/kafka/filters/network/source/kafka_response.h b/contrib/kafka/filters/network/source/kafka_response.h index 32bc8317f5131..f135f5cacb744 100644 --- a/contrib/kafka/filters/network/source/kafka_response.h +++ b/contrib/kafka/filters/network/source/kafka_response.h @@ -13,7 +13,7 @@ namespace Kafka { * Decides if response with given api key & version should have tagged fields in header. * Bear in mind, that ApiVersions responses DO NOT contain tagged fields in header (despite having * flexible versions) as per - * https://github.com/apache/kafka/blob/2.4.0/clients/src/main/resources/common/message/ApiVersionsResponse.json#L24 + * https://github.com/apache/kafka/blob/2.8.1/clients/src/main/resources/common/message/ApiVersionsResponse.json#L24 * This method gets implemented in generated code through 'kafka_response_resolver_cc.j2'. * * @param api_key Kafka request key. 
diff --git a/contrib/kafka/filters/network/source/kafka_types.h b/contrib/kafka/filters/network/source/kafka_types.h index 3240b9a9c2d6c..d01c304984e4c 100644 --- a/contrib/kafka/filters/network/source/kafka_types.h +++ b/contrib/kafka/filters/network/source/kafka_types.h @@ -31,6 +31,20 @@ using NullableBytes = absl::optional; */ template using NullableArray = absl::optional>; +/** + * Analogous to: + * https://github.com/apache/kafka/blob/2.8.1/clients/src/main/java/org/apache/kafka/common/Uuid.java#L28 + */ +struct Uuid { + + const int64_t msb_; + const int64_t lsb_; + + Uuid(const int64_t msb, const int64_t lsb) : msb_{msb}, lsb_{lsb} {}; + + bool operator==(const Uuid& rhs) const { return msb_ == rhs.msb_ && lsb_ == rhs.lsb_; }; +}; + } // namespace Kafka } // namespace NetworkFilters } // namespace Extensions diff --git a/contrib/kafka/filters/network/source/mesh/command_handlers/api_versions.cc b/contrib/kafka/filters/network/source/mesh/command_handlers/api_versions.cc index 1fa8cfa8f5b82..31cb53f12a402 100644 --- a/contrib/kafka/filters/network/source/mesh/command_handlers/api_versions.cc +++ b/contrib/kafka/filters/network/source/mesh/command_handlers/api_versions.cc @@ -38,10 +38,10 @@ AbstractResponseSharedPtr ApiVersionsRequestHolder::computeAnswer() const { request_header_.correlation_id_}; const int16_t error_code = 0; - const ApiVersionsResponseKey produce_entry = {PRODUCE_REQUEST_API_KEY, MIN_PRODUCE_SUPPORTED, - MAX_PRODUCE_SUPPORTED}; - const ApiVersionsResponseKey metadata_entry = {METADATA_REQUEST_API_KEY, MIN_METADATA_SUPPORTED, - MAX_METADATA_SUPPORTED}; + const ApiVersion produce_entry = {PRODUCE_REQUEST_API_KEY, MIN_PRODUCE_SUPPORTED, + MAX_PRODUCE_SUPPORTED}; + const ApiVersion metadata_entry = {METADATA_REQUEST_API_KEY, MIN_METADATA_SUPPORTED, + MAX_METADATA_SUPPORTED}; const ApiVersionsResponse real_response = {error_code, {produce_entry, metadata_entry}}; return std::make_shared>(metadata, real_response); diff --git 
a/contrib/kafka/filters/network/source/mesh/command_handlers/metadata.cc b/contrib/kafka/filters/network/source/mesh/command_handlers/metadata.cc index 05b63b451d1fa..07f402a80802e 100644 --- a/contrib/kafka/filters/network/source/mesh/command_handlers/metadata.cc +++ b/contrib/kafka/filters/network/source/mesh/command_handlers/metadata.cc @@ -33,8 +33,14 @@ AbstractResponseSharedPtr MetadataRequestHolder::computeAnswer() const { advertised_address.second}; std::vector response_topics; if (request_->data_.topics_) { - for (const auto& topic : *(request_->data_.topics_)) { - const std::string& topic_name = topic.name_; + for (const MetadataRequestTopic& topic : *(request_->data_.topics_)) { + if (!topic.name_) { + // The client sent request without topic name (UUID was sent instead). + // We do not know how to handle it, so do not send any metadata. + // This will cause failures in clients downstream. + continue; + } + const std::string& topic_name = *(topic.name_); std::vector topic_partitions; const absl::optional cluster_config = configuration_.computeClusterConfigForTopic(topic_name); diff --git a/contrib/kafka/filters/network/source/mesh/command_handlers/produce.cc b/contrib/kafka/filters/network/source/mesh/command_handlers/produce.cc index e2ed06fdbb17e..48d08947f0890 100644 --- a/contrib/kafka/filters/network/source/mesh/command_handlers/produce.cc +++ b/contrib/kafka/filters/network/source/mesh/command_handlers/produce.cc @@ -20,7 +20,7 @@ ProduceRequestHolder::ProduceRequestHolder(AbstractRequestListener& filter, const RecordExtractor& record_extractor, const std::shared_ptr> request) : BaseInFlightRequest{filter}, kafka_facade_{kafka_facade}, request_{request} { - outbound_records_ = record_extractor.extractRecords(request_->data_.topics_); + outbound_records_ = record_extractor.extractRecords(request_->data_.topic_data_); expected_responses_ = outbound_records_.size(); } diff --git 
a/contrib/kafka/filters/network/source/mesh/command_handlers/produce_record_extractor.cc b/contrib/kafka/filters/network/source/mesh/command_handlers/produce_record_extractor.cc index 3c98dc4885cf9..6ea8620d600a5 100644 --- a/contrib/kafka/filters/network/source/mesh/command_handlers/produce_record_extractor.cc +++ b/contrib/kafka/filters/network/source/mesh/command_handlers/produce_record_extractor.cc @@ -10,11 +10,11 @@ std::vector RecordExtractorImpl::extractRecords(const std::vector& data) const { std::vector result; for (const auto& topic_data : data) { - for (const auto& partition_data : topic_data.partitions_) { + for (const auto& partition_data : topic_data.partition_data_) { // Kafka protocol allows nullable data. if (partition_data.records_) { - const auto topic_result = extractPartitionRecords( - topic_data.name_, partition_data.partition_index_, *(partition_data.records_)); + const auto topic_result = extractPartitionRecords(topic_data.name_, partition_data.index_, + *(partition_data.records_)); std::copy(topic_result.begin(), topic_result.end(), std::back_inserter(result)); } } diff --git a/contrib/kafka/filters/network/source/protocol/generator.py b/contrib/kafka/filters/network/source/protocol/generator.py index 846dd2aa2d9b7..2fd18ebc2d69b 100755 --- a/contrib/kafka/filters/network/source/protocol/generator.py +++ b/contrib/kafka/filters/network/source/protocol/generator.py @@ -15,7 +15,7 @@ def generate_main_code(type, main_header_file, resolver_cc_file, metrics_header_ - resolver_cc_file - contains request api key & version mapping to deserializer (from header file) - metrics_header_file - contains metrics with names corresponding to messages """ - processor = StatefulProcessor() + processor = StatefulProcessor(type) # Parse provided input files. 
messages = processor.parse_messages(input_files) @@ -66,7 +66,7 @@ def generate_test_code( - codec_test_cc_file - tests involving codec and Request/ResponseParserResolver, - utilities_cc_file - utilities for creating sample messages. """ - processor = StatefulProcessor() + processor = StatefulProcessor(type) # Parse provided input files. messages = processor.parse_messages(input_files) @@ -97,7 +97,8 @@ class StatefulProcessor: AlterConfigsResource, what would cause a compile-time error if we were to handle it trivially). """ - def __init__(self): + def __init__(self, type): + self.type = type # Complex types that have been encountered during processing. self.known_types = set() # Name of parent message type that's being processed right now. @@ -107,8 +108,8 @@ def __init__(self): def parse_messages(self, input_files): """ - Parse request/response structures from provided input files. - """ + Parse request/response structures from provided input files. + """ import re import json @@ -123,9 +124,18 @@ def parse_messages(self, input_files): without_comments = re.sub(r'\s*//.*\n', '\n', raw_contents) without_empty_newlines = re.sub( r'^\s*$', '', without_comments, flags=re.MULTILINE) - message_spec = json.loads(without_empty_newlines) - message = self.parse_top_level_element(message_spec) - messages.append(message) + # Windows support: see PR 10542 for details. + amended = re.sub(r'-2147483648', 'INT32_MIN', without_empty_newlines) + # Kafka JSON files are malformed. See KAFKA-12794. 
+ if input_file == 'external/kafka_source/DescribeProducersRequest.json': + amended = amended[:-6] + message_spec = json.loads(amended) + # Adopt publicly available messages only: + # https://kafka.apache.org/28/protocol.html#protocol_api_keys + api_key = message_spec['apiKey'] + if api_key <= 51 or api_key in [56, 57, 60, 61]: + message = self.parse_top_level_element(message_spec) + messages.append(message) except Exception as e: print('could not process %s' % input_file) raise @@ -195,7 +205,9 @@ def parse_complex_type(self, type_name, field_spec, versions): child = self.parse_field(child_field, versions[-1]) if child is not None: fields.append(child) - + # Some structures share the same name, use request/response as prefix. + if type_name in ['EntityData', 'EntryData', 'PartitionData', 'TopicData']: + type_name = self.type.capitalize() + type_name # Some of the types repeat multiple times (e.g. AlterableConfig). # In such a case, every second or later occurrence of the same name is going to be prefixed # with parent type, e.g. we have AlterableConfig (for AlterConfigsRequest) and then @@ -379,7 +391,7 @@ def default_value(self): return str(self.type.default_value()) def example_value_for_test(self, version): - if self.is_nullable(): + if self.is_nullable_in_version(version): return 'absl::make_optional<%s>(%s)' % ( self.type.name, self.type.example_value_for_test(version)) else: @@ -470,7 +482,10 @@ class Primitive(TypeSpecification): Represents a Kafka primitive value. 
""" - USABLE_PRIMITIVE_TYPE_NAMES = ['bool', 'int8', 'int16', 'int32', 'int64', 'string', 'bytes'] + USABLE_PRIMITIVE_TYPE_NAMES = [ + 'bool', 'int8', 'int16', 'int32', 'int64', 'uint16', 'float64', 'string', 'bytes', + 'records', 'uuid' + ] KAFKA_TYPE_TO_ENVOY_TYPE = { 'string': 'std::string', @@ -479,7 +494,11 @@ class Primitive(TypeSpecification): 'int16': 'int16_t', 'int32': 'int32_t', 'int64': 'int64_t', + 'uint16': 'uint16_t', + 'float64': 'double', 'bytes': 'Bytes', + 'records': 'Bytes', + 'uuid': 'Uuid', 'tagged_fields': 'TaggedFields', } @@ -490,13 +509,18 @@ class Primitive(TypeSpecification): 'int16': 'Int16Deserializer', 'int32': 'Int32Deserializer', 'int64': 'Int64Deserializer', + 'uint16': 'UInt16Deserializer', + 'float64': 'Float64Deserializer', 'bytes': 'BytesDeserializer', + 'records': 'BytesDeserializer', + 'uuid': 'UuidDeserializer', 'tagged_fields': 'TaggedFieldsDeserializer', } KAFKA_TYPE_TO_COMPACT_DESERIALIZER = { 'string': 'CompactStringDeserializer', - 'bytes': 'CompactBytesDeserializer' + 'bytes': 'CompactBytesDeserializer', + 'records': 'CompactBytesDeserializer' } # See https://github.com/apache/kafka/tree/trunk/clients/src/main/resources/common/message#deserializing-messages @@ -508,6 +532,7 @@ class Primitive(TypeSpecification): 'int32': '0', 'int64': '0', 'bytes': '{}', + 'uuid': 'Uuid{0, 0}', 'tagged_fields': 'TaggedFields({})', } @@ -525,8 +550,14 @@ class Primitive(TypeSpecification): 'static_cast(32)', 'int64': 'static_cast(64)', + 'float64': + 'static_cast(13.125)', 'bytes': 'Bytes({0, 1, 2, 3})', + 'records': + 'Bytes({0, 1, 2, 3})', + 'uuid': + 'Uuid{13, 42}', 'tagged_fields': 'TaggedFields{std::vector{{10, Bytes({1, 2, 3})}, {20, Bytes({4, 5, 6})}}}', } @@ -561,7 +592,7 @@ def default_value(self): return Primitive.compute(self.original_name, Primitive.KAFKA_TYPE_TO_DEFAULT_VALUE) def has_flexible_handling(self): - return self.original_name in ['string', 'bytes', 'tagged_fields'] + return self.original_name in ['string', 
'bytes', 'records', 'tagged_fields'] def example_value_for_test(self, version): return Primitive.compute(self.original_name, Primitive.KAFKA_TYPE_TO_EXAMPLE_VALUE_FOR_TEST) diff --git a/contrib/kafka/filters/network/source/serialization.cc b/contrib/kafka/filters/network/source/serialization.cc index fc8464f7aac0d..b78085fdbf18e 100644 --- a/contrib/kafka/filters/network/source/serialization.cc +++ b/contrib/kafka/filters/network/source/serialization.cc @@ -205,6 +205,21 @@ uint32_t CompactBytesDeserializer::feed(absl::string_view& data) { false); } +uint32_t NullableCompactBytesDeserializer::feed(absl::string_view& data) { + return feedCompactBytesIntoBuffers(data, length_buf_, length_consumed_, required_, + data_buf_, ready_, NULL_COMPACT_BYTES_LENGTH, + true); +} + +NullableBytes NullableCompactBytesDeserializer::get() const { + const uint32_t original_data_len = length_buf_.get(); + if (NULL_COMPACT_BYTES_LENGTH == original_data_len) { + return absl::nullopt; + } else { + return absl::make_optional(data_buf_); + } +} + } // namespace Kafka } // namespace NetworkFilters } // namespace Extensions diff --git a/contrib/kafka/filters/network/source/serialization.h b/contrib/kafka/filters/network/source/serialization.h index 3401199c002f8..7f77de45b4687 100644 --- a/contrib/kafka/filters/network/source/serialization.h +++ b/contrib/kafka/filters/network/source/serialization.h @@ -65,7 +65,7 @@ template class Deserializer { * Generic integer deserializer (uses array of sizeof(T) bytes). * After all bytes are filled in, the value is converted from network byte-order and returned. */ -template class IntDeserializer : public Deserializer { +template class FixedSizeDeserializer : public Deserializer { public: uint32_t feed(absl::string_view& data) override { const uint32_t available = std::min(sizeof(buf_) - written_, data.size()); @@ -92,7 +92,7 @@ template class IntDeserializer : public Deserializer { /** * Integer deserializer for int8_t. 
*/ -class Int8Deserializer : public IntDeserializer { +class Int8Deserializer : public FixedSizeDeserializer { public: int8_t get() const override { int8_t result = buf_[0]; @@ -103,7 +103,7 @@ class Int8Deserializer : public IntDeserializer { /** * Integer deserializer for int16_t. */ -class Int16Deserializer : public IntDeserializer { +class Int16Deserializer : public FixedSizeDeserializer { public: int16_t get() const override { int16_t result; @@ -112,10 +112,22 @@ class Int16Deserializer : public IntDeserializer { } }; +/** + * Integer deserializer for uint16_t. + */ +class UInt16Deserializer : public FixedSizeDeserializer { +public: + uint16_t get() const override { + uint16_t result; + safeMemcpyUnsafeSrc(&result, buf_); + return be16toh(result); + } +}; + /** * Integer deserializer for int32_t. */ -class Int32Deserializer : public IntDeserializer { +class Int32Deserializer : public FixedSizeDeserializer { public: int32_t get() const override { int32_t result; @@ -127,7 +139,7 @@ class Int32Deserializer : public IntDeserializer { /** * Integer deserializer for uint32_t. */ -class UInt32Deserializer : public IntDeserializer { +class UInt32Deserializer : public FixedSizeDeserializer { public: uint32_t get() const override { uint32_t result; @@ -139,7 +151,7 @@ class UInt32Deserializer : public IntDeserializer { /** * Integer deserializer for uint64_t. */ -class Int64Deserializer : public IntDeserializer { +class Int64Deserializer : public FixedSizeDeserializer { public: int64_t get() const override { int64_t result; @@ -148,12 +160,34 @@ class Int64Deserializer : public IntDeserializer { } }; +/** + * Deserializer for Kafka Float64 type. + * Reference: https://kafka.apache.org/28/protocol.html#protocol_types + * Represents a double-precision 64-bit format IEEE 754 value. The values are encoded using eight + * bytes in network byte order (big-endian). 
+ */ +class Float64Deserializer : public FixedSizeDeserializer { + + static_assert(sizeof(double) == sizeof(uint64_t), "sizeof(double) != sizeof(uint64_t)"); + static_assert(std::numeric_limits::is_iec559, "non-IEC559 (IEEE 754) double"); + +public: + double get() const override { + uint64_t in_network_order; + safeMemcpyUnsafeSrc(&in_network_order, buf_); + uint64_t in_host_order = be64toh(in_network_order); + double result; + safeMemcpy(&result, &in_host_order); + return result; + } +}; + /** * Deserializer for boolean values. * Uses a single int8 deserializer, and checks whether the results equals 0. * When reading a boolean value, any non-zero value is considered true. - * Impl note: could have been a subclass of IntDeserializer with a different get function, - * but it makes it harder to understand. + * Impl note: could have been a subclass of FixedSizeDeserializer with a different get + * function, but it makes it harder to understand. */ class BooleanDeserializer : public Deserializer { public: @@ -175,9 +209,10 @@ class BooleanDeserializer : public Deserializer { * https://cwiki.apache.org/confluence/display/KAFKA/KIP-482%3A+The+Kafka+Protocol+should+Support+Optional+Tagged+Fields#KIP-482:TheKafkaProtocolshouldSupportOptionalTaggedFields-UnsignedVarints * * Impl note: - * This implementation is equivalent to the one present in Kafka 2.4.0, what means that for 5-byte + * This implementation is equivalent to the one present in Kafka, what means that for 5-byte * inputs, the data at bits 5-7 in 5th byte are *ignored* (as long as 8th bit is unset). - * Reference: org.apache.kafka.common.utils.ByteUtils.readUnsignedVarint + * Reference: + * https://github.com/apache/kafka/blob/2.8.1/clients/src/main/java/org/apache/kafka/common/utils/ByteUtils.java#L142 */ class VarUInt32Deserializer : public Deserializer { public: @@ -227,12 +262,13 @@ class VarUInt32Deserializer : public Deserializer { /** * Deserializer for Kafka 'varint' type. 
- * Encoding documentation: https://kafka.apache.org/24/protocol.html#protocol_types + * Encoding documentation: https://kafka.apache.org/28/protocol.html#protocol_types * * Impl note: - * This implementation is equivalent to the one present in Kafka 2.4.0, what means that for 5-byte + * This implementation is equivalent to the one present in Kafka, what means that for 5-byte * inputs, the data at bits 5-7 in 5th byte are *ignored* (as long as 8th bit is unset). - * Reference: org.apache.kafka.common.utils.ByteUtils.readVarint + * Reference: + * https://github.com/apache/kafka/blob/2.8.1/clients/src/main/java/org/apache/kafka/common/utils/ByteUtils.java#L189 */ class VarInt32Deserializer : public Deserializer { public: @@ -253,12 +289,13 @@ class VarInt32Deserializer : public Deserializer { /** * Deserializer for Kafka 'varlong' type. - * Encoding documentation: https://kafka.apache.org/24/protocol.html#protocol_types + * Encoding documentation: https://kafka.apache.org/28/protocol.html#protocol_types * * Impl note: - * This implementation is equivalent to the one present in Kafka 2.4.0, what means that for 10-byte + * This implementation is equivalent to the one present in Kafka, what means that for 10-byte * inputs, the data at bits 3-7 in 10th byte are *ignored* (as long as 8th bit is unset). - * Reference: org.apache.kafka.common.utils.ByteUtils.readVarlong + * Reference: + * https://github.com/apache/kafka/blob/2.8.1/clients/src/main/java/org/apache/kafka/common/utils/ByteUtils.java#L242 */ class VarInt64Deserializer : public Deserializer { public: @@ -515,6 +552,36 @@ class NullableBytesDeserializer : public Deserializer { bool ready_{false}; }; +/** + * Deserializer of nullable compact bytes value. + * First reads length (UNSIGNED_VARINT32) and then allocates the buffer of given length. + * If length was 0, buffer allocation is omitted and deserializer is immediately ready (returning + * null value). 
+ * + * From Kafka documentation: + * First the length N+1 is given as an UNSIGNED_VARINT. Then N bytes follow. + * A null object is represented with a length of 0. + */ +class NullableCompactBytesDeserializer : public Deserializer { +public: + /** + * Can throw EnvoyException if given bytes length is not valid. + */ + uint32_t feed(absl::string_view& data) override; + + bool ready() const override { return ready_; } + + NullableBytes get() const override; + +private: + VarUInt32Deserializer length_buf_; + bool length_consumed_{false}; + uint32_t required_; + + std::vector data_buf_; + bool ready_{false}; +}; + /** * Deserializer for array of objects of the same type. * @@ -851,6 +918,31 @@ class NullableCompactArrayDeserializer bool ready_{false}; }; +/** + * Kafka UUID is basically two longs, so we are going to model them the same way. + * Reference: + * https://github.com/apache/kafka/blob/2.8.1/clients/src/main/java/org/apache/kafka/common/Uuid.java#L38 + */ +class UuidDeserializer : public Deserializer { +public: + uint32_t feed(absl::string_view& data) override { + uint32_t consumed = 0; + consumed += high_bytes_deserializer_.feed(data); + consumed += low_bytes_deserializer_.feed(data); + return consumed; + } + + bool ready() const override { return low_bytes_deserializer_.ready(); } + + Uuid get() const override { + return {high_bytes_deserializer_.get(), low_bytes_deserializer_.get()}; + } + +private: + Int64Deserializer high_bytes_deserializer_; + Int64Deserializer low_bytes_deserializer_; +}; + /** * Encodes provided argument in Kafka format. * In case of primitive types, this is done explicitly as per specification. 
@@ -961,9 +1053,11 @@ template inline uint32_t EncodingContext::computeSize(const T& arg) COMPUTE_SIZE_OF_NUMERIC_TYPE(bool) COMPUTE_SIZE_OF_NUMERIC_TYPE(int8_t) COMPUTE_SIZE_OF_NUMERIC_TYPE(int16_t) +COMPUTE_SIZE_OF_NUMERIC_TYPE(uint16_t) COMPUTE_SIZE_OF_NUMERIC_TYPE(int32_t) COMPUTE_SIZE_OF_NUMERIC_TYPE(uint32_t) COMPUTE_SIZE_OF_NUMERIC_TYPE(int64_t) +COMPUTE_SIZE_OF_NUMERIC_TYPE(double) /** * Template overload for string. @@ -1019,6 +1113,13 @@ inline uint32_t EncodingContext::computeSize(const NullableArray& arg) const return arg ? computeSize(*arg) : sizeof(int32_t); } +/** + * Template overload for Uuid. + */ +template <> inline uint32_t EncodingContext::computeSize(const Uuid&) const { + return 2 * sizeof(uint64_t); +} + /** * For non-primitive types, call `computeCompactSize` on them, to delegate the work to the entity * itself. The entity may use the information in context to decide which fields are included etc. @@ -1079,6 +1180,14 @@ template <> inline uint32_t EncodingContext::computeCompactSize(const Bytes& arg return computeCompactSize(static_cast(arg.size()) + 1) + arg.size(); } +/** + * Template overload for nullable compact byte array. + * Kafka NullableCompactBytes' size is var-len encoding of N+1 + N bytes. + */ +template <> inline uint32_t EncodingContext::computeCompactSize(const NullableBytes& arg) const { + return arg ? computeCompactSize(*arg) : 1; +} + /** * Template overload for CompactArray of T. * The size of array is compact size of header and all of its elements. @@ -1131,10 +1240,24 @@ template <> inline uint32_t EncodingContext::encode(const int8_t& arg, Buffer::I } ENCODE_NUMERIC_TYPE(int16_t, htobe16); +ENCODE_NUMERIC_TYPE(uint16_t, htobe16); ENCODE_NUMERIC_TYPE(int32_t, htobe32); ENCODE_NUMERIC_TYPE(uint32_t, htobe32); ENCODE_NUMERIC_TYPE(int64_t, htobe64); +/** + * Template overload for double. + * Encodes 8 bytes. 
+ */ +template <> inline uint32_t EncodingContext::encode(const double& arg, Buffer::Instance& dst) { + double tmp = arg; + uint64_t in_host_order; + safeMemcpy(&in_host_order, &tmp); + const uint64_t in_network_order = htobe64(in_host_order); + dst.add(&in_network_order, sizeof(uint64_t)); + return sizeof(uint64_t); +} + /** * Template overload for bool. * Encode boolean as a single byte. @@ -1227,6 +1350,16 @@ uint32_t EncodingContext::encode(const NullableArray& arg, Buffer::Instance& } } +/** + * Template overload for Uuid. + */ +template <> inline uint32_t EncodingContext::encode(const Uuid& arg, Buffer::Instance& dst) { + uint32_t result = 0; + result += encode(arg.msb_, dst); + result += encode(arg.lsb_, dst); + return result; +} + /** * For non-primitive types, call `encodeCompact` on them, to delegate the serialization to the * entity itself. @@ -1309,6 +1442,20 @@ inline uint32_t EncodingContext::encodeCompact(const Bytes& arg, Buffer::Instanc return header_length + data_length; } +/** + * Template overload for NullableBytes. + * Encode byte array as VAR_UINT + N bytes. + */ +template <> +inline uint32_t EncodingContext::encodeCompact(const NullableBytes& arg, Buffer::Instance& dst) { + if (arg.has_value()) { + return encodeCompact(*arg, dst); + } else { + const uint32_t len = 0; + return encodeCompact(len, dst); + } +} + /** * Encode object array of T as VAR_UINT + N elements. * Each element of type T then serializes itself on its own. diff --git a/contrib/kafka/filters/network/source/tagged_fields.h b/contrib/kafka/filters/network/source/tagged_fields.h index f9aebaf7472bf..96e3c72e50bba 100644 --- a/contrib/kafka/filters/network/source/tagged_fields.h +++ b/contrib/kafka/filters/network/source/tagged_fields.h @@ -6,7 +6,7 @@ /** * This header file provides serialization support for tagged fields structure added in 2.4. 
- * https://github.com/apache/kafka/blob/2.4.0/clients/src/main/java/org/apache/kafka/common/protocol/types/TaggedFields.java + * https://github.com/apache/kafka/blob/2.8.1/clients/src/main/java/org/apache/kafka/common/protocol/types/TaggedFields.java * * Impl note: contrary to other compact data structures, data in tagged field does not have +1 in * data length. diff --git a/contrib/kafka/filters/network/test/broker/filter_protocol_test.cc b/contrib/kafka/filters/network/test/broker/filter_protocol_test.cc index 8d790b14806eb..3f30be33c7e47 100644 --- a/contrib/kafka/filters/network/test/broker/filter_protocol_test.cc +++ b/contrib/kafka/filters/network/test/broker/filter_protocol_test.cc @@ -143,7 +143,7 @@ TEST_F(KafkaBrokerFilterProtocolTest, ShouldProcessMessages) { ASSERT_EQ(result2, Network::FilterStatus::Continue); // Also, assert that every message type has been processed properly. - for (int16_t i = 0; i < MessageUtilities::apiKeys(); ++i) { + for (const int16_t i : MessageUtilities::apiKeys()) { // We should have received one request per api version. const Stats::Counter& request_counter = scope_.counter(MessageUtilities::requestMetric(i)); ASSERT_EQ(request_counter.value(), MessageUtilities::requestApiVersions(i)); diff --git a/contrib/kafka/filters/network/test/mesh/command_handlers/metadata_unit_test.cc b/contrib/kafka/filters/network/test/mesh/command_handlers/metadata_unit_test.cc index d9ffda89635c8..ba144e3ce74c2 100644 --- a/contrib/kafka/filters/network/test/mesh/command_handlers/metadata_unit_test.cc +++ b/contrib/kafka/filters/network/test/mesh/command_handlers/metadata_unit_test.cc @@ -39,8 +39,13 @@ TEST(MetadataTest, shouldBeAlwaysReadyForAnswer) { // Second topic is not going to have configuration present. 
EXPECT_CALL(configuration, computeClusterConfigForTopic("topic2")) .WillOnce(Return(absl::nullopt)); - const RequestHeader header = {0, 0, 0, absl::nullopt}; - const MetadataRequest data = {{MetadataRequestTopic{"topic1"}, MetadataRequestTopic{"topic2"}}}; + const RequestHeader header = {METADATA_REQUEST_API_KEY, METADATA_REQUEST_MAX_VERSION, 0, + absl::nullopt}; + const MetadataRequestTopic t1 = MetadataRequestTopic{"topic1"}; + const MetadataRequestTopic t2 = MetadataRequestTopic{"topic2"}; + // Third topic is not going to have an explicit name. + const MetadataRequestTopic t3 = MetadataRequestTopic{Uuid{13, 42}, absl::nullopt, TaggedFields{}}; + const MetadataRequest data = {{t1, t2, t3}}; const auto message = std::make_shared>(header, data); MetadataRequestHolder testee = {filter, configuration, message}; @@ -61,6 +66,7 @@ TEST(MetadataTest, shouldBeAlwaysReadyForAnswer) { ASSERT_TRUE(response); const auto topics = response->data_.topics_; EXPECT_EQ(topics.size(), 1); + EXPECT_EQ(topics[0].name_, *(t1.name_)); EXPECT_EQ(topics[0].partitions_.size(), 42); } diff --git a/contrib/kafka/filters/network/test/mesh/command_handlers/produce_unit_test.cc b/contrib/kafka/filters/network/test/mesh/command_handlers/produce_unit_test.cc index efa05e82e1f88..cdcc53d219786 100644 --- a/contrib/kafka/filters/network/test/mesh/command_handlers/produce_unit_test.cc +++ b/contrib/kafka/filters/network/test/mesh/command_handlers/produce_unit_test.cc @@ -128,10 +128,10 @@ TEST_F(ProduceUnitTest, ShouldSendRecordsInNormalFlow) { ASSERT_TRUE(response); const std::vector responses = response->data_.responses_; EXPECT_EQ(responses.size(), 2); - EXPECT_EQ(responses[0].partitions_[0].error_code_, dm1.error_code_); - EXPECT_EQ(responses[0].partitions_[0].base_offset_, dm1.offset_); - EXPECT_EQ(responses[1].partitions_[0].error_code_, dm2.error_code_); - EXPECT_EQ(responses[1].partitions_[0].base_offset_, dm2.offset_); + EXPECT_EQ(responses[0].partition_responses_[0].error_code_, 
dm1.error_code_); + EXPECT_EQ(responses[0].partition_responses_[0].base_offset_, dm1.offset_); + EXPECT_EQ(responses[1].partition_responses_[0].error_code_, dm2.error_code_); + EXPECT_EQ(responses[1].partition_responses_[0].base_offset_, dm2.offset_); } // Typical flow without errors. @@ -182,9 +182,9 @@ TEST_F(ProduceUnitTest, ShouldMergeOutboundRecordResponses) { ASSERT_TRUE(response); const std::vector responses = response->data_.responses_; EXPECT_EQ(responses.size(), 1); - EXPECT_EQ(responses[0].partitions_.size(), 1); - EXPECT_EQ(responses[0].partitions_[0].error_code_, 0); - EXPECT_EQ(responses[0].partitions_[0].base_offset_, 1313); + EXPECT_EQ(responses[0].partition_responses_.size(), 1); + EXPECT_EQ(responses[0].partition_responses_[0].error_code_, 0); + EXPECT_EQ(responses[0].partition_responses_[0].base_offset_, 1313); } // Flow with errors. @@ -237,7 +237,7 @@ TEST_F(ProduceUnitTest, ShouldHandleDeliveryErrors) { ASSERT_TRUE(response); const std::vector responses = response->data_.responses_; EXPECT_EQ(responses.size(), 1); - EXPECT_EQ(responses[0].partitions_[0].error_code_, dm1.error_code_); + EXPECT_EQ(responses[0].partition_responses_[0].error_code_, dm1.error_code_); } // As with current version of Kafka library we have no capability of linking producer's notification diff --git a/contrib/kafka/filters/network/test/mesh/request_processor_unit_test.cc b/contrib/kafka/filters/network/test/mesh/request_processor_unit_test.cc index 605019141e707..c49b449138c50 100644 --- a/contrib/kafka/filters/network/test/mesh/request_processor_unit_test.cc +++ b/contrib/kafka/filters/network/test/mesh/request_processor_unit_test.cc @@ -94,9 +94,9 @@ TEST_F(RequestProcessorTest, ShouldProcessApiVersionsRequest) { TEST_F(RequestProcessorTest, ShouldHandleUnsupportedRequest) { // given - const RequestHeader header = {LIST_OFFSET_REQUEST_API_KEY, 0, 0, absl::nullopt}; - const ListOffsetRequest data = {0, {}}; - const auto message = std::make_shared>(header, data); + 
const RequestHeader header = {LIST_OFFSETS_REQUEST_API_KEY, 0, 0, absl::nullopt}; + const ListOffsetsRequest data = {0, {}}; + const auto message = std::make_shared>(header, data); // when, then - exception gets thrown. EXPECT_THROW_WITH_REGEX(testee_.onMessage(message), EnvoyException, "unsupported"); diff --git a/contrib/kafka/filters/network/test/message_utilities.h b/contrib/kafka/filters/network/test/message_utilities.h index 00278094e2cc1..1e8b5bac8bb44 100644 --- a/contrib/kafka/filters/network/test/message_utilities.h +++ b/contrib/kafka/filters/network/test/message_utilities.h @@ -25,10 +25,9 @@ class MessageUtilities { public: /** - * How many request/response types are supported. - * Proper values are 0..apiKeys() - 1. + * What are the supported request / response types. */ - static int16_t apiKeys(); + static std::vector apiKeys(); /** * How many request types are supported for given api key. diff --git a/contrib/kafka/filters/network/test/metrics_integration_test.cc b/contrib/kafka/filters/network/test/metrics_integration_test.cc index 47873fbfaeef6..488befd0c28eb 100644 --- a/contrib/kafka/filters/network/test/metrics_integration_test.cc +++ b/contrib/kafka/filters/network/test/metrics_integration_test.cc @@ -21,7 +21,7 @@ class MetricsIntegrationTest : public testing::Test { constexpr static int32_t UPDATE_COUNT = 42; TEST_F(MetricsIntegrationTest, ShouldUpdateRequestMetrics) { - for (int16_t api_key = 0; api_key < MessageUtilities::apiKeys(); ++api_key) { + for (const int16_t api_key : MessageUtilities::apiKeys()) { // given // when for (int i = 0; i < UPDATE_COUNT; ++i) { @@ -46,7 +46,7 @@ TEST_F(MetricsIntegrationTest, ShouldHandleUnparseableRequest) { } TEST_F(MetricsIntegrationTest, ShouldUpdateResponseMetrics) { - for (int16_t api_key = 0; api_key < MessageUtilities::apiKeys(); ++api_key) { + for (const int16_t api_key : MessageUtilities::apiKeys()) { // given // when for (int i = 0; i < UPDATE_COUNT; ++i) { diff --git 
a/contrib/kafka/filters/network/test/protocol/request_utilities_cc.j2 b/contrib/kafka/filters/network/test/protocol/request_utilities_cc.j2 index a90796c0acc11..3ec7d9f5535e9 100644 --- a/contrib/kafka/filters/network/test/protocol/request_utilities_cc.j2 +++ b/contrib/kafka/filters/network/test/protocol/request_utilities_cc.j2 @@ -3,6 +3,8 @@ This file contains implementation of request-related methods contained in 'message_utilities.h'. #} +#include + #include "contrib/kafka/filters/network/test/message_utilities.h" #include "contrib/kafka/filters/network/source/external/requests.h" @@ -12,8 +14,12 @@ namespace Extensions { namespace NetworkFilters { namespace Kafka { -int16_t MessageUtilities::apiKeys() { - return {{ message_types | length }}; +std::vector MessageUtilities::apiKeys() { + std::vector result; + {% for message_type in message_types %} + result.push_back({{ message_type.get_extra('api_key') }}); + {% endfor %} + return result; } int16_t MessageUtilities::requestApiVersions(const int16_t api_key) { @@ -30,7 +36,8 @@ int16_t MessageUtilities::requestApiVersions(const int16_t api_key) { std::vector MessageUtilities::makeRequests( const int16_t api_key, int32_t& correlation_id) { - if ((api_key < 0) || (api_key >= {{ message_types | length }})) { + const std::vector api_keys = apiKeys(); + if (std::find(api_keys.begin(), api_keys.end(), api_key) == api_keys.end()) { throw EnvoyException("unsupported api key used in test code"); } @@ -56,7 +63,7 @@ std::vector MessageUtilities::makeRequests( std::vector MessageUtilities::makeAllRequests() { std::vector result; int32_t correlation_id = 0; - for (int16_t i = 0; i < MessageUtilities::apiKeys(); ++i) { + for (const int16_t i : MessageUtilities::apiKeys()) { const std::vector tmp = MessageUtilities::makeRequests(i, correlation_id); result.insert(result.end(), tmp.begin(), tmp.end()); diff --git a/contrib/kafka/filters/network/test/protocol/response_utilities_cc.j2 
b/contrib/kafka/filters/network/test/protocol/response_utilities_cc.j2 index cf41d02e3ca78..c57d386a5b565 100644 --- a/contrib/kafka/filters/network/test/protocol/response_utilities_cc.j2 +++ b/contrib/kafka/filters/network/test/protocol/response_utilities_cc.j2 @@ -3,6 +3,8 @@ This file contains implementation of response-related methods contained in 'message_utilities.h'. #} +#include + #include "contrib/kafka/filters/network/test/message_utilities.h" #include "contrib/kafka/filters/network/source/external/responses.h" @@ -26,7 +28,8 @@ int16_t MessageUtilities::responseApiVersions(const int16_t api_key) { std::vector MessageUtilities::makeResponses( const int16_t api_key, int32_t& correlation_id) { - if ((api_key < 0) || (api_key >= {{ message_types | length }})) { + const std::vector api_keys = apiKeys(); + if (std::find(api_keys.begin(), api_keys.end(), api_key) == api_keys.end()) { throw EnvoyException("unsupported api key used in test code"); } @@ -51,7 +54,7 @@ std::vector MessageUtilities::makeResponses( std::vector MessageUtilities::makeAllResponses() { std::vector result; int32_t correlation_id = 0; - for (int16_t i = 0; i < MessageUtilities::apiKeys(); ++i) { + for (const int16_t i : MessageUtilities::apiKeys()) { const std::vector tmp = MessageUtilities::makeResponses(i, correlation_id); result.insert(result.end(), tmp.begin(), tmp.end()); diff --git a/contrib/kafka/filters/network/test/serialization_test.cc b/contrib/kafka/filters/network/test/serialization_test.cc index c177e86364dd7..b9264cf237e47 100644 --- a/contrib/kafka/filters/network/test/serialization_test.cc +++ b/contrib/kafka/filters/network/test/serialization_test.cc @@ -23,9 +23,11 @@ namespace SerializationTest { TEST_EmptyDeserializerShouldNotBeReady(Int8Deserializer); TEST_EmptyDeserializerShouldNotBeReady(Int16Deserializer); +TEST_EmptyDeserializerShouldNotBeReady(UInt16Deserializer); TEST_EmptyDeserializerShouldNotBeReady(Int32Deserializer); 
TEST_EmptyDeserializerShouldNotBeReady(UInt32Deserializer); TEST_EmptyDeserializerShouldNotBeReady(Int64Deserializer); +TEST_EmptyDeserializerShouldNotBeReady(Float64Deserializer); TEST_EmptyDeserializerShouldNotBeReady(BooleanDeserializer); TEST_EmptyDeserializerShouldNotBeReady(VarUInt32Deserializer); TEST_EmptyDeserializerShouldNotBeReady(VarInt32Deserializer); @@ -38,6 +40,8 @@ TEST_EmptyDeserializerShouldNotBeReady(NullableCompactStringDeserializer); TEST_EmptyDeserializerShouldNotBeReady(BytesDeserializer); TEST_EmptyDeserializerShouldNotBeReady(CompactBytesDeserializer); TEST_EmptyDeserializerShouldNotBeReady(NullableBytesDeserializer); +TEST_EmptyDeserializerShouldNotBeReady(NullableCompactBytesDeserializer); +TEST_EmptyDeserializerShouldNotBeReady(UuidDeserializer); TEST(ArrayDeserializer, EmptyBufferShouldNotBeReady) { // given @@ -77,9 +81,11 @@ TEST(NullableCompactArrayDeserializer, EmptyBufferShouldNotBeReady) { TEST_DeserializerShouldDeserialize(Int8Deserializer, int8_t, 42); TEST_DeserializerShouldDeserialize(Int16Deserializer, int16_t, 42); +TEST_DeserializerShouldDeserialize(UInt16Deserializer, uint16_t, 42); TEST_DeserializerShouldDeserialize(Int32Deserializer, int32_t, 42); TEST_DeserializerShouldDeserialize(UInt32Deserializer, uint32_t, 42); TEST_DeserializerShouldDeserialize(Int64Deserializer, int64_t, 42); +TEST_DeserializerShouldDeserialize(Float64Deserializer, double, 13.25); TEST_DeserializerShouldDeserialize(BooleanDeserializer, bool, true); EncodingContext encoder{-1}; // Provided api_version does not matter for primitive types. @@ -424,7 +430,24 @@ TEST(NullableBytesDeserializer, ShouldThrowOnInvalidLength) { EXPECT_THROW(testee.feed(data), EnvoyException); } -// Generic array tests. +// Nullable compact byte-array tests. 
+ +TEST(NullableCompactBytesDeserializer, ShouldDeserialize) { + const NullableBytes value{{'a', 'b', 'c', 'd'}}; + serializeCompactThenDeserializeAndCheckEquality(value); +} + +TEST(NullableCompactBytesDeserializer, ShouldDeserializeEmptyBytes) { + const NullableBytes value = {{}}; + serializeCompactThenDeserializeAndCheckEquality(value); +} + +TEST(NullableCompactBytesDeserializer, ShouldDeserializeNullBytes) { + const NullableBytes value = absl::nullopt; + serializeCompactThenDeserializeAndCheckEquality(value); +} + +// Generic-array tests. TEST(ArrayDeserializer, ShouldConsumeCorrectAmountOfData) { const std::vector value{{"aaa", "bbbbb", "cc", "d", "e", "ffffffff"}}; @@ -446,7 +469,7 @@ TEST(ArrayDeserializer, ShouldThrowOnInvalidLength) { EXPECT_THROW(testee.feed(data), EnvoyException); } -// Compact generic array tests. +// Compact generic-array tests. TEST(CompactArrayDeserializer, ShouldConsumeCorrectAmountOfData) { const std::vector value{{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13}}; @@ -469,7 +492,7 @@ TEST(CompactArrayDeserializer, ShouldThrowOnInvalidLength) { EXPECT_THROW(testee.feed(data), EnvoyException); } -// Generic nullable array tests. +// Nullable generic-array tests. TEST(NullableArrayDeserializer, ShouldConsumeCorrectAmountOfData) { const NullableArray value{{"aaa", "bbbbb", "cc", "d", "e", "ffffffff"}}; @@ -496,7 +519,7 @@ TEST(NullableArrayDeserializer, ShouldThrowOnInvalidLength) { EXPECT_THROW(testee.feed(data), EnvoyException); } -// Compact nullable generic array tests. +// Nullable compact generic-array tests. TEST(NullableCompactArrayDeserializer, ShouldConsumeCorrectAmountOfData) { const NullableArray value{{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13}}; @@ -521,6 +544,13 @@ TEST(NullableCompactArrayDeserializer, ShouldConsumeCorrectAmountOfDataForLargeI NullableCompactArrayDeserializer>(value); } +// UUID. 
+ +TEST(UuidDeserializer, ShouldDeserialize) { + const Uuid value = {13, 42}; + serializeThenDeserializeAndCheckEquality(value); +} + // Tagged fields. TEST(TaggedFieldDeserializer, ShouldConsumeCorrectAmountOfData) { diff --git a/docs/root/configuration/listeners/network_filters/kafka_broker_filter.rst b/docs/root/configuration/listeners/network_filters/kafka_broker_filter.rst index 2c286686f33f5..ec7828db5c120 100644 --- a/docs/root/configuration/listeners/network_filters/kafka_broker_filter.rst +++ b/docs/root/configuration/listeners/network_filters/kafka_broker_filter.rst @@ -5,7 +5,7 @@ Kafka Broker filter The Apache Kafka broker filter decodes the client protocol for `Apache Kafka `_, both the requests and responses in the payload. -The message versions in `Kafka 2.4.0 `_ +The message versions in `Kafka 2.8.1 `_ are supported. The filter attempts not to influence the communication between client and brokers, so the messages that could not be decoded (due to Kafka client or broker running a newer version than supported by diff --git a/docs/root/configuration/listeners/network_filters/kafka_mesh_filter.rst b/docs/root/configuration/listeners/network_filters/kafka_mesh_filter.rst index 4a8504b7d67e3..6942bf8bb2187 100644 --- a/docs/root/configuration/listeners/network_filters/kafka_mesh_filter.rst +++ b/docs/root/configuration/listeners/network_filters/kafka_mesh_filter.rst @@ -6,7 +6,7 @@ Kafka Mesh filter The Apache Kafka mesh filter provides a facade for `Apache Kafka `_ producers. Produce requests sent to this filter insance can be forwarded to one of multiple clusters, depending on configured forwarding rules. Corresponding message versions from -Kafka 2.4.0 are supported. +Kafka 2.8.1 are supported. * :ref:`v3 API reference ` * This filter should be configured with the name *envoy.filters.network.kafka_mesh*. 
From c5110abab87d2e0b8602802aa205cbc67bd943a0 Mon Sep 17 00:00:00 2001 From: alyssawilk Date: Fri, 24 Sep 2021 02:32:52 +0800 Subject: [PATCH 103/121] quic: updating test comment (#18196) Signed-off-by: Alyssa Wilk Signed-off-by: gayang --- test/integration/tcp_tunneling_integration_test.cc | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/test/integration/tcp_tunneling_integration_test.cc b/test/integration/tcp_tunneling_integration_test.cc index a8505323b5956..ea3d79afd5894 100644 --- a/test/integration/tcp_tunneling_integration_test.cc +++ b/test/integration/tcp_tunneling_integration_test.cc @@ -743,7 +743,10 @@ TEST_P(TcpTunnelingIntegrationTest, TcpProxyDownstreamFlush) { // Test that an upstream flush works correctly (all data is flushed) TEST_P(TcpTunnelingIntegrationTest, TcpProxyUpstreamFlush) { if (upstreamProtocol() == Http::CodecType::HTTP3) { - // TODO(alyssawilk) debug. + // The payload data depends on having TCP buffers upstream and downstream. + // For HTTP/3, upstream, the flow control window will back up sooner, Envoy + // flow control will kick in, and the large write of |data| will fail to + // complete. return; } // Use a very large size to make sure it is larger than the kernel socket read buffer. 
From cdc7ccd09a06fbe8854a4d9f0b7af3ec9a533c9d Mon Sep 17 00:00:00 2001 From: alyssawilk Date: Fri, 24 Sep 2021 03:06:32 +0800 Subject: [PATCH 104/121] test: deflaking coverage (#18234) Signed-off-by: Alyssa Wilk Signed-off-by: gayang --- test/server/guarddog_impl_test.cc | 3 +++ 1 file changed, 3 insertions(+) diff --git a/test/server/guarddog_impl_test.cc b/test/server/guarddog_impl_test.cc index 53b2dcc962e6a..dbbe963a175fc 100644 --- a/test/server/guarddog_impl_test.cc +++ b/test/server/guarddog_impl_test.cc @@ -761,6 +761,8 @@ TEST_P(GuardDogActionsTest, MegaMissShouldSaturateOnMegaMissEvent) { EXPECT_THAT(events_, ElementsAre("MEGAMISS : 10", "MEGAMISS : 10")); } +// Disabled for coverage per #18229 +#if !defined(ENVOY_CONFIG_COVERAGE) TEST_P(GuardDogActionsTest, ShouldRespectEventPriority) { // Priority of events are KILL, MULTIKILL, MEGAMISS and MISS @@ -804,6 +806,7 @@ TEST_P(GuardDogActionsTest, ShouldRespectEventPriority) { guard_dog_->forceCheckForTest(); EXPECT_THAT(events_, ElementsAre("MEGAMISS : 10", "MISS : 10")); } +#endif TEST_P(GuardDogActionsTest, KillShouldTriggerGuardDogActions) { auto die_function = [&]() -> void { From 289ce88e3d190726fe8c106be57bd97db857000d Mon Sep 17 00:00:00 2001 From: alyssawilk Date: Fri, 24 Sep 2021 03:37:23 +0800 Subject: [PATCH 105/121] http3: sending goaways (#18188) Fixing client side goaways to send (in opt mode) rather than crash. In debug mode, this unfortunately still crashes due to an upstream quiche perspective check, so commenting that out and adding a TODO. 
Risk Level: low Testing: updated Docs Changes: n/a Release Notes: n/a Fixes #12930 Signed-off-by: Alyssa Wilk Signed-off-by: gayang --- source/common/quic/codec_impl.cc | 8 ++++++++ source/common/quic/codec_impl.h | 4 ++-- test/integration/multiplexed_integration_test.cc | 7 ++----- test/integration/protocol_integration_test.cc | 6 ------ 4 files changed, 12 insertions(+), 13 deletions(-) diff --git a/source/common/quic/codec_impl.cc b/source/common/quic/codec_impl.cc index 0afcf65b99055..6ef3eb1be5940 100644 --- a/source/common/quic/codec_impl.cc +++ b/source/common/quic/codec_impl.cc @@ -71,6 +71,14 @@ QuicHttpClientConnectionImpl::QuicHttpClientConnectionImpl( session.set_max_inbound_header_list_size(max_request_headers_kb * 1024); } +void QuicHttpClientConnectionImpl::goAway() { +// TODO(alyssawilk) remove these guards once QUICHE has been updated to remove +// the perspective check. +#if defined(NDEBUG) + quic_client_session_.SendHttp3GoAway(quic::QUIC_PEER_GOING_AWAY, "client goaway"); +#endif +} + Http::RequestEncoder& QuicHttpClientConnectionImpl::newStream(Http::ResponseDecoder& response_decoder) { EnvoyQuicClientStream* stream = diff --git a/source/common/quic/codec_impl.h b/source/common/quic/codec_impl.h index 7faed7d94136f..b5136bb1b6313 100644 --- a/source/common/quic/codec_impl.h +++ b/source/common/quic/codec_impl.h @@ -70,8 +70,8 @@ class QuicHttpClientConnectionImpl : public QuicHttpConnectionImplBase, Http::RequestEncoder& newStream(Http::ResponseDecoder& response_decoder) override; // Http::Connection - void goAway() override { NOT_REACHED_GCOVR_EXCL_LINE; } - void shutdownNotice() override { NOT_REACHED_GCOVR_EXCL_LINE; } + void goAway() override; + void shutdownNotice() override {} void onUnderlyingConnectionAboveWriteBufferHighWatermark() override; void onUnderlyingConnectionBelowWriteBufferLowWatermark() override; diff --git a/test/integration/multiplexed_integration_test.cc b/test/integration/multiplexed_integration_test.cc index 
0357402a6993a..58b9b6d129237 100644 --- a/test/integration/multiplexed_integration_test.cc +++ b/test/integration/multiplexed_integration_test.cc @@ -29,7 +29,6 @@ using ::testing::MatchesRegex; namespace Envoy { -// TODO(#2557) fix all the failures. #define EXCLUDE_DOWNSTREAM_HTTP3 \ if (downstreamProtocol() == Http::CodecType::HTTP3) { \ return; \ @@ -906,8 +905,7 @@ TEST_P(Http2IntegrationTest, GrpcRetry) { testGrpcRetry(); } // Verify the case where there is an HTTP/2 codec/protocol error with an active stream. TEST_P(Http2IntegrationTest, CodecErrorAfterStreamStart) { - // TODO(#16757) Needs HTTP/3 "bad frame" equivalent. - EXCLUDE_DOWNSTREAM_HTTP3; + EXCLUDE_DOWNSTREAM_HTTP3; // The HTTP/3 client has no "bad frame" equivalent. initialize(); codec_client_ = makeHttpConnection(lookupPort("http")); @@ -940,7 +938,7 @@ TEST_P(Http2IntegrationTest, Http2BadMagic) { } TEST_P(Http2IntegrationTest, BadFrame) { - EXCLUDE_DOWNSTREAM_HTTP3; // Needs HTTP/3 "bad frame" equivalent. + EXCLUDE_DOWNSTREAM_HTTP3; // The HTTP/3 client has no "bad frame" equivalent. initialize(); std::string response; @@ -956,7 +954,6 @@ TEST_P(Http2IntegrationTest, BadFrame) { // Send client headers, a GoAway and then a body and ensure the full request and // response are received. TEST_P(Http2IntegrationTest, GoAway) { - EXCLUDE_DOWNSTREAM_HTTP3; // QuicHttpClientConnectionImpl::goAway NOT_REACHED_GCOVR_EXCL_LINE config_helper_.prependFilter(ConfigHelper::defaultHealthCheckFilter()); initialize(); diff --git a/test/integration/protocol_integration_test.cc b/test/integration/protocol_integration_test.cc index 133dc3f87f0fb..a33c7bc0e83d4 100644 --- a/test/integration/protocol_integration_test.cc +++ b/test/integration/protocol_integration_test.cc @@ -53,12 +53,6 @@ void setDoNotValidateRouteConfig( route_config->mutable_validate_clusters()->set_value(false); }; -// TODO(#2557) fix all the failures. 
-#define EXCLUDE_DOWNSTREAM_HTTP3 \ - if (downstreamProtocol() == Http::CodecType::HTTP3) { \ - return; \ - } - TEST_P(ProtocolIntegrationTest, TrailerSupportHttp1) { config_helper_.addConfigModifier(setEnableDownstreamTrailersHttp1()); config_helper_.addConfigModifier(setEnableUpstreamTrailersHttp1()); From 54e147965eff4151fb08f93807894fcb23cb7c77 Mon Sep 17 00:00:00 2001 From: Keith Smiley Date: Thu, 23 Sep 2021 13:23:47 -0700 Subject: [PATCH 106/121] bazel: update rules_foreign_cc (#18174) This update is required for this commit bazelbuild/rules_foreign_cc@c41020e in order to support Apple Silicon. Since our last update there have been some breaking API changes. I followed these instructions to migrate: https://github.com/bazelbuild/rules_foreign_cc/releases/tag/0.3.0 https://github.com/bazelbuild/rules_foreign_cc/releases/tag/0.4.0 This was reverted once because of a regression fixed by bazelbuild/rules_foreign_cc@da8952e It was reverted again because we saw a failure on envoy-mobile, but that issue turned out to be a configuration issue there, not the fault of this change. bazelbuild/rules_foreign_cc#789 provides a better error for that case in the future, but isn't required This reverts commit 7760bc0. Signed-off-by: Keith Smiley Signed-off-by: gayang --- bazel/EXTERNAL_DEPS.md | 2 +- bazel/dependency_imports.bzl | 5 ++- bazel/envoy_build_system.bzl | 16 +++---- bazel/foreign_cc/BUILD | 79 +++++++++++++++++----------------- bazel/repository_locations.bzl | 8 ++-- 5 files changed, 55 insertions(+), 55 deletions(-) diff --git a/bazel/EXTERNAL_DEPS.md b/bazel/EXTERNAL_DEPS.md index 34fc92b21f123..9820ff4cf993d 100644 --- a/bazel/EXTERNAL_DEPS.md +++ b/bazel/EXTERNAL_DEPS.md @@ -88,7 +88,7 @@ The name of the dependency can be found in [the repository locations file.](https://github.com/envoyproxy/envoy/blob/main/bazel/repository_locations.bzl) The path of the local copy has to be absolute path. 
-For repositories built by `envoy_cmake_external()` in `bazel/foreign_cc/BUILD`, +For repositories built by `envoy_cmake()` in `bazel/foreign_cc/BUILD`, it is necessary to populate the local copy with some additional Bazel machinery to support `--override_repository`: 1. Place an empty `WORKSPACE` in the root. diff --git a/bazel/dependency_imports.bzl b/bazel/dependency_imports.bzl index 7c806b08c98a6..aab2cf5337fa1 100644 --- a/bazel/dependency_imports.bzl +++ b/bazel/dependency_imports.bzl @@ -1,4 +1,4 @@ -load("@rules_foreign_cc//:workspace_definitions.bzl", "rules_foreign_cc_dependencies") +load("@rules_foreign_cc//foreign_cc:repositories.bzl", "rules_foreign_cc_dependencies") load("@io_bazel_rules_go//go:deps.bzl", "go_register_toolchains", "go_rules_dependencies") load("@envoy_build_tools//toolchains:rbe_toolchains_config.bzl", "rbe_toolchains_config") load("@bazel_toolchains//rules/exec_properties:exec_properties.bzl", "create_rbe_exec_properties_dict", "custom_exec_properties") @@ -14,7 +14,8 @@ load("@proxy_wasm_rust_sdk//bazel:dependencies.bzl", "proxy_wasm_rust_sdk_depend GO_VERSION = "1.15.5" def envoy_dependency_imports(go_version = GO_VERSION): - rules_foreign_cc_dependencies() + # TODO: allow building of tools for easier onboarding + rules_foreign_cc_dependencies(register_default_tools = False, register_built_tools = False) go_rules_dependencies() go_register_toolchains(go_version) rbe_toolchains_config() diff --git a/bazel/envoy_build_system.bzl b/bazel/envoy_build_system.bzl index 644824f19e830..f48ebe70564e9 100644 --- a/bazel/envoy_build_system.bzl +++ b/bazel/envoy_build_system.bzl @@ -1,6 +1,6 @@ # The main Envoy bazel file. Load this file for all Envoy-specific build macros # and rules that you'd like to use in your BUILD files. 
-load("@rules_foreign_cc//tools/build_defs:cmake.bzl", "cmake_external") +load("@rules_foreign_cc//foreign_cc:cmake.bzl", "cmake") load(":envoy_binary.bzl", _envoy_cc_binary = "envoy_cc_binary") load(":envoy_internal.bzl", "envoy_external_dep_path") load( @@ -92,15 +92,12 @@ envoy_directory_genrule = rule( # External CMake C++ library targets should be specified with this function. This defaults # to building the dependencies with ninja -def envoy_cmake_external( +def envoy_cmake( name, cache_entries = {}, debug_cache_entries = {}, - cmake_options = ["-GNinja"], - make_commands = ["ninja -v", "ninja -v install"], lib_source = "", postfix_script = "", - static_libraries = [], copy_pdb = False, pdb_name = "", cmake_files_dir = "$BUILD_TMPDIR/CMakeFiles", @@ -128,22 +125,23 @@ def envoy_cmake_external( else: pf = postfix_script - cmake_external( + cmake( name = name, cache_entries = select({ "@envoy//bazel:dbg_build": cache_entries_debug, "//conditions:default": cache_entries, }), - cmake_options = cmake_options, + generate_args = ["-GNinja"], + targets = ["", "install"], + # TODO: Remove install target and make this work + install = False, # TODO(lizan): Make this always true generate_crosstool_file = select({ "@envoy//bazel:windows_x86_64": True, "//conditions:default": generate_crosstool_file, }), lib_source = lib_source, - make_commands = make_commands, postfix_script = pf, - static_libraries = static_libraries, **kwargs ) diff --git a/bazel/foreign_cc/BUILD b/bazel/foreign_cc/BUILD index 2c9b481282cb0..044f4562ee4d6 100644 --- a/bazel/foreign_cc/BUILD +++ b/bazel/foreign_cc/BUILD @@ -1,6 +1,6 @@ load("@rules_cc//cc:defs.bzl", "cc_library") -load("//bazel:envoy_build_system.bzl", "envoy_cmake_external", "envoy_package") -load("@rules_foreign_cc//tools/build_defs:configure.bzl", "configure_make") +load("//bazel:envoy_build_system.bzl", "envoy_cmake", "envoy_package") +load("@rules_foreign_cc//foreign_cc:configure.bzl", "configure_make") licenses(["notice"]) # 
Apache 2 @@ -20,12 +20,14 @@ configure_make( }), lib_source = "@com_github_gperftools_gperftools//:all", linkopts = ["-lpthread"], - make_commands = ["make install-libLTLIBRARIES install-perftoolsincludeHEADERS"], - static_libraries = select({ + out_static_libs = select({ "//bazel:debug_tcmalloc": ["libtcmalloc_debug.a"], "//conditions:default": ["libtcmalloc_and_profiler.a"], }), tags = ["skip_on_windows"], + targets = [ + "install-libLTLIBRARIES install-perftoolsincludeHEADERS", + ], ) # Workaround for https://github.com/bazelbuild/rules_foreign_cc/issues/227 @@ -44,14 +46,14 @@ configure_make( configure_in_place = True, configure_options = ["--disable-ssl --disable-gssapi --disable-lz4-ext --disable-zstd && cp Makefile.config src/.. && cp config.h src/.."], lib_source = "@edenhill_librdkafka//:all", - make_commands = [ - "make ARFLAGS='' libs install-subdirs", - ], - static_libraries = [ + out_static_libs = [ "librdkafka.a", "librdkafka++.a", ], tags = ["skip_on_windows"], + targets = [ + "ARFLAGS='' libs install-subdirs", + ], alwayslink = True, ) @@ -66,7 +68,7 @@ cc_library( configure_make( name = "luajit", configure_command = "build.py", - configure_env_vars = select({ + env = select({ # This shouldn't be needed! See # https://github.com/envoyproxy/envoy/issues/6084 # TODO(htuch): Remove when #6084 is fixed @@ -76,18 +78,18 @@ configure_make( "//conditions:default": {}, }), lib_source = "@com_github_luajit_luajit//:all", - make_commands = [], out_include_dir = "include/luajit-2.1", - static_libraries = select({ + out_static_libs = select({ "//bazel:windows_x86_64": ["lua51.lib"], "//conditions:default": ["libluajit-5.1.a"], }), + targets = [], ) configure_make( name = "moonjit", configure_command = "build.py", - configure_env_vars = select({ + env = select({ # This shouldn't be needed! 
See # https://github.com/envoyproxy/envoy/issues/6084 # TODO(htuch): Remove when #6084 is fixed @@ -96,13 +98,12 @@ configure_make( "//conditions:default": {}, }), lib_source = "@com_github_moonjit_moonjit//:all", - make_commands = [], out_include_dir = "include/moonjit-2.2", - static_libraries = ["libluajit-5.1.a"], + out_static_libs = ["libluajit-5.1.a"], tags = ["skip_on_windows"], ) -envoy_cmake_external( +envoy_cmake( name = "libsxg", cache_entries = { "CMAKE_BUILD_TYPE": "Release", @@ -115,12 +116,12 @@ envoy_cmake_external( "CMAKE_TRY_COMPILE_TARGET_TYPE": "STATIC_LIBRARY", }, lib_source = "@com_github_google_libsxg//:all", - static_libraries = ["libsxg.a"], + out_static_libs = ["libsxg.a"], tags = ["skip_on_windows"], deps = ["@boringssl//:ssl"], ) -envoy_cmake_external( +envoy_cmake( name = "ares", cache_entries = { "CARES_BUILD_TOOLS": "no", @@ -135,17 +136,17 @@ envoy_cmake_external( "//bazel:apple": ["-lresolv"], "//conditions:default": [], }), - postfix_script = select({ - "//bazel:windows_x86_64": "cp -L $EXT_BUILD_ROOT/external/com_github_c_ares_c_ares/src/lib/ares_nameser.h $INSTALLDIR/include/ares_nameser.h && cp -L $EXT_BUILD_ROOT/external/com_github_c_ares_c_ares/include/ares_dns.h $INSTALLDIR/include/ares_dns.h", - "//conditions:default": "cp -L $EXT_BUILD_ROOT/external/com_github_c_ares_c_ares/include/ares_dns.h $INSTALLDIR/include/ares_dns.h", - }), - static_libraries = select({ + out_static_libs = select({ "//bazel:windows_x86_64": ["cares.lib"], "//conditions:default": ["libcares.a"], }), + postfix_script = select({ + "//bazel:windows_x86_64": "cp -L $EXT_BUILD_ROOT/external/com_github_c_ares_c_ares/src/lib/ares_nameser.h $INSTALLDIR/include/ares_nameser.h && cp -L $EXT_BUILD_ROOT/external/com_github_c_ares_c_ares/include/ares_dns.h $INSTALLDIR/include/ares_dns.h", + "//conditions:default": "rm -f $INSTALLDIR/include/ares_dns.h && cp -L $EXT_BUILD_ROOT/external/com_github_c_ares_c_ares/include/ares_dns.h $INSTALLDIR/include/ares_dns.h", + 
}), ) -envoy_cmake_external( +envoy_cmake( name = "curl", cache_entries = { "BUILD_CURL_EXE": "off", @@ -186,7 +187,7 @@ envoy_cmake_external( defines = ["CURL_STATICLIB"], generate_crosstool_file = True, lib_source = "@com_github_curl//:all", - static_libraries = select({ + out_static_libs = select({ "//bazel:windows_x86_64": ["libcurl.lib"], "//conditions:default": ["libcurl.a"], }), @@ -198,7 +199,7 @@ envoy_cmake_external( ], ) -envoy_cmake_external( +envoy_cmake( name = "event", cache_entries = { "EVENT__DISABLE_OPENSSL": "on", @@ -215,7 +216,7 @@ envoy_cmake_external( "_GNU_SOURCE": "on", }, lib_source = "@com_github_libevent_libevent//:all", - static_libraries = select({ + out_static_libs = select({ # macOS organization of libevent is different from Windows/Linux. # Including libevent_core is a requirement on those platforms, but # results in duplicate symbols when built on macOS. @@ -236,7 +237,7 @@ envoy_cmake_external( }), ) -envoy_cmake_external( +envoy_cmake( name = "llvm", cache_entries = { # Disable both: BUILD and INCLUDE, since some of the INCLUDE @@ -267,7 +268,7 @@ envoy_cmake_external( # using -l:libstdc++.a. "CMAKE_CXX_FLAGS": "-lstdc++", }, - env_vars = { + env = { # Workaround for the -DDEBUG flag added in fastbuild on macOS, # which conflicts with DEBUG macro used in LLVM. 
"CFLAGS": "-UDEBUG", @@ -275,7 +276,7 @@ envoy_cmake_external( "ASMFLAGS": "-UDEBUG", }, lib_source = "@org_llvm_llvm//:all", - static_libraries = select({ + out_static_libs = select({ "//conditions:default": [ # Order from llvm-config --libnames asmparser core debuginfodwarf # engine lto mcparser mirparser orcjit passes runtimedyld @@ -336,7 +337,7 @@ envoy_cmake_external( alwayslink = True, ) -envoy_cmake_external( +envoy_cmake( name = "nghttp2", cache_entries = { "ENABLE_LIB_ONLY": "on", @@ -349,13 +350,13 @@ envoy_cmake_external( debug_cache_entries = {"ENABLE_DEBUG": "on"}, defines = ["NGHTTP2_STATICLIB"], lib_source = "@com_github_nghttp2_nghttp2//:all", - static_libraries = select({ + out_static_libs = select({ "//bazel:windows_x86_64": ["nghttp2.lib"], "//conditions:default": ["libnghttp2.a"], }), ) -envoy_cmake_external( +envoy_cmake( name = "wamr", cache_entries = { "LLVM_DIR": "$EXT_BUILD_DEPS/copy_llvm/llvm/lib/cmake/llvm", @@ -368,14 +369,13 @@ envoy_cmake_external( "WAMR_BUILD_TAIL_CALL": "1", }, lib_source = "@com_github_wamr//:all", - static_libraries = ["libvmlib.a"], + out_static_libs = ["libvmlib.a"], tags = ["skip_on_windows"], deps = [":llvm"], ) -envoy_cmake_external( +envoy_cmake( name = "wavm", - binaries = ["wavm"], cache_entries = { "LLVM_DIR": "$EXT_BUILD_DEPS/copy_llvm/llvm/lib/cmake/llvm", "WAVM_ENABLE_STATIC_LINKING": "on", @@ -385,7 +385,7 @@ envoy_cmake_external( # using -l:libstdc++.a. "CMAKE_CXX_FLAGS": "-lstdc++ -Wno-unused-command-line-argument", }, - env_vars = { + env = { # Workaround for the -DDEBUG flag added in fastbuild on macOS, # which conflicts with DEBUG macro used in LLVM. 
"CFLAGS": "-UDEBUG", @@ -393,7 +393,8 @@ envoy_cmake_external( "ASMFLAGS": "-UDEBUG", }, lib_source = "@com_github_wavm_wavm//:all", - static_libraries = select({ + out_binaries = ["wavm"], + out_static_libs = select({ "//conditions:default": [ "libWAVM.a", "libWAVMUnwind.a", @@ -403,7 +404,7 @@ envoy_cmake_external( deps = [":llvm"], ) -envoy_cmake_external( +envoy_cmake( name = "zlib", cache_entries = { "CMAKE_CXX_COMPILER_FORCED": "on", @@ -436,7 +437,7 @@ envoy_cmake_external( "//bazel:zlib_ng": "@com_github_zlib_ng_zlib_ng//:all", "//conditions:default": "@net_zlib//:all", }), - static_libraries = select({ + out_static_libs = select({ "//bazel:windows_x86_64": ["zlib.lib"], "//conditions:default": ["libz.a"], }), diff --git a/bazel/repository_locations.bzl b/bazel/repository_locations.bzl index f1f6d073f1ceb..853690229f558 100644 --- a/bazel/repository_locations.bzl +++ b/bazel/repository_locations.bzl @@ -664,12 +664,12 @@ REPOSITORY_LOCATIONS_SPEC = dict( project_name = "Rules for using foreign build systems in Bazel", project_desc = "Rules for using foreign build systems in Bazel", project_url = "https://github.com/bazelbuild/rules_foreign_cc", - version = "d54c78ab86b40770ee19f0949db9d74a831ab9f0", - sha256 = "e7446144277c9578141821fc91c55a61df7ae01bda890902f7286f5fd2f6ae46", + version = "6c0c2af3d599f4c23117a5e65e811ebab75bb151", + sha256 = "8a438371fa742bbbae8b6d995905280053098c5aac28cd434240cd75bc2415a5", strip_prefix = "rules_foreign_cc-{version}", urls = ["https://github.com/bazelbuild/rules_foreign_cc/archive/{version}.tar.gz"], - release_date = "2020-10-26", - use_category = ["build"], + release_date = "2021-09-22", + use_category = ["build", "dataplane_core", "controlplane"], ), rules_python = dict( project_name = "Python rules for Bazel", From e47e5571730f7b8676ba5eb3d0a1d73380199d6d Mon Sep 17 00:00:00 2001 From: danzh Date: Thu, 23 Sep 2021 16:59:43 -0400 Subject: [PATCH 107/121] quiche: support inplace filter chain update (#17988) Commit 
Message: updateListenerConfig() and onFilterChainDraining() to ActiveListener interface and implement them in ActiveQuicListener. Unblock listener update for UDP listener. Risk Level: high, touches LDS. Testing: new integration tests Fixes #13115 Signed-off-by: Dan Zhang Signed-off-by: gayang --- docs/root/version_history/current.rst | 2 + envoy/network/connection_handler.h | 11 + source/common/quic/BUILD | 1 + source/common/quic/active_quic_listener.cc | 18 ++ source/common/quic/active_quic_listener.h | 5 + .../common/quic/envoy_quic_client_session.h | 3 +- source/common/quic/envoy_quic_dispatcher.cc | 57 +++-- source/common/quic/envoy_quic_dispatcher.h | 7 +- source/common/quic/envoy_quic_proof_source.cc | 9 +- source/common/quic/envoy_quic_proof_source.h | 6 +- .../common/quic/envoy_quic_server_session.cc | 29 ++- .../common/quic/envoy_quic_server_session.h | 28 ++- .../quic_filter_manager_connection_impl.cc | 15 +- .../quic_filter_manager_connection_impl.h | 2 +- source/common/runtime/runtime_features.cc | 1 + source/server/active_stream_listener_base.h | 4 +- source/server/active_tcp_listener.h | 2 +- source/server/active_udp_listener.h | 6 + source/server/connection_handler_impl.cc | 14 +- source/server/listener_impl.cc | 49 +++-- source/server/listener_impl.h | 5 +- test/common/quic/active_quic_listener_test.cc | 6 +- .../common/quic/envoy_quic_dispatcher_test.cc | 51 +++++ .../quic/envoy_quic_proof_source_test.cc | 1 + .../quic/envoy_quic_server_session_test.cc | 3 +- .../integration/quic_http_integration_test.cc | 197 +++++++++++++++++- test/server/connection_handler_test.cc | 2 +- test/server/listener_manager_impl_test.cc | 36 +++- 28 files changed, 486 insertions(+), 84 deletions(-) diff --git a/docs/root/version_history/current.rst b/docs/root/version_history/current.rst index b8fa93fa3613b..32000cdc1e6ae 100644 --- a/docs/root/version_history/current.rst +++ b/docs/root/version_history/current.rst @@ -130,6 +130,8 @@ New Features * router: added an 
optional :ref:`override_auto_sni_header ` to support setting SNI value from an arbitrary header other than host/authority. * sxg_filter: added filter to transform response to SXG package to :ref:`contrib images `. This can be enabled by setting :ref:`SXG ` configuration. * thrift_proxy: added support for :ref:`mirroring requests `. +* udp: allows updating filter chain in-place through LDS, which is supported by Quic listener. Such listener config will be rejected in other connection-less UDP listener implementations. It can be reverted by ``envoy.reloadable_features.udp_listener_updates_filter_chain_in_place``. +* udp: disallow L4 filter chain in config which configures connection-less UDP listener. It can be reverted by ``envoy.reloadable_features.udp_listener_updates_filter_chain_in_place``. Deprecated ---------- diff --git a/envoy/network/connection_handler.h b/envoy/network/connection_handler.h index beabccddf8cf2..5cab5acdc26bc 100644 --- a/envoy/network/connection_handler.h +++ b/envoy/network/connection_handler.h @@ -132,6 +132,17 @@ class ConnectionHandler { * Stop listening according to implementation's own definition. */ virtual void shutdownListener() PURE; + + /** + * Update the listener config. + */ + virtual void updateListenerConfig(Network::ListenerConfig& config) PURE; + + /** + * Called when the given filter chains are about to be removed. 
+ */ + virtual void onFilterChainDraining( + const std::list& draining_filter_chains) PURE; }; using ActiveListenerPtr = std::unique_ptr; diff --git a/source/common/quic/BUILD b/source/common/quic/BUILD index fed592a8ddb11..f1fd86dd5c2b5 100644 --- a/source/common/quic/BUILD +++ b/source/common/quic/BUILD @@ -265,6 +265,7 @@ envoy_cc_library( ":envoy_quic_utils_lib", ":quic_filter_manager_connection_lib", ":quic_stat_names_lib", + ":quic_transport_socket_factory_lib", "//source/common/buffer:buffer_lib", "//source/common/common:assert_lib", "//source/common/http:codes_lib", diff --git a/source/common/quic/active_quic_listener.cc b/source/common/quic/active_quic_listener.cc index 5bd7651758ada..304ff8bc7d34f 100644 --- a/source/common/quic/active_quic_listener.cc +++ b/source/common/quic/active_quic_listener.cc @@ -221,6 +221,24 @@ size_t ActiveQuicListener::numPacketsExpectedPerEventLoop() const { return quic_dispatcher_->NumSessions() * packets_to_read_to_connection_count_ratio_; } +void ActiveQuicListener::updateListenerConfig(Network::ListenerConfig& config) { + config_ = &config; + dynamic_cast(crypto_config_->proof_source()) + ->updateFilterChainManager(config.filterChainManager()); + quic_dispatcher_->updateListenerConfig(config); +} + +void ActiveQuicListener::onFilterChainDraining( + const std::list& draining_filter_chains) { + for (auto* filter_chain : draining_filter_chains) { + closeConnectionsWithFilterChain(filter_chain); + } +} + +void ActiveQuicListener::closeConnectionsWithFilterChain(const Network::FilterChain* filter_chain) { + quic_dispatcher_->closeConnectionsWithFilterChain(filter_chain); +} + ActiveQuicListenerFactory::ActiveQuicListenerFactory( const envoy::config::listener::v3::QuicProtocolOptions& config, uint32_t concurrency, QuicStatNames& quic_stat_names) diff --git a/source/common/quic/active_quic_listener.h b/source/common/quic/active_quic_listener.h index be72d17334770..eca1f28c0caa8 100644 --- 
a/source/common/quic/active_quic_listener.h +++ b/source/common/quic/active_quic_listener.h @@ -70,10 +70,15 @@ class ActiveQuicListener : public Envoy::Server::ActiveUdpListenerBase, void pauseListening() override; void resumeListening() override; void shutdownListener() override; + void updateListenerConfig(Network::ListenerConfig& config) override; + void onFilterChainDraining( + const std::list& draining_filter_chains) override; private: friend class ActiveQuicListenerPeer; + void closeConnectionsWithFilterChain(const Network::FilterChain* filter_chain); + uint8_t random_seed_[16]; std::unique_ptr crypto_config_; Event::Dispatcher& dispatcher_; diff --git a/source/common/quic/envoy_quic_client_session.h b/source/common/quic/envoy_quic_client_session.h index def847ea0eb23..a4d650166393f 100644 --- a/source/common/quic/envoy_quic_client_session.h +++ b/source/common/quic/envoy_quic_client_session.h @@ -99,7 +99,8 @@ class EnvoyQuicClientSession : public QuicFilterManagerConnectionImpl, // These callbacks are owned by network filters and quic session should outlive // them. Http::ConnectionCallbacks* http_connection_callbacks_{nullptr}; - const absl::string_view host_name_; + // TODO(danzh) deprecate this field once server_id() is made const. 
+ const std::string host_name_; std::shared_ptr crypto_config_; EnvoyQuicCryptoClientStreamFactoryInterface& crypto_stream_factory_; QuicStatNames& quic_stat_names_; diff --git a/source/common/quic/envoy_quic_dispatcher.cc b/source/common/quic/envoy_quic_dispatcher.cc index 217e561e2b65d..37fd7472c9a6a 100644 --- a/source/common/quic/envoy_quic_dispatcher.cc +++ b/source/common/quic/envoy_quic_dispatcher.cc @@ -2,12 +2,14 @@ #include +#include +#include + #include "envoy/common/optref.h" #include "source/common/common/safe_memcpy.h" #include "source/common/http/utility.h" #include "source/common/quic/envoy_quic_server_connection.h" -#include "source/common/quic/envoy_quic_server_session.h" #include "source/common/quic/envoy_quic_utils.h" namespace Envoy { @@ -26,7 +28,7 @@ EnvoyQuicDispatcher::EnvoyQuicDispatcher( : quic::QuicDispatcher(&quic_config, crypto_config, version_manager, std::move(helper), std::make_unique(), std::move(alarm_factory), expected_server_connection_id_length), - connection_handler_(connection_handler), listener_config_(listener_config), + connection_handler_(connection_handler), listener_config_(&listener_config), listener_stats_(listener_stats), per_worker_stats_(per_worker_stats), dispatcher_(dispatcher), listen_socket_(listen_socket), quic_stat_names_(quic_stat_names), crypto_server_stream_factory_(crypto_server_stream_factory) { @@ -52,19 +54,21 @@ void EnvoyQuicDispatcher::OnConnectionClosed(quic::QuicConnectionId connection_i listener_stats_.downstream_cx_active_.dec(); per_worker_stats_.downstream_cx_active_.dec(); connection_handler_.decNumConnections(); - quic_stat_names_.chargeQuicConnectionCloseStats(listener_config_.listenerScope(), error, source, + quic_stat_names_.chargeQuicConnectionCloseStats(listener_config_->listenerScope(), error, source, /*is_upstream*/ false); } std::unique_ptr EnvoyQuicDispatcher::CreateQuicSession( quic::QuicConnectionId server_connection_id, const quic::QuicSocketAddress& self_address, - const 
quic::QuicSocketAddress& peer_address, absl::string_view alpn, + const quic::QuicSocketAddress& peer_address, absl::string_view /*alpn*/, const quic::ParsedQuicVersion& version, absl::string_view sni) { quic::QuicConfig quic_config = config(); + // TODO(danzh) use passed-in ALPN instead of hard-coded h3 after proof source interfaces takes in + // ALPN. Network::ConnectionSocketPtr connection_socket = createServerConnectionSocket( - listen_socket_.ioHandle(), self_address, peer_address, std::string(sni), alpn); + listen_socket_.ioHandle(), self_address, peer_address, std::string(sni), "h3"); const Network::FilterChain* filter_chain = - listener_config_.filterChainManager().findFilterChain(*connection_socket); + listener_config_->filterChainManager().findFilterChain(*connection_socket); auto quic_connection = std::make_unique( server_connection_id, self_address, peer_address, *helper(), *alarm_factory(), writer(), @@ -72,24 +76,21 @@ std::unique_ptr EnvoyQuicDispatcher::CreateQuicSession( auto quic_session = std::make_unique( quic_config, quic::ParsedQuicVersionVector{version}, std::move(quic_connection), this, session_helper(), crypto_config(), compressed_certs_cache(), dispatcher_, - listener_config_.perConnectionBufferLimitBytes(), quic_stat_names_, - listener_config_.listenerScope(), crypto_server_stream_factory_, - makeOptRefFromPtr(filter_chain == nullptr ? nullptr - : &filter_chain->transportSocketFactory())); + listener_config_->perConnectionBufferLimitBytes(), quic_stat_names_, + listener_config_->listenerScope(), crypto_server_stream_factory_); if (filter_chain != nullptr) { + // Setup filter chain before Initialize(). const bool has_filter_initialized = - listener_config_.filterChainFactory().createNetworkFilterChain( + listener_config_->filterChainFactory().createNetworkFilterChain( *quic_session, filter_chain->networkFilterFactories()); // QUIC listener must have HCM filter configured. Otherwise, stream creation later will fail. 
ASSERT(has_filter_initialized); + connections_by_filter_chain_[filter_chain].push_front( + std::reference_wrapper(*quic_session)); + quic_session->storeConnectionMapPosition(connections_by_filter_chain_, *filter_chain, + connections_by_filter_chain_[filter_chain].begin()); } quic_session->Initialize(); - // Filter chain can't be retrieved here as self address is unknown at this - // point. - // TODO(danzh): change QUIC interface to pass in self address as it is already - // known. In this way, filter chain can be retrieved at this point. But one - // thing to pay attention is that if the retrieval fails, connection needs to - // be closed, and it should be added to time wait list instead of session map. connection_handler_.incNumConnections(); listener_stats_.downstream_cx_active_.inc(); listener_stats_.downstream_cx_total_.inc(); @@ -107,5 +108,27 @@ quic::QuicConnectionId EnvoyQuicDispatcher::ReplaceLongServerConnectionId( return new_connection_id; } +void EnvoyQuicDispatcher::closeConnectionsWithFilterChain( + const Network::FilterChain* filter_chain) { + auto iter = connections_by_filter_chain_.find(filter_chain); + if (iter != connections_by_filter_chain_.end()) { + std::list>& connections = iter->second; + // Retain the number of connections in the list early because closing the connection will change + // the size. + const size_t num_connections = connections.size(); + for (size_t i = 0; i < num_connections; ++i) { + Network::Connection& connection = connections.front().get(); + // This will remove the connection from the list. And the last removal will remove connections + // from the map as well. 
+ connection.close(Network::ConnectionCloseType::NoFlush); + } + ASSERT(connections_by_filter_chain_.find(filter_chain) == connections_by_filter_chain_.end()); + } +} + +void EnvoyQuicDispatcher::updateListenerConfig(Network::ListenerConfig& new_listener_config) { + listener_config_ = &new_listener_config; +} + } // namespace Quic } // namespace Envoy diff --git a/source/common/quic/envoy_quic_dispatcher.h b/source/common/quic/envoy_quic_dispatcher.h index b429e908d11be..77ed2ffcb361c 100644 --- a/source/common/quic/envoy_quic_dispatcher.h +++ b/source/common/quic/envoy_quic_dispatcher.h @@ -20,6 +20,7 @@ #include "source/server/connection_handler_impl.h" #include "source/server/active_listener_base.h" #include "source/common/quic/envoy_quic_crypto_stream_factory.h" +#include "source/common/quic/envoy_quic_server_session.h" #include "source/common/quic/quic_stat_names.h" namespace Envoy { @@ -54,6 +55,9 @@ class EnvoyQuicDispatcher : public quic::QuicDispatcher { void OnConnectionClosed(quic::QuicConnectionId connection_id, quic::QuicErrorCode error, const std::string& error_details, quic::ConnectionCloseSource source) override; + void closeConnectionsWithFilterChain(const Network::FilterChain* filter_chain); + + void updateListenerConfig(Network::ListenerConfig& new_listener_config); protected: // quic::QuicDispatcher @@ -72,13 +76,14 @@ class EnvoyQuicDispatcher : public quic::QuicDispatcher { private: Network::ConnectionHandler& connection_handler_; - Network::ListenerConfig& listener_config_; + Network::ListenerConfig* listener_config_{nullptr}; Server::ListenerStats& listener_stats_; Server::PerHandlerListenerStats& per_worker_stats_; Event::Dispatcher& dispatcher_; Network::Socket& listen_socket_; QuicStatNames& quic_stat_names_; EnvoyQuicCryptoServerStreamFactoryInterface& crypto_server_stream_factory_; + FilterChainToConnectionMap connections_by_filter_chain_; }; } // namespace Quic diff --git a/source/common/quic/envoy_quic_proof_source.cc 
b/source/common/quic/envoy_quic_proof_source.cc index d755be071e9f5..56f21560eb8f2 100644 --- a/source/common/quic/envoy_quic_proof_source.cc +++ b/source/common/quic/envoy_quic_proof_source.cc @@ -103,13 +103,15 @@ EnvoyQuicProofSource::getTlsCertConfigAndFilterChain(const quic::QuicSocketAddre Network::ConnectionSocketPtr connection_socket = createServerConnectionSocket( listen_socket_.ioHandle(), server_address, client_address, hostname, "h3"); const Network::FilterChain* filter_chain = - filter_chain_manager_.findFilterChain(*connection_socket); + filter_chain_manager_->findFilterChain(*connection_socket); if (filter_chain == nullptr) { listener_stats_.no_filter_chain_match_.inc(); ENVOY_LOG(warn, "No matching filter chain found for handshake."); return {absl::nullopt, absl::nullopt}; } + ENVOY_LOG(trace, "Got a matching cert chain {}", filter_chain->name()); + auto& transport_socket_factory = dynamic_cast(filter_chain->transportSocketFactory()); @@ -125,5 +127,10 @@ EnvoyQuicProofSource::getTlsCertConfigAndFilterChain(const quic::QuicSocketAddre return {tls_cert_configs[0].get(), *filter_chain}; } +void EnvoyQuicProofSource::updateFilterChainManager( + Network::FilterChainManager& filter_chain_manager) { + filter_chain_manager_ = &filter_chain_manager; +} + } // namespace Quic } // namespace Envoy diff --git a/source/common/quic/envoy_quic_proof_source.h b/source/common/quic/envoy_quic_proof_source.h index 69d62fd549184..84668caf4d906 100644 --- a/source/common/quic/envoy_quic_proof_source.h +++ b/source/common/quic/envoy_quic_proof_source.h @@ -14,7 +14,7 @@ class EnvoyQuicProofSource : public EnvoyQuicProofSourceBase { EnvoyQuicProofSource(Network::Socket& listen_socket, Network::FilterChainManager& filter_chain_manager, Server::ListenerStats& listener_stats) - : listen_socket_(listen_socket), filter_chain_manager_(filter_chain_manager), + : listen_socket_(listen_socket), filter_chain_manager_(&filter_chain_manager), listener_stats_(listener_stats) {} 
~EnvoyQuicProofSource() override = default; @@ -25,6 +25,8 @@ class EnvoyQuicProofSource : public EnvoyQuicProofSourceBase { const quic::QuicSocketAddress& client_address, const std::string& hostname, bool* cert_matched_sni) override; + void updateFilterChainManager(Network::FilterChainManager& filter_chain_manager); + protected: // quic::ProofSource void signPayload(const quic::QuicSocketAddress& server_address, @@ -44,7 +46,7 @@ class EnvoyQuicProofSource : public EnvoyQuicProofSourceBase { const std::string& hostname); Network::Socket& listen_socket_; - Network::FilterChainManager& filter_chain_manager_; + Network::FilterChainManager* filter_chain_manager_{nullptr}; Server::ListenerStats& listener_stats_; }; diff --git a/source/common/quic/envoy_quic_server_session.cc b/source/common/quic/envoy_quic_server_session.cc index 15d1d28745dd3..fc3fe9b2b938f 100644 --- a/source/common/quic/envoy_quic_server_session.cc +++ b/source/common/quic/envoy_quic_server_session.cc @@ -1,5 +1,6 @@ #include "source/common/quic/envoy_quic_server_session.h" +#include #include #include "source/common/common/assert.h" @@ -15,15 +16,14 @@ EnvoyQuicServerSession::EnvoyQuicServerSession( quic::QuicCryptoServerStream::Helper* helper, const quic::QuicCryptoServerConfig* crypto_config, quic::QuicCompressedCertsCache* compressed_certs_cache, Event::Dispatcher& dispatcher, uint32_t send_buffer_limit, QuicStatNames& quic_stat_names, Stats::Scope& listener_scope, - EnvoyQuicCryptoServerStreamFactoryInterface& crypto_server_stream_factory, - OptRef transport_socket_factory) + EnvoyQuicCryptoServerStreamFactoryInterface& crypto_server_stream_factory) : quic::QuicServerSessionBase(config, supported_versions, connection.get(), visitor, helper, crypto_config, compressed_certs_cache), QuicFilterManagerConnectionImpl(*connection, connection->connection_id(), dispatcher, send_buffer_limit), quic_connection_(std::move(connection)), quic_stat_names_(quic_stat_names), - listener_scope_(listener_scope), 
crypto_server_stream_factory_(crypto_server_stream_factory), - transport_socket_factory_(transport_socket_factory) {} + listener_scope_(listener_scope), crypto_server_stream_factory_(crypto_server_stream_factory) { +} EnvoyQuicServerSession::~EnvoyQuicServerSession() { ASSERT(!quic_connection_->connected()); @@ -39,7 +39,9 @@ EnvoyQuicServerSession::CreateQuicCryptoServerStream( const quic::QuicCryptoServerConfig* crypto_config, quic::QuicCompressedCertsCache* compressed_certs_cache) { return crypto_server_stream_factory_.createEnvoyQuicCryptoServerStream( - crypto_config, compressed_certs_cache, this, stream_helper(), transport_socket_factory_, + crypto_config, compressed_certs_cache, this, stream_helper(), + makeOptRefFromPtr(position_.has_value() ? &position_->filter_chain_.transportSocketFactory() + : nullptr), dispatcher()); } @@ -89,6 +91,17 @@ void EnvoyQuicServerSession::OnConnectionClosed(const quic::QuicConnectionCloseF quic::ConnectionCloseSource source) { quic::QuicServerSessionBase::OnConnectionClosed(frame, source); onConnectionCloseEvent(frame, source, version()); + if (position_.has_value()) { + // Remove this connection from the map. + std::list>& connections = + position_->connection_map_[&position_->filter_chain_]; + connections.erase(position_->iterator_); + if (connections.empty()) { + // Remove the whole entry if this is the last connection using this filter chain. 
+ position_->connection_map_.erase(&position_->filter_chain_); + } + position_.reset(); + } } void EnvoyQuicServerSession::Initialize() { @@ -133,5 +146,11 @@ void EnvoyQuicServerSession::OnRstStream(const quic::QuicRstStreamFrame& frame) /*from_self*/ false, /*is_upstream*/ false); } +void EnvoyQuicServerSession::storeConnectionMapPosition(FilterChainToConnectionMap& connection_map, + const Network::FilterChain& filter_chain, + ConnectionMapIter position) { + position_.emplace(connection_map, filter_chain, position); +} + } // namespace Quic } // namespace Envoy diff --git a/source/common/quic/envoy_quic_server_session.h b/source/common/quic/envoy_quic_server_session.h index a3a804023aef9..313c447101bb4 100644 --- a/source/common/quic/envoy_quic_server_session.h +++ b/source/common/quic/envoy_quic_server_session.h @@ -29,6 +29,25 @@ namespace Envoy { namespace Quic { +using FilterChainToConnectionMap = + absl::flat_hash_map>>; +using ConnectionMapIter = std::list>::iterator; + +// Used to track the matching filter chain and its position in the filter chain to connection map. +struct ConnectionMapPosition { + ConnectionMapPosition(FilterChainToConnectionMap& connection_map, + const Network::FilterChain& filter_chain, ConnectionMapIter iterator) + : connection_map_(connection_map), filter_chain_(filter_chain), iterator_(iterator) {} + + // Stores the map from filter chain of connections. + FilterChainToConnectionMap& connection_map_; + // The matching filter chain of a connection. + const Network::FilterChain& filter_chain_; + // The position of the connection in the map. + ConnectionMapIter iterator_; +}; + // Act as a Network::Connection to HCM and a FilterManager to FilterFactoryCb. 
// TODO(danzh) Lifetime of quic connection and filter manager connection can be // simplified by changing the inheritance to a member variable instantiated @@ -45,8 +64,7 @@ class EnvoyQuicServerSession : public quic::QuicServerSessionBase, quic::QuicCompressedCertsCache* compressed_certs_cache, Event::Dispatcher& dispatcher, uint32_t send_buffer_limit, QuicStatNames& quic_stat_names, Stats::Scope& listener_scope, - EnvoyQuicCryptoServerStreamFactoryInterface& crypto_server_stream_factory, - OptRef transport_socket_factory); + EnvoyQuicCryptoServerStreamFactoryInterface& crypto_server_stream_factory); ~EnvoyQuicServerSession() override; @@ -77,6 +95,10 @@ class EnvoyQuicServerSession : public quic::QuicServerSessionBase, headers_with_underscores_action_ = headers_with_underscores_action; } + void storeConnectionMapPosition(FilterChainToConnectionMap& connection_map, + const Network::FilterChain& filter_chain, + ConnectionMapIter position); + using quic::QuicSession::PerformActionOnActiveStreams; protected: @@ -113,7 +135,7 @@ class EnvoyQuicServerSession : public quic::QuicServerSessionBase, Stats::Scope& listener_scope_; EnvoyQuicCryptoServerStreamFactoryInterface& crypto_server_stream_factory_; - OptRef transport_socket_factory_; + absl::optional position_; }; } // namespace Quic diff --git a/source/common/quic/quic_filter_manager_connection_impl.cc b/source/common/quic/quic_filter_manager_connection_impl.cc index a2049252c60db..b69a63f2681d1 100644 --- a/source/common/quic/quic_filter_manager_connection_impl.cc +++ b/source/common/quic/quic_filter_manager_connection_impl.cc @@ -12,7 +12,9 @@ QuicFilterManagerConnectionImpl::QuicFilterManagerConnectionImpl( // Using this for purpose other than logging is not safe. Because QUIC connection id can be // 18 bytes, so there might be collision when it's hashed to 8 bytes. 
: Network::ConnectionImplBase(dispatcher, /*id=*/connection_id.Hash()), - network_connection_(&connection), filter_manager_(*this, *connection.connectionSocket()), + network_connection_(&connection), + filter_manager_( + std::make_unique(*this, *connection.connectionSocket())), stream_info_(dispatcher.timeSource(), connection.connectionSocket()->connectionInfoProviderSharedPtr()), write_buffer_watermark_simulation_( @@ -22,23 +24,23 @@ QuicFilterManagerConnectionImpl::QuicFilterManagerConnectionImpl( } void QuicFilterManagerConnectionImpl::addWriteFilter(Network::WriteFilterSharedPtr filter) { - filter_manager_.addWriteFilter(filter); + filter_manager_->addWriteFilter(filter); } void QuicFilterManagerConnectionImpl::addFilter(Network::FilterSharedPtr filter) { - filter_manager_.addFilter(filter); + filter_manager_->addFilter(filter); } void QuicFilterManagerConnectionImpl::addReadFilter(Network::ReadFilterSharedPtr filter) { - filter_manager_.addReadFilter(filter); + filter_manager_->addReadFilter(filter); } void QuicFilterManagerConnectionImpl::removeReadFilter(Network::ReadFilterSharedPtr filter) { - filter_manager_.removeReadFilter(filter); + filter_manager_->removeReadFilter(filter); } bool QuicFilterManagerConnectionImpl::initializeReadFilters() { - return filter_manager_.initializeReadFilters(); + return filter_manager_->initializeReadFilters(); } void QuicFilterManagerConnectionImpl::enableHalfClose(bool enabled) { @@ -171,6 +173,7 @@ void QuicFilterManagerConnectionImpl::onConnectionCloseEvent( network_connection_ = nullptr; } + filter_manager_ = nullptr; if (!codec_stats_.has_value()) { // The connection was closed before it could be used. Stats are not recorded. 
return; diff --git a/source/common/quic/quic_filter_manager_connection_impl.h b/source/common/quic/quic_filter_manager_connection_impl.h index f2e112297f828..7003860c80116 100644 --- a/source/common/quic/quic_filter_manager_connection_impl.h +++ b/source/common/quic/quic_filter_manager_connection_impl.h @@ -183,7 +183,7 @@ class QuicFilterManagerConnectionImpl : public Network::ConnectionImplBase, // filters are added, ConnectionManagerImpl should always be the last one. // Its onRead() is only called once to trigger ReadFilter::onNewConnection() // and the rest incoming data bypasses these filters. - Network::FilterManagerImpl filter_manager_; + std::unique_ptr filter_manager_; StreamInfo::StreamInfoImpl stream_info_; std::string transport_failure_reason_; diff --git a/source/common/runtime/runtime_features.cc b/source/common/runtime/runtime_features.cc index 0abb465135964..c60de73836047 100644 --- a/source/common/runtime/runtime_features.cc +++ b/source/common/runtime/runtime_features.cc @@ -91,6 +91,7 @@ constexpr const char* runtime_features[] = { "envoy.reloadable_features.strip_port_from_connect", "envoy.reloadable_features.treat_host_like_authority", "envoy.reloadable_features.treat_upstream_connect_timeout_as_connect_failure", + "envoy.reloadable_features.udp_listener_updates_filter_chain_in_place", "envoy.reloadable_features.udp_per_event_loop_read_limit", "envoy.reloadable_features.unquote_log_string_values", "envoy.reloadable_features.upstream_host_weight_change_causes_rebuild", diff --git a/source/server/active_stream_listener_base.h b/source/server/active_stream_listener_base.h index 89ebd2877c659..38e50a1a4af58 100644 --- a/source/server/active_stream_listener_base.h +++ b/source/server/active_stream_listener_base.h @@ -38,8 +38,8 @@ class ActiveStreamListenerBase : public ActiveListenerImplBase, * Schedule to remove and destroy the active connections which are not tracked by listener * config. 
Caution: The connection are not destroyed yet when function returns. */ - void - deferredRemoveFilterChains(const std::list& draining_filter_chains) { + void onFilterChainDraining( + const std::list& draining_filter_chains) override { // Need to recover the original deleting state. const bool was_deleting = is_deleting_; is_deleting_ = true; diff --git a/source/server/active_tcp_listener.h b/source/server/active_tcp_listener.h index 00d93e744a8d7..0d0299a44914f 100644 --- a/source/server/active_tcp_listener.h +++ b/source/server/active_tcp_listener.h @@ -74,7 +74,7 @@ class ActiveTcpListener final : public Network::TcpListenerCallbacks, * Update the listener config. The follow up connections will see the new config. The existing * connections are not impacted. */ - void updateListenerConfig(Network::ListenerConfig& config); + void updateListenerConfig(Network::ListenerConfig& config) override; Network::TcpConnectionHandler& tcp_conn_handler_; // The number of connections currently active on this listener. This is typically used for diff --git a/source/server/active_udp_listener.h b/source/server/active_udp_listener.h index eef7ca228e738..68918ffc39301 100644 --- a/source/server/active_udp_listener.h +++ b/source/server/active_udp_listener.h @@ -103,6 +103,12 @@ class ActiveRawUdpListener : public ActiveUdpListenerBase, read_filter_.reset(); udp_listener_.reset(); } + // These two are unreachable because a config will be rejected if it configures both this listener + // and any L4 filter chain. 
+ void updateListenerConfig(Network::ListenerConfig&) override { NOT_REACHED_GCOVR_EXCL_LINE; } + void onFilterChainDraining(const std::list&) override { + NOT_REACHED_GCOVR_EXCL_LINE; + } // Network::UdpListenerFilterManager void addReadFilter(Network::UdpListenerReadFilterPtr&& filter) override; diff --git a/source/server/connection_handler_impl.cc b/source/server/connection_handler_impl.cc index 4f64afc227b06..4bcdcd9cc16aa 100644 --- a/source/server/connection_handler_impl.cc +++ b/source/server/connection_handler_impl.cc @@ -27,9 +27,19 @@ void ConnectionHandlerImpl::decNumConnections() { void ConnectionHandlerImpl::addListener(absl::optional overridden_listener, Network::ListenerConfig& config) { + const bool support_udp_in_place_filter_chain_update = Runtime::runtimeFeatureEnabled( + "envoy.reloadable_features.udp_listener_updates_filter_chain_in_place"); + if (support_udp_in_place_filter_chain_update && overridden_listener.has_value()) { + ActiveListenerDetailsOptRef listener_detail = + findActiveListenerByTag(overridden_listener.value()); + ASSERT(listener_detail.has_value()); + listener_detail->get().listener_->updateListenerConfig(config); + return; + } + ActiveListenerDetails details; if (config.listenSocketFactory().socketType() == Network::Socket::Type::Stream) { - if (overridden_listener.has_value()) { + if (!support_udp_in_place_filter_chain_update && overridden_listener.has_value()) { for (auto& listener : listeners_) { if (listener.second.listener_->listenerTag() == overridden_listener) { listener.second.tcpListener()->get().updateListenerConfig(config); @@ -89,7 +99,7 @@ void ConnectionHandlerImpl::removeFilterChains( std::function completion) { for (auto& listener : listeners_) { if (listener.second.listener_->listenerTag() == listener_tag) { - listener.second.tcpListener()->get().deferredRemoveFilterChains(filter_chains); + listener.second.listener_->onFilterChainDraining(filter_chains); break; } } diff --git a/source/server/listener_impl.cc 
b/source/server/listener_impl.cc index df53a49ec6535..943ba20d4c18b 100644 --- a/source/server/listener_impl.cc +++ b/source/server/listener_impl.cc @@ -373,7 +373,7 @@ ListenerImpl::ListenerImpl(ListenerImpl& origin, const envoy::config::listener::v3::Listener& config, const std::string& version_info, ListenerManagerImpl& parent, const std::string& name, bool added_via_api, bool workers_started, - uint64_t hash, uint32_t concurrency) + uint64_t hash) : parent_(parent), address_(origin.address_), bind_to_port_(shouldBindToPort(config)), hand_off_restored_destination_connections_( PROTOBUF_GET_WRAPPED_OR_DEFAULT(config, use_original_dst, false)), @@ -394,6 +394,7 @@ ListenerImpl::ListenerImpl(ListenerImpl& origin, listener_filters_timeout_( PROTOBUF_GET_MS_OR_DEFAULT(config, listener_filters_timeout, 15000)), continue_on_listener_filters_timeout_(config.continue_on_listener_filters_timeout()), + udp_listener_config_(origin.udp_listener_config_), connection_balancer_(origin.connection_balancer_), listener_factory_context_(std::make_shared( origin.listener_factory_context_->listener_factory_context_base_, this, *this)), @@ -409,18 +410,18 @@ ListenerImpl::ListenerImpl(ListenerImpl& origin, quic_stat_names_(parent_.quicStatNames()) { buildAccessLog(); auto socket_type = Network::Utility::protobufAddressSocketType(config.address()); - // buildUdpListenerFactory() must come before buildListenSocketOptions() because the UDP - // listener factory can provide additional options. - buildUdpListenerFactory(socket_type, concurrency); buildListenSocketOptions(socket_type); createListenerFilterFactories(socket_type); validateFilterChains(socket_type); buildFilterChains(); - // In place update is tcp only so it's safe to apply below tcp only initialization. 
- buildSocketOptions(); - buildOriginalDstListenerFilter(); - buildProxyProtocolListenerFilter(); - open_connections_ = origin.open_connections_; + + if (socket_type == Network::Socket::Type::Stream) { + // Apply the options below only for TCP. + buildSocketOptions(); + buildOriginalDstListenerFilter(); + buildProxyProtocolListenerFilter(); + open_connections_ = origin.open_connections_; + } } void ListenerImpl::buildAccessLog() { @@ -443,7 +444,7 @@ void ListenerImpl::buildUdpListenerFactory(Network::Socket::Type socket_type, "set concurrency = 1."); } - udp_listener_config_ = std::make_unique(config_.udp_listener_config()); + udp_listener_config_ = std::make_shared(config_.udp_listener_config()); if (config_.udp_listener_config().has_quic_options()) { #ifdef ENVOY_ENABLE_QUIC if (config_.has_connection_balance_config()) { @@ -554,6 +555,15 @@ void ListenerImpl::validateFilterChains(Network::Socket::Type socket_type) { "specified for connection oriented UDP listener", address_->asString())); } + } else if (Runtime::runtimeFeatureEnabled( + "envoy.reloadable_features.udp_listener_updates_filter_chain_in_place") && + (!config_.filter_chains().empty() || config_.has_default_filter_chain()) && + udp_listener_config_ != nullptr && + udp_listener_config_->listener_factory_->isTransportConnectionless()) { + + throw EnvoyException(fmt::format("error adding listener '{}': {} filter chain(s) specified for " + "connection-less UDP listener.", + address_->asString(), config_.filter_chains_size())); } } @@ -769,11 +779,12 @@ bool ListenerImpl::supportUpdateFilterChain(const envoy::config::listener::v3::L return false; } - // Currently we only support TCP filter chain update. 
- if (Network::Utility::protobufAddressSocketType(config_.address()) != - Network::Socket::Type::Stream || - Network::Utility::protobufAddressSocketType(config.address()) != - Network::Socket::Type::Stream) { + if (!Runtime::runtimeFeatureEnabled( + "envoy.reloadable_features.udp_listener_updates_filter_chain_in_place") && + (Network::Utility::protobufAddressSocketType(config_.address()) != + Network::Socket::Type::Stream || + Network::Utility::protobufAddressSocketType(config.address()) != + Network::Socket::Type::Stream)) { return false; } @@ -795,10 +806,10 @@ ListenerImplPtr ListenerImpl::newListenerWithFilterChain(const envoy::config::listener::v3::Listener& config, bool workers_started, uint64_t hash) { // Use WrapUnique since the constructor is private. - return absl::WrapUnique( - new ListenerImpl(*this, config, version_info_, parent_, name_, added_via_api_, - /* new new workers started state */ workers_started, - /* use new hash */ hash, parent_.server_.options().concurrency())); + return absl::WrapUnique(new ListenerImpl(*this, config, version_info_, parent_, name_, + added_via_api_, + /* new new workers started state */ workers_started, + /* use new hash */ hash)); } void ListenerImpl::diffFilterChain(const ListenerImpl& another_listener, diff --git a/source/server/listener_impl.h b/source/server/listener_impl.h index f621731d7d632..46292ec422697 100644 --- a/source/server/listener_impl.h +++ b/source/server/listener_impl.h @@ -365,8 +365,7 @@ class ListenerImpl final : public Network::ListenerConfig, */ ListenerImpl(ListenerImpl& origin, const envoy::config::listener::v3::Listener& config, const std::string& version_info, ListenerManagerImpl& parent, - const std::string& name, bool added_via_api, bool workers_started, uint64_t hash, - uint32_t concurrency); + const std::string& name, bool added_via_api, bool workers_started, uint64_t hash); // Helpers for constructor. 
void buildAccessLog(); void buildUdpListenerFactory(Network::Socket::Type socket_type, uint32_t concurrency); @@ -413,7 +412,7 @@ class ListenerImpl final : public Network::ListenerConfig, Network::Socket::OptionsSharedPtr listen_socket_options_; const std::chrono::milliseconds listener_filters_timeout_; const bool continue_on_listener_filters_timeout_; - std::unique_ptr udp_listener_config_; + std::shared_ptr udp_listener_config_; Network::ConnectionBalancerSharedPtr connection_balancer_; std::shared_ptr listener_factory_context_; FilterChainManagerImpl filter_chain_manager_; diff --git a/test/common/quic/active_quic_listener_test.cc b/test/common/quic/active_quic_listener_test.cc index 06b49bd5cac6a..9f92852df8769 100644 --- a/test/common/quic/active_quic_listener_test.cc +++ b/test/common/quic/active_quic_listener_test.cc @@ -190,12 +190,12 @@ class ActiveQuicListenerTest : public testing::TestWithParam read_filter(new Network::MockReadFilter()); + Network::MockConnectionCallbacks network_connection_callbacks; + testing::StrictMock read_total; + testing::StrictMock read_current; + testing::StrictMock write_total; + testing::StrictMock write_current; + + std::vector filter_factory( + {[&](Network::FilterManager& filter_manager) { + filter_manager.addReadFilter(read_filter); + read_filter->callbacks_->connection().addConnectionCallbacks(network_connection_callbacks); + read_filter->callbacks_->connection().setConnectionStats( + {read_total, read_current, write_total, write_current, nullptr, nullptr}); + }}); + EXPECT_CALL(listener_config_, filterChainManager()).WillOnce(ReturnRef(filter_chain_manager)); + EXPECT_CALL(filter_chain_manager, findFilterChain(_)) + .WillOnce(Return(&proof_source_->filterChain())); + Network::MockTransportSocketFactory transport_socket_factory; + EXPECT_CALL(proof_source_->filterChain(), transportSocketFactory()) + .WillOnce(ReturnRef(transport_socket_factory)); + EXPECT_CALL(proof_source_->filterChain(), networkFilterFactories()) + 
.WillOnce(ReturnRef(filter_factory)); + EXPECT_CALL(listener_config_, filterChainFactory()); + EXPECT_CALL(listener_config_.filter_chain_factory_, createNetworkFilterChain(_, _)) + .WillOnce(Invoke([](Network::Connection& connection, + const std::vector& filter_factories) { + EXPECT_EQ(1u, filter_factories.size()); + Server::Configuration::FilterChainUtility::buildFilterChain(connection, filter_factories); + return true; + })); + EXPECT_CALL(*read_filter, onNewConnection()) + // Stop iteration to avoid calling getRead/WriteBuffer(). + .WillOnce(Return(Network::FilterStatus::StopIteration)); + + quic::QuicSocketAddress peer_addr(version_ == Network::Address::IpVersion::v4 + ? quic::QuicIpAddress::Loopback4() + : quic::QuicIpAddress::Loopback6(), + 54321); + // Set QuicDispatcher::new_sessions_allowed_per_event_loop_ to + // |kNumSessionsToCreatePerLoopForTests| so that received CHLOs can be + // processed immediately. + envoy_quic_dispatcher_.ProcessBufferedChlos(kNumSessionsToCreatePerLoopForTests); + + processValidChloPacket(peer_addr); + + EXPECT_CALL(network_connection_callbacks, onEvent(Network::ConnectionEvent::LocalClose)); + envoy_quic_dispatcher_.closeConnectionsWithFilterChain(&proof_source_->filterChain()); +} + } // namespace Quic } // namespace Envoy diff --git a/test/common/quic/envoy_quic_proof_source_test.cc b/test/common/quic/envoy_quic_proof_source_test.cc index a2deedcb29d16..9ef2e3fe54706 100644 --- a/test/common/quic/envoy_quic_proof_source_test.cc +++ b/test/common/quic/envoy_quic_proof_source_test.cc @@ -144,6 +144,7 @@ class EnvoyQuicProofSourceTest : public ::testing::Test { listener_config_.listenerScope(), std::unique_ptr(mock_context_config_)); transport_socket_factory_->initialize(); + EXPECT_CALL(filter_chain_, name()).WillRepeatedly(Return("")); } void expectCertChainAndPrivateKey(const std::string& cert, bool expect_private_key) { diff --git a/test/common/quic/envoy_quic_server_session_test.cc 
b/test/common/quic/envoy_quic_server_session_test.cc index 8ac32ca898135..a8bcadbaf4b17 100644 --- a/test/common/quic/envoy_quic_server_session_test.cc +++ b/test/common/quic/envoy_quic_server_session_test.cc @@ -164,8 +164,7 @@ class EnvoyQuicServerSessionTest : public testing::Test { &compressed_certs_cache_, *dispatcher_, /*send_buffer_limit*/ quic::kDefaultFlowControlSendWindow * 1.5, quic_stat_names_, listener_config_.listenerScope(), - crypto_stream_factory_, - makeOptRefFromPtr(nullptr)), + crypto_stream_factory_), stats_({ALL_HTTP3_CODEC_STATS( POOL_COUNTER_PREFIX(listener_config_.listenerScope(), "http3."), POOL_GAUGE_PREFIX(listener_config_.listenerScope(), "http3."))}) { diff --git a/test/integration/quic_http_integration_test.cc b/test/integration/quic_http_integration_test.cc index 5fe18fe47eadb..13d2741bdb4f7 100644 --- a/test/integration/quic_http_integration_test.cc +++ b/test/integration/quic_http_integration_test.cc @@ -87,6 +87,12 @@ class QuicHttpIntegrationTest : public HttpIntegrationTest, uint32_t port, const Network::ConnectionSocket::OptionsSharedPtr& options) override { // Setting socket options is not supported. ASSERT(!options); + return makeClientConnectionWithHost(port, ""); + } + + Network::ClientConnectionPtr makeClientConnectionWithHost(uint32_t port, + const std::string& host) { + // Setting socket options is not supported. server_addr_ = Network::Utility::resolveUrl( fmt::format("udp://{}:{}", Network::Test::getLoopbackAddressUrlString(version_), port)); Network::Address::InstanceConstSharedPtr local_addr = @@ -103,8 +109,9 @@ class QuicHttpIntegrationTest : public HttpIntegrationTest, auto& persistent_info = static_cast(*quic_connection_persistent_info_); auto session = std::make_unique( persistent_info.quic_config_, supported_versions_, std::move(connection), - persistent_info.server_id_, persistent_info.cryptoConfig(), &push_promise_index_, - *dispatcher_, + (host.empty() ? 
persistent_info.server_id_ + : quic::QuicServerId{host, static_cast(port), false}), + persistent_info.cryptoConfig(), &push_promise_index_, *dispatcher_, // Use smaller window than the default one to have test coverage of client codec buffer // exceeding high watermark. /*send_buffer_limit=*/2 * Http2::Utility::OptionsLimits::MIN_INITIAL_STREAM_WINDOW_SIZE, @@ -119,8 +126,6 @@ class QuicHttpIntegrationTest : public HttpIntegrationTest, HttpIntegrationTest::makeRawHttpConnection(std::move(conn), http2_options); if (!codec->disconnected()) { codec->setCodecClientCallbacks(client_codec_callback_); - EXPECT_EQ(transport_socket_factory_->clientContextConfig().serverNameIndication(), - codec->connection()->requestedServerName()); } return codec; } @@ -240,6 +245,8 @@ TEST_P(QuicHttpIntegrationTest, ZeroRtt) { initialize(); // Start the first connection. codec_client_ = makeHttpConnection(makeClientConnection((lookupPort("http")))); + EXPECT_EQ(transport_socket_factory_->clientContextConfig().serverNameIndication(), + codec_client_->connection()->requestedServerName()); // Send a complete request on the first connection. 
auto response1 = codec_client_->makeHeaderOnlyRequest(default_request_headers_); waitForNextUpstreamRequest(0); @@ -436,5 +443,187 @@ TEST_P(QuicHttpIntegrationTest, ResetRequestWithInvalidCharacter) { ASSERT_TRUE(response->waitForReset()); } +class QuicInplaceLdsIntegrationTest : public QuicHttpIntegrationTest { +public: + void inplaceInitialize(bool add_default_filter_chain = false) { + autonomous_upstream_ = true; + setUpstreamCount(2); + + config_helper_.addConfigModifier([add_default_filter_chain]( + envoy::config::bootstrap::v3::Bootstrap& bootstrap) { + auto* filter_chain_0 = + bootstrap.mutable_static_resources()->mutable_listeners(0)->mutable_filter_chains(0); + *filter_chain_0->mutable_filter_chain_match()->mutable_server_names()->Add() = "www.lyft.com"; + auto* filter_chain_1 = bootstrap.mutable_static_resources() + ->mutable_listeners(0) + ->mutable_filter_chains() + ->Add(); + filter_chain_1->MergeFrom(*filter_chain_0); + + // filter chain 1 route to cluster_1 + *filter_chain_1->mutable_filter_chain_match()->mutable_server_names(0) = "lyft.com"; + + filter_chain_0->set_name("filter_chain_0"); + filter_chain_1->set_name("filter_chain_1"); + + auto* config_blob = filter_chain_1->mutable_filters(0)->mutable_typed_config(); + + ASSERT_TRUE(config_blob->Is()); + auto hcm_config = MessageUtil::anyConvert< + envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager>( + *config_blob); + hcm_config.mutable_route_config() + ->mutable_virtual_hosts(0) + ->mutable_routes(0) + ->mutable_route() + ->set_cluster("cluster_1"); + config_blob->PackFrom(hcm_config); + bootstrap.mutable_static_resources()->mutable_clusters()->Add()->MergeFrom( + *bootstrap.mutable_static_resources()->mutable_clusters(0)); + bootstrap.mutable_static_resources()->mutable_clusters(1)->set_name("cluster_1"); + + if (add_default_filter_chain) { + auto default_filter_chain = bootstrap.mutable_static_resources() + ->mutable_listeners(0) + 
->mutable_default_filter_chain(); + default_filter_chain->MergeFrom(*filter_chain_0); + default_filter_chain->set_name("filter_chain_default"); + } + }); + + QuicHttpIntegrationTest::initialize(); + } + + void makeRequestAndWaitForResponse(IntegrationCodecClient& codec_client) { + IntegrationStreamDecoderPtr response = + codec_client.makeHeaderOnlyRequest(default_request_headers_); + + ASSERT_TRUE(response->waitForEndStream()); + EXPECT_TRUE(response->complete()); + EXPECT_EQ("200", response->headers().getStatusValue()); + EXPECT_FALSE(codec_client.sawGoAway()); + } +}; + +INSTANTIATE_TEST_SUITE_P(QuicHttpIntegrationTests, QuicInplaceLdsIntegrationTest, + testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), + TestUtility::ipTestParamsToString); + +TEST_P(QuicInplaceLdsIntegrationTest, ReloadConfigUpdateNonDefaultFilterChain) { + inplaceInitialize(/*add_default_filter_chain=*/false); + + auto codec_client_0 = + makeHttpConnection(makeClientConnectionWithHost(lookupPort("http"), "www.lyft.com")); + auto codec_client_1 = + makeHttpConnection(makeClientConnectionWithHost(lookupPort("http"), "lyft.com")); + + // Remove filter_chain_1. 
+ ConfigHelper new_config_helper( + version_, *api_, MessageUtil::getJsonStringFromMessageOrDie(config_helper_.bootstrap())); + new_config_helper.addConfigModifier( + [&](envoy::config::bootstrap::v3::Bootstrap& bootstrap) -> void { + auto* listener = bootstrap.mutable_static_resources()->mutable_listeners(0); + listener->mutable_filter_chains()->RemoveLast(); + }); + + new_config_helper.setLds("1"); + test_server_->waitForCounterGe("listener_manager.listener_in_place_updated", 1); + test_server_->waitForGaugeGe("listener_manager.total_filter_chains_draining", 1); + test_server_->waitForGaugeEq("listener_manager.total_filter_chains_draining", 0); + makeRequestAndWaitForResponse(*codec_client_0); + EXPECT_TRUE(codec_client_1->sawGoAway()); + codec_client_1->close(); + + auto codec_client_2 = + makeHttpConnection(makeClientConnectionWithHost(lookupPort("http"), "www.lyft.com")); + makeRequestAndWaitForResponse(*codec_client_2); + codec_client_2->close(); + + // Update filter chain again to add back filter_chain_1. + config_helper_.setLds("1"); + test_server_->waitForCounterGe("listener_manager.listener_in_place_updated", 2); + test_server_->waitForCounterGe("listener_manager.listener_create_success", 3); + + auto codec_client_3 = + makeHttpConnection(makeClientConnectionWithHost(lookupPort("http"), "lyft.com")); + makeRequestAndWaitForResponse(*codec_client_3); + makeRequestAndWaitForResponse(*codec_client_0); + codec_client_0->close(); + codec_client_3->close(); +} + +// Verify that the connection received GO_AWAY after its filter chain gets deleted during the +// listener update. +TEST_P(QuicInplaceLdsIntegrationTest, ReloadConfigUpdateDefaultFilterChain) { + inplaceInitialize(/*add_default_filter_chain=*/true); + + auto codec_client_0 = + makeHttpConnection(makeClientConnectionWithHost(lookupPort("http"), "www.lyft.com")); + + // Remove filter_chain_1. 
+ ConfigHelper new_config_helper( + version_, *api_, MessageUtil::getJsonStringFromMessageOrDie(config_helper_.bootstrap())); + new_config_helper.addConfigModifier( + [&](envoy::config::bootstrap::v3::Bootstrap& bootstrap) -> void { + auto* listener = bootstrap.mutable_static_resources()->mutable_listeners(0); + listener->mutable_filter_chains()->RemoveLast(); + }); + + new_config_helper.setLds("1"); + test_server_->waitForCounterGe("listener_manager.listener_in_place_updated", 1); + test_server_->waitForGaugeGe("listener_manager.total_filter_chains_draining", 1); + + test_server_->waitForGaugeEq("listener_manager.total_filter_chains_draining", 0); + // This connection should pick up the default filter chain. + auto codec_client_default = + makeHttpConnection(makeClientConnectionWithHost(lookupPort("http"), "lyft.com")); + makeRequestAndWaitForResponse(*codec_client_default); + makeRequestAndWaitForResponse(*codec_client_0); + + // Modify the default filter chain. + ConfigHelper new_config_helper1( + version_, *api_, MessageUtil::getJsonStringFromMessageOrDie(new_config_helper.bootstrap())); + new_config_helper1.addConfigModifier([&](envoy::config::bootstrap::v3::Bootstrap& bootstrap) + -> void { + auto default_filter_chain = + bootstrap.mutable_static_resources()->mutable_listeners(0)->mutable_default_filter_chain(); + default_filter_chain->set_name("default_filter_chain_v3"); + }); + + new_config_helper1.setLds("1"); + test_server_->waitForCounterGe("listener_manager.listener_in_place_updated", 2); + test_server_->waitForGaugeGe("listener_manager.total_filter_chains_draining", 1); + test_server_->waitForGaugeEq("listener_manager.total_filter_chains_draining", 0); + + makeRequestAndWaitForResponse(*codec_client_0); + EXPECT_TRUE(codec_client_default->sawGoAway()); + codec_client_default->close(); + + // This connection should pick up the new default filter chain. 
+ auto codec_client_1 = + makeHttpConnection(makeClientConnectionWithHost(lookupPort("http"), "lyft.com")); + makeRequestAndWaitForResponse(*codec_client_1); + + // Remove the default filter chain. + ConfigHelper new_config_helper2( + version_, *api_, MessageUtil::getJsonStringFromMessageOrDie(new_config_helper1.bootstrap())); + new_config_helper2.addConfigModifier( + [&](envoy::config::bootstrap::v3::Bootstrap& bootstrap) -> void { + auto* listener = bootstrap.mutable_static_resources()->mutable_listeners(0); + listener->clear_default_filter_chain(); + }); + + new_config_helper2.setLds("1"); + test_server_->waitForCounterGe("listener_manager.listener_in_place_updated", 3); + test_server_->waitForGaugeGe("listener_manager.total_filter_chains_draining", 1); + test_server_->waitForGaugeEq("listener_manager.total_filter_chains_draining", 0); + + makeRequestAndWaitForResponse(*codec_client_0); + codec_client_0->close(); + EXPECT_TRUE(codec_client_1->sawGoAway()); + codec_client_1->close(); +} + } // namespace Quic } // namespace Envoy diff --git a/test/server/connection_handler_test.cc b/test/server/connection_handler_test.cc index 442c38adc9999..5de495694bfc1 100644 --- a/test/server/connection_handler_test.cc +++ b/test/server/connection_handler_test.cc @@ -216,13 +216,13 @@ class ConnectionHandlerTest : public testing::Test, protected Logger::Loggablesocket_factory_, socketType()).WillOnce(Return(socket_type)); if (listener == nullptr) { // Expecting listener config in place update. // If so, dispatcher would not create new network listener. 
return listeners_.back().get(); } + EXPECT_CALL(listeners_.back()->socket_factory_, socketType()).WillOnce(Return(socket_type)); EXPECT_CALL(listeners_.back()->socket_factory_, getListenSocket(_)) .WillOnce(Return(listeners_.back()->socket_)); if (socket_type == Network::Socket::Type::Stream) { diff --git a/test/server/listener_manager_impl_test.cc b/test/server/listener_manager_impl_test.cc index 43f0e9b0269e7..8bf8e13880543 100644 --- a/test/server/listener_manager_impl_test.cc +++ b/test/server/listener_manager_impl_test.cc @@ -297,7 +297,6 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, UdpAddress) { port_value: 1234 } } - filter_chains: {} )EOF"; envoy::config::listener::v3::Listener listener_proto; EXPECT_TRUE(Protobuf::TextFormat::ParseFromString(proto_text, &listener_proto)); @@ -389,6 +388,22 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, BadFilterConfig) { EXPECT_THROW_WITH_REGEX(manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true), EnvoyException, "foo: Cannot find field"); } + +TEST_F(ListenerManagerImplWithRealFiltersTest, BadConnectionLessUdpConfigWithFilterChain) { + const std::string yaml = R"EOF( +address: + socket_address: + protocol: UDP + address: 127.0.0.1 + port_value: 1234 +filter_chains: {} + )EOF"; + + EXPECT_THROW_WITH_REGEX(manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true), + EnvoyException, + "1 filter chain\\(s\\) specified for connection-less UDP listener"); +} + class NonTerminalFilterFactory : public Configuration::NamedNetworkFilterConfigFactory { public: // Configuration::NamedNetworkFilterConfigFactory @@ -5102,9 +5117,9 @@ TEST_F(ListenerManagerImplForInPlaceFilterChainUpdateTest, TraditionalUpdateIfWo EXPECT_EQ(0, server_.stats_store_.counter("listener_manager.listener_in_place_updated").value()); } -// This case also verifies that listeners that share port but do not share socket type (TCP vs. 
UDP) +// This case verifies that listeners that share port but do not share socket type (TCP vs. UDP) // do not share a listener. -TEST_F(ListenerManagerImplForInPlaceFilterChainUpdateTest, TraditionalUpdateIfAnyListenerIsNotTcp) { +TEST_F(ListenerManagerImplForInPlaceFilterChainUpdateTest, TraditionalUpdateIfDifferentSocketType) { EXPECT_CALL(*worker_, start(_, _)); manager_->startWorkers(guard_dog_, callback_.AsStdFunction()); @@ -5117,12 +5132,15 @@ TEST_F(ListenerManagerImplForInPlaceFilterChainUpdateTest, TraditionalUpdateIfAn auto new_listener_proto = listener_proto; new_listener_proto.mutable_address()->mutable_socket_address()->set_protocol( envoy::config::core::v3::SocketAddress_Protocol::SocketAddress_Protocol_UDP); + EXPECT_CALL(server_.validation_context_, staticValidationVisitor()).Times(0); + EXPECT_CALL(server_.validation_context_, dynamicValidationVisitor()); + EXPECT_CALL(listener_factory_, createDrainManager_(_)); + EXPECT_THROW_WITH_MESSAGE(manager_->addOrUpdateListener(new_listener_proto, "", true), + EnvoyException, + "error adding listener '127.0.0.1:1234': 1 filter chain(s) specified " + "for connection-less UDP listener."); - ListenerHandle* listener_foo_update1 = expectListenerCreate(false, true); - expectUpdateToThenDrain(new_listener_proto, listener_foo, OptRef(), - ListenerComponentFactory::BindType::ReusePort); - expectRemove(new_listener_proto, listener_foo_update1, *listener_factory_.socket_); - + expectRemove(new_listener_proto, listener_foo, *listener_factory_.socket_); EXPECT_EQ(0UL, manager_->listeners().size()); EXPECT_EQ(0, server_.stats_store_.counter("listener_manager.listener_in_place_updated").value()); } @@ -5204,8 +5222,6 @@ TEST_F(ListenerManagerImplTest, UdpDefaultWriterConfig) { address: 127.0.0.1 protocol: UDP port_value: 1234 -filter_chains: - filters: [] )EOF"); manager_->addOrUpdateListener(listener, "", true); EXPECT_EQ(1U, manager_->listeners().size()); From 8ea36c442c018ce2f2b7095866b256608a6ba13d Mon Sep 17 
00:00:00 2001 From: zirain Date: Fri, 24 Sep 2021 09:44:16 +0800 Subject: [PATCH 108/121] add ratelimit log (#17902) Signed-off-by: zirain Signed-off-by: gayang --- source/extensions/filters/common/ratelimit/ratelimit_impl.cc | 4 +++- source/extensions/filters/http/ratelimit/ratelimit.cc | 2 ++ 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/source/extensions/filters/common/ratelimit/ratelimit_impl.cc b/source/extensions/filters/common/ratelimit/ratelimit_impl.cc index fbb8858dfd2fe..599442401f464 100644 --- a/source/extensions/filters/common/ratelimit/ratelimit_impl.cc +++ b/source/extensions/filters/common/ratelimit/ratelimit_impl.cc @@ -110,9 +110,11 @@ void GrpcClientImpl::onSuccess( callbacks_ = nullptr; } -void GrpcClientImpl::onFailure(Grpc::Status::GrpcStatus status, const std::string&, +void GrpcClientImpl::onFailure(Grpc::Status::GrpcStatus status, const std::string& msg, Tracing::Span&) { ASSERT(status != Grpc::Status::WellKnownGrpcStatus::Ok); + ENVOY_LOG_TO_LOGGER(Logger::Registry::getLog(Logger::Id::filter), debug, + "rate limit fail, status={} msg={}", status, msg); callbacks_->complete(LimitStatus::Error, nullptr, nullptr, nullptr, EMPTY_STRING, nullptr); callbacks_ = nullptr; } diff --git a/source/extensions/filters/http/ratelimit/ratelimit.cc b/source/extensions/filters/http/ratelimit/ratelimit.cc index 538c4f7425c04..c007471d7779c 100644 --- a/source/extensions/filters/http/ratelimit/ratelimit.cc +++ b/source/extensions/filters/http/ratelimit/ratelimit.cc @@ -159,6 +159,8 @@ void Filter::complete(Filters::Common::RateLimit::LimitStatus status, cluster_->statsScope().counterFromStatName(stat_names.ok_).inc(); break; case Filters::Common::RateLimit::LimitStatus::Error: + ENVOY_LOG_TO_LOGGER(Logger::Registry::getLog(Logger::Id::filter), debug, + "rate limit status, status={}", status); cluster_->statsScope().counterFromStatName(stat_names.error_).inc(); break; case Filters::Common::RateLimit::LimitStatus::OverLimit: From 
5388ccb5e7ce6751d179eeb3e1eea306e523c776 Mon Sep 17 00:00:00 2001 From: Jose Ulises Nino Rivera Date: Thu, 23 Sep 2021 18:52:30 -0700 Subject: [PATCH 109/121] dns cache: upgrade event (#18247) Signed-off-by: Jose Nino Signed-off-by: gayang --- .../extensions/common/dynamic_forward_proxy/dns_cache_impl.cc | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/source/extensions/common/dynamic_forward_proxy/dns_cache_impl.cc b/source/extensions/common/dynamic_forward_proxy/dns_cache_impl.cc index db0b51873e2fe..17e6f2b0a7cc8 100644 --- a/source/extensions/common/dynamic_forward_proxy/dns_cache_impl.cc +++ b/source/extensions/common/dynamic_forward_proxy/dns_cache_impl.cc @@ -282,7 +282,8 @@ void DnsCacheImpl::forceRefreshHosts() { if (primary_host.second->active_query_ == nullptr) { ASSERT(!primary_host.second->timeout_timer_->enabled()); primary_host.second->refresh_timer_->enableTimer(std::chrono::milliseconds(0), nullptr); - ENVOY_LOG(debug, "force refreshing host='{}'", primary_host.first); + ENVOY_LOG_EVENT(debug, "force_refresh_host", "force refreshing host='{}'", + primary_host.first); } } } From 90d7e99914d2ae55246cf4e4de06c433a74b3a19 Mon Sep 17 00:00:00 2001 From: Snow Pettersen Date: Thu, 23 Sep 2021 22:15:14 -0400 Subject: [PATCH 110/121] network: set failure reason for transport socket connect timeout (#18244) Signed-off-by: Snow Pettersen Signed-off-by: gayang --- source/common/network/connection_impl.cc | 1 + test/common/network/connection_impl_test.cc | 1 + 2 files changed, 2 insertions(+) diff --git a/source/common/network/connection_impl.cc b/source/common/network/connection_impl.cc index b5d3472ad8f57..edbd5adbe34a8 100644 --- a/source/common/network/connection_impl.cc +++ b/source/common/network/connection_impl.cc @@ -827,6 +827,7 @@ void ServerConnectionImpl::onTransportSocketConnectTimeout() { stream_info_.setConnectionTerminationDetails(kTransportSocketConnectTimeoutTerminationDetails); closeConnectionImmediately(); 
transport_socket_timeout_stat_->inc(); + failure_reason_ = "connect timeout"; } ClientConnectionImpl::ClientConnectionImpl( diff --git a/test/common/network/connection_impl_test.cc b/test/common/network/connection_impl_test.cc index a7169ee003047..b666ded243ef4 100644 --- a/test/common/network/connection_impl_test.cc +++ b/test/common/network/connection_impl_test.cc @@ -419,6 +419,7 @@ TEST_P(ConnectionImplTest, SetServerTransportSocketTimeout) { mock_timer->invokeCallback(); EXPECT_THAT(stream_info_.connectionTerminationDetails(), Optional(HasSubstr("transport socket timeout"))); + EXPECT_EQ(server_connection->transportFailureReason(), "connect timeout"); } TEST_P(ConnectionImplTest, SetServerTransportSocketTimeoutAfterConnect) { From 6914d2ddb35c0763bd85ffd9b751469a44a83a32 Mon Sep 17 00:00:00 2001 From: Yao Zengzeng Date: Fri, 24 Sep 2021 10:15:48 +0800 Subject: [PATCH 111/121] log: make log of listener filters consistent. (#18232) Signed-off-by: YaoZengzeng Signed-off-by: gayang --- source/extensions/filters/listener/original_dst/original_dst.cc | 2 +- .../filters/listener/proxy_protocol/proxy_protocol.cc | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/source/extensions/filters/listener/original_dst/original_dst.cc b/source/extensions/filters/listener/original_dst/original_dst.cc index ccb0f93a5ff06..9364b55c79765 100644 --- a/source/extensions/filters/listener/original_dst/original_dst.cc +++ b/source/extensions/filters/listener/original_dst/original_dst.cc @@ -17,7 +17,7 @@ Network::Address::InstanceConstSharedPtr OriginalDstFilter::getOriginalDst(Netwo } Network::FilterStatus OriginalDstFilter::onAccept(Network::ListenerFilterCallbacks& cb) { - ENVOY_LOG(debug, "original_dst: New connection accepted"); + ENVOY_LOG(debug, "original_dst: new connection accepted"); Network::ConnectionSocket& socket = cb.socket(); if (socket.addressType() == Network::Address::Type::Ip) { diff --git 
a/source/extensions/filters/listener/proxy_protocol/proxy_protocol.cc b/source/extensions/filters/listener/proxy_protocol/proxy_protocol.cc index d19be984e0fda..aed47748e9aed 100644 --- a/source/extensions/filters/listener/proxy_protocol/proxy_protocol.cc +++ b/source/extensions/filters/listener/proxy_protocol/proxy_protocol.cc @@ -64,7 +64,7 @@ const KeyValuePair* Config::isTlvTypeNeeded(uint8_t type) const { size_t Config::numberOfNeededTlvTypes() const { return tlv_types_.size(); } Network::FilterStatus Filter::onAccept(Network::ListenerFilterCallbacks& cb) { - ENVOY_LOG(debug, "proxy_protocol: New connection accepted"); + ENVOY_LOG(debug, "proxy_protocol: new connection accepted"); Network::ConnectionSocket& socket = cb.socket(); socket.ioHandle().initializeFileEvent( cb.dispatcher(), From e094ee9ed6b1743b8e4315e024a206c989b33ee8 Mon Sep 17 00:00:00 2001 From: alyssawilk Date: Fri, 24 Sep 2021 10:16:23 +0800 Subject: [PATCH 112/121] test: moving redirect extensions test out of core (#18214) Signed-off-by: Alyssa Wilk Signed-off-by: gayang --- .../allow_listed_routes/BUILD | 4 - .../internal_redirect/previous_routes/BUILD | 4 - .../internal_redirect/safe_cross_scheme/BUILD | 4 - test/extensions/internal_redirect/BUILD | 29 ++ .../redirect_extension_integration_test.cc | 299 ++++++++++++++++++ test/integration/BUILD | 6 - test/integration/redirect_integration_test.cc | 204 ------------ 7 files changed, 328 insertions(+), 222 deletions(-) create mode 100644 test/extensions/internal_redirect/BUILD create mode 100644 test/extensions/internal_redirect/redirect_extension_integration_test.cc diff --git a/source/extensions/internal_redirect/allow_listed_routes/BUILD b/source/extensions/internal_redirect/allow_listed_routes/BUILD index bb6a1f6091dbd..a149d39d42959 100644 --- a/source/extensions/internal_redirect/allow_listed_routes/BUILD +++ b/source/extensions/internal_redirect/allow_listed_routes/BUILD @@ -23,10 +23,6 @@ envoy_cc_extension( name = "config", srcs = 
["config.cc"], hdrs = ["config.h"], - # TODO(#9953) clean up by moving the redirect test to extensions. - extra_visibility = [ - "//test/integration:__subpackages__", - ], deps = [ ":allow_listed_routes_lib", "//envoy/registry", diff --git a/source/extensions/internal_redirect/previous_routes/BUILD b/source/extensions/internal_redirect/previous_routes/BUILD index 969d1ac9a13b6..d7f98d55be9dc 100644 --- a/source/extensions/internal_redirect/previous_routes/BUILD +++ b/source/extensions/internal_redirect/previous_routes/BUILD @@ -23,10 +23,6 @@ envoy_cc_extension( name = "config", srcs = ["config.cc"], hdrs = ["config.h"], - # TODO(#9953) clean up by moving the redirect test to extensions. - extra_visibility = [ - "//test/integration:__subpackages__", - ], deps = [ ":previous_routes_lib", "//envoy/registry", diff --git a/source/extensions/internal_redirect/safe_cross_scheme/BUILD b/source/extensions/internal_redirect/safe_cross_scheme/BUILD index bc464c310b148..7afb838b8ebe2 100644 --- a/source/extensions/internal_redirect/safe_cross_scheme/BUILD +++ b/source/extensions/internal_redirect/safe_cross_scheme/BUILD @@ -22,10 +22,6 @@ envoy_cc_extension( name = "config", srcs = ["config.cc"], hdrs = ["config.h"], - # TODO(#9953) clean up by moving the redirect test to extensions. 
- extra_visibility = [ - "//test/integration:__subpackages__", - ], deps = [ ":safe_cross_scheme_lib", "//envoy/registry", diff --git a/test/extensions/internal_redirect/BUILD b/test/extensions/internal_redirect/BUILD new file mode 100644 index 0000000000000..61bfc13947665 --- /dev/null +++ b/test/extensions/internal_redirect/BUILD @@ -0,0 +1,29 @@ +load( + "//bazel:envoy_build_system.bzl", + "envoy_cc_test", + "envoy_package", +) + +licenses(["notice"]) # Apache 2 + +envoy_package() + +envoy_cc_test( + name = "redirect_extension_integration_test", + srcs = [ + "redirect_extension_integration_test.cc", + ], + deps = [ + "//source/common/http:header_map_lib", + "//source/extensions/internal_redirect/allow_listed_routes:config", + "//source/extensions/internal_redirect/previous_routes:config", + "//source/extensions/internal_redirect/safe_cross_scheme:config", + "//test/integration:http_protocol_integration_lib", + "//test/test_common:utility_lib", + "@envoy_api//envoy/config/route/v3:pkg_cc_proto", + "@envoy_api//envoy/extensions/filters/network/http_connection_manager/v3:pkg_cc_proto", + "@envoy_api//envoy/extensions/internal_redirect/allow_listed_routes/v3:pkg_cc_proto", + "@envoy_api//envoy/extensions/internal_redirect/previous_routes/v3:pkg_cc_proto", + "@envoy_api//envoy/extensions/internal_redirect/safe_cross_scheme/v3:pkg_cc_proto", + ], +) diff --git a/test/extensions/internal_redirect/redirect_extension_integration_test.cc b/test/extensions/internal_redirect/redirect_extension_integration_test.cc new file mode 100644 index 0000000000000..3666eb7104ebb --- /dev/null +++ b/test/extensions/internal_redirect/redirect_extension_integration_test.cc @@ -0,0 +1,299 @@ +#include "envoy/config/route/v3/route_components.pb.h" +#include "envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.h" +#include "envoy/extensions/internal_redirect/allow_listed_routes/v3/allow_listed_routes_config.pb.h" +#include 
"envoy/extensions/internal_redirect/previous_routes/v3/previous_routes_config.pb.h" +#include "envoy/extensions/internal_redirect/safe_cross_scheme/v3/safe_cross_scheme_config.pb.h" + +#include "test/integration/http_protocol_integration.h" + +namespace Envoy { + +using ::testing::HasSubstr; + +namespace { +constexpr char kTestHeaderKey[] = "test-header"; +} // namespace + +class RedirectExtensionIntegrationTest : public HttpProtocolIntegrationTest { +public: + void initialize() override { + setMaxRequestHeadersKb(60); + setMaxRequestHeadersCount(100); + envoy::config::route::v3::RetryPolicy retry_policy; + + auto pass_through = config_helper_.createVirtualHost("pass.through.internal.redirect"); + config_helper_.addVirtualHost(pass_through); + + auto handle = config_helper_.createVirtualHost("handle.internal.redirect"); + handle.mutable_routes(0)->set_name("redirect"); + handle.mutable_routes(0)->mutable_route()->mutable_internal_redirect_policy(); + config_helper_.addVirtualHost(handle); + + auto handle_max_3_hop = + config_helper_.createVirtualHost("handle.internal.redirect.max.three.hop"); + handle_max_3_hop.mutable_routes(0)->set_name("max_three_hop"); + handle_max_3_hop.mutable_routes(0)->mutable_route()->mutable_internal_redirect_policy(); + handle_max_3_hop.mutable_routes(0) + ->mutable_route() + ->mutable_internal_redirect_policy() + ->mutable_max_internal_redirects() + ->set_value(3); + config_helper_.addVirtualHost(handle_max_3_hop); + + HttpProtocolIntegrationTest::initialize(); + } + +protected: + // Returns the next stream that the fake upstream receives. 
+ FakeStreamPtr waitForNextStream() { + FakeStreamPtr new_stream = nullptr; + auto wait_new_stream_fn = [this, + &new_stream](FakeHttpConnectionPtr& connection) -> AssertionResult { + AssertionResult result = + connection->waitForNewStream(*dispatcher_, new_stream, std::chrono::milliseconds(50)); + if (result) { + ASSERT(new_stream); + } + return result; + }; + + // Using a while loop to poll for new connections and new streams on all + // connections because connection reuse may or may not be triggered. + while (new_stream == nullptr) { + FakeHttpConnectionPtr new_connection = nullptr; + AssertionResult result = fake_upstreams_[0]->waitForHttpConnection( + *dispatcher_, new_connection, std::chrono::milliseconds(50)); + if (result) { + ASSERT(new_connection); + upstream_connections_.push_back(std::move(new_connection)); + } + + for (auto& connection : upstream_connections_) { + result = wait_new_stream_fn(connection); + if (result) { + break; + } + } + } + + AssertionResult result = new_stream->waitForEndStream(*dispatcher_); + ASSERT(result); + return new_stream; + } + + Http::TestResponseHeaderMapImpl redirect_response_{{":status", "302"}, + {"content-length", "0"}, + {"location", "http://authority2/new/url"}, + // Test header added to confirm that response + // headers are populated for internal redirects + {kTestHeaderKey, "test-header-value"}}; + Envoy::Http::LowerCaseString test_header_key_{kTestHeaderKey}; + std::vector upstream_connections_; +}; + +TEST_P(RedirectExtensionIntegrationTest, InternalRedirectPreventedByPreviousRoutesPredicate) { + useAccessLog("%RESPONSE_CODE% %RESPONSE_CODE_DETAILS% %RESP(test-header)%"); + auto handle_prevent_repeated_target = + config_helper_.createVirtualHost("handle.internal.redirect.no.repeated.target"); + auto* internal_redirect_policy = handle_prevent_repeated_target.mutable_routes(0) + ->mutable_route() + ->mutable_internal_redirect_policy(); + internal_redirect_policy->mutable_max_internal_redirects()->set_value(10); 
+ envoy::extensions::internal_redirect::previous_routes::v3::PreviousRoutesConfig + previous_routes_config; + auto* predicate = internal_redirect_policy->add_predicates(); + predicate->set_name("previous_routes"); + predicate->mutable_typed_config()->PackFrom(previous_routes_config); + config_helper_.addVirtualHost(handle_prevent_repeated_target); + + // Validate that header sanitization is only called once. + config_helper_.addConfigModifier( + [](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager& + hcm) { hcm.set_via("via_value"); }); + initialize(); + + codec_client_ = makeHttpConnection(lookupPort("http")); + + default_request_headers_.setHost("handle.internal.redirect.no.repeated.target"); + IntegrationStreamDecoderPtr response = + codec_client_->makeHeaderOnlyRequest(default_request_headers_); + + auto first_request = waitForNextStream(); + // Redirect to another route + redirect_response_.setLocation("http://handle.internal.redirect.max.three.hop/random/path"); + first_request->encodeHeaders(redirect_response_, true); + + auto second_request = waitForNextStream(); + // Redirect back to the original route. + redirect_response_.setLocation("http://handle.internal.redirect.no.repeated.target/another/path"); + second_request->encodeHeaders(redirect_response_, true); + + auto third_request = waitForNextStream(); + // Redirect to the same route as the first redirect. This should fail. 
+ redirect_response_.setLocation("http://handle.internal.redirect.max.three.hop/yet/another/path"); + third_request->encodeHeaders(redirect_response_, true); + + ASSERT_TRUE(response->waitForEndStream()); + ASSERT_TRUE(response->complete()); + EXPECT_EQ("302", response->headers().getStatusValue()); + EXPECT_EQ("http://handle.internal.redirect.max.three.hop/yet/another/path", + response->headers().getLocationValue()); + EXPECT_EQ(2, test_server_->counter("cluster.cluster_0.upstream_internal_redirect_succeeded_total") + ->value()); + EXPECT_EQ( + 1, + test_server_->counter("http.config_test.passthrough_internal_redirect_predicate")->value()); + EXPECT_EQ(1, test_server_->counter("http.config_test.downstream_rq_3xx")->value()); + EXPECT_THAT(waitForAccessLog(access_log_name_, 0), + HasSubstr("302 internal_redirect test-header-value\n")); + EXPECT_THAT(waitForAccessLog(access_log_name_, 1), + HasSubstr("302 internal_redirect test-header-value\n")); + EXPECT_THAT(waitForAccessLog(access_log_name_, 2), + HasSubstr("302 via_upstream test-header-value\n")); + EXPECT_EQ("test-header-value", + response->headers().get(test_header_key_)[0]->value().getStringView()); +} + +TEST_P(RedirectExtensionIntegrationTest, InternalRedirectPreventedByAllowListedRoutesPredicate) { + useAccessLog("%RESPONSE_CODE% %RESPONSE_CODE_DETAILS% %RESP(test-header)%"); + auto handle_allow_listed_redirect_route = + config_helper_.createVirtualHost("handle.internal.redirect.only.allow.listed.target"); + auto* internal_redirect_policy = handle_allow_listed_redirect_route.mutable_routes(0) + ->mutable_route() + ->mutable_internal_redirect_policy(); + + auto* allow_listed_routes_predicate = internal_redirect_policy->add_predicates(); + allow_listed_routes_predicate->set_name("allow_listed_routes"); + envoy::extensions::internal_redirect::allow_listed_routes::v3::AllowListedRoutesConfig + allow_listed_routes_config; + *allow_listed_routes_config.add_allowed_route_names() = "max_three_hop"; + 
allow_listed_routes_predicate->mutable_typed_config()->PackFrom(allow_listed_routes_config); + + internal_redirect_policy->mutable_max_internal_redirects()->set_value(10); + + config_helper_.addVirtualHost(handle_allow_listed_redirect_route); + + // Validate that header sanitization is only called once. + config_helper_.addConfigModifier( + [](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager& + hcm) { hcm.set_via("via_value"); }); + initialize(); + + codec_client_ = makeHttpConnection(lookupPort("http")); + + default_request_headers_.setHost("handle.internal.redirect.only.allow.listed.target"); + IntegrationStreamDecoderPtr response = + codec_client_->makeHeaderOnlyRequest(default_request_headers_); + + auto first_request = waitForNextStream(); + // Redirect to another route + redirect_response_.setLocation("http://handle.internal.redirect.max.three.hop/random/path"); + first_request->encodeHeaders(redirect_response_, true); + + auto second_request = waitForNextStream(); + // Redirect back to the original route. + redirect_response_.setLocation( + "http://handle.internal.redirect.only.allow.listed.target/another/path"); + second_request->encodeHeaders(redirect_response_, true); + + auto third_request = waitForNextStream(); + // Redirect to the non-allow-listed route. This should fail. 
+ redirect_response_.setLocation("http://handle.internal.redirect/yet/another/path"); + third_request->encodeHeaders(redirect_response_, true); + + ASSERT_TRUE(response->waitForEndStream()); + ASSERT_TRUE(response->complete()); + EXPECT_EQ("302", response->headers().getStatusValue()); + EXPECT_EQ("http://handle.internal.redirect/yet/another/path", + response->headers().getLocationValue()); + EXPECT_EQ(2, test_server_->counter("cluster.cluster_0.upstream_internal_redirect_succeeded_total") + ->value()); + EXPECT_EQ( + 1, + test_server_->counter("http.config_test.passthrough_internal_redirect_predicate")->value()); + EXPECT_EQ(1, test_server_->counter("http.config_test.downstream_rq_3xx")->value()); + EXPECT_THAT(waitForAccessLog(access_log_name_, 0), + HasSubstr("302 internal_redirect test-header-value\n")); + EXPECT_THAT(waitForAccessLog(access_log_name_, 1), + HasSubstr("302 internal_redirect test-header-value\n")); + EXPECT_THAT(waitForAccessLog(access_log_name_, 2), + HasSubstr("302 via_upstream test-header-value\n")); + EXPECT_EQ("test-header-value", + response->headers().get(test_header_key_)[0]->value().getStringView()); +} + +TEST_P(RedirectExtensionIntegrationTest, InternalRedirectPreventedBySafeCrossSchemePredicate) { + useAccessLog("%RESPONSE_CODE% %RESPONSE_CODE_DETAILS% %RESP(test-header)%"); + auto handle_safe_cross_scheme_route = config_helper_.createVirtualHost( + "handle.internal.redirect.only.allow.safe.cross.scheme.redirect"); + auto* internal_redirect_policy = handle_safe_cross_scheme_route.mutable_routes(0) + ->mutable_route() + ->mutable_internal_redirect_policy(); + + internal_redirect_policy->set_allow_cross_scheme_redirect(true); + + auto* predicate = internal_redirect_policy->add_predicates(); + predicate->set_name("safe_cross_scheme_predicate"); + envoy::extensions::internal_redirect::safe_cross_scheme::v3::SafeCrossSchemeConfig + predicate_config; + predicate->mutable_typed_config()->PackFrom(predicate_config); + + 
internal_redirect_policy->mutable_max_internal_redirects()->set_value(10); + + config_helper_.addVirtualHost(handle_safe_cross_scheme_route); + + // Validate that header sanitization is only called once. + config_helper_.addConfigModifier( + [](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager& + hcm) { hcm.set_via("via_value"); }); + initialize(); + + codec_client_ = makeHttpConnection(lookupPort("http")); + + default_request_headers_.setHost( + "handle.internal.redirect.only.allow.safe.cross.scheme.redirect"); + IntegrationStreamDecoderPtr response = + codec_client_->makeHeaderOnlyRequest(default_request_headers_); + + auto first_request = waitForNextStream(); + // Redirect to another route + redirect_response_.setLocation("http://handle.internal.redirect.max.three.hop/random/path"); + first_request->encodeHeaders(redirect_response_, true); + + auto second_request = waitForNextStream(); + // Redirect back to the original route. + redirect_response_.setLocation( + "http://handle.internal.redirect.only.allow.safe.cross.scheme.redirect/another/path"); + second_request->encodeHeaders(redirect_response_, true); + + auto third_request = waitForNextStream(); + // Redirect to https target. This should fail. 
+ redirect_response_.setLocation("https://handle.internal.redirect/yet/another/path"); + third_request->encodeHeaders(redirect_response_, true); + + ASSERT_TRUE(response->waitForEndStream()); + ASSERT_TRUE(response->complete()); + EXPECT_EQ("302", response->headers().getStatusValue()); + EXPECT_EQ("https://handle.internal.redirect/yet/another/path", + response->headers().getLocationValue()); + EXPECT_EQ(2, test_server_->counter("cluster.cluster_0.upstream_internal_redirect_succeeded_total") + ->value()); + EXPECT_EQ( + 1, + test_server_->counter("http.config_test.passthrough_internal_redirect_predicate")->value()); + EXPECT_EQ(1, test_server_->counter("http.config_test.downstream_rq_3xx")->value()); + EXPECT_THAT(waitForAccessLog(access_log_name_, 0), + HasSubstr("302 internal_redirect test-header-value\n")); + EXPECT_THAT(waitForAccessLog(access_log_name_, 1), + HasSubstr("302 internal_redirect test-header-value\n")); + EXPECT_THAT(waitForAccessLog(access_log_name_, 2), + HasSubstr("302 via_upstream test-header-value\n")); + EXPECT_EQ("test-header-value", + response->headers().get(test_header_key_)[0]->value().getStringView()); +} + +INSTANTIATE_TEST_SUITE_P(Protocols, RedirectExtensionIntegrationTest, + testing::ValuesIn(HttpProtocolIntegrationTest::getProtocolTestParams()), + HttpProtocolIntegrationTest::protocolTestParamsToString); + +} // namespace Envoy diff --git a/test/integration/BUILD b/test/integration/BUILD index 060cda5cdb94c..efa5e78a9abd7 100644 --- a/test/integration/BUILD +++ b/test/integration/BUILD @@ -969,15 +969,9 @@ envoy_cc_test( deps = [ ":http_protocol_integration_lib", "//source/common/http:header_map_lib", - "//source/extensions/internal_redirect/allow_listed_routes:config", - "//source/extensions/internal_redirect/previous_routes:config", - "//source/extensions/internal_redirect/safe_cross_scheme:config", "//test/test_common:utility_lib", "@envoy_api//envoy/config/route/v3:pkg_cc_proto", 
"@envoy_api//envoy/extensions/filters/network/http_connection_manager/v3:pkg_cc_proto", - "@envoy_api//envoy/extensions/internal_redirect/allow_listed_routes/v3:pkg_cc_proto", - "@envoy_api//envoy/extensions/internal_redirect/previous_routes/v3:pkg_cc_proto", - "@envoy_api//envoy/extensions/internal_redirect/safe_cross_scheme/v3:pkg_cc_proto", ], ) diff --git a/test/integration/redirect_integration_test.cc b/test/integration/redirect_integration_test.cc index 6c9f983438f28..a88b83a1917b4 100644 --- a/test/integration/redirect_integration_test.cc +++ b/test/integration/redirect_integration_test.cc @@ -1,8 +1,5 @@ #include "envoy/config/route/v3/route_components.pb.h" #include "envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.h" -#include "envoy/extensions/internal_redirect/allow_listed_routes/v3/allow_listed_routes_config.pb.h" -#include "envoy/extensions/internal_redirect/previous_routes/v3/previous_routes_config.pb.h" -#include "envoy/extensions/internal_redirect/safe_cross_scheme/v3/safe_cross_scheme_config.pb.h" #include "test/integration/http_protocol_integration.h" @@ -578,207 +575,6 @@ TEST_P(RedirectIntegrationTest, InternalRedirectToDestinationWithResponseBody) { EXPECT_THAT(waitForAccessLog(access_log_name_, 1), HasSubstr("200 via_upstream -\n")); } -TEST_P(RedirectIntegrationTest, InternalRedirectPreventedByPreviousRoutesPredicate) { - useAccessLog("%RESPONSE_CODE% %RESPONSE_CODE_DETAILS% %RESP(test-header)%"); - auto handle_prevent_repeated_target = - config_helper_.createVirtualHost("handle.internal.redirect.no.repeated.target"); - auto* internal_redirect_policy = handle_prevent_repeated_target.mutable_routes(0) - ->mutable_route() - ->mutable_internal_redirect_policy(); - internal_redirect_policy->mutable_max_internal_redirects()->set_value(10); - envoy::extensions::internal_redirect::previous_routes::v3::PreviousRoutesConfig - previous_routes_config; - auto* predicate = internal_redirect_policy->add_predicates(); 
- predicate->set_name("previous_routes"); - predicate->mutable_typed_config()->PackFrom(previous_routes_config); - config_helper_.addVirtualHost(handle_prevent_repeated_target); - - // Validate that header sanitization is only called once. - config_helper_.addConfigModifier( - [](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager& - hcm) { hcm.set_via("via_value"); }); - initialize(); - - codec_client_ = makeHttpConnection(lookupPort("http")); - - default_request_headers_.setHost("handle.internal.redirect.no.repeated.target"); - IntegrationStreamDecoderPtr response = - codec_client_->makeHeaderOnlyRequest(default_request_headers_); - - auto first_request = waitForNextStream(); - // Redirect to another route - redirect_response_.setLocation("http://handle.internal.redirect.max.three.hop/random/path"); - first_request->encodeHeaders(redirect_response_, true); - - auto second_request = waitForNextStream(); - // Redirect back to the original route. - redirect_response_.setLocation("http://handle.internal.redirect.no.repeated.target/another/path"); - second_request->encodeHeaders(redirect_response_, true); - - auto third_request = waitForNextStream(); - // Redirect to the same route as the first redirect. This should fail. 
- redirect_response_.setLocation("http://handle.internal.redirect.max.three.hop/yet/another/path"); - third_request->encodeHeaders(redirect_response_, true); - - ASSERT_TRUE(response->waitForEndStream()); - ASSERT_TRUE(response->complete()); - EXPECT_EQ("302", response->headers().getStatusValue()); - EXPECT_EQ("http://handle.internal.redirect.max.three.hop/yet/another/path", - response->headers().getLocationValue()); - EXPECT_EQ(2, test_server_->counter("cluster.cluster_0.upstream_internal_redirect_succeeded_total") - ->value()); - EXPECT_EQ( - 1, - test_server_->counter("http.config_test.passthrough_internal_redirect_predicate")->value()); - EXPECT_EQ(1, test_server_->counter("http.config_test.downstream_rq_3xx")->value()); - EXPECT_THAT(waitForAccessLog(access_log_name_, 0), - HasSubstr("302 internal_redirect test-header-value\n")); - EXPECT_THAT(waitForAccessLog(access_log_name_, 1), - HasSubstr("302 internal_redirect test-header-value\n")); - EXPECT_THAT(waitForAccessLog(access_log_name_, 2), - HasSubstr("302 via_upstream test-header-value\n")); - EXPECT_EQ("test-header-value", - response->headers().get(test_header_key_)[0]->value().getStringView()); -} - -TEST_P(RedirectIntegrationTest, InternalRedirectPreventedByAllowListedRoutesPredicate) { - useAccessLog("%RESPONSE_CODE% %RESPONSE_CODE_DETAILS% %RESP(test-header)%"); - auto handle_allow_listed_redirect_route = - config_helper_.createVirtualHost("handle.internal.redirect.only.allow.listed.target"); - auto* internal_redirect_policy = handle_allow_listed_redirect_route.mutable_routes(0) - ->mutable_route() - ->mutable_internal_redirect_policy(); - - auto* allow_listed_routes_predicate = internal_redirect_policy->add_predicates(); - allow_listed_routes_predicate->set_name("allow_listed_routes"); - envoy::extensions::internal_redirect::allow_listed_routes::v3::AllowListedRoutesConfig - allow_listed_routes_config; - *allow_listed_routes_config.add_allowed_route_names() = "max_three_hop"; - 
allow_listed_routes_predicate->mutable_typed_config()->PackFrom(allow_listed_routes_config); - - internal_redirect_policy->mutable_max_internal_redirects()->set_value(10); - - config_helper_.addVirtualHost(handle_allow_listed_redirect_route); - - // Validate that header sanitization is only called once. - config_helper_.addConfigModifier( - [](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager& - hcm) { hcm.set_via("via_value"); }); - initialize(); - - codec_client_ = makeHttpConnection(lookupPort("http")); - - default_request_headers_.setHost("handle.internal.redirect.only.allow.listed.target"); - IntegrationStreamDecoderPtr response = - codec_client_->makeHeaderOnlyRequest(default_request_headers_); - - auto first_request = waitForNextStream(); - // Redirect to another route - redirect_response_.setLocation("http://handle.internal.redirect.max.three.hop/random/path"); - first_request->encodeHeaders(redirect_response_, true); - - auto second_request = waitForNextStream(); - // Redirect back to the original route. - redirect_response_.setLocation( - "http://handle.internal.redirect.only.allow.listed.target/another/path"); - second_request->encodeHeaders(redirect_response_, true); - - auto third_request = waitForNextStream(); - // Redirect to the non-allow-listed route. This should fail. 
- redirect_response_.setLocation("http://handle.internal.redirect/yet/another/path"); - third_request->encodeHeaders(redirect_response_, true); - - ASSERT_TRUE(response->waitForEndStream()); - ASSERT_TRUE(response->complete()); - EXPECT_EQ("302", response->headers().getStatusValue()); - EXPECT_EQ("http://handle.internal.redirect/yet/another/path", - response->headers().getLocationValue()); - EXPECT_EQ(2, test_server_->counter("cluster.cluster_0.upstream_internal_redirect_succeeded_total") - ->value()); - EXPECT_EQ( - 1, - test_server_->counter("http.config_test.passthrough_internal_redirect_predicate")->value()); - EXPECT_EQ(1, test_server_->counter("http.config_test.downstream_rq_3xx")->value()); - EXPECT_THAT(waitForAccessLog(access_log_name_, 0), - HasSubstr("302 internal_redirect test-header-value\n")); - EXPECT_THAT(waitForAccessLog(access_log_name_, 1), - HasSubstr("302 internal_redirect test-header-value\n")); - EXPECT_THAT(waitForAccessLog(access_log_name_, 2), - HasSubstr("302 via_upstream test-header-value\n")); - EXPECT_EQ("test-header-value", - response->headers().get(test_header_key_)[0]->value().getStringView()); -} - -TEST_P(RedirectIntegrationTest, InternalRedirectPreventedBySafeCrossSchemePredicate) { - useAccessLog("%RESPONSE_CODE% %RESPONSE_CODE_DETAILS% %RESP(test-header)%"); - auto handle_safe_cross_scheme_route = config_helper_.createVirtualHost( - "handle.internal.redirect.only.allow.safe.cross.scheme.redirect"); - auto* internal_redirect_policy = handle_safe_cross_scheme_route.mutable_routes(0) - ->mutable_route() - ->mutable_internal_redirect_policy(); - - internal_redirect_policy->set_allow_cross_scheme_redirect(true); - - auto* predicate = internal_redirect_policy->add_predicates(); - predicate->set_name("safe_cross_scheme_predicate"); - envoy::extensions::internal_redirect::safe_cross_scheme::v3::SafeCrossSchemeConfig - predicate_config; - predicate->mutable_typed_config()->PackFrom(predicate_config); - - 
internal_redirect_policy->mutable_max_internal_redirects()->set_value(10); - - config_helper_.addVirtualHost(handle_safe_cross_scheme_route); - - // Validate that header sanitization is only called once. - config_helper_.addConfigModifier( - [](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager& - hcm) { hcm.set_via("via_value"); }); - initialize(); - - codec_client_ = makeHttpConnection(lookupPort("http")); - - default_request_headers_.setHost( - "handle.internal.redirect.only.allow.safe.cross.scheme.redirect"); - IntegrationStreamDecoderPtr response = - codec_client_->makeHeaderOnlyRequest(default_request_headers_); - - auto first_request = waitForNextStream(); - // Redirect to another route - redirect_response_.setLocation("http://handle.internal.redirect.max.three.hop/random/path"); - first_request->encodeHeaders(redirect_response_, true); - - auto second_request = waitForNextStream(); - // Redirect back to the original route. - redirect_response_.setLocation( - "http://handle.internal.redirect.only.allow.safe.cross.scheme.redirect/another/path"); - second_request->encodeHeaders(redirect_response_, true); - - auto third_request = waitForNextStream(); - // Redirect to https target. This should fail. 
- redirect_response_.setLocation("https://handle.internal.redirect/yet/another/path"); - third_request->encodeHeaders(redirect_response_, true); - - ASSERT_TRUE(response->waitForEndStream()); - ASSERT_TRUE(response->complete()); - EXPECT_EQ("302", response->headers().getStatusValue()); - EXPECT_EQ("https://handle.internal.redirect/yet/another/path", - response->headers().getLocationValue()); - EXPECT_EQ(2, test_server_->counter("cluster.cluster_0.upstream_internal_redirect_succeeded_total") - ->value()); - EXPECT_EQ( - 1, - test_server_->counter("http.config_test.passthrough_internal_redirect_predicate")->value()); - EXPECT_EQ(1, test_server_->counter("http.config_test.downstream_rq_3xx")->value()); - EXPECT_THAT(waitForAccessLog(access_log_name_, 0), - HasSubstr("302 internal_redirect test-header-value\n")); - EXPECT_THAT(waitForAccessLog(access_log_name_, 1), - HasSubstr("302 internal_redirect test-header-value\n")); - EXPECT_THAT(waitForAccessLog(access_log_name_, 2), - HasSubstr("302 via_upstream test-header-value\n")); - EXPECT_EQ("test-header-value", - response->headers().get(test_header_key_)[0]->value().getStringView()); -} - TEST_P(RedirectIntegrationTest, InvalidRedirect) { useAccessLog("%RESPONSE_CODE% %RESPONSE_CODE_DETAILS% %RESP(test-header)%"); initialize(); From 0281416f4fca86c7ea153dfdd6e7af820ca321b3 Mon Sep 17 00:00:00 2001 From: John Esmet Date: Fri, 24 Sep 2021 00:12:17 -0400 Subject: [PATCH 113/121] ext_authz: add dynamic_metadata_matchers and use them in the ext_authz HTTP implementation (#17895) This allows for dynamic metadata when using an HTTP authorization service. Currently, it is only possible to set dynamic metadata using a gRPC authorization service. Risk Level: low, new opt-in feature in an extension Testing: unit tests Docs Changes: proto definitions documented Release Notes: ext_authz: added :ref:dynamic_metadata_from_headers to support emitting dynamic metadata from headers returned by an external authorization service via HTTP. 
Signed-off-by: John Esmet Signed-off-by: gayang --- .../filters/http/ext_authz/v3/ext_authz.proto | 10 ++++ .../http/http_filters/ext_authz_filter.rst | 15 ++--- docs/root/version_history/current.rst | 1 + .../common/ext_authz/ext_authz_http_impl.cc | 41 +++++++++++--- .../common/ext_authz/ext_authz_http_impl.h | 8 +++ .../ext_authz/ext_authz_http_impl_test.cc | 56 +++++++++++++++++++ 6 files changed, 115 insertions(+), 16 deletions(-) diff --git a/api/envoy/extensions/filters/http/ext_authz/v3/ext_authz.proto b/api/envoy/extensions/filters/http/ext_authz/v3/ext_authz.proto index 62feb51b191d5..b05420fa93cf4 100644 --- a/api/envoy/extensions/filters/http/ext_authz/v3/ext_authz.proto +++ b/api/envoy/extensions/filters/http/ext_authz/v3/ext_authz.proto @@ -244,6 +244,7 @@ message AuthorizationRequest { repeated config.core.v3.HeaderValue headers_to_add = 2; } +// [#next-free-field: 6] message AuthorizationResponse { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.http.ext_authz.v2.AuthorizationResponse"; @@ -270,6 +271,15 @@ message AuthorizationResponse { // the authorization response itself is successful, i.e. not failed or denied. When this list is // *not* set, no additional headers will be added to the client's response on success. type.matcher.v3.ListStringMatcher allowed_client_headers_on_success = 4; + + // When this :ref:`list ` is set, authorization + // response headers that have a corresponding match will be emitted as dynamic metadata to be consumed + // by the next filter. This metadata lives in a namespace specified by the canonical name of the extension filter + // that requires it: + // + // - :ref:`envoy.filters.http.ext_authz ` for HTTP filter. + // - :ref:`envoy.filters.network.ext_authz ` for network filter. + type.matcher.v3.ListStringMatcher dynamic_metadata_from_headers = 5; } // Extra settings on a per virtualhost/route/weighted-cluster level. 
diff --git a/docs/root/configuration/http/http_filters/ext_authz_filter.rst b/docs/root/configuration/http/http_filters/ext_authz_filter.rst index 94414daa0e285..232093c7c8ee3 100644 --- a/docs/root/configuration/http/http_filters/ext_authz_filter.rst +++ b/docs/root/configuration/http/http_filters/ext_authz_filter.rst @@ -179,16 +179,17 @@ Dynamic Metadata ---------------- .. _config_http_filters_ext_authz_dynamic_metadata: -.. note:: - - The External Authorization filter emits dynamic metadata only when it is configured to use - gRPC service as the authorization server. +The External Authorization filter supports emitting dynamic metadata as an opaque ``google.protobuf.Struct``. -The External Authorization filter emits dynamic metadata as an opaque ``google.protobuf.Struct`` -*only* when the gRPC authorization server returns a :ref:`CheckResponse -` with a filled :ref:`dynamic_metadata +When using a gRPC authorization server, dynamic metadata will be emitted only when the :ref:`CheckResponse +` contains a filled :ref:`dynamic_metadata ` field. +When using an HTTP authorization server, dynamic metadata will be emitted only when there are response headers +from the authorization server that match the configured +:ref:`dynamic_metadata_from_headers `, +if set. For every response header that matches, the filter will emit dynamic metadata whose key is the name of the matched header and whose value is the value of the matched header. + Runtime ------- The fraction of requests for which the filter is enabled can be configured via the :ref:`runtime_key diff --git a/docs/root/version_history/current.rst b/docs/root/version_history/current.rst index 32000cdc1e6ae..8a3a2d6a1f08c 100644 --- a/docs/root/version_history/current.rst +++ b/docs/root/version_history/current.rst @@ -105,6 +105,7 @@ New Features * bootstrap: added :ref:`inline_headers ` in the bootstrap to make custom inline headers bootstrap configurable. 
* contrib: added new :ref:`contrib images ` which contain contrib extensions. * dns: added :ref:`V4_PREFERRED ` option to return V6 addresses only if V4 addresses are not available. +* ext_authz: added :ref:`dynamic_metadata_from_headers ` to support emitting dynamic metadata from headers returned by an external authorization service via HTTP. * grpc reverse bridge: added a new :ref:`option ` to support streaming response bodies when withholding gRPC frames from the upstream. * http: added cluster_header in :ref:`weighted_clusters ` to allow routing to the weighted cluster specified in the request_header. * http: added :ref:`alternate_protocols_cache_options ` for enabling HTTP/3 connections to servers which advertise HTTP/3 support via `HTTP Alternative Services `_. diff --git a/source/extensions/filters/common/ext_authz/ext_authz_http_impl.cc b/source/extensions/filters/common/ext_authz/ext_authz_http_impl.cc index 388936a823e43..3cf233d661642 100644 --- a/source/extensions/filters/common/ext_authz/ext_authz_http_impl.cc +++ b/source/extensions/filters/common/ext_authz/ext_authz_http_impl.cc @@ -47,9 +47,12 @@ const Response& errorResponse() { struct SuccessResponse { SuccessResponse(const Http::HeaderMap& headers, const MatcherSharedPtr& matchers, const MatcherSharedPtr& append_matchers, - const MatcherSharedPtr& response_matchers, Response&& response) + const MatcherSharedPtr& response_matchers, + const MatcherSharedPtr& dynamic_metadata_matchers, Response&& response) : headers_(headers), matchers_(matchers), append_matchers_(append_matchers), - response_matchers_(response_matchers), response_(std::make_unique(response)) { + response_matchers_(response_matchers), + to_dynamic_metadata_matchers_(dynamic_metadata_matchers), + response_(std::make_unique(response)) { headers_.iterate([this](const Http::HeaderEntry& header) -> Http::HeaderMap::Iterate { // UpstreamHeaderMatcher if (matchers_->matches(header.key().getStringView())) { @@ -74,14 +77,21 @@ struct 
SuccessResponse { Http::LowerCaseString{std::string(header.key().getStringView())}, std::string(header.value().getStringView())); } + if (to_dynamic_metadata_matchers_->matches(header.key().getStringView())) { + const std::string key{header.key().getStringView()}; + const std::string value{header.value().getStringView()}; + (*response_->dynamic_metadata.mutable_fields())[key] = ValueUtil::stringValue(value); + } return Http::HeaderMap::Iterate::Continue; }); } const Http::HeaderMap& headers_; + // All matchers below are used on headers_. const MatcherSharedPtr& matchers_; const MatcherSharedPtr& append_matchers_; const MatcherSharedPtr& response_matchers_; + const MatcherSharedPtr& to_dynamic_metadata_matchers_; ResponsePtr response_; }; @@ -121,6 +131,8 @@ ClientConfig::ClientConfig(const envoy::extensions::filters::http::ext_authz::v3 config.http_service().authorization_response().allowed_client_headers())), client_header_on_success_matchers_(toClientMatchersOnSuccess( config.http_service().authorization_response().allowed_client_headers_on_success())), + to_dynamic_metadata_matchers_(toDynamicMetadataMatchers( + config.http_service().authorization_response().dynamic_metadata_from_headers())), upstream_header_matchers_(toUpstreamMatchers( config.http_service().authorization_response().allowed_upstream_headers())), upstream_header_to_append_matchers_(toUpstreamMatchers( @@ -155,6 +167,12 @@ ClientConfig::toClientMatchersOnSuccess(const envoy::type::matcher::v3::ListStri return std::make_shared(std::move(matchers)); } +MatcherSharedPtr +ClientConfig::toDynamicMetadataMatchers(const envoy::type::matcher::v3::ListStringMatcher& list) { + std::vector matchers(createStringMatchers(list)); + return std::make_shared(std::move(matchers)); +} + MatcherSharedPtr ClientConfig::toClientMatchers(const envoy::type::matcher::v3::ListStringMatcher& list) { std::vector matchers(createStringMatchers(list)); @@ -327,19 +345,24 @@ ResponsePtr 
RawHttpClientImpl::toResponse(Http::ResponseMessagePtr message) { // Create an Ok authorization response. if (status_code == enumToInt(Http::Code::OK)) { - SuccessResponse ok{ - message->headers(), config_->upstreamHeaderMatchers(), - config_->upstreamHeaderToAppendMatchers(), config_->clientHeaderOnSuccessMatchers(), - Response{CheckStatus::OK, Http::HeaderVector{}, Http::HeaderVector{}, Http::HeaderVector{}, - Http::HeaderVector{}, Http::HeaderVector{}, std::move(headers_to_remove), - EMPTY_STRING, Http::Code::OK, ProtobufWkt::Struct{}}}; + SuccessResponse ok{message->headers(), + config_->upstreamHeaderMatchers(), + config_->upstreamHeaderToAppendMatchers(), + config_->clientHeaderOnSuccessMatchers(), + config_->dynamicMetadataMatchers(), + Response{CheckStatus::OK, Http::HeaderVector{}, Http::HeaderVector{}, + Http::HeaderVector{}, Http::HeaderVector{}, Http::HeaderVector{}, + std::move(headers_to_remove), EMPTY_STRING, Http::Code::OK, + ProtobufWkt::Struct{}}}; return std::move(ok.response_); } // Create a Denied authorization response. - SuccessResponse denied{message->headers(), config_->clientHeaderMatchers(), + SuccessResponse denied{message->headers(), + config_->clientHeaderMatchers(), config_->upstreamHeaderToAppendMatchers(), config_->clientHeaderOnSuccessMatchers(), + config_->dynamicMetadataMatchers(), Response{CheckStatus::Denied, Http::HeaderVector{}, Http::HeaderVector{}, diff --git a/source/extensions/filters/common/ext_authz/ext_authz_http_impl.h b/source/extensions/filters/common/ext_authz/ext_authz_http_impl.h index ab13c2b494fa8..a517bef9480de 100644 --- a/source/extensions/filters/common/ext_authz/ext_authz_http_impl.h +++ b/source/extensions/filters/common/ext_authz/ext_authz_http_impl.h @@ -99,6 +99,11 @@ class ClientConfig { return client_header_on_success_matchers_; } + /** + * Returns a list of matchers used for selecting the headers to emit as dynamic metadata. 
+ */ + const MatcherSharedPtr& dynamicMetadataMatchers() const { return to_dynamic_metadata_matchers_; } + /** * Returns a list of matchers used for selecting the authorization response headers that * should be send to an the upstream server. @@ -131,11 +136,14 @@ class ClientConfig { static MatcherSharedPtr toClientMatchersOnSuccess(const envoy::type::matcher::v3::ListStringMatcher& list); static MatcherSharedPtr + toDynamicMetadataMatchers(const envoy::type::matcher::v3::ListStringMatcher& list); + static MatcherSharedPtr toUpstreamMatchers(const envoy::type::matcher::v3::ListStringMatcher& list); const MatcherSharedPtr request_header_matchers_; const MatcherSharedPtr client_header_matchers_; const MatcherSharedPtr client_header_on_success_matchers_; + const MatcherSharedPtr to_dynamic_metadata_matchers_; const MatcherSharedPtr upstream_header_matchers_; const MatcherSharedPtr upstream_header_to_append_matchers_; const std::string cluster_name_; diff --git a/test/extensions/filters/common/ext_authz/ext_authz_http_impl_test.cc b/test/extensions/filters/common/ext_authz/ext_authz_http_impl_test.cc index 46bc2bfd7da28..27a9a70b45ad7 100644 --- a/test/extensions/filters/common/ext_authz/ext_authz_http_impl_test.cc +++ b/test/extensions/filters/common/ext_authz/ext_authz_http_impl_test.cc @@ -102,6 +102,52 @@ class ExtAuthzHttpClientTest : public testing::Test { return std::make_shared(proto_config, timeout, path_prefix); } + void dynamicMetadataTest(CheckStatus status, const std::string& http_status) { + const std::string yaml = R"EOF( + http_service: + server_uri: + uri: "ext_authz:9000" + cluster: "ext_authz" + timeout: 0.25s + authorization_response: + dynamic_metadata_from_headers: + patterns: + - prefix: "X-Metadata-" + ignore_case: true + failure_mode_allow: true + )EOF"; + + initialize(yaml); + envoy::service::auth::v3::CheckRequest request; + client_->check(request_callbacks_, request, parent_span_, stream_info_); + + ProtobufWkt::Struct 
expected_dynamic_metadata; + auto* metadata_fields = expected_dynamic_metadata.mutable_fields(); + (*metadata_fields)["x-metadata-header-0"] = ValueUtil::stringValue("zero"); + (*metadata_fields)["x-metadata-header-1"] = ValueUtil::stringValue("2"); + (*metadata_fields)["x-metadata-header-2"] = ValueUtil::stringValue("4"); + + // When we call onSuccess() at the bottom of the test we expect that all the + // dynamic metadata values that we set above to be present in the authz Response + // below. + Response authz_response; + authz_response.status = status; + authz_response.dynamic_metadata = expected_dynamic_metadata; + EXPECT_CALL(request_callbacks_, onComplete_(WhenDynamicCastTo( + AuthzResponseNoAttributes(authz_response)))); + + const HeaderValueOptionVector http_response_headers = TestCommon::makeHeaderValueOption({ + {":status", http_status, false}, + {"bar", "nope", false}, + {"x-metadata-header-0", "zero", false}, + {"x-metadata-header-1", "2", false}, + {"x-foo", "nah", false}, + {"x-metadata-header-2", "4", false}, + }); + Http::ResponseMessagePtr http_response = TestCommon::makeMessageResponse(http_response_headers); + client_->onSuccess(async_request_, std::move(http_response)); + } + Http::RequestMessagePtr sendRequest(absl::node_hash_map&& headers) { envoy::service::auth::v3::CheckRequest request{}; auto mutable_headers = @@ -420,6 +466,16 @@ TEST_F(ExtAuthzHttpClientTest, AuthorizationOkWithHeadersToRemove) { client_->onSuccess(async_request_, std::move(http_response)); } +// Test the client when an OK response is received with dynamic metadata in that OK response. +TEST_F(ExtAuthzHttpClientTest, AuthorizationOkWithDynamicMetadata) { + dynamicMetadataTest(CheckStatus::OK, "200"); +} + +// Test the client when a denied response is received with dynamic metadata in the denied response. 
+TEST_F(ExtAuthzHttpClientTest, AuthorizationDeniedWithDynamicMetadata) { + dynamicMetadataTest(CheckStatus::Denied, "403"); +} + // Test the client when a denied response is received. TEST_F(ExtAuthzHttpClientTest, AuthorizationDenied) { const auto expected_headers = TestCommon::makeHeaderValueOption({{":status", "403", false}}); From d6c3c2f8504d34eb9e4c3f356789897badaf8f21 Mon Sep 17 00:00:00 2001 From: Rohit Agrawal Date: Fri, 24 Sep 2021 11:22:09 -0400 Subject: [PATCH 114/121] proto: add a new field called append_action in the HeaderValueOption (#18246) Signed-off-by: Rohit Agrawal Signed-off-by: gayang --- api/envoy/config/core/v3/base.proto | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/api/envoy/config/core/v3/base.proto b/api/envoy/config/core/v3/base.proto index d6c507b8dec9a..dcfc660dd0287 100644 --- a/api/envoy/config/core/v3/base.proto +++ b/api/envoy/config/core/v3/base.proto @@ -320,12 +320,33 @@ message HeaderValueOption { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.HeaderValueOption"; + // Describes the supported actions types for header append action. + enum HeaderAppendAction { + // This action will append the specified value to the existing values if the header + // already exists. If the header doesn't exist then this will add the header with + // specified key and value. + APPEND_IF_EXISTS_OR_ADD = 0; + + // This action will add the header if it doesn't already exist. If the header + // already exists then this will be a no-op. + ADD_IF_ABSENT = 1; + + // This action will overwrite the specified value by discarding any existing values if + // the header already exists. If the header doesn't exist then this will add the header + // with specified key and value. + OVERWRITE_IF_EXISTS_OR_ADD = 2; + } + // Header name/value pair that this option applies to. HeaderValue header = 1 [(validate.rules).message = {required: true}]; // Should the value be appended? 
If true (default), the value is appended to // existing values. Otherwise it replaces any existing values. google.protobuf.BoolValue append = 2; + + // [#not-implemented-hide:] Describes the action taken to append/overwrite the given value for an existing header + // or to only add this header if it's absent. Value defaults to :ref:`APPEND_IF_EXISTS_OR_ADD`. + HeaderAppendAction append_action = 3 [(validate.rules).enum = {defined_only: true}]; } // Wrapper for a set of headers. From 13a1beba653502606ef637794e2f2d6689df45ad Mon Sep 17 00:00:00 2001 From: Xin Date: Fri, 24 Sep 2021 11:29:54 -0400 Subject: [PATCH 115/121] Reuse the ConfigImpl held by RdsRouteConfigProviderImpl in SRDS (#18241) Signed-off-by: Xin Zhuang Signed-off-by: gayang --- source/common/router/scoped_rds.cc | 11 ++++------- source/common/router/scoped_rds.h | 3 +-- 2 files changed, 5 insertions(+), 9 deletions(-) diff --git a/source/common/router/scoped_rds.cc b/source/common/router/scoped_rds.cc index bbf3bec0e716c..d863f9db6a5f5 100644 --- a/source/common/router/scoped_rds.cc +++ b/source/common/router/scoped_rds.cc @@ -196,7 +196,7 @@ void ScopedRdsConfigSubscription::RdsRouteConfigProviderHelper::initRdsConfigPro rds_update_callback_handle_ = route_provider_->subscription().addUpdateCallback([this]() { // Subscribe to RDS update. - parent_.onRdsConfigUpdate(scope_name_, route_provider_->subscription()); + parent_.onRdsConfigUpdate(scope_name_, route_provider_->config()); }); parent_.stats_.active_scopes_.inc(); } @@ -231,7 +231,7 @@ void ScopedRdsConfigSubscription::RdsRouteConfigProviderHelper::maybeInitRdsConf return; } // If RouteConfiguration has been initialized, apply update to all the threads. 
- parent_.onRdsConfigUpdate(scope_name_, route_provider_->subscription()); + parent_.onRdsConfigUpdate(scope_name_, route_provider_->config()); } bool ScopedRdsConfigSubscription::addOrUpdateScopes( @@ -393,16 +393,13 @@ void ScopedRdsConfigSubscription::onConfigUpdate( } void ScopedRdsConfigSubscription::onRdsConfigUpdate(const std::string& scope_name, - RdsRouteConfigSubscription& rds_subscription) { + ConfigConstSharedPtr new_rds_config) { auto iter = scoped_route_map_.find(scope_name); ASSERT(iter != scoped_route_map_.end(), fmt::format("trying to update route config for non-existing scope {}", scope_name)); auto new_scoped_route_info = std::make_shared( envoy::config::route::v3::ScopedRouteConfiguration(iter->second->configProto()), - std::make_shared( - rds_subscription.routeConfigUpdate()->protobufConfiguration(), optional_http_filters_, - factory_context_, factory_context_.messageValidationContext().dynamicValidationVisitor(), - false)); + std::move(new_rds_config)); applyConfigUpdate([new_scoped_route_info](ConfigProvider::ConfigConstSharedPtr config) -> ConfigProvider::ConfigConstSharedPtr { auto* thread_local_scoped_config = diff --git a/source/common/router/scoped_rds.h b/source/common/router/scoped_rds.h index b0e5690bee8e8..d21d812741e3c 100644 --- a/source/common/router/scoped_rds.h +++ b/source/common/router/scoped_rds.h @@ -217,8 +217,7 @@ class ScopedRdsConfigSubscription DeltaConfigSubscriptionInstance::onConfigUpdateFailed(); } // Propagate RDS updates to ScopeConfigImpl in workers. - void onRdsConfigUpdate(const std::string& scope_name, - RdsRouteConfigSubscription& rds_subscription); + void onRdsConfigUpdate(const std::string& scope_name, ConfigConstSharedPtr new_rds_config); // ScopedRouteInfo by scope name. 
ScopedRouteMap scoped_route_map_; From 513c2add5a8aa75ce094a1abadddc975ce9b1eee Mon Sep 17 00:00:00 2001 From: Greg Brail Date: Fri, 24 Sep 2021 09:03:27 -0700 Subject: [PATCH 116/121] ext_proc: Cache the gRPC client (#18166) Signed-off-by: Gregory Brail Signed-off-by: gayang --- .../filters/http/ext_proc/client_impl.cc | 11 ++++---- .../filters/http/ext_proc/client_impl.h | 4 ++- .../filters/http/ext_proc/ext_proc.cc | 25 +++++++++++++++---- .../filters/http/ext_proc/ext_proc.h | 1 + .../filters/http/ext_proc/client_test.cc | 16 +++++------- 5 files changed, 36 insertions(+), 21 deletions(-) diff --git a/source/extensions/filters/http/ext_proc/client_impl.cc b/source/extensions/filters/http/ext_proc/client_impl.cc index d8834c8a827a4..ee11422627036 100644 --- a/source/extensions/filters/http/ext_proc/client_impl.cc +++ b/source/extensions/filters/http/ext_proc/client_impl.cc @@ -10,14 +10,14 @@ static constexpr char kExternalMethod[] = ExternalProcessorClientImpl::ExternalProcessorClientImpl( Grpc::AsyncClientManager& client_manager, - const envoy::config::core::v3::GrpcService& grpc_service, Stats::Scope& scope) { - factory_ = client_manager.factoryForGrpcService(grpc_service, scope, true); -} + const envoy::config::core::v3::GrpcService& grpc_service, Stats::Scope& scope) + : client_manager_(client_manager), grpc_service_(grpc_service), scope_(scope) {} ExternalProcessorStreamPtr ExternalProcessorClientImpl::start(ExternalProcessorCallbacks& callbacks) { Grpc::AsyncClient grpcClient( - factory_->createUncachedRawAsyncClient()); + client_manager_.getOrCreateRawAsyncClient(grpc_service_, scope_, true, + Grpc::CacheOption::AlwaysCache)); return std::make_unique(std::move(grpcClient), callbacks); } @@ -39,8 +39,9 @@ void ExternalProcessorStreamImpl::send( bool ExternalProcessorStreamImpl::close() { if (!stream_closed_) { ENVOY_LOG(debug, "Closing gRPC stream"); - stream_->closeStream(); + stream_.closeStream(); stream_closed_ = true; + stream_.resetStream(); return 
true; } return false; diff --git a/source/extensions/filters/http/ext_proc/client_impl.h b/source/extensions/filters/http/ext_proc/client_impl.h index fdce71ed1a173..0b21e15ac7522 100644 --- a/source/extensions/filters/http/ext_proc/client_impl.h +++ b/source/extensions/filters/http/ext_proc/client_impl.h @@ -30,7 +30,9 @@ class ExternalProcessorClientImpl : public ExternalProcessorClient { ExternalProcessorStreamPtr start(ExternalProcessorCallbacks& callbacks) override; private: - Grpc::AsyncClientFactoryPtr factory_; + Grpc::AsyncClientManager& client_manager_; + const envoy::config::core::v3::GrpcService grpc_service_; + Stats::Scope& scope_; }; class ExternalProcessorStreamImpl : public ExternalProcessorStream, diff --git a/source/extensions/filters/http/ext_proc/ext_proc.cc b/source/extensions/filters/http/ext_proc/ext_proc.cc index e04a8cf857014..3bc4696fe0a04 100644 --- a/source/extensions/filters/http/ext_proc/ext_proc.cc +++ b/source/extensions/filters/http/ext_proc/ext_proc.cc @@ -65,18 +65,26 @@ Filter::StreamOpenState Filter::openStream() { return StreamOpenState::Ok; } -void Filter::onDestroy() { - ENVOY_LOG(trace, "onDestroy"); - // Make doubly-sure we no longer use the stream, as - // per the filter contract. - processing_complete_ = true; +void Filter::closeStream() { if (stream_) { + ENVOY_LOG(debug, "Calling close on stream"); if (stream_->close()) { stats_.streams_closed_.inc(); } + stream_.reset(); + } else { + ENVOY_LOG(debug, "Stream already closed"); } } +void Filter::onDestroy() { + ENVOY_LOG(debug, "onDestroy"); + // Make doubly-sure we no longer use the stream, as + // per the filter contract. 
+ processing_complete_ = true; + closeStream(); +} + FilterHeadersStatus Filter::onHeaders(ProcessorState& state, Http::RequestOrResponseHeaderMap& headers, bool end_stream) { switch (openStream()) { @@ -478,7 +486,9 @@ void Filter::onReceiveMessage(std::unique_ptr&& r) { case ProcessingResponse::ResponseCase::kImmediateResponse: // We won't be sending anything more to the stream after we // receive this message. + ENVOY_LOG(debug, "Sending immediate response"); processing_complete_ = true; + closeStream(); cleanUpTimers(); sendImmediateResponse(response->immediate_response()); message_handled = true; @@ -499,6 +509,7 @@ void Filter::onReceiveMessage(std::unique_ptr&& r) { // to protect us from a malformed server. ENVOY_LOG(warn, "Spurious response message {} received on gRPC stream", response->response_case()); + closeStream(); clearAsyncState(); processing_complete_ = true; } @@ -519,6 +530,7 @@ void Filter::onGrpcError(Grpc::Status::GrpcStatus status) { } else { processing_complete_ = true; + closeStream(); // Since the stream failed, there is no need to handle timeouts, so // make sure that they do not fire now. cleanUpTimers(); @@ -535,6 +547,7 @@ void Filter::onGrpcClose() { stats_.streams_closed_.inc(); // Successful close. We can ignore the stream for the rest of our request // and response processing. + closeStream(); clearAsyncState(); } @@ -547,12 +560,14 @@ void Filter::onMessageTimeout() { // and we can't wait any more. So, as we do for a spurious message, ignore // the external processor for the rest of the request. processing_complete_ = true; + closeStream(); stats_.failure_mode_allowed_.inc(); clearAsyncState(); } else { // Return an error and stop processing the current stream. 
processing_complete_ = true; + closeStream(); decoding_state_.setCallbackState(ProcessorState::CallbackState::Idle); encoding_state_.setCallbackState(ProcessorState::CallbackState::Idle); ImmediateResponse errorResponse; diff --git a/source/extensions/filters/http/ext_proc/ext_proc.h b/source/extensions/filters/http/ext_proc/ext_proc.h index ca6c87060dd82..f1fb8994f502e 100644 --- a/source/extensions/filters/http/ext_proc/ext_proc.h +++ b/source/extensions/filters/http/ext_proc/ext_proc.h @@ -149,6 +149,7 @@ class Filter : public Logger::Loggable, private: void mergePerRouteConfig(); StreamOpenState openStream(); + void closeStream(); void cleanUpTimers(); void clearAsyncState(); diff --git a/test/extensions/filters/http/ext_proc/client_test.cc b/test/extensions/filters/http/ext_proc/client_test.cc index 07f0ecbb3bc2d..ed42463ecb5d8 100644 --- a/test/extensions/filters/http/ext_proc/client_test.cc +++ b/test/extensions/filters/http/ext_proc/client_test.cc @@ -32,21 +32,14 @@ class ExtProcStreamTest : public testing::Test, public ExternalProcessorCallback auto grpc = service.mutable_envoy_grpc(); grpc->set_cluster_name("test"); - EXPECT_CALL(client_manager_, factoryForGrpcService(_, _, _)) + EXPECT_CALL(client_manager_, getOrCreateRawAsyncClient(_, _, _, _)) .WillOnce(Invoke(this, &ExtProcStreamTest::doFactory)); client_ = std::make_unique(client_manager_, service, stats_store_); } - Grpc::AsyncClientFactoryPtr doFactory(Unused, Unused, Unused) { - auto factory = std::make_unique(); - EXPECT_CALL(*factory, createUncachedRawAsyncClient()) - .WillOnce(Invoke(this, &ExtProcStreamTest::doCreate)); - return factory; - } - - Grpc::RawAsyncClientPtr doCreate() { - auto async_client = std::make_unique(); + Grpc::RawAsyncClientSharedPtr doFactory(Unused, Unused, Unused, Unused) { + auto async_client = std::make_shared(); EXPECT_CALL(*async_client, startRaw("envoy.service.ext_proc.v3alpha.ExternalProcessor", "Process", _, _)) .WillOnce(Invoke(this, 
&ExtProcStreamTest::doStartRaw)); @@ -83,6 +76,7 @@ class ExtProcStreamTest : public testing::Test, public ExternalProcessorCallback TEST_F(ExtProcStreamTest, OpenCloseStream) { auto stream = client_->start(*this); EXPECT_CALL(stream_, closeStream()); + EXPECT_CALL(stream_, resetStream()); stream->close(); } @@ -93,6 +87,7 @@ TEST_F(ExtProcStreamTest, SendToStream) { ProcessingRequest req; stream->send(std::move(req), false); EXPECT_CALL(stream_, closeStream()); + EXPECT_CALL(stream_, resetStream()); stream->close(); } @@ -129,6 +124,7 @@ TEST_F(ExtProcStreamTest, ReceiveFromStream) { stream_callbacks_->onReceiveTrailingMetadata(std::move(empty_response_trailers)); EXPECT_CALL(stream_, closeStream()); + EXPECT_CALL(stream_, resetStream()); stream->close(); } From cd72c55f16b7f0046103dc26eb97fd0d78452214 Mon Sep 17 00:00:00 2001 From: code Date: Sat, 25 Sep 2021 00:16:14 +0800 Subject: [PATCH 117/121] fix possible use-after-free introduced by the cross-priority host map updates in the thread aware lb (#18253) Signed-off-by: wbpcode Signed-off-by: gayang --- .../common/upstream/thread_aware_lb_impl.cc | 10 ++--- source/common/upstream/thread_aware_lb_impl.h | 45 +++++++------------ test/common/upstream/maglev_lb_test.cc | 13 ++++++ test/common/upstream/ring_hash_lb_test.cc | 13 ++++++ test/config/utility.cc | 14 +++--- test/config/utility.h | 5 ++- test/integration/cds_integration_test.cc | 34 ++++++++++++++ 7 files changed, 90 insertions(+), 44 deletions(-) diff --git a/source/common/upstream/thread_aware_lb_impl.cc b/source/common/upstream/thread_aware_lb_impl.cc index 8da946d77d980..771da91b7f2bf 100644 --- a/source/common/upstream/thread_aware_lb_impl.cc +++ b/source/common/upstream/thread_aware_lb_impl.cc @@ -95,10 +95,7 @@ void ThreadAwareLoadBalancerBase::initialize() { // complicated initialization as the load balancer would need its own initialized callback. I // think the synchronous/asynchronous split is probably the best option. 
priority_update_cb_ = priority_set_.addPriorityUpdateCb( - [this](uint32_t, const HostVector&, const HostVector&) -> void { - refresh(); - threadSafeSetCrossPriorityHostMap(priority_set_.crossPriorityHostMap()); - }); + [this](uint32_t, const HostVector&, const HostVector&) -> void { refresh(); }); refresh(); } @@ -134,6 +131,7 @@ void ThreadAwareLoadBalancerBase::refresh() { factory_->healthy_per_priority_load_ = healthy_per_priority_load; factory_->degraded_per_priority_load_ = degraded_per_priority_load; factory_->per_priority_state_ = per_priority_state_vector; + factory_->cross_priority_host_map_ = priority_set_.crossPriorityHostMap(); } } @@ -181,8 +179,7 @@ ThreadAwareLoadBalancerBase::LoadBalancerImpl::chooseHost(LoadBalancerContext* c } LoadBalancerPtr ThreadAwareLoadBalancerBase::LoadBalancerFactoryImpl::create() { - auto lb = std::make_unique( - stats_, random_, thread_aware_lb_.threadSafeGetCrossPriorityHostMap()); + auto lb = std::make_unique(stats_, random_); // We must protect current_lb_ via a RW lock since it is accessed and written to by multiple // threads. All complex processing has already been precalculated however. 
@@ -190,6 +187,7 @@ LoadBalancerPtr ThreadAwareLoadBalancerBase::LoadBalancerFactoryImpl::create() { lb->healthy_per_priority_load_ = healthy_per_priority_load_; lb->degraded_per_priority_load_ = degraded_per_priority_load_; lb->per_priority_state_ = per_priority_state_; + lb->cross_priority_host_map_ = cross_priority_host_map_; return lb; } diff --git a/source/common/upstream/thread_aware_lb_impl.h b/source/common/upstream/thread_aware_lb_impl.h index 81a1e5e2e4c83..fa26abddf98b4 100644 --- a/source/common/upstream/thread_aware_lb_impl.h +++ b/source/common/upstream/thread_aware_lb_impl.h @@ -98,7 +98,7 @@ class ThreadAwareLoadBalancerBase : public LoadBalancerBase, public ThreadAwareL Random::RandomGenerator& random, const envoy::config::cluster::v3::Cluster::CommonLbConfig& common_config) : LoadBalancerBase(priority_set, stats, runtime, random, common_config), - factory_(new LoadBalancerFactoryImpl(stats, random, *this)) {} + factory_(new LoadBalancerFactoryImpl(stats, random)) {} private: struct PerPriorityState { @@ -108,9 +108,8 @@ class ThreadAwareLoadBalancerBase : public LoadBalancerBase, public ThreadAwareL using PerPriorityStatePtr = std::unique_ptr; struct LoadBalancerImpl : public LoadBalancer { - LoadBalancerImpl(ClusterStats& stats, Random::RandomGenerator& random, - HostMapConstSharedPtr host_map) - : stats_(stats), random_(random), cross_priority_host_map_(std::move(host_map)) {} + LoadBalancerImpl(ClusterStats& stats, Random::RandomGenerator& random) + : stats_(stats), random_(random) {} // Upstream::LoadBalancer HostConstSharedPtr chooseHost(LoadBalancerContext* context) override; @@ -128,15 +127,12 @@ class ThreadAwareLoadBalancerBase : public LoadBalancerBase, public ThreadAwareL }; struct LoadBalancerFactoryImpl : public LoadBalancerFactory { - LoadBalancerFactoryImpl(ClusterStats& stats, Random::RandomGenerator& random, - ThreadAwareLoadBalancerBase& thread_aware_lb) - : thread_aware_lb_(thread_aware_lb), stats_(stats), random_(random) {} + 
LoadBalancerFactoryImpl(ClusterStats& stats, Random::RandomGenerator& random) + : stats_(stats), random_(random) {} // Upstream::LoadBalancerFactory LoadBalancerPtr create() override; - ThreadAwareLoadBalancerBase& thread_aware_lb_; - ClusterStats& stats_; Random::RandomGenerator& random_; absl::Mutex mutex_; @@ -144,6 +140,16 @@ class ThreadAwareLoadBalancerBase : public LoadBalancerBase, public ThreadAwareL // This is split out of PerPriorityState so LoadBalancerBase::ChoosePriority can be reused. std::shared_ptr healthy_per_priority_load_ ABSL_GUARDED_BY(mutex_); std::shared_ptr degraded_per_priority_load_ ABSL_GUARDED_BY(mutex_); + + // Whenever the membership changes, the cross_priority_host_map_ will be updated automatically. + // And all workers will create a new worker local load balancer and copy the + // cross_priority_host_map_. + // This leads to the possibility of simultaneous reading and writing of cross_priority_host_map_ + // in different threads. For this reason, mutex is necessary to guard cross_priority_host_map_. + // + // Cross priority host map for fast cross priority host searching. When the priority update + // callback is executed, the host map will also be updated. 
+ HostMapConstSharedPtr cross_priority_host_map_ ABSL_GUARDED_BY(mutex_); }; virtual HashingLoadBalancerSharedPtr @@ -151,29 +157,8 @@ class ThreadAwareLoadBalancerBase : public LoadBalancerBase, public ThreadAwareL double min_normalized_weight, double max_normalized_weight) PURE; void refresh(); - void threadSafeSetCrossPriorityHostMap(HostMapConstSharedPtr host_map) { - absl::MutexLock ml(&cross_priority_host_map_mutex_); - cross_priority_host_map_ = std::move(host_map); - } - HostMapConstSharedPtr threadSafeGetCrossPriorityHostMap() { - absl::MutexLock ml(&cross_priority_host_map_mutex_); - return cross_priority_host_map_; - } - std::shared_ptr factory_; Common::CallbackHandlePtr priority_update_cb_; - - // Whenever the membership changes, the cross_priority_host_map_ will be updated automatically. - // And all workers will create a new worker local load balancer and copy the - // cross_priority_host_map_. - // - // This leads to the possibility of simultaneous reading and writing of cross_priority_host_map_ - // in different threads. For this reason, an additional mutex is necessary to guard - // cross_priority_host_map_. - absl::Mutex cross_priority_host_map_mutex_; - // Cross priority host map for fast cross priority host searching. When the priority update - // callback is executed, the host map will also be updated. - HostMapConstSharedPtr cross_priority_host_map_ ABSL_GUARDED_BY(cross_priority_host_map_mutex_); }; } // namespace Upstream diff --git a/test/common/upstream/maglev_lb_test.cc b/test/common/upstream/maglev_lb_test.cc index 562d4f0e90fa6..d8472c5a5aefe 100644 --- a/test/common/upstream/maglev_lb_test.cc +++ b/test/common/upstream/maglev_lb_test.cc @@ -103,6 +103,19 @@ TEST_F(MaglevLoadBalancerTest, SelectOverrideHost) { EXPECT_EQ(mock_host, lb_->factory()->create()->chooseHost(&context)); } +// Test for thread aware load balancer destructed before load balancer factory. 
After CDS removes a +// cluster, the operation does not immediately reach the worker thread. There may be cases where the +// thread aware load balancer is destructed, but the load balancer factory is still used in the +// worker thread. +TEST_F(MaglevLoadBalancerTest, LbDestructedBeforeFactory) { + init(7); + + auto factory = lb_->factory(); + lb_.reset(); + + EXPECT_NE(nullptr, factory->create()); +} + // Throws an exception if table size is not a prime number. TEST_F(MaglevLoadBalancerTest, NoPrimeNumber) { EXPECT_THROW_WITH_MESSAGE(init(8), EnvoyException, diff --git a/test/common/upstream/ring_hash_lb_test.cc b/test/common/upstream/ring_hash_lb_test.cc index c259f610c54af..9d5b2c4141eff 100644 --- a/test/common/upstream/ring_hash_lb_test.cc +++ b/test/common/upstream/ring_hash_lb_test.cc @@ -120,6 +120,19 @@ TEST_P(RingHashLoadBalancerTest, SelectOverrideHost) { EXPECT_EQ(mock_host, lb_->factory()->create()->chooseHost(&context)); } +// Test for thread aware load balancer destructed before load balancer factory. After CDS removes a +// cluster, the operation does not immediately reach the worker thread. There may be cases where the +// thread aware load balancer is destructed, but the load balancer factory is still used in the +// worker thread. +TEST_P(RingHashLoadBalancerTest, LbDestructedBeforeFactory) { + init(); + + auto factory = lb_->factory(); + lb_.reset(); + + EXPECT_NE(nullptr, factory->create()); +} + // Given minimum_ring_size > maximum_ring_size, expect an exception. 
TEST_P(RingHashLoadBalancerTest, BadRingSizeBounds) { config_ = envoy::config::cluster::v3::Cluster::RingHashLbConfig(); diff --git a/test/config/utility.cc b/test/config/utility.cc index a5aca16f6c773..208e27a70c1c7 100644 --- a/test/config/utility.cc +++ b/test/config/utility.cc @@ -404,9 +404,12 @@ std::string ConfigHelper::adsBootstrap(const std::string& api_type) { } // TODO(samflattery): bundle this up with buildCluster -envoy::config::cluster::v3::Cluster -ConfigHelper::buildStaticCluster(const std::string& name, int port, const std::string& address) { - return TestUtility::parseYaml(fmt::format(R"EOF( +envoy::config::cluster::v3::Cluster ConfigHelper::buildStaticCluster(const std::string& name, + int port, + const std::string& address, + const std::string& lb_policy) { + return TestUtility::parseYaml( + fmt::format(R"EOF( name: {} connect_timeout: 5s type: STATIC @@ -419,15 +422,14 @@ ConfigHelper::buildStaticCluster(const std::string& name, int port, const std::s socket_address: address: {} port_value: {} - lb_policy: ROUND_ROBIN + lb_policy: {} typed_extension_protocol_options: envoy.extensions.upstreams.http.v3.HttpProtocolOptions: "@type": type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions explicit_http_config: http2_protocol_options: {{}} )EOF", - name, name, - address, port)); + name, name, address, port, lb_policy)); } envoy::config::cluster::v3::Cluster ConfigHelper::buildCluster(const std::string& name, diff --git a/test/config/utility.h b/test/config/utility.h index 7d94c4b8dbd0e..061726d8aff03 100644 --- a/test/config/utility.h +++ b/test/config/utility.h @@ -146,8 +146,9 @@ class ConfigHelper { static std::string discoveredClustersBootstrap(const std::string& api_type); static std::string adsBootstrap(const std::string& api_type); // Builds a standard Cluster config fragment, with a single endpoint (at address:port). 
- static envoy::config::cluster::v3::Cluster buildStaticCluster(const std::string& name, int port, - const std::string& address); + static envoy::config::cluster::v3::Cluster + buildStaticCluster(const std::string& name, int port, const std::string& address, + const std::string& lb_policy = "ROUND_ROBIN"); // ADS configurations static envoy::config::cluster::v3::Cluster diff --git a/test/integration/cds_integration_test.cc b/test/integration/cds_integration_test.cc index 3da9db2c5e897..bbffa27e9569c 100644 --- a/test/integration/cds_integration_test.cc +++ b/test/integration/cds_integration_test.cc @@ -175,6 +175,40 @@ TEST_P(CdsIntegrationTest, CdsClusterUpDownUp) { cleanupUpstreamAndDownstream(); } +// Test the fast addition and removal of clusters when they use ThreadAwareLb. +TEST_P(CdsIntegrationTest, CdsClusterWithThreadAwareLbCycleUpDownUp) { + // Calls our initialize(), which includes establishing a listener, route, and cluster. + testRouterHeaderOnlyRequestAndResponse(nullptr, UpstreamIndex1, "/cluster1"); + test_server_->waitForCounterGe("cluster_manager.cluster_added", 1); + + // Tell Envoy that cluster_1 is gone. + EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Cluster, "55", {}, {}, {})); + sendDiscoveryResponse(Config::TypeUrl::get().Cluster, {}, {}, + {ClusterName1}, "42"); + // Make sure that Envoy's ClusterManager has made use of the DiscoveryResponse that says cluster_1 + // is gone. + test_server_->waitForCounterGe("cluster_manager.cluster_removed", 1); + + // Update cluster1_ to use MAGLEV load balancer policy. + cluster1_ = ConfigHelper::buildStaticCluster( + ClusterName1, fake_upstreams_[UpstreamIndex1]->localAddress()->ip()->port(), + Network::Test::getLoopbackAddressString(ipVersion()), "MAGLEV"); + + // Cyclically add and remove cluster with ThreadAwareLb. 
+ for (int i = 42; i < 142; i += 2) { + EXPECT_TRUE( + compareDiscoveryRequest(Config::TypeUrl::get().Cluster, absl::StrCat(i), {}, {}, {})); + sendDiscoveryResponse( + Config::TypeUrl::get().Cluster, {cluster1_}, {cluster1_}, {}, absl::StrCat(i + 1)); + EXPECT_TRUE( + compareDiscoveryRequest(Config::TypeUrl::get().Cluster, absl::StrCat(i + 1), {}, {}, {})); + sendDiscoveryResponse( + Config::TypeUrl::get().Cluster, {}, {}, {ClusterName1}, absl::StrCat(i + 2)); + } + + cleanupUpstreamAndDownstream(); +} + // Tests adding a cluster, adding another, then removing the first. TEST_P(CdsIntegrationTest, TwoClusters) { // Calls our initialize(), which includes establishing a listener, route, and cluster. From f05d29ebfb304890c98edbf501e9eee4ad9550eb Mon Sep 17 00:00:00 2001 From: phlax Date: Fri, 24 Sep 2021 18:02:45 +0100 Subject: [PATCH 118/121] docs: Consolidate build rules (#18142) * docs: Consolidate build rules * add-todo Signed-off-by: Ryan Northey Signed-off-by: gayang --- bazel/repository_locations.bzl | 6 +-- docs/BUILD | 89 ++++++++++++++++------------------ 2 files changed, 46 insertions(+), 49 deletions(-) diff --git a/bazel/repository_locations.bzl b/bazel/repository_locations.bzl index 853690229f558..9c10bdc6bf0b2 100644 --- a/bazel/repository_locations.bzl +++ b/bazel/repository_locations.bzl @@ -686,11 +686,11 @@ REPOSITORY_LOCATIONS_SPEC = dict( project_name = "Packaging rules for Bazel", project_desc = "Bazel rules for the packaging distributions", project_url = "https://github.com/bazelbuild/rules_pkg", - version = "0.4.0", - sha256 = "038f1caa773a7e35b3663865ffb003169c6a71dc995e39bf4815792f385d837d", + version = "0.5.1", + sha256 = "a89e203d3cf264e564fcb96b6e06dd70bc0557356eb48400ce4b5d97c2c3720d", urls = ["https://github.com/bazelbuild/rules_pkg/releases/download/{version}/rules_pkg-{version}.tar.gz"], use_category = ["build"], - release_date = "2021-03-03", + release_date = "2021-08-18", ), six = dict( project_name = "Six", diff --git a/docs/BUILD 
b/docs/BUILD index ca6cff03defa8..775644422374b 100644 --- a/docs/BUILD +++ b/docs/BUILD @@ -2,6 +2,7 @@ load( "//bazel:envoy_build_system.bzl", "envoy_package", ) +load("@rules_pkg//:mappings.bzl", "pkg_filegroup", "pkg_files") load("@rules_pkg//:pkg.bzl", "pkg_tar") licenses(["notice"]) # Apache 2 @@ -69,50 +70,6 @@ genrule( ], ) -pkg_tar( - name = "sphinx_base", - srcs = glob( - [ - "conf.py", - "_ext/*", - ], - ) + [":redirects"], - extension = "tar", - strip_prefix = "/docs/", -) - -pkg_tar( - name = "sphinx_root", - srcs = glob(["root/**/*"]), - extension = "tar", - strip_prefix = "/docs/root", -) - -pkg_tar( - name = "base_rst", - extension = "tar", - deps = [ - ":sphinx_base.tar", - ":sphinx_root.tar", - ], -) - -pkg_tar( - name = "google_vrp_config", - srcs = ["//configs:google-vrp/envoy-edge.yaml"], - extension = "tar", - package_dir = "/best_practices", - strip_prefix = "/configs/configuration", -) - -pkg_tar( - name = "examples_rst", - srcs = ["//examples:files"], - extension = "tar", - package_dir = "/start/sandboxes/_include", - strip_prefix = "/examples", -) - genrule( name = "extensions_security_rst", srcs = [ @@ -171,14 +128,54 @@ genrule( tools = ["//tools/docs:generate_api_rst"], ) +pkg_files( + name = "sphinx_base", + srcs = glob( + [ + "conf.py", + "_ext/*", + ], + ) + [":redirects"], + strip_prefix = "/docs", +) + +pkg_files( + name = "sphinx_root", + srcs = glob(["root/**/*"]), + strip_prefix = "/docs/root", +) + +# TODO(phlax): this appears unused, fix or remove +pkg_files( + name = "google_vrp_config", + srcs = ["//configs:google-vrp/envoy-edge.yaml"], + prefix = "config/best_practices", + strip_prefix = "/configs", +) + +pkg_files( + name = "examples_rst", + srcs = ["//examples:files"], + prefix = "start/sandboxes/_include", + strip_prefix = "/examples", +) + +pkg_filegroup( + name = "rst_files", + srcs = [ + ":examples_rst", + ":sphinx_base", + ":sphinx_root", + ], +) + pkg_tar( name = "rst", + srcs = [":rst_files"], extension = "tar", 
deps = [ ":api_rst", - ":base_rst.tar", ":empty_protos_rst", - ":examples_rst.tar", ":extensions_security_rst", ":external_deps_rst", ], From 85dd89d5761552d5edfd849f179319bbc1565e1e Mon Sep 17 00:00:00 2001 From: Ryan Hamilton Date: Fri, 24 Sep 2021 10:22:31 -0700 Subject: [PATCH 119/121] coverage: exclude a guarddog test from coverage (#18255) This is a clone of #18252 but with the formatting fixed. Risk Level: None - test only Testing: N/A Docs Changes: N/A Release Notes: N/A Platform Specific Features: N/A Signed-off-by: Ryan Hamilton Signed-off-by: gayang --- test/server/guarddog_impl_test.cc | 3 +++ 1 file changed, 3 insertions(+) diff --git a/test/server/guarddog_impl_test.cc b/test/server/guarddog_impl_test.cc index dbbe963a175fc..f3ed70f23015a 100644 --- a/test/server/guarddog_impl_test.cc +++ b/test/server/guarddog_impl_test.cc @@ -820,6 +820,8 @@ TEST_P(GuardDogActionsTest, KillShouldTriggerGuardDogActions) { EXPECT_DEATH(die_function(), "ASSERT_GUARDDOG_ACTION"); } +// Disabled for coverage per #18229 +#if !defined(ENVOY_CONFIG_COVERAGE) TEST_P(GuardDogActionsTest, MultikillShouldTriggerGuardDogActions) { auto die_function = [&]() -> void { const NiceMock config(DISABLE_MISS, DISABLE_MEGAMISS, DISABLE_KILL, @@ -833,6 +835,7 @@ TEST_P(GuardDogActionsTest, MultikillShouldTriggerGuardDogActions) { EXPECT_DEATH(die_function(), "ASSERT_GUARDDOG_ACTION"); } +#endif } // namespace } // namespace Server From fb6b540cd04154addf91a758d0af5443d49d0bdc Mon Sep 17 00:00:00 2001 From: gayang Date: Sun, 26 Sep 2021 14:10:04 +0000 Subject: [PATCH 120/121] add response trailers for bandwidth limit filter Signed-off-by: gayang --- .../v3alpha/bandwidth_limit.proto | 2 +- .../http_filters/bandwidth_limit_filter.rst | 10 ++- docs/root/version_history/current.rst | 3 + .../http/bandwidth_limit/bandwidth_limit.cc | 51 +++++++++-- .../http/bandwidth_limit/bandwidth_limit.h | 16 +++- .../http/common/stream_rate_limiter.cc | 13 ++- .../filters/http/common/stream_rate_limiter.h 
| 7 +- .../filters/http/fault/fault_filter.cc | 2 +- .../http/bandwidth_limit/config_test.cc | 6 ++ .../http/bandwidth_limit/filter_test.cc | 88 +++++++++++++------ .../http/common/stream_rate_limiter_test.cc | 4 +- 11 files changed, 151 insertions(+), 51 deletions(-) diff --git a/api/envoy/extensions/filters/http/bandwidth_limit/v3alpha/bandwidth_limit.proto b/api/envoy/extensions/filters/http/bandwidth_limit/v3alpha/bandwidth_limit.proto index 2395402fd1ab6..c47878863195f 100644 --- a/api/envoy/extensions/filters/http/bandwidth_limit/v3alpha/bandwidth_limit.proto +++ b/api/envoy/extensions/filters/http/bandwidth_limit/v3alpha/bandwidth_limit.proto @@ -20,7 +20,7 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // Bandwidth limit :ref:`configuration overview `. // [#extension: envoy.filters.http.bandwidth_limit] -// [#next-free-field: 6] +// [#next-free-field: 7] message BandwidthLimit { // Defines the mode for the bandwidth limit filter. // Values represent bitmask. 
diff --git a/docs/root/configuration/http/http_filters/bandwidth_limit_filter.rst b/docs/root/configuration/http/http_filters/bandwidth_limit_filter.rst index 4576e9d3ac36a..5c032496b723a 100644 --- a/docs/root/configuration/http/http_filters/bandwidth_limit_filter.rst +++ b/docs/root/configuration/http/http_filters/bandwidth_limit_filter.rst @@ -42,14 +42,16 @@ The HTTP bandwidth limit filter outputs statistics in the ``.http_b :widths: 1, 1, 2 request_enabled, Counter, Total number of request streams for which the bandwidth limiter was consulted + request_enforced, Counter, Total number of request streams for which the bandwidth limiter was enforced request_pending, GAUGE, Number of request streams which are currently pending transfer in bandwidth limiter - request_incoming_size, GAUGE, Size in bytes of incoming request data to bandwidth limiter - request_allowed_size, GAUGE, Size in bytes of outgoing request data from bandwidth limiter + request_incoming_size, Counter, Size in bytes of incoming request data to bandwidth limiter + request_allowed_size, Counter, Size in bytes of outgoing request data from bandwidth limiter request_transfer_duration, HISTOGRAM, Total time (including added delay) it took for the request stream transfer response_enabled, Counter, Total number of response streams for which the bandwidth limiter was consulted + response_enforced, Counter, Total number of response streams for which the bandwidth limiter was enforced response_pending, GAUGE, Number of response streams which are currently pending transfer in bandwidth limiter - response_incoming_size, GAUGE, Size in bytes of incoming response data to bandwidth limiter - response_allowed_size, GAUGE, Size in bytes of outgoing response data from bandwidth limiter + response_incoming_size, Counter, Size in bytes of incoming response data to bandwidth limiter + response_allowed_size, Counter, Size in bytes of outgoing response data from bandwidth limiter response_transfer_duration, HISTOGRAM, 
Total time (including added delay) it took for the response stream transfer .. _config_http_filters_bandwidth_limit_runtime: diff --git a/docs/root/version_history/current.rst b/docs/root/version_history/current.rst index 8a3a2d6a1f08c..8f9b71b2b0e3a 100644 --- a/docs/root/version_history/current.rst +++ b/docs/root/version_history/current.rst @@ -67,6 +67,9 @@ Minor Behavior Changes information. * listener: destroy per network filter chain stats when a network filter chain is removed during the listener in-place update. * quic: enables IETF connection migration. This feature requires a stable UDP packet routine in the L4 load balancer with the same first-4-bytes in connection id. It can be turned off by setting runtime guard ``envoy.reloadable_features.FLAGS_quic_reloadable_flag_quic_connection_migration_use_new_cid_v2`` to false. +* listener: destroy per network filter chain stats when a network filter chain is removed during the listener in place update. +* quic: enables IETF connection migration. This feature requires stable UDP packet routine in the L4 load balancer with the same first-4-bytes in connection id. It can be turned off by setting runtime guard ``envoy.reloadable_features.FLAGS_quic_reloadable_flag_quic_connection_migration_use_new_cid_v2`` to false. +* bandwidth_limit: added response trailers when request or response delay are enforced. 
Bug Fixes --------- diff --git a/source/extensions/filters/http/bandwidth_limit/bandwidth_limit.cc b/source/extensions/filters/http/bandwidth_limit/bandwidth_limit.cc index 6d831d6de8903..036122b45bcbc 100644 --- a/source/extensions/filters/http/bandwidth_limit/bandwidth_limit.cc +++ b/source/extensions/filters/http/bandwidth_limit/bandwidth_limit.cc @@ -16,6 +16,11 @@ namespace Extensions { namespace HttpFilters { namespace BandwidthLimitFilter { +namespace { + const Http::LowerCaseString DefaultRequestDelayTrailer = Http::LowerCaseString("bandwidth-request-delay-ms"); + const Http::LowerCaseString DefaultResponseDelayTrailer = Http::LowerCaseString("bandwidth-response-delay-ms"); +} + FilterConfig::FilterConfig(const BandwidthLimit& config, Stats::Scope& scope, Runtime::Loader& runtime, TimeSource& time_source, bool per_route) : runtime_(runtime), time_source_(time_source), enable_mode_(config.enable_mode()), @@ -23,7 +28,11 @@ FilterConfig::FilterConfig(const BandwidthLimit& config, Stats::Scope& scope, fill_interval_(std::chrono::milliseconds(PROTOBUF_GET_MS_OR_DEFAULT( config, fill_interval, StreamRateLimiter::DefaultFillInterval.count()))), enabled_(config.runtime_enabled(), runtime), - stats_(generateStats(config.stat_prefix(), scope)) { + stats_(generateStats(config.stat_prefix(), scope)), + request_delay_trailer_(config.response_trailer_prefix().empty() ? DefaultRequestDelayTrailer + : Http::LowerCaseString(config.response_trailer_prefix() + "-" + DefaultRequestDelayTrailer.get())), + response_delay_trailer_(config.response_trailer_prefix().empty() ? 
DefaultResponseDelayTrailer + : Http::LowerCaseString(config.response_trailer_prefix() + "-" + DefaultResponseDelayTrailer.get())) { if (per_route && !config.has_limit_kbps()) { throw EnvoyException("bandwidthlimitfilter: limit must be set for per route filter config"); } @@ -64,7 +73,10 @@ Http::FilterHeadersStatus BandwidthLimiter::decodeHeaders(Http::RequestHeaderMap updateStatsOnDecodeFinish(); decoder_callbacks_->continueDecoding(); }, - [config](uint64_t len) { config.stats().request_allowed_size_.set(len); }, + [&config](uint64_t len, bool limit_enforced) { + config.stats().request_allowed_size_.add(len); + if (limit_enforced) { config.stats().request_enforced_.inc(); } + }, const_cast(&config)->timeSource(), decoder_callbacks_->dispatcher(), decoder_callbacks_->scope(), config.tokenBucket(), config.fillInterval()); } @@ -82,7 +94,7 @@ Http::FilterDataStatus BandwidthLimiter::decodeData(Buffer::Instance& data, bool const_cast(&config)->timeSource()); config.stats().request_pending_.inc(); } - config.stats().request_incoming_size_.set(data.length()); + config.stats().request_incoming_size_.add(data.length()); request_limiter_->writeData(data, end_stream); return Http::FilterDataStatus::StopIterationNoBuffer; @@ -123,7 +135,10 @@ Http::FilterHeadersStatus BandwidthLimiter::encodeHeaders(Http::ResponseHeaderMa updateStatsOnEncodeFinish(); encoder_callbacks_->continueEncoding(); }, - [config](uint64_t len) { config.stats().response_allowed_size_.set(len); }, + [&config](uint64_t len, bool limit_enforced) { + config.stats().response_allowed_size_.add(len); + if (limit_enforced) { config.stats().response_enforced_.inc(); } + }, const_cast(&config)->timeSource(), encoder_callbacks_->dispatcher(), encoder_callbacks_->scope(), config.tokenBucket(), config.fillInterval()); } @@ -135,23 +150,33 @@ Http::FilterDataStatus BandwidthLimiter::encodeData(Buffer::Instance& data, bool if (response_limiter_ != nullptr) { const auto& config = getConfig(); + // Adds encoded 
trailers. May only be called in encodeData when end_stream is set to true. + // If upstream has trailers, addEncodedTrailers won't be called + bool trailer_added = false; + if (end_stream) { + trailers = &encoder_callbacks_->addEncodedTrailers(); + trailer_added = true; + } + if (!response_latency_) { response_latency_ = std::make_unique( config.stats().response_transfer_duration_, const_cast(&config)->timeSource()); config.stats().response_pending_.inc(); } - config.stats().response_incoming_size_.set(data.length()); + config.stats().response_incoming_size_.add(data.length()); - response_limiter_->writeData(data, end_stream); + response_limiter_->writeData(data, end_stream, trailer_added); return Http::FilterDataStatus::StopIterationNoBuffer; } ENVOY_LOG(debug, "BandwidthLimiter : response_limiter not set"); return Http::FilterDataStatus::Continue; } -Http::FilterTrailersStatus BandwidthLimiter::encodeTrailers(Http::ResponseTrailerMap&) { +Http::FilterTrailersStatus BandwidthLimiter::encodeTrailers(Http::ResponseTrailerMap& responseTrailers) { if (response_limiter_ != nullptr) { + trailers = &responseTrailers; + if (response_limiter_->onTrailers()) { return Http::FilterTrailersStatus::StopIteration; } else { @@ -164,6 +189,7 @@ Http::FilterTrailersStatus BandwidthLimiter::encodeTrailers(Http::ResponseTraile void BandwidthLimiter::updateStatsOnDecodeFinish() { if (request_latency_) { + request_duration_ = request_latency_.get()->elapsed().count(); request_latency_->complete(); request_latency_.reset(); getConfig().stats().request_pending_.dec(); @@ -172,9 +198,18 @@ void BandwidthLimiter::updateStatsOnDecodeFinish() { void BandwidthLimiter::updateStatsOnEncodeFinish() { if (response_latency_) { + const auto& config = getConfig(); + + auto response_duration = response_latency_.get()->elapsed().count(); + if (trailers != nullptr && request_duration_ > 0) { + trailers->setCopy(config.request_delay_trailer(), std::to_string(request_duration_)); + } + if (trailers != 
nullptr && response_duration > 0) { + trailers->setCopy(config.response_delay_trailer(), std::to_string(response_duration)); + } response_latency_->complete(); response_latency_.reset(); - getConfig().stats().response_pending_.dec(); + config.stats().response_pending_.dec(); } } diff --git a/source/extensions/filters/http/bandwidth_limit/bandwidth_limit.h b/source/extensions/filters/http/bandwidth_limit/bandwidth_limit.h index f5bac46426425..40ba10e45ef72 100644 --- a/source/extensions/filters/http/bandwidth_limit/bandwidth_limit.h +++ b/source/extensions/filters/http/bandwidth_limit/bandwidth_limit.h @@ -32,12 +32,14 @@ namespace BandwidthLimitFilter { #define ALL_BANDWIDTH_LIMIT_STATS(COUNTER, GAUGE, HISTOGRAM) \ COUNTER(request_enabled) \ COUNTER(response_enabled) \ + COUNTER(request_enforced) \ + COUNTER(response_enforced) \ GAUGE(request_pending, Accumulate) \ GAUGE(response_pending, Accumulate) \ - GAUGE(request_incoming_size, Accumulate) \ - GAUGE(response_incoming_size, Accumulate) \ - GAUGE(request_allowed_size, Accumulate) \ - GAUGE(response_allowed_size, Accumulate) \ + COUNTER(request_incoming_size) \ + COUNTER(response_incoming_size) \ + COUNTER(request_allowed_size) \ + COUNTER(response_allowed_size) \ HISTOGRAM(request_transfer_duration, Milliseconds) \ HISTOGRAM(response_transfer_duration, Milliseconds) @@ -71,6 +73,8 @@ class FilterConfig : public ::Envoy::Router::RouteSpecificFilterConfig { EnableMode enableMode() const { return enable_mode_; }; const std::shared_ptr tokenBucket() const { return token_bucket_; } std::chrono::milliseconds fillInterval() const { return fill_interval_; } + const Http::LowerCaseString& request_delay_trailer() const { return request_delay_trailer_; } + const Http::LowerCaseString& response_delay_trailer() const { return response_delay_trailer_; } private: friend class FilterTest; @@ -86,6 +90,8 @@ class FilterConfig : public ::Envoy::Router::RouteSpecificFilterConfig { mutable BandwidthLimitStats stats_; // Filter 
chain's shared token bucket std::shared_ptr token_bucket_; + const Http::LowerCaseString request_delay_trailer_; + const Http::LowerCaseString response_delay_trailer_; }; using FilterConfigSharedPtr = std::shared_ptr; @@ -142,6 +148,8 @@ class BandwidthLimiter : public Http::StreamFilter, Logger::Loggable response_limiter_; Stats::TimespanPtr request_latency_; Stats::TimespanPtr response_latency_; + uint64_t request_duration_ = 0; + Http::ResponseTrailerMap* trailers; }; } // namespace BandwidthLimitFilter diff --git a/source/extensions/filters/http/common/stream_rate_limiter.cc b/source/extensions/filters/http/common/stream_rate_limiter.cc index 6763adbeb2414..aca234224c430 100644 --- a/source/extensions/filters/http/common/stream_rate_limiter.cc +++ b/source/extensions/filters/http/common/stream_rate_limiter.cc @@ -17,7 +17,7 @@ StreamRateLimiter::StreamRateLimiter( uint64_t max_kbps, uint64_t max_buffered_data, std::function pause_data_cb, std::function resume_data_cb, std::function write_data_cb, std::function continue_cb, - std::function write_stats_cb, TimeSource& time_source, + std::function write_stats_cb, TimeSource& time_source, Event::Dispatcher& dispatcher, const ScopeTrackedObject& scope, std::shared_ptr token_bucket, std::chrono::milliseconds fill_interval) : fill_interval_(std::move(fill_interval)), write_data_cb_(write_data_cb), @@ -63,7 +63,7 @@ void StreamRateLimiter::onTokenTimer() { // Move the data to write into the output buffer with as little copying as possible. // NOTE: This might be moving zero bytes, but that should work fine. data_to_write.move(buffer_, bytes_to_write); - write_stats_cb_(bytes_to_write); + write_stats_cb_(bytes_to_write, buffer_.length() > 0); // If the buffer still contains data in it, we couldn't get enough tokens, so schedule the next // token available time. 
@@ -88,10 +88,17 @@ void StreamRateLimiter::onTokenTimer() { } } -void StreamRateLimiter::writeData(Buffer::Instance& incoming_buffer, bool end_stream) { +void StreamRateLimiter::writeData(Buffer::Instance& incoming_buffer, bool end_stream, bool trailer_added) { auto len = incoming_buffer.length(); buffer_.move(incoming_buffer); saw_end_stream_ = end_stream; + // If trailer_added is true, set saw_trailers_ to true to continue encode trailers, added + // after buffer_.move to ensure buffer has data and won't invoke continue_cb_ before + // processing the data in last data frame. + if (trailer_added) { + saw_trailers_ = true; + } + ENVOY_LOG(debug, "StreamRateLimiter : got new {} bytes of data. token " "timer {} scheduled.", diff --git a/source/extensions/filters/http/common/stream_rate_limiter.h b/source/extensions/filters/http/common/stream_rate_limiter.h index b8aed9ac8a72a..84035c26a6c0a 100644 --- a/source/extensions/filters/http/common/stream_rate_limiter.h +++ b/source/extensions/filters/http/common/stream_rate_limiter.h @@ -49,7 +49,8 @@ class StreamRateLimiter : Logger::Loggable { StreamRateLimiter(uint64_t max_kbps, uint64_t max_buffered_data, std::function pause_data_cb, std::function resume_data_cb, std::function write_data_cb, - std::function continue_cb, std::function write_stats_cb, + std::function continue_cb, + std::function write_stats_cb, TimeSource& time_source, Event::Dispatcher& dispatcher, const ScopeTrackedObject& scope, std::shared_ptr token_bucket = nullptr, @@ -59,7 +60,7 @@ class StreamRateLimiter : Logger::Loggable { * Called by the stream to write data. All data writes happen asynchronously, the stream should * be stopped after this call (all data will be drained from incoming_buffer). */ - void writeData(Buffer::Instance& incoming_buffer, bool end_stream); + void writeData(Buffer::Instance& incoming_buffer, bool end_stream, bool trailer_added = false); /** * Called if the stream receives trailers. 
@@ -83,7 +84,7 @@ class StreamRateLimiter : Logger::Loggable { const std::chrono::milliseconds fill_interval_; const std::function write_data_cb_; const std::function continue_cb_; - const std::function write_stats_cb_; + const std::function write_stats_cb_; const ScopeTrackedObject& scope_; std::shared_ptr token_bucket_; Event::TimerPtr token_timer_; diff --git a/source/extensions/filters/http/fault/fault_filter.cc b/source/extensions/filters/http/fault/fault_filter.cc index 7499e47dd5b8c..809b396b2b0fc 100644 --- a/source/extensions/filters/http/fault/fault_filter.cc +++ b/source/extensions/filters/http/fault/fault_filter.cc @@ -212,7 +212,7 @@ void FaultFilter::maybeSetupResponseRateLimit(const Http::RequestHeaderMap& requ encoder_callbacks_->injectEncodedDataToFilterChain(data, end_stream); }, [this] { encoder_callbacks_->continueEncoding(); }, - [](uint64_t) { + [](uint64_t, bool) { // write stats callback. }, config_->timeSource(), decoder_callbacks_->dispatcher(), decoder_callbacks_->scope()); diff --git a/test/extensions/filters/http/bandwidth_limit/config_test.cc b/test/extensions/filters/http/bandwidth_limit/config_test.cc index b98ea7ca4d67f..6c173ad17a5bc 100644 --- a/test/extensions/filters/http/bandwidth_limit/config_test.cc +++ b/test/extensions/filters/http/bandwidth_limit/config_test.cc @@ -38,6 +38,7 @@ TEST(Factory, RouteSpecificFilterConfig) { enable_mode: REQUEST_AND_RESPONSE limit_kbps: 10 fill_interval: 0.1s + response_trailer_prefix: test )"; BandwidthLimitFilterConfig factory; @@ -54,6 +55,8 @@ TEST(Factory, RouteSpecificFilterConfig) { EXPECT_EQ(config->fillInterval().count(), 100); EXPECT_EQ(config->enableMode(), EnableMode::BandwidthLimit_EnableMode_REQUEST_AND_RESPONSE); EXPECT_FALSE(config->tokenBucket() == nullptr); + EXPECT_EQ(const_cast(config)->request_delay_trailer(), Http::LowerCaseString("test-bandwidth-request-delay-ms")); + EXPECT_EQ(const_cast(config)->response_delay_trailer(), 
Http::LowerCaseString("test-bandwidth-response-delay-ms")); } TEST(Factory, RouteSpecificFilterConfigDisabledByDefault) { @@ -97,6 +100,9 @@ TEST(Factory, RouteSpecificFilterConfigDefaultFillInterval) { const auto* config = dynamic_cast(route_config.get()); EXPECT_EQ(config->limit(), 10); EXPECT_EQ(config->fillInterval().count(), 50); + //default trailers + EXPECT_EQ(const_cast(config)->request_delay_trailer(), Http::LowerCaseString("bandwidth-request-delay-ms")); + EXPECT_EQ(const_cast(config)->response_delay_trailer(), Http::LowerCaseString("bandwidth-response-delay-ms")); } TEST(Factory, PerRouteConfigNoLimits) { diff --git a/test/extensions/filters/http/bandwidth_limit/filter_test.cc b/test/extensions/filters/http/bandwidth_limit/filter_test.cc index daffe9076ea34..3977f0fb5ad3d 100644 --- a/test/extensions/filters/http/bandwidth_limit/filter_test.cc +++ b/test/extensions/filters/http/bandwidth_limit/filter_test.cc @@ -11,6 +11,7 @@ using testing::_; using testing::AnyNumber; using testing::NiceMock; using testing::Return; +using testing::ReturnRef; namespace Envoy { namespace Extensions { @@ -57,6 +58,7 @@ class FilterTest : public testing::Test { Http::TestResponseTrailerMapImpl response_trailers_; Buffer::OwnedImpl data_; Event::SimulatedTimeSystem time_system_; + Http::TestResponseTrailerMapImpl trailers_; }; TEST_F(FilterTest, Disabled) { @@ -75,11 +77,14 @@ TEST_F(FilterTest, Disabled) { EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(data_, false)); EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(request_trailers_)); EXPECT_EQ(0U, findCounter("test.http_bandwidth_limit.request_enabled")); + EXPECT_EQ(0U, findCounter("test.http_bandwidth_limit.request_enforced")); EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->encodeHeaders(response_headers_, false)); EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->encodeData(data_, false)); EXPECT_EQ(Http::FilterTrailersStatus::Continue, 
filter_->encodeTrailers(response_trailers_)); EXPECT_EQ(0U, findCounter("test.http_bandwidth_limit.response_enabled")); + EXPECT_EQ(false, response_trailers_.has("bandwidth-request-delay-ms")); + EXPECT_EQ(false, response_trailers_.has("bandwidth-response-delay-ms")); } TEST_F(FilterTest, LimitOnDecode) { @@ -90,6 +95,7 @@ TEST_F(FilterTest, LimitOnDecode) { runtime_key: foo_key enable_mode: REQUEST limit_kbps: 1 + response_trailer_prefix: test )"; setup(fmt::format(config_yaml, "1")); @@ -107,11 +113,12 @@ TEST_F(FilterTest, LimitOnDecode) { EXPECT_CALL(*token_timer, enableTimer(std::chrono::milliseconds(0), _)); EXPECT_EQ(Http::FilterDataStatus::StopIterationNoBuffer, filter_->decodeData(data1, false)); EXPECT_EQ(1, findGauge("test.http_bandwidth_limit.request_pending")); - EXPECT_EQ(5, findGauge("test.http_bandwidth_limit.request_incoming_size")); + EXPECT_EQ(5, findCounter("test.http_bandwidth_limit.request_incoming_size")); EXPECT_CALL(decoder_filter_callbacks_, injectDecodedDataToFilterChain(BufferStringEqual("hello"), false)); token_timer->invokeCallback(); - EXPECT_EQ(5, findGauge("test.http_bandwidth_limit.request_allowed_size")); + EXPECT_EQ(0, findCounter("test.http_bandwidth_limit.request_enforced")); + EXPECT_EQ(5, findCounter("test.http_bandwidth_limit.request_allowed_size")); // Advance time by 1s which should refill all tokens. 
time_system_.advanceTimeWait(std::chrono::seconds(1)); @@ -122,15 +129,16 @@ TEST_F(FilterTest, LimitOnDecode) { Buffer::OwnedImpl data2(std::string(1126, 'a')); EXPECT_EQ(Http::FilterDataStatus::StopIterationNoBuffer, filter_->decodeData(data2, false)); EXPECT_EQ(1, findGauge("test.http_bandwidth_limit.request_pending")); - EXPECT_EQ(1126, findGauge("test.http_bandwidth_limit.request_incoming_size")); + EXPECT_EQ(1131, findCounter("test.http_bandwidth_limit.request_incoming_size")); EXPECT_CALL(*token_timer, enableTimer(std::chrono::milliseconds(50), _)); EXPECT_CALL(decoder_filter_callbacks_, onDecoderFilterBelowWriteBufferLowWatermark()); EXPECT_CALL(decoder_filter_callbacks_, injectDecodedDataToFilterChain(BufferStringEqual(std::string(1024, 'a')), false)); token_timer->invokeCallback(); - EXPECT_EQ(1024, findGauge("test.http_bandwidth_limit.request_allowed_size")); - EXPECT_EQ(1126, findGauge("test.http_bandwidth_limit.request_incoming_size")); + EXPECT_EQ(1, findCounter("test.http_bandwidth_limit.request_enforced")); + EXPECT_EQ(1029, findCounter("test.http_bandwidth_limit.request_allowed_size")); + EXPECT_EQ(1131, findCounter("test.http_bandwidth_limit.request_incoming_size")); // Fire timer, also advance time. time_system_.advanceTimeWait(std::chrono::milliseconds(50)); @@ -138,14 +146,15 @@ TEST_F(FilterTest, LimitOnDecode) { EXPECT_CALL(decoder_filter_callbacks_, injectDecodedDataToFilterChain(BufferStringEqual(std::string(51, 'a')), false)); token_timer->invokeCallback(); - EXPECT_EQ(51, findGauge("test.http_bandwidth_limit.request_allowed_size")); - EXPECT_EQ(1126, findGauge("test.http_bandwidth_limit.request_incoming_size")); + EXPECT_EQ(2, findCounter("test.http_bandwidth_limit.request_enforced")); + EXPECT_EQ(1080, findCounter("test.http_bandwidth_limit.request_allowed_size")); + EXPECT_EQ(1131, findCounter("test.http_bandwidth_limit.request_incoming_size")); // Get new data with current data buffered, not end_stream. 
Buffer::OwnedImpl data3(std::string(51, 'b')); EXPECT_EQ(Http::FilterDataStatus::StopIterationNoBuffer, filter_->decodeData(data3, false)); EXPECT_EQ(1, findGauge("test.http_bandwidth_limit.request_pending")); - EXPECT_EQ(51, findGauge("test.http_bandwidth_limit.request_incoming_size")); + EXPECT_EQ(1182, findCounter("test.http_bandwidth_limit.request_incoming_size")); // Fire timer, also advance time. time_system_.advanceTimeWait(std::chrono::milliseconds(50)); @@ -153,7 +162,8 @@ TEST_F(FilterTest, LimitOnDecode) { EXPECT_CALL(decoder_filter_callbacks_, injectDecodedDataToFilterChain(BufferStringEqual(std::string(51, 'a')), false)); token_timer->invokeCallback(); - EXPECT_EQ(51, findGauge("test.http_bandwidth_limit.request_allowed_size")); + EXPECT_EQ(3, findCounter("test.http_bandwidth_limit.request_enforced")); + EXPECT_EQ(1131, findCounter("test.http_bandwidth_limit.request_allowed_size")); // Fire timer, also advance time. No timer enable because there is nothing // buffered. @@ -161,7 +171,8 @@ TEST_F(FilterTest, LimitOnDecode) { EXPECT_CALL(decoder_filter_callbacks_, injectDecodedDataToFilterChain(BufferStringEqual(std::string(51, 'b')), false)); token_timer->invokeCallback(); - EXPECT_EQ(51, findGauge("test.http_bandwidth_limit.request_allowed_size")); + EXPECT_EQ(3, findCounter("test.http_bandwidth_limit.request_enforced")); + EXPECT_EQ(1182, findCounter("test.http_bandwidth_limit.request_allowed_size")); // Advance time by 1s for a full refill. 
time_system_.advanceTimeWait(std::chrono::seconds(1)); @@ -171,12 +182,15 @@ TEST_F(FilterTest, LimitOnDecode) { EXPECT_CALL(*token_timer, enableTimer(std::chrono::milliseconds(0), _)); Buffer::OwnedImpl data4(std::string(1024, 'c')); EXPECT_EQ(Http::FilterDataStatus::StopIterationNoBuffer, filter_->decodeData(data4, true)); - EXPECT_EQ(1024, findGauge("test.http_bandwidth_limit.request_incoming_size")); + EXPECT_EQ(2206, findCounter("test.http_bandwidth_limit.request_incoming_size")); EXPECT_CALL(decoder_filter_callbacks_, injectDecodedDataToFilterChain(BufferStringEqual(std::string(1024, 'c')), true)); token_timer->invokeCallback(); - EXPECT_EQ(1024, findGauge("test.http_bandwidth_limit.request_allowed_size")); + EXPECT_EQ(3, findCounter("test.http_bandwidth_limit.request_enforced")); + EXPECT_EQ(2206, findCounter("test.http_bandwidth_limit.request_allowed_size")); EXPECT_EQ(0, findGauge("test.http_bandwidth_limit.request_pending")); + EXPECT_EQ(false, response_trailers_.has("test-bandwidth-request-delay-ms")); + EXPECT_EQ(false, response_trailers_.has("test-bandwidth-response-delay-ms")); filter_->onDestroy(); } @@ -189,10 +203,12 @@ TEST_F(FilterTest, LimitOnEncode) { runtime_key: foo_key enable_mode: RESPONSE limit_kbps: 1 + response_trailer_prefix: test )"; setup(fmt::format(config_yaml, "1")); ON_CALL(encoder_filter_callbacks_, encoderBufferLimit()).WillByDefault(Return(1100)); + ON_CALL(encoder_filter_callbacks_, addEncodedTrailers()).WillByDefault(ReturnRef(trailers_)); Event::MockTimer* token_timer = new NiceMock(&encoder_filter_callbacks_.dispatcher_); @@ -211,11 +227,12 @@ TEST_F(FilterTest, LimitOnEncode) { EXPECT_CALL(*token_timer, enableTimer(std::chrono::milliseconds(0), _)); EXPECT_EQ(Http::FilterDataStatus::StopIterationNoBuffer, filter_->encodeData(data1, false)); EXPECT_EQ(1, findGauge("test.http_bandwidth_limit.response_pending")); - EXPECT_EQ(5, findGauge("test.http_bandwidth_limit.response_incoming_size")); + EXPECT_EQ(5, 
findCounter("test.http_bandwidth_limit.response_incoming_size")); EXPECT_CALL(encoder_filter_callbacks_, injectEncodedDataToFilterChain(BufferStringEqual("hello"), false)); token_timer->invokeCallback(); - EXPECT_EQ(5, findGauge("test.http_bandwidth_limit.response_allowed_size")); + EXPECT_EQ(0, findCounter("test.http_bandwidth_limit.response_enforced")); + EXPECT_EQ(5, findCounter("test.http_bandwidth_limit.response_allowed_size")); // Advance time by 1s which should refill all tokens. time_system_.advanceTimeWait(std::chrono::seconds(1)); @@ -225,7 +242,7 @@ TEST_F(FilterTest, LimitOnEncode) { EXPECT_CALL(*token_timer, enableTimer(std::chrono::milliseconds(0), _)); Buffer::OwnedImpl data2(std::string(1126, 'a')); EXPECT_EQ(Http::FilterDataStatus::StopIterationNoBuffer, filter_->encodeData(data2, false)); - EXPECT_EQ(1126, findGauge("test.http_bandwidth_limit.response_incoming_size")); + EXPECT_EQ(1131, findCounter("test.http_bandwidth_limit.response_incoming_size")); EXPECT_CALL(*token_timer, enableTimer(std::chrono::milliseconds(50), _)); EXPECT_CALL(encoder_filter_callbacks_, onEncoderFilterBelowWriteBufferLowWatermark()); @@ -233,8 +250,9 @@ TEST_F(FilterTest, LimitOnEncode) { injectEncodedDataToFilterChain(BufferStringEqual(std::string(1024, 'a')), false)); token_timer->invokeCallback(); EXPECT_EQ(1, findGauge("test.http_bandwidth_limit.response_pending")); - EXPECT_EQ(1126, findGauge("test.http_bandwidth_limit.response_incoming_size")); - EXPECT_EQ(1024, findGauge("test.http_bandwidth_limit.response_allowed_size")); + EXPECT_EQ(1, findCounter("test.http_bandwidth_limit.response_enforced")); + EXPECT_EQ(1131, findCounter("test.http_bandwidth_limit.response_incoming_size")); + EXPECT_EQ(1029, findCounter("test.http_bandwidth_limit.response_allowed_size")); // Fire timer, also advance time. 
time_system_.advanceTimeWait(std::chrono::milliseconds(50)); @@ -242,7 +260,8 @@ TEST_F(FilterTest, LimitOnEncode) { EXPECT_CALL(encoder_filter_callbacks_, injectEncodedDataToFilterChain(BufferStringEqual(std::string(51, 'a')), false)); token_timer->invokeCallback(); - EXPECT_EQ(51, findGauge("test.http_bandwidth_limit.response_allowed_size")); + EXPECT_EQ(2, findCounter("test.http_bandwidth_limit.response_enforced")); + EXPECT_EQ(1080, findCounter("test.http_bandwidth_limit.response_allowed_size")); // Get new data with current data buffered, not end_stream. Buffer::OwnedImpl data3(std::string(51, 'b')); @@ -254,7 +273,8 @@ TEST_F(FilterTest, LimitOnEncode) { EXPECT_CALL(encoder_filter_callbacks_, injectEncodedDataToFilterChain(BufferStringEqual(std::string(51, 'a')), false)); token_timer->invokeCallback(); - EXPECT_EQ(51, findGauge("test.http_bandwidth_limit.response_allowed_size")); + EXPECT_EQ(3, findCounter("test.http_bandwidth_limit.response_enforced")); + EXPECT_EQ(1131, findCounter("test.http_bandwidth_limit.response_allowed_size")); // Fire timer, also advance time. No time enable because there is nothing // buffered. @@ -262,7 +282,8 @@ TEST_F(FilterTest, LimitOnEncode) { EXPECT_CALL(encoder_filter_callbacks_, injectEncodedDataToFilterChain(BufferStringEqual(std::string(51, 'b')), false)); token_timer->invokeCallback(); - EXPECT_EQ(51, findGauge("test.http_bandwidth_limit.response_allowed_size")); + EXPECT_EQ(3, findCounter("test.http_bandwidth_limit.response_enforced")); + EXPECT_EQ(1182, findCounter("test.http_bandwidth_limit.response_allowed_size")); // Advance time by 1s for a full refill. 
time_system_.advanceTimeWait(std::chrono::seconds(1)); @@ -272,12 +293,16 @@ TEST_F(FilterTest, LimitOnEncode) { EXPECT_CALL(*token_timer, enableTimer(std::chrono::milliseconds(0), _)); Buffer::OwnedImpl data4(std::string(1024, 'c')); EXPECT_EQ(Http::FilterDataStatus::StopIterationNoBuffer, filter_->encodeData(data4, true)); - EXPECT_EQ(1024, findGauge("test.http_bandwidth_limit.response_incoming_size")); + EXPECT_EQ(2206, findCounter("test.http_bandwidth_limit.response_incoming_size")); EXPECT_CALL(encoder_filter_callbacks_, - injectEncodedDataToFilterChain(BufferStringEqual(std::string(1024, 'c')), true)); + injectEncodedDataToFilterChain(BufferStringEqual(std::string(1024, 'c')), false)); token_timer->invokeCallback(); EXPECT_EQ(0, findGauge("test.http_bandwidth_limit.response_pending")); - EXPECT_EQ(1024, findGauge("test.http_bandwidth_limit.response_allowed_size")); + EXPECT_EQ(3, findCounter("test.http_bandwidth_limit.response_enforced")); + EXPECT_EQ(2206, findCounter("test.http_bandwidth_limit.response_allowed_size")); + + EXPECT_EQ(false, response_trailers_.has("test-bandwidth-request-delay-ms")); + EXPECT_EQ("2150", trailers_.get_("test-bandwidth-response-delay-ms")); filter_->onDestroy(); } @@ -290,11 +315,13 @@ TEST_F(FilterTest, LimitOnDecodeAndEncode) { runtime_key: foo_key enable_mode: REQUEST_AND_RESPONSE limit_kbps: 1 + response_trailer_prefix: test )"; setup(fmt::format(config_yaml, "1")); ON_CALL(decoder_filter_callbacks_, decoderBufferLimit()).WillByDefault(Return(1050)); ON_CALL(encoder_filter_callbacks_, encoderBufferLimit()).WillByDefault(Return(1100)); + ON_CALL(encoder_filter_callbacks_, addEncodedTrailers()).WillByDefault(ReturnRef(trailers_)); Event::MockTimer* request_timer = new NiceMock(&decoder_filter_callbacks_.dispatcher_); Event::MockTimer* response_timer = @@ -403,9 +430,13 @@ TEST_F(FilterTest, LimitOnDecodeAndEncode) { EXPECT_CALL(decoder_filter_callbacks_, injectDecodedDataToFilterChain(BufferStringEqual(std::string(51, 'd')), 
true)); EXPECT_CALL(encoder_filter_callbacks_, - injectEncodedDataToFilterChain(BufferStringEqual(std::string(960, 'e')), true)); - response_timer->invokeCallback(); + injectEncodedDataToFilterChain(BufferStringEqual(std::string(960, 'e')), false)); + EXPECT_CALL(encoder_filter_callbacks_, continueEncoding()); + request_timer->invokeCallback(); + response_timer->invokeCallback(); + EXPECT_EQ("2200", trailers_.get_("test-bandwidth-request-delay-ms")); + EXPECT_EQ("2200", trailers_.get_("test-bandwidth-response-delay-ms")); filter_->onDestroy(); } @@ -418,6 +449,7 @@ TEST_F(FilterTest, WithTrailers) { runtime_key: foo_key enable_mode: REQUEST_AND_RESPONSE limit_kbps: 1 + response_trailer_prefix: test )"; setup(fmt::format(config_yaml, "1")); @@ -479,6 +511,9 @@ TEST_F(FilterTest, WithTrailers) { injectEncodedDataToFilterChain(BufferStringEqual(std::string(5, 'e')), false)); response_timer->invokeCallback(); EXPECT_EQ(0, findGauge("test.http_bandwidth_limit.response_pending")); + + EXPECT_EQ("50", response_trailers_.get_("test-bandwidth-request-delay-ms")); + EXPECT_EQ("150", response_trailers_.get_("test-bandwidth-response-delay-ms")); } TEST_F(FilterTest, WithTrailersNoEndStream) { @@ -550,6 +585,9 @@ TEST_F(FilterTest, WithTrailersNoEndStream) { EXPECT_EQ(1, findGauge("test.http_bandwidth_limit.response_pending")); EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->encodeTrailers(response_trailers_)); EXPECT_EQ(0, findGauge("test.http_bandwidth_limit.response_pending")); + + EXPECT_EQ("50", response_trailers_.get_("bandwidth-request-delay-ms")); + EXPECT_EQ("150", response_trailers_.get_("bandwidth-response-delay-ms")); } } // namespace BandwidthLimitFilter diff --git a/test/extensions/filters/http/common/stream_rate_limiter_test.cc b/test/extensions/filters/http/common/stream_rate_limiter_test.cc index 97f18f5957ca1..7fc0aeca1c66a 100644 --- a/test/extensions/filters/http/common/stream_rate_limiter_test.cc +++ 
b/test/extensions/filters/http/common/stream_rate_limiter_test.cc @@ -40,7 +40,7 @@ class StreamRateLimiterTest : public testing::Test { decoder_callbacks_.injectDecodedDataToFilterChain(data, end_stream); }, [this] { decoder_callbacks_.continueDecoding(); }, - [](uint64_t /*len*/) { + [](uint64_t /*len*/, bool) { // config->stats().decode_allowed_size_.set(len); }, time_system_, decoder_callbacks_.dispatcher_, decoder_callbacks_.scope(), token_bucket, @@ -59,7 +59,7 @@ class StreamRateLimiterTest : public testing::Test { decoder_callbacks_.injectDecodedDataToFilterChain(data, end_stream); }, [this] { decoder_callbacks_.continueDecoding(); }, - [](uint64_t /*len*/) { + [](uint64_t /*len*/, bool) { // config->stats().decode_allowed_size_.set(len); }, time_system_, decoder_callbacks_.dispatcher_, decoder_callbacks_.scope()); From c5c25d19f1493f10c3513ed62be1c50ba188630d Mon Sep 17 00:00:00 2001 From: gayang Date: Sun, 26 Sep 2021 14:35:41 +0000 Subject: [PATCH 121/121] fix format Signed-off-by: gayang --- .../http/bandwidth_limit/bandwidth_limit.cc | 37 ++++++++++++------- .../http/common/stream_rate_limiter.cc | 7 ++-- .../filters/http/common/stream_rate_limiter.h | 7 ++-- .../http/bandwidth_limit/config_test.cc | 14 ++++--- 4 files changed, 40 insertions(+), 25 deletions(-) diff --git a/source/extensions/filters/http/bandwidth_limit/bandwidth_limit.cc b/source/extensions/filters/http/bandwidth_limit/bandwidth_limit.cc index 036122b45bcbc..89c34b5b31a51 100644 --- a/source/extensions/filters/http/bandwidth_limit/bandwidth_limit.cc +++ b/source/extensions/filters/http/bandwidth_limit/bandwidth_limit.cc @@ -17,9 +17,11 @@ namespace HttpFilters { namespace BandwidthLimitFilter { namespace { - const Http::LowerCaseString DefaultRequestDelayTrailer = Http::LowerCaseString("bandwidth-request-delay-ms"); - const Http::LowerCaseString DefaultResponseDelayTrailer = Http::LowerCaseString("bandwidth-response-delay-ms"); -} +const Http::LowerCaseString 
DefaultRequestDelayTrailer = + Http::LowerCaseString("bandwidth-request-delay-ms"); +const Http::LowerCaseString DefaultResponseDelayTrailer = + Http::LowerCaseString("bandwidth-response-delay-ms"); +} // namespace FilterConfig::FilterConfig(const BandwidthLimit& config, Stats::Scope& scope, Runtime::Loader& runtime, TimeSource& time_source, bool per_route) @@ -29,10 +31,14 @@ FilterConfig::FilterConfig(const BandwidthLimit& config, Stats::Scope& scope, config, fill_interval, StreamRateLimiter::DefaultFillInterval.count()))), enabled_(config.runtime_enabled(), runtime), stats_(generateStats(config.stat_prefix(), scope)), - request_delay_trailer_(config.response_trailer_prefix().empty() ? DefaultRequestDelayTrailer - : Http::LowerCaseString(config.response_trailer_prefix() + "-" + DefaultRequestDelayTrailer.get())), - response_delay_trailer_(config.response_trailer_prefix().empty() ? DefaultResponseDelayTrailer - : Http::LowerCaseString(config.response_trailer_prefix() + "-" + DefaultResponseDelayTrailer.get())) { + request_delay_trailer_(config.response_trailer_prefix().empty() + ? DefaultRequestDelayTrailer + : Http::LowerCaseString(config.response_trailer_prefix() + "-" + + DefaultRequestDelayTrailer.get())), + response_delay_trailer_(config.response_trailer_prefix().empty() + ? 
DefaultResponseDelayTrailer + : Http::LowerCaseString(config.response_trailer_prefix() + "-" + + DefaultResponseDelayTrailer.get())) { if (per_route && !config.has_limit_kbps()) { throw EnvoyException("bandwidthlimitfilter: limit must be set for per route filter config"); } @@ -73,9 +79,11 @@ Http::FilterHeadersStatus BandwidthLimiter::decodeHeaders(Http::RequestHeaderMap updateStatsOnDecodeFinish(); decoder_callbacks_->continueDecoding(); }, - [&config](uint64_t len, bool limit_enforced) { + [&config](uint64_t len, bool limit_enforced) { config.stats().request_allowed_size_.add(len); - if (limit_enforced) { config.stats().request_enforced_.inc(); } + if (limit_enforced) { + config.stats().request_enforced_.inc(); + } }, const_cast(&config)->timeSource(), decoder_callbacks_->dispatcher(), decoder_callbacks_->scope(), config.tokenBucket(), config.fillInterval()); @@ -135,9 +143,11 @@ Http::FilterHeadersStatus BandwidthLimiter::encodeHeaders(Http::ResponseHeaderMa updateStatsOnEncodeFinish(); encoder_callbacks_->continueEncoding(); }, - [&config](uint64_t len, bool limit_enforced) { + [&config](uint64_t len, bool limit_enforced) { config.stats().response_allowed_size_.add(len); - if (limit_enforced) { config.stats().response_enforced_.inc(); } + if (limit_enforced) { + config.stats().response_enforced_.inc(); + } }, const_cast(&config)->timeSource(), encoder_callbacks_->dispatcher(), encoder_callbacks_->scope(), config.tokenBucket(), config.fillInterval()); @@ -173,7 +183,8 @@ Http::FilterDataStatus BandwidthLimiter::encodeData(Buffer::Instance& data, bool return Http::FilterDataStatus::Continue; } -Http::FilterTrailersStatus BandwidthLimiter::encodeTrailers(Http::ResponseTrailerMap& responseTrailers) { +Http::FilterTrailersStatus +BandwidthLimiter::encodeTrailers(Http::ResponseTrailerMap& responseTrailers) { if (response_limiter_ != nullptr) { trailers = &responseTrailers; @@ -199,7 +210,7 @@ void BandwidthLimiter::updateStatsOnDecodeFinish() { void 
BandwidthLimiter::updateStatsOnEncodeFinish() { if (response_latency_) { const auto& config = getConfig(); - + auto response_duration = response_latency_.get()->elapsed().count(); if (trailers != nullptr && request_duration_ > 0) { trailers->setCopy(config.request_delay_trailer(), std::to_string(request_duration_)); diff --git a/source/extensions/filters/http/common/stream_rate_limiter.cc b/source/extensions/filters/http/common/stream_rate_limiter.cc index aca234224c430..a4e215a5f1d03 100644 --- a/source/extensions/filters/http/common/stream_rate_limiter.cc +++ b/source/extensions/filters/http/common/stream_rate_limiter.cc @@ -88,17 +88,18 @@ void StreamRateLimiter::onTokenTimer() { } } -void StreamRateLimiter::writeData(Buffer::Instance& incoming_buffer, bool end_stream, bool trailer_added) { +void StreamRateLimiter::writeData(Buffer::Instance& incoming_buffer, bool end_stream, + bool trailer_added) { auto len = incoming_buffer.length(); buffer_.move(incoming_buffer); saw_end_stream_ = end_stream; - // If trailer_added is true, set saw_trailers_ to true to continue encode trailers, added + // If trailer_added is true, set saw_trailers_ to true to continue encode trailers, added // after buffer_.move to ensure buffer has data and won't invoke continue_cb_ before // processing the data in last data frame. if (trailer_added) { saw_trailers_ = true; } - + ENVOY_LOG(debug, "StreamRateLimiter : got new {} bytes of data. 
token " "timer {} scheduled.", diff --git a/source/extensions/filters/http/common/stream_rate_limiter.h b/source/extensions/filters/http/common/stream_rate_limiter.h index 84035c26a6c0a..42b6c21015f91 100644 --- a/source/extensions/filters/http/common/stream_rate_limiter.h +++ b/source/extensions/filters/http/common/stream_rate_limiter.h @@ -49,10 +49,9 @@ class StreamRateLimiter : Logger::Loggable { StreamRateLimiter(uint64_t max_kbps, uint64_t max_buffered_data, std::function pause_data_cb, std::function resume_data_cb, std::function write_data_cb, - std::function continue_cb, - std::function write_stats_cb, - TimeSource& time_source, Event::Dispatcher& dispatcher, - const ScopeTrackedObject& scope, + std::function continue_cb, + std::function write_stats_cb, TimeSource& time_source, + Event::Dispatcher& dispatcher, const ScopeTrackedObject& scope, std::shared_ptr token_bucket = nullptr, std::chrono::milliseconds fill_interval = DefaultFillInterval); diff --git a/test/extensions/filters/http/bandwidth_limit/config_test.cc b/test/extensions/filters/http/bandwidth_limit/config_test.cc index 6c173ad17a5bc..e3a16aa07a9b8 100644 --- a/test/extensions/filters/http/bandwidth_limit/config_test.cc +++ b/test/extensions/filters/http/bandwidth_limit/config_test.cc @@ -55,8 +55,10 @@ TEST(Factory, RouteSpecificFilterConfig) { EXPECT_EQ(config->fillInterval().count(), 100); EXPECT_EQ(config->enableMode(), EnableMode::BandwidthLimit_EnableMode_REQUEST_AND_RESPONSE); EXPECT_FALSE(config->tokenBucket() == nullptr); - EXPECT_EQ(const_cast(config)->request_delay_trailer(), Http::LowerCaseString("test-bandwidth-request-delay-ms")); - EXPECT_EQ(const_cast(config)->response_delay_trailer(), Http::LowerCaseString("test-bandwidth-response-delay-ms")); + EXPECT_EQ(const_cast(config)->request_delay_trailer(), + Http::LowerCaseString("test-bandwidth-request-delay-ms")); + EXPECT_EQ(const_cast(config)->response_delay_trailer(), + Http::LowerCaseString("test-bandwidth-response-delay-ms")); 
} TEST(Factory, RouteSpecificFilterConfigDisabledByDefault) { @@ -100,9 +102,11 @@ TEST(Factory, RouteSpecificFilterConfigDefaultFillInterval) { const auto* config = dynamic_cast(route_config.get()); EXPECT_EQ(config->limit(), 10); EXPECT_EQ(config->fillInterval().count(), 50); - //default trailers - EXPECT_EQ(const_cast(config)->request_delay_trailer(), Http::LowerCaseString("bandwidth-request-delay-ms")); - EXPECT_EQ(const_cast(config)->response_delay_trailer(), Http::LowerCaseString("bandwidth-response-delay-ms")); + // default trailers + EXPECT_EQ(const_cast(config)->request_delay_trailer(), + Http::LowerCaseString("bandwidth-request-delay-ms")); + EXPECT_EQ(const_cast(config)->response_delay_trailer(), + Http::LowerCaseString("bandwidth-response-delay-ms")); } TEST(Factory, PerRouteConfigNoLimits) {