http: fixing a bug in the router filter when logging response headers to upstream access logs (#6785)

Risk Level: Medium (will have a perf effect for folks using access logs)
Testing: regression integration test
Docs Changes: n/a
Release Notes: n/a
Fixes #6744

Signed-off-by: Alyssa Wilk <[email protected]>
alyssawilk authored May 3, 2019
1 parent 281fa1a commit 3e1aa6d
Showing 5 changed files with 74 additions and 6 deletions.
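
For context on the router.cc hunks below: before this change, UpstreamRequest stashed raw Http::HeaderMap* pointers taken from a HeaderMapPtr whose ownership was then handed off, so the pointers could be stale by the time the upstream access logs ran in the UpstreamRequest destructor (for example when a gRPC local reply replaced the upstream response). The sketch below is a minimal, hypothetical illustration of that hazard and of the owned-copy pattern the commit adopts; the HeaderMap, UpstreamRequest, and onHeaders names are illustrative stand-ins rather than Envoy's actual types or APIs, and the copy is guarded by a logging flag just as the real change guards it with a check on upstream_logs_.

#include <iostream>
#include <memory>
#include <string>
#include <utility>
#include <vector>

// Illustrative stand-in for Envoy's Http::HeaderMap / Http::HeaderMapPtr.
using HeaderMap = std::vector<std::pair<std::string, std::string>>;
using HeaderMapPtr = std::unique_ptr<HeaderMap>;

struct UpstreamRequest {
  bool upstream_logging_enabled{true};
  // Before the fix: HeaderMap* upstream_headers{};  // raw pointer, may dangle.
  HeaderMapPtr upstream_headers;  // After the fix: an owned copy.

  void onHeaders(HeaderMapPtr&& headers) {
    // Buggy pattern: upstream_headers = headers.get(); then std::move(headers)
    // hands ownership elsewhere, and the map can be destroyed before logging.
    if (upstream_logging_enabled) {
      upstream_headers = std::make_unique<HeaderMap>(*headers);  // safe copy
    }
    HeaderMapPtr consumed = std::move(headers);
    consumed.reset();  // e.g. a local reply replaces the upstream response
  }

  ~UpstreamRequest() {
    // Upstream access logging runs here; with the raw pointer this would read
    // freed memory, with the owned copy it is safe.
    if (upstream_headers != nullptr) {
      for (const auto& header : *upstream_headers) {
        std::cout << header.first << ": " << header.second << "\n";
      }
    }
  }
};

int main() {
  UpstreamRequest request;
  request.onHeaders(std::make_unique<HeaderMap>(HeaderMap{{":status", "200"}}));
  return 0;  // destructor logs from the owned copy
}

Guarding the copy on whether upstream access logs are configured is what keeps the performance cost mentioned in the Risk Level note limited to deployments that actually use upstream_log.
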
1 change: 1 addition & 0 deletions docs/root/intro/version_history.rst
@@ -10,6 +10,7 @@ Version history
* ext_authz: added a `x-envoy-auth-partial-body` metadata header set to `false|true` indicating if there is a partial body sent in the authorization request message.
* ext_authz: added option to `ext_authz` that allows the filter clearing route cache.
* hot restart: stats are no longer shared between hot restart parent/child via shared memory, but rather by RPC. Hot restart version incremented to 11.
* http: fixed a crashing bug where gRPC local replies would cause segfaults when upstream access logging was on.
* http: mitigated a race condition with the :ref:`delayed_close_timeout<envoy_api_field_config.filter.network.http_connection_manager.v2.HttpConnectionManager.delayed_close_timeout>` where it could trigger while actively flushing a pending write buffer for a downstream connection.
* jwt_authn: make filter's parsing of JWT more flexible, allowing syntax like ``jwt=eyJhbGciOiJS...ZFnFIw,extra=7,realm=123``
* redis: added :ref:`prefix routing <envoy_api_field_config.filter.network.redis_proxy.v2.RedisProxy.prefix_routes>` to enable routing commands based on their key's prefix to different upstream.
12 changes: 8 additions & 4 deletions source/common/router/router.cc
@@ -1038,8 +1038,8 @@ Filter::UpstreamRequest::~UpstreamRequest() {
stream_info_.setUpstreamTiming(upstream_timing_);
stream_info_.onRequestComplete();
for (const auto& upstream_log : parent_.config_.upstream_logs_) {
upstream_log->log(parent_.downstream_headers_, upstream_headers_, upstream_trailers_,
stream_info_);
upstream_log->log(parent_.downstream_headers_, upstream_headers_.get(),
upstream_trailers_.get(), stream_info_);
}
}

@@ -1053,7 +1053,9 @@ void Filter::UpstreamRequest::decodeHeaders(Http::HeaderMapPtr&& headers, bool e
upstream_timing_.onFirstUpstreamRxByteReceived(parent_.callbacks_->dispatcher().timeSource());
maybeEndDecode(end_stream);

upstream_headers_ = headers.get();
if (!parent_.config_.upstream_logs_.empty()) {
upstream_headers_ = std::make_unique<Http::HeaderMapImpl>(*headers);
}
const uint64_t response_code = Http::Utility::getResponseStatus(*headers);
stream_info_.response_code_ = static_cast<uint32_t>(response_code);
parent_.onUpstreamHeaders(response_code, std::move(headers), *this, end_stream);
@@ -1067,7 +1069,9 @@ void Filter::UpstreamRequest::decodeData(Buffer::Instance& data, bool end_stream

void Filter::UpstreamRequest::decodeTrailers(Http::HeaderMapPtr&& trailers) {
maybeEndDecode(true);
upstream_trailers_ = trailers.get();
if (!parent_.config_.upstream_logs_.empty()) {
upstream_trailers_ = std::make_unique<Http::HeaderMapImpl>(*trailers);
}
parent_.onUpstreamTrailers(std::move(trailers), *this);
}

6 changes: 4 additions & 2 deletions source/common/router/router.h
@@ -348,8 +348,10 @@ class Filter : Logger::Loggable<Logger::Id::router>,
Tracing::SpanPtr span_;
StreamInfo::StreamInfoImpl stream_info_;
StreamInfo::UpstreamTiming upstream_timing_;
Http::HeaderMap* upstream_headers_{};
Http::HeaderMap* upstream_trailers_{};
// Copies of upstream headers/trailers. These are only set if upstream
// access logging is configured.
Http::HeaderMapPtr upstream_headers_;
Http::HeaderMapPtr upstream_trailers_;

bool calling_encode_headers_ : 1;
bool upstream_canary_ : 1;
1 change: 1 addition & 0 deletions test/integration/BUILD
@@ -211,6 +211,7 @@ envoy_cc_test(
":http_integration_lib",
"//source/common/http:header_map_lib",
"//source/extensions/filters/http/buffer:config",
"//source/extensions/filters/http/dynamo:config",
"//source/extensions/filters/http/health_check:config",
"//test/integration/filters:random_pause_filter_lib",
"//test/test_common:utility_lib",
60 changes: 60 additions & 0 deletions test/integration/http2_upstream_integration_test.cc
@@ -10,6 +10,8 @@

#include "gtest/gtest.h"

using testing::HasSubstr;

namespace Envoy {

INSTANTIATE_TEST_SUITE_P(IpVersions, Http2UpstreamIntegrationTest,
@@ -313,4 +315,62 @@ TEST_P(Http2UpstreamIntegrationTest, UpstreamConnectionCloseWithManyStreams) {
}
}

// Regression test for https://github.com/envoyproxy/envoy/issues/6744
TEST_P(Http2UpstreamIntegrationTest, HittingEncoderFilterLimitForGrpc) {
config_helper_.addConfigModifier(
[&](envoy::config::filter::network::http_connection_manager::v2::HttpConnectionManager& hcm)
-> void {
const std::string access_log_name =
TestEnvironment::temporaryPath(TestUtility::uniqueFilename());
// Configure just enough of an upstream access log to reference the upstream headers.
const std::string yaml_string = R"EOF(
name: envoy.router
config:
upstream_log:
name: envoy.file_access_log
filter:
not_health_check_filter: {}
config:
path: /dev/null
)EOF";
const std::string json = Json::Factory::loadFromYamlString(yaml_string)->asJsonString();
MessageUtil::loadFromJson(json, *hcm.mutable_http_filters(1));
});

// As with ProtocolIntegrationTest.HittingEncoderFilterLimit, use a filter
// which buffers response data, but in this case make sure the sendLocalReply
// is gRPC.
config_helper_.addFilter("{ name: envoy.http_dynamo_filter, config: {} }");
config_helper_.setBufferLimits(1024, 1024);
initialize();

// Send the request.
codec_client_ = makeHttpConnection(lookupPort("http"));
auto encoder_decoder =
codec_client_->startRequest(Http::TestHeaderMapImpl{{":method", "POST"},
{":path", "/test/long/url"},
{":scheme", "http"},
{":authority", "host"},
{"te", "trailers"}});
auto downstream_request = &encoder_decoder.first;
auto response = std::move(encoder_decoder.second);
Buffer::OwnedImpl data(R"({"TableName":"locations"})");
codec_client_->sendData(*downstream_request, data, true);
waitForNextUpstreamRequest();

// Send the response headers.
upstream_request_->encodeHeaders(default_response_headers_, false);

// Now send an overly large response body. At some point, too much data will
// be buffered, the stream will be reset, and the connection will disconnect.
fake_upstreams_[0]->set_allow_unexpected_disconnects(true);
upstream_request_->encodeData(1024 * 65, false);
ASSERT_TRUE(upstream_request_->waitForReset());
ASSERT_TRUE(fake_upstream_connection_->close());
ASSERT_TRUE(fake_upstream_connection_->waitForDisconnect());

response->waitForEndStream();
EXPECT_TRUE(response->complete());
}

} // namespace Envoy
