From 8595be78bfc87e85bdb47e3cc5d936ebb38f024e Mon Sep 17 00:00:00 2001
From: "R.B. Boyer"
Date: Mon, 8 May 2023 16:51:51 -0500
Subject: [PATCH 1/6] [1.14.x] grpc: ensure grpc resolver correctly uses lan/wan addresses on servers

The grpc resolver implementation is fed from changes to the router.Router.
Within the router there is a map of areas storing the addressing information
for servers in those areas. All map entries are of the WAN variety except for
a single special entry for the LAN. Addressing information in the LAN "area"
consists of local addresses intended for use when making a client-to-server
or server-to-server request.

The client agent correctly updates this LAN area when receiving LAN serf
events, so by extension the grpc resolver works fine in that scenario.

The server agent only populates a single entry in the LAN area (for itself)
on startup, and then never mutates that area map again. For normal RPCs a
different structure is used for LAN routing. Additionally, when selecting a
server to contact in the local datacenter, it will randomly select addresses
from either the LAN- or WAN-addressed entries in the map.

Unfortunately this means that the grpc resolver stack, as it exists on server
agents, is either broken or only functions accidentally by having servers
dial each other over the WAN-accessible address. If the operator disabled the
serf WAN port completely, this incidental functioning would likely break.

This PR enforces that local requests for servers (both for stale reads and
leader-forwarded requests) exclusively use the LAN "area" information, and
also fixes it so that servers keep that area up to date in the router.

A test for the grpc resolver logic was added, as well as a higher-level
full-stack test to ensure the externally perceived bug does not return.
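The core of the fix is the filtering rule added to the resolver below. The
following is a condensed, standalone sketch of that rule only; the areaID and
server types here are illustrative stand-ins for types.AreaID and
metadata.Server, not the real definitions.

package main

import "fmt"

type areaID string

const areaLAN areaID = "lan"

type server struct {
	Name       string
	Datacenter string
}

// shouldIgnoreServer mirrors the rule added to the resolver in this patch:
// clients only participate in the LAN, and servers must never admit
// WAN-flavored entries for their own datacenter.
func shouldIgnoreServer(agentType, agentDC string, area areaID, srv server) bool {
	if agentType == "client" && area != areaLAN {
		return true // clients only track LAN servers
	}
	if agentType == "server" && srv.Datacenter == agentDC && area != areaLAN {
		return true // local-DC siblings must come from the LAN area only
	}
	return false
}

func main() {
	// A server in dc1 ignores the WAN copy of a dc1 sibling but keeps its LAN copy.
	fmt.Println(shouldIgnoreServer("server", "dc1", "wan", server{"s1.dc1", "dc1"})) // true
	fmt.Println(shouldIgnoreServer("server", "dc1", areaLAN, server{"s1", "dc1"}))   // false
	// Remote-DC servers are still reached via their WAN addresses.
	fmt.Println(shouldIgnoreServer("server", "dc1", "wan", server{"s1.dc2", "dc2"})) // false
}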
--- agent/consul/client_test.go | 10 +- agent/consul/server_serf.go | 4 + agent/consul/subscribe_backend_test.go | 5 +- agent/grpc-internal/client_test.go | 81 +++++-- agent/grpc-internal/handler_test.go | 4 +- agent/grpc-internal/resolver/resolver.go | 90 +++++++- agent/grpc-internal/resolver/resolver_test.go | 195 +++++++++++++++++ agent/peering_endpoint_test.go | 198 ++++++++++++++++++ agent/rpc/peering/service_test.go | 14 +- agent/setup.go | 7 + 10 files changed, 574 insertions(+), 34 deletions(-) create mode 100644 agent/grpc-internal/resolver/resolver_test.go diff --git a/agent/consul/client_test.go b/agent/consul/client_test.go index cef20c291ba..84bb76883ac 100644 --- a/agent/consul/client_test.go +++ b/agent/consul/client_test.go @@ -499,11 +499,15 @@ func newClient(t *testing.T, config *Config) *Client { return client } -func newTestResolverConfig(t *testing.T, suffix string) resolver.Config { +func newTestResolverConfig(t *testing.T, suffix string, dc, agentType string) resolver.Config { n := t.Name() s := strings.Replace(n, "/", "", -1) s = strings.Replace(s, "_", "", -1) - return resolver.Config{Authority: strings.ToLower(s) + "-" + suffix} + return resolver.Config{ + Datacenter: dc, + AgentType: agentType, + Authority: strings.ToLower(s) + "-" + suffix, + } } func newDefaultDeps(t *testing.T, c *Config) Deps { @@ -518,7 +522,7 @@ func newDefaultDeps(t *testing.T, c *Config) Deps { tls, err := tlsutil.NewConfigurator(c.TLSConfig, logger) require.NoError(t, err, "failed to create tls configuration") - builder := resolver.NewServerResolverBuilder(newTestResolverConfig(t, c.NodeName+"-"+c.Datacenter)) + builder := resolver.NewServerResolverBuilder(newTestResolverConfig(t, c.NodeName+"-"+c.Datacenter, c.Datacenter, "server")) r := router.NewRouter(logger, c.Datacenter, fmt.Sprintf("%s.%s", c.NodeName, c.Datacenter), builder) resolver.Register(builder) diff --git a/agent/consul/server_serf.go b/agent/consul/server_serf.go index a515589303f..8c4c7600ab7 100644 --- a/agent/consul/server_serf.go +++ b/agent/consul/server_serf.go @@ -20,6 +20,7 @@ import ( "github.com/hashicorp/consul/lib" libserf "github.com/hashicorp/consul/lib/serf" "github.com/hashicorp/consul/logging" + "github.com/hashicorp/consul/types" ) const ( @@ -356,6 +357,7 @@ func (s *Server) lanNodeJoin(me serf.MemberEvent) { // Update server lookup s.serverLookup.AddServer(serverMeta) + s.router.AddServer(types.AreaLAN, serverMeta) // If we're still expecting to bootstrap, may need to handle this. 
if s.config.BootstrapExpect != 0 { @@ -377,6 +379,7 @@ func (s *Server) lanNodeUpdate(me serf.MemberEvent) { // Update server lookup s.serverLookup.AddServer(serverMeta) + s.router.AddServer(types.AreaLAN, serverMeta) } } @@ -515,5 +518,6 @@ func (s *Server) lanNodeFailed(me serf.MemberEvent) { // Update id to address map s.serverLookup.RemoveServer(serverMeta) + s.router.RemoveServer(types.AreaLAN, serverMeta) } } diff --git a/agent/consul/subscribe_backend_test.go b/agent/consul/subscribe_backend_test.go index a4d1134e182..72e054d3960 100644 --- a/agent/consul/subscribe_backend_test.go +++ b/agent/consul/subscribe_backend_test.go @@ -377,7 +377,10 @@ func newClientWithGRPCResolver(t *testing.T, ops ...func(*Config)) (*Client, *re } builder := resolver.NewServerResolverBuilder(newTestResolverConfig(t, - "client."+config.Datacenter+"."+string(config.NodeID))) + "client."+config.Datacenter+"."+string(config.NodeID), + config.Datacenter, + "client", + )) resolver.Register(builder) t.Cleanup(func() { diff --git a/agent/grpc-internal/client_test.go b/agent/grpc-internal/client_test.go index d9d264d8030..87b9b5cb54d 100644 --- a/agent/grpc-internal/client_test.go +++ b/agent/grpc-internal/client_test.go @@ -33,8 +33,8 @@ func TestNewDialer_WithTLSWrapper(t *testing.T) { require.NoError(t, err) t.Cleanup(logError(t, lis.Close)) - builder := resolver.NewServerResolverBuilder(newConfig(t)) - builder.AddServer(types.AreaWAN, &metadata.Server{ + builder := resolver.NewServerResolverBuilder(newConfig(t, "dc1", "server")) + builder.AddServer(types.AreaLAN, &metadata.Server{ Name: "server-1", ID: "ID1", Datacenter: "dc1", @@ -84,7 +84,7 @@ func TestNewDialer_WithALPNWrapper(t *testing.T) { p.Wait() }() - builder := resolver.NewServerResolverBuilder(newConfig(t)) + builder := resolver.NewServerResolverBuilder(newConfig(t, "dc1", "server")) builder.AddServer(types.AreaWAN, &metadata.Server{ Name: "server-1", ID: "ID1", @@ -139,7 +139,7 @@ func TestNewDialer_WithALPNWrapper(t *testing.T) { func TestNewDialer_IntegrationWithTLSEnabledHandler(t *testing.T) { // if this test is failing because of expired certificates // use the procedure in test/CA-GENERATION.md - res := resolver.NewServerResolverBuilder(newConfig(t)) + res := resolver.NewServerResolverBuilder(newConfig(t, "dc1", "server")) registerWithGRPC(t, res) tlsConf, err := tlsutil.NewConfigurator(tlsutil.Config{ @@ -156,9 +156,17 @@ func TestNewDialer_IntegrationWithTLSEnabledHandler(t *testing.T) { srv := newSimpleTestServer(t, "server-1", "dc1", tlsConf) md := srv.Metadata() - res.AddServer(types.AreaWAN, md) + res.AddServer(types.AreaLAN, md) t.Cleanup(srv.shutdown) + { + // Put a duplicate instance of this on the WAN that will + // fail if we accidentally use it. 
+ srv := newPanicTestServer(t, hclog.Default(), "server-1", "dc1", nil) + res.AddServer(types.AreaWAN, srv.Metadata()) + t.Cleanup(srv.shutdown) + } + pool := NewClientConnPool(ClientConnPoolConfig{ Servers: res, TLSWrapper: TLSWrapper(tlsConf.OutgoingRPCWrapper()), @@ -186,7 +194,7 @@ func TestNewDialer_IntegrationWithTLSEnabledHandler_viaMeshGateway(t *testing.T) // use the procedure in test/CA-GENERATION.md gwAddr := ipaddr.FormatAddressPort("127.0.0.1", freeport.GetOne(t)) - res := resolver.NewServerResolverBuilder(newConfig(t)) + res := resolver.NewServerResolverBuilder(newConfig(t, "dc2", "server")) registerWithGRPC(t, res) tlsConf, err := tlsutil.NewConfigurator(tlsutil.Config{ @@ -261,7 +269,7 @@ func TestNewDialer_IntegrationWithTLSEnabledHandler_viaMeshGateway(t *testing.T) func TestClientConnPool_IntegrationWithGRPCResolver_Failover(t *testing.T) { count := 4 - res := resolver.NewServerResolverBuilder(newConfig(t)) + res := resolver.NewServerResolverBuilder(newConfig(t, "dc1", "server")) registerWithGRPC(t, res) pool := NewClientConnPool(ClientConnPoolConfig{ Servers: res, @@ -272,9 +280,18 @@ func TestClientConnPool_IntegrationWithGRPCResolver_Failover(t *testing.T) { for i := 0; i < count; i++ { name := fmt.Sprintf("server-%d", i) - srv := newSimpleTestServer(t, name, "dc1", nil) - res.AddServer(types.AreaWAN, srv.Metadata()) - t.Cleanup(srv.shutdown) + { + srv := newSimpleTestServer(t, name, "dc1", nil) + res.AddServer(types.AreaLAN, srv.Metadata()) + t.Cleanup(srv.shutdown) + } + { + // Put a duplicate instance of this on the WAN that will + // fail if we accidentally use it. + srv := newPanicTestServer(t, hclog.Default(), name, "dc1", nil) + res.AddServer(types.AreaWAN, srv.Metadata()) + t.Cleanup(srv.shutdown) + } } conn, err := pool.ClientConn("dc1") @@ -287,7 +304,7 @@ func TestClientConnPool_IntegrationWithGRPCResolver_Failover(t *testing.T) { first, err := client.Something(ctx, &testservice.Req{}) require.NoError(t, err) - res.RemoveServer(types.AreaWAN, &metadata.Server{ID: first.ServerName, Datacenter: "dc1"}) + res.RemoveServer(types.AreaLAN, &metadata.Server{ID: first.ServerName, Datacenter: "dc1"}) resp, err := client.Something(ctx, &testservice.Req{}) require.NoError(t, err) @@ -296,7 +313,7 @@ func TestClientConnPool_IntegrationWithGRPCResolver_Failover(t *testing.T) { func TestClientConnPool_ForwardToLeader_Failover(t *testing.T) { count := 3 - res := resolver.NewServerResolverBuilder(newConfig(t)) + res := resolver.NewServerResolverBuilder(newConfig(t, "dc1", "server")) registerWithGRPC(t, res) pool := NewClientConnPool(ClientConnPoolConfig{ Servers: res, @@ -308,10 +325,19 @@ func TestClientConnPool_ForwardToLeader_Failover(t *testing.T) { var servers []testServer for i := 0; i < count; i++ { name := fmt.Sprintf("server-%d", i) - srv := newSimpleTestServer(t, name, "dc1", nil) - res.AddServer(types.AreaWAN, srv.Metadata()) - servers = append(servers, srv) - t.Cleanup(srv.shutdown) + { + srv := newSimpleTestServer(t, name, "dc1", nil) + res.AddServer(types.AreaLAN, srv.Metadata()) + servers = append(servers, srv) + t.Cleanup(srv.shutdown) + } + { + // Put a duplicate instance of this on the WAN that will + // fail if we accidentally use it. + srv := newPanicTestServer(t, hclog.Default(), name, "dc1", nil) + res.AddServer(types.AreaWAN, srv.Metadata()) + t.Cleanup(srv.shutdown) + } } // Set the leader address to the first server. 
@@ -338,16 +364,20 @@ func TestClientConnPool_ForwardToLeader_Failover(t *testing.T) { require.Equal(t, resp.ServerName, servers[1].name) } -func newConfig(t *testing.T) resolver.Config { +func newConfig(t *testing.T, dc, agentType string) resolver.Config { n := t.Name() s := strings.Replace(n, "/", "", -1) s = strings.Replace(s, "_", "", -1) - return resolver.Config{Authority: strings.ToLower(s)} + return resolver.Config{ + Datacenter: dc, + AgentType: agentType, + Authority: strings.ToLower(s), + } } func TestClientConnPool_IntegrationWithGRPCResolver_Rebalance(t *testing.T) { count := 5 - res := resolver.NewServerResolverBuilder(newConfig(t)) + res := resolver.NewServerResolverBuilder(newConfig(t, "dc1", "server")) registerWithGRPC(t, res) pool := NewClientConnPool(ClientConnPoolConfig{ Servers: res, @@ -401,7 +431,7 @@ func TestClientConnPool_IntegrationWithGRPCResolver_Rebalance(t *testing.T) { func TestClientConnPool_IntegrationWithGRPCResolver_MultiDC(t *testing.T) { dcs := []string{"dc1", "dc2", "dc3"} - res := resolver.NewServerResolverBuilder(newConfig(t)) + res := resolver.NewServerResolverBuilder(newConfig(t, "dc1", "server")) registerWithGRPC(t, res) pool := NewClientConnPool(ClientConnPoolConfig{ Servers: res, @@ -413,7 +443,16 @@ func TestClientConnPool_IntegrationWithGRPCResolver_MultiDC(t *testing.T) { for _, dc := range dcs { name := "server-0-" + dc srv := newSimpleTestServer(t, name, dc, nil) - res.AddServer(types.AreaWAN, srv.Metadata()) + if dc == "dc1" { + res.AddServer(types.AreaLAN, srv.Metadata()) + // Put a duplicate instance of this on the WAN that will + // fail if we accidentally use it. + srvBad := newPanicTestServer(t, hclog.Default(), name, dc, nil) + res.AddServer(types.AreaWAN, srvBad.Metadata()) + t.Cleanup(srvBad.shutdown) + } else { + res.AddServer(types.AreaWAN, srv.Metadata()) + } t.Cleanup(srv.shutdown) } diff --git a/agent/grpc-internal/handler_test.go b/agent/grpc-internal/handler_test.go index 4f093ac65ee..109a2cc6008 100644 --- a/agent/grpc-internal/handler_test.go +++ b/agent/grpc-internal/handler_test.go @@ -26,11 +26,11 @@ func TestHandler_PanicRecoveryInterceptor(t *testing.T) { Output: &buf, }) - res := resolver.NewServerResolverBuilder(newConfig(t)) + res := resolver.NewServerResolverBuilder(newConfig(t, "dc1", "server")) registerWithGRPC(t, res) srv := newPanicTestServer(t, logger, "server-1", "dc1", nil) - res.AddServer(types.AreaWAN, srv.Metadata()) + res.AddServer(types.AreaLAN, srv.Metadata()) t.Cleanup(srv.shutdown) pool := NewClientConnPool(ClientConnPoolConfig{ diff --git a/agent/grpc-internal/resolver/resolver.go b/agent/grpc-internal/resolver/resolver.go index 87275449ef8..683b8538972 100644 --- a/agent/grpc-internal/resolver/resolver.go +++ b/agent/grpc-internal/resolver/resolver.go @@ -17,25 +17,45 @@ import ( // ServerResolvers updated when changes occur. type ServerResolverBuilder struct { cfg Config + // leaderResolver is used to track the address of the leader in the local DC. leaderResolver leaderResolver + // servers is an index of Servers by area and Server.ID. The map contains server IDs // for all datacenters. servers map[types.AreaID]map[string]*metadata.Server + // resolvers is an index of connections to the serverResolver which manages // addresses of servers for that connection. + // + // this is only applicable for non-leader conn types resolvers map[resolver.ClientConn]*serverResolver + // lock for all stateful fields (excludes config which is immutable). 
lock sync.RWMutex } type Config struct { + // Datacenter is the datacenter of this agent. + Datacenter string + + // AgentType is either 'server' or 'client' and is required. + AgentType string + // Authority used to query the server. Defaults to "". Used to support // parallel testing because gRPC registers resolvers globally. Authority string } func NewServerResolverBuilder(cfg Config) *ServerResolverBuilder { + if cfg.Datacenter == "" { + panic("ServerResolverBuilder needs Config.Datacenter to be nonempty") + } + switch cfg.AgentType { + case "server", "client": + default: + panic("ServerResolverBuilder needs Config.AgentType to be either server or client") + } return &ServerResolverBuilder{ cfg: cfg, servers: make(map[types.AreaID]map[string]*metadata.Server), @@ -80,6 +100,7 @@ func (s *ServerResolverBuilder) ServerForGlobalAddr(globalAddr string) (*metadat } } } + return nil, fmt.Errorf("failed to find Consul server for global address %q", globalAddr) } @@ -91,12 +112,12 @@ func (s *ServerResolverBuilder) Build(target resolver.Target, cc resolver.Client // If there's already a resolver for this connection, return it. // TODO(streaming): how would this happen since we already cache connections in ClientConnPool? - if resolver, ok := s.resolvers[cc]; ok { - return resolver, nil - } if cc == s.leaderResolver.clientConn { return s.leaderResolver, nil } + if resolver, ok := s.resolvers[cc]; ok { + return resolver, nil + } //nolint:staticcheck serverType, datacenter, err := parseEndpoint(target.Endpoint) @@ -143,6 +164,10 @@ func (s *ServerResolverBuilder) Authority() string { // AddServer updates the resolvers' states to include the new server's address. func (s *ServerResolverBuilder) AddServer(areaID types.AreaID, server *metadata.Server) { + if s.shouldIgnoreServer(areaID, server) { + return + } + s.lock.Lock() defer s.lock.Unlock() @@ -154,6 +179,10 @@ func (s *ServerResolverBuilder) AddServer(areaID types.AreaID, server *metadata. areaServers[uniqueID(server)] = server + if areaID == types.AreaLAN || s.cfg.Datacenter == server.Datacenter { + s.leaderResolver.updateClientConn() + } + addrs := s.getDCAddrs(server.Datacenter) for _, resolver := range s.resolvers { if resolver.datacenter == server.Datacenter { @@ -179,6 +208,10 @@ func DCPrefix(datacenter, suffix string) string { // RemoveServer updates the resolvers' states with the given server removed. func (s *ServerResolverBuilder) RemoveServer(areaID types.AreaID, server *metadata.Server) { + if s.shouldIgnoreServer(areaID, server) { + return + } + s.lock.Lock() defer s.lock.Unlock() @@ -192,6 +225,10 @@ func (s *ServerResolverBuilder) RemoveServer(areaID types.AreaID, server *metada delete(s.servers, areaID) } + if areaID == types.AreaLAN || s.cfg.Datacenter == server.Datacenter { + s.leaderResolver.updateClientConn() + } + addrs := s.getDCAddrs(server.Datacenter) for _, resolver := range s.resolvers { if resolver.datacenter == server.Datacenter { @@ -200,14 +237,35 @@ func (s *ServerResolverBuilder) RemoveServer(areaID types.AreaID, server *metada } } +func (s *ServerResolverBuilder) shouldIgnoreServer(areaID types.AreaID, server *metadata.Server) bool { + if s.cfg.AgentType == "client" && areaID != types.AreaLAN { + return true + } + + if s.cfg.AgentType == "server" && + server.Datacenter == s.cfg.Datacenter && + areaID != types.AreaLAN { + return true + } + + return false +} + // getDCAddrs returns a list of the server addresses for the given datacenter. // This method requires that lock is held for reads. 
func (s *ServerResolverBuilder) getDCAddrs(dc string) []resolver.Address { + lanRequest := (s.cfg.Datacenter == dc) + var ( addrs []resolver.Address keptServerIDs = make(map[string]struct{}) ) - for _, areaServers := range s.servers { + for areaID, areaServers := range s.servers { + if (areaID == types.AreaLAN) != lanRequest { + // LAN requests only look at LAN data. WAN requests only look at + // WAN data. + continue + } for _, server := range areaServers { if server.Datacenter != dc { continue @@ -234,8 +292,28 @@ func (s *ServerResolverBuilder) UpdateLeaderAddr(datacenter, addr string) { s.lock.Lock() defer s.lock.Unlock() - s.leaderResolver.globalAddr = DCPrefix(datacenter, addr) - s.leaderResolver.updateClientConn() + lanAddr := DCPrefix(datacenter, addr) + + s.leaderResolver.globalAddr = lanAddr + + if s.lanHasAddrLocked(lanAddr) { + s.leaderResolver.updateClientConn() + } +} + +func (s *ServerResolverBuilder) lanHasAddrLocked(lanAddr string) bool { + areaServers, ok := s.servers[types.AreaLAN] + if !ok { + return false + } + + for _, server := range areaServers { + if DCPrefix(server.Datacenter, server.Addr.String()) == lanAddr { + return true + } + } + + return false } // serverResolver is a grpc Resolver that will keep a grpc.ClientConn up to date diff --git a/agent/grpc-internal/resolver/resolver_test.go b/agent/grpc-internal/resolver/resolver_test.go new file mode 100644 index 00000000000..ab6e403d7de --- /dev/null +++ b/agent/grpc-internal/resolver/resolver_test.go @@ -0,0 +1,195 @@ +package resolver + +import ( + "fmt" + "net" + "strings" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/serviceconfig" + + "github.com/hashicorp/consul/agent/metadata" + "github.com/hashicorp/consul/types" +) + +func TestServerResolverBuilder(t *testing.T) { + const agentDC = "dc1" + + type testcase struct { + name string + agentType string // server/client + serverType string // server/leader + requestDC string + expectLAN bool + } + + run := func(t *testing.T, tc testcase) { + rs := NewServerResolverBuilder(newConfig(t, agentDC, tc.agentType)) + + endpoint := "" + if tc.serverType == "leader" { + endpoint = "leader.local" + } else { + endpoint = tc.serverType + "." + tc.requestDC + } + + cc := &fakeClientConn{} + _, err := rs.Build(resolver.Target{ + Scheme: "consul", + Authority: rs.Authority(), + Endpoint: endpoint, + }, cc, resolver.BuildOptions{}) + require.NoError(t, err) + + for i := 0; i < 3; i++ { + dc := fmt.Sprintf("dc%d", i+1) + for j := 0; j < 3; j++ { + wanIP := fmt.Sprintf("127.1.%d.%d", i+1, j+10) + name := fmt.Sprintf("%s-server-%d", dc, j+1) + wanMeta := newServerMeta(name, dc, wanIP, true) + + if tc.agentType == "server" { + rs.AddServer(types.AreaWAN, wanMeta) + } + + if dc == agentDC { + // register LAN/WAN pairs for the same instances + lanIP := fmt.Sprintf("127.0.%d.%d", i+1, j+10) + lanMeta := newServerMeta(name, dc, lanIP, false) + rs.AddServer(types.AreaLAN, lanMeta) + + if j == 0 { + rs.UpdateLeaderAddr(dc, lanIP) + } + } + } + } + + if tc.serverType == "leader" { + assert.Len(t, cc.state.Addresses, 1) + } else { + assert.Len(t, cc.state.Addresses, 3) + } + + for _, addr := range cc.state.Addresses { + addrPrefix := tc.requestDC + "-" + if tc.expectLAN { + addrPrefix += "127.0." + } else { + addrPrefix += "127.1." 
+ } + assert.True(t, strings.HasPrefix(addr.Addr, addrPrefix), + "%q does not start with %q (returned WAN for LAN request)", addr.Addr, addrPrefix) + + if tc.expectLAN { + assert.False(t, strings.Contains(addr.ServerName, ".dc"), + "%q ends with datacenter suffix (returned WAN for LAN request)", addr.ServerName) + } else { + assert.True(t, strings.HasSuffix(addr.ServerName, "."+tc.requestDC), + "%q does not end with %q", addr.ServerName, "."+tc.requestDC) + } + } + } + + cases := []testcase{ + { + name: "server requesting local servers", + agentType: "server", + serverType: "server", + requestDC: agentDC, + expectLAN: true, + }, + { + name: "server requesting remote servers in dc2", + agentType: "server", + serverType: "server", + requestDC: "dc2", + expectLAN: false, + }, + { + name: "server requesting remote servers in dc3", + agentType: "server", + serverType: "server", + requestDC: "dc3", + expectLAN: false, + }, + // --------------- + { + name: "server requesting local leader", + agentType: "server", + serverType: "leader", + requestDC: agentDC, + expectLAN: true, + }, + // --------------- + { + name: "client requesting local server", + agentType: "client", + serverType: "server", + requestDC: agentDC, + expectLAN: true, + }, + { + name: "client requesting local leader", + agentType: "client", + serverType: "leader", + requestDC: agentDC, + expectLAN: true, + }, + } + + for _, tc := range cases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + run(t, tc) + }) + } +} + +func newServerMeta(name, dc, ip string, wan bool) *metadata.Server { + fullname := name + if wan { + fullname = name + "." + dc + } + return &metadata.Server{ + ID: name, + Name: fullname, + ShortName: name, + Datacenter: dc, + Addr: &net.IPAddr{IP: net.ParseIP(ip)}, + UseTLS: false, + } +} + +func newConfig(t *testing.T, dc, agentType string) Config { + n := t.Name() + s := strings.Replace(n, "/", "", -1) + s = strings.Replace(s, "_", "", -1) + return Config{ + Datacenter: dc, + AgentType: agentType, + Authority: strings.ToLower(s), + } +} + +// fakeClientConn implements resolver.ClientConn for tests +type fakeClientConn struct { + state resolver.State +} + +var _ resolver.ClientConn = (*fakeClientConn)(nil) + +func (f *fakeClientConn) UpdateState(state resolver.State) error { + f.state = state + return nil +} + +func (*fakeClientConn) ReportError(error) {} +func (*fakeClientConn) NewAddress(addresses []resolver.Address) {} +func (*fakeClientConn) NewServiceConfig(serviceConfig string) {} +func (*fakeClientConn) ParseServiceConfig(serviceConfigJSON string) *serviceconfig.ParseResult { + return nil +} diff --git a/agent/peering_endpoint_test.go b/agent/peering_endpoint_test.go index c49d77de68e..e18d6d82a30 100644 --- a/agent/peering_endpoint_test.go +++ b/agent/peering_endpoint_test.go @@ -1,6 +1,7 @@ package agent import ( + "bufio" "bytes" "context" "encoding/base64" @@ -9,19 +10,216 @@ import ( "io" "net/http" "net/http/httptest" + "strconv" "testing" "time" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "google.golang.org/grpc" + gpeer "google.golang.org/grpc/peer" "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/api" "github.com/hashicorp/consul/proto/pbpeering" + "github.com/hashicorp/consul/sdk/testutil" "github.com/hashicorp/consul/sdk/testutil/retry" "github.com/hashicorp/consul/testrpc" ) +func TestHTTP_Peering_Integration(t *testing.T) { + // This is a full-stack integration test of the gRPC (internal) stack. 
We + // use peering CRUD b/c that is one of the few endpoints exposed over gRPC + // (internal). + + if testing.Short() { + t.Skip("too slow for testing.Short") + } + + // We advertise a wan address we are not using, so that incidental attempts + // to use it will loudly fail. + const ip = "192.0.2.2" + + connectivityConfig := ` +ports { serf_wan = -1 } +bind_addr = "0.0.0.0" +client_addr = "0.0.0.0" +advertise_addr = "127.0.0.1" +advertise_addr_wan = "` + ip + `" ` + + var ( + buf1, buf2, buf3 bytes.Buffer + testLog = testutil.NewLogBuffer(t) + + log1 = io.MultiWriter(testLog, &buf1) + log2 = io.MultiWriter(testLog, &buf2) + log3 = io.MultiWriter(testLog, &buf3) + ) + + a1 := StartTestAgent(t, TestAgent{LogOutput: log1, HCL: ` + server = true + bootstrap = false + bootstrap_expect = 3 + ` + connectivityConfig}) + t.Cleanup(func() { a1.Shutdown() }) + + a2 := StartTestAgent(t, TestAgent{LogOutput: log2, HCL: ` + server = true + bootstrap = false + bootstrap_expect = 3 + ` + connectivityConfig}) + t.Cleanup(func() { a2.Shutdown() }) + + a3 := StartTestAgent(t, TestAgent{LogOutput: log3, HCL: ` + server = true + bootstrap = false + bootstrap_expect = 3 + ` + connectivityConfig}) + t.Cleanup(func() { a3.Shutdown() }) + + { // join a2 to a1 + addr := fmt.Sprintf("127.0.0.1:%d", a2.Config.SerfPortLAN) + _, err := a1.JoinLAN([]string{addr}, nil) + require.NoError(t, err) + } + { // join a3 to a1 + addr := fmt.Sprintf("127.0.0.1:%d", a3.Config.SerfPortLAN) + _, err := a1.JoinLAN([]string{addr}, nil) + require.NoError(t, err) + } + + testrpc.WaitForLeader(t, a1.RPC, "dc1") + testrpc.WaitForActiveCARoot(t, a1.RPC, "dc1", nil) + + testrpc.WaitForTestAgent(t, a1.RPC, "dc1") + testrpc.WaitForTestAgent(t, a2.RPC, "dc1") + testrpc.WaitForTestAgent(t, a3.RPC, "dc1") + + retry.Run(t, func(r *retry.R) { + require.Len(r, a1.LANMembersInAgentPartition(), 3) + require.Len(r, a2.LANMembersInAgentPartition(), 3) + require.Len(r, a3.LANMembersInAgentPartition(), 3) + }) + + type testcase struct { + agent *TestAgent + peerName string + prevCount int + } + + checkPeeringList := func(t *testing.T, a *TestAgent, expect int) { + req, err := http.NewRequest("GET", "/v1/peerings", nil) + require.NoError(t, err) + + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + require.Equal(t, http.StatusOK, resp.Code) + + var apiResp []*api.Peering + require.NoError(t, json.NewDecoder(resp.Body).Decode(&apiResp)) + + require.Len(t, apiResp, expect) + } + + testConn := func(t *testing.T, conn *grpc.ClientConn, peers map[string]int) { + rpcClientPeering := pbpeering.NewPeeringServiceClient(conn) + + peer := &gpeer.Peer{} + _, err := rpcClientPeering.PeeringList( + context.Background(), + &pbpeering.PeeringListRequest{}, + grpc.Peer(peer), + ) + require.NoError(t, err) + + peers[peer.Addr.String()]++ + } + + var ( + standardPeers = make(map[string]int) + leaderPeers = make(map[string]int) + ) + runOnce := func(t *testing.T, tc testcase) { + testutil.RunStep(t, "standard peers", func(t *testing.T) { + conn, err := tc.agent.baseDeps.GRPCConnPool.ClientConn("dc1") + require.NoError(t, err) + testConn(t, conn, standardPeers) + }) + + testutil.RunStep(t, "leader peers", func(t *testing.T) { + leaderConn, err := tc.agent.baseDeps.GRPCConnPool.ClientConnLeader() + require.NoError(t, err) + testConn(t, leaderConn, leaderPeers) + }) + + testutil.RunStep(t, "check peering list before", func(t *testing.T) { + checkPeeringList(t, tc.agent, tc.prevCount) + }) + + body := &pbpeering.GenerateTokenRequest{ + PeerName: tc.peerName, + } 
+ + bodyBytes, err := json.Marshal(body) + require.NoError(t, err) + + req, err := http.NewRequest("POST", "/v1/peering/token", bytes.NewReader(bodyBytes)) + require.NoError(t, err) + + resp := httptest.NewRecorder() + tc.agent.srv.h.ServeHTTP(resp, req) + require.Equal(t, http.StatusOK, resp.Code, "expected 200, got %d: %v", resp.Code, resp.Body.String()) + + var out pbpeering.GenerateTokenResponse + require.NoError(t, json.NewDecoder(resp.Body).Decode(&out)) + + testutil.RunStep(t, "check peering list after", func(t *testing.T) { + checkPeeringList(t, tc.agent, tc.prevCount+1) + }) + } + + // Try the procedure on all agents to force N-1 of them to leader-forward. + cases := []testcase{ + {agent: a1, peerName: "peer-1", prevCount: 0}, + {agent: a2, peerName: "peer-2", prevCount: 1}, + {agent: a3, peerName: "peer-3", prevCount: 2}, + } + + for i, tc := range cases { + tc := tc + testutil.RunStep(t, "server-"+strconv.Itoa(i+1), func(t *testing.T) { + runOnce(t, tc) + }) + } + + testutil.RunStep(t, "ensure we got the right mixture of responses", func(t *testing.T) { + assert.Len(t, standardPeers, 3) + + // Each server talks to a single leader. + assert.Len(t, leaderPeers, 1) + for p, n := range leaderPeers { + assert.Equal(t, 3, n, "leader peer %q expected 3 uses", p) + } + }) + + testutil.RunStep(t, "no server experienced the server resolution error", func(t *testing.T) { + // Check them all for the bad error + const grpcError = `failed to find Consul server for global address` + + var buf bytes.Buffer + buf.ReadFrom(&buf1) + buf.ReadFrom(&buf2) + buf.ReadFrom(&buf3) + + scan := bufio.NewScanner(&buf) + for scan.Scan() { + line := scan.Text() + require.NotContains(t, line, grpcError) + } + require.NoError(t, scan.Err()) + }) +} + func TestHTTP_Peering_GenerateToken(t *testing.T) { if testing.Short() { t.Skip("too slow for testing.Short") diff --git a/agent/rpc/peering/service_test.go b/agent/rpc/peering/service_test.go index 1bcb0237d2b..2e005ce8a6a 100644 --- a/agent/rpc/peering/service_test.go +++ b/agent/rpc/peering/service_test.go @@ -8,6 +8,7 @@ import ( "io/ioutil" "net" "path" + "strings" "testing" "time" @@ -1664,6 +1665,17 @@ type testingServer struct { PublicGRPCAddr string } +func newConfig(t *testing.T, dc, agentType string) resolver.Config { + n := t.Name() + s := strings.Replace(n, "/", "", -1) + s = strings.Replace(s, "_", "", -1) + return resolver.Config{ + Datacenter: dc, + AgentType: agentType, + Authority: strings.ToLower(s), + } +} + // TODO(peering): remove duplication between this and agent/consul tests func newDefaultDeps(t *testing.T, c *consul.Config) consul.Deps { t.Helper() @@ -1678,7 +1690,7 @@ func newDefaultDeps(t *testing.T, c *consul.Config) consul.Deps { require.NoError(t, err, "failed to create tls configuration") r := router.NewRouter(logger, c.Datacenter, fmt.Sprintf("%s.%s", c.NodeName, c.Datacenter), nil) - builder := resolver.NewServerResolverBuilder(resolver.Config{}) + builder := resolver.NewServerResolverBuilder(newConfig(t, c.Datacenter, "client")) resolver.Register(builder) connPool := &pool.ConnPool{ diff --git a/agent/setup.go b/agent/setup.go index d237e2fb9ee..6f0f2538a6d 100644 --- a/agent/setup.go +++ b/agent/setup.go @@ -106,7 +106,14 @@ func NewBaseDeps(configLoader ConfigLoader, logOut io.Writer) (BaseDeps, error) d.ViewStore = submatview.NewStore(d.Logger.Named("viewstore")) d.ConnPool = newConnPool(cfg, d.Logger, d.TLSConfigurator) + agentType := "client" + if cfg.ServerMode { + agentType = "server" + } + builder := 
resolver.NewServerResolverBuilder(resolver.Config{ + AgentType: agentType, + Datacenter: cfg.Datacenter, // Set the authority to something sufficiently unique so any usage in // tests would be self-isolating in the global resolver map, while also // not incurring a huge penalty for non-test code. From 86755d045652a1bc82cb2eefcb192d0e2cff8de1 Mon Sep 17 00:00:00 2001 From: "R.B. Boyer" Date: Wed, 10 May 2023 10:28:47 -0500 Subject: [PATCH 2/6] patch test --- agent/grpc-internal/client_test.go | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/agent/grpc-internal/client_test.go b/agent/grpc-internal/client_test.go index 87b9b5cb54d..ce4b6b41a51 100644 --- a/agent/grpc-internal/client_test.go +++ b/agent/grpc-internal/client_test.go @@ -92,6 +92,13 @@ func TestNewDialer_WithALPNWrapper(t *testing.T) { Addr: lis1.Addr(), UseTLS: true, }) + builder.AddServer(types.AreaLAN, &metadata.Server{ + Name: "server-1", + ID: "ID1", + Datacenter: "dc1", + Addr: lis1.Addr(), + UseTLS: true, + }) builder.AddServer(types.AreaWAN, &metadata.Server{ Name: "server-2", ID: "ID2", @@ -389,8 +396,13 @@ func TestClientConnPool_IntegrationWithGRPCResolver_Rebalance(t *testing.T) { for i := 0; i < count; i++ { name := fmt.Sprintf("server-%d", i) srv := newSimpleTestServer(t, name, "dc1", nil) - res.AddServer(types.AreaWAN, srv.Metadata()) + res.AddServer(types.AreaLAN, srv.Metadata()) t.Cleanup(srv.shutdown) + // Put a duplicate instance of this on the WAN that will + // fail if we accidentally use it. + srvBad := newPanicTestServer(t, hclog.Default(), name, "dc1", nil) + res.AddServer(types.AreaWAN, srvBad.Metadata()) + t.Cleanup(srvBad.shutdown) } conn, err := pool.ClientConn("dc1") From e05ae9aaf87eb5b84cd1339a4005f4b8e8e17546 Mon Sep 17 00:00:00 2001 From: "R.B. Boyer" Date: Wed, 10 May 2023 10:49:19 -0500 Subject: [PATCH 3/6] backport subtle test fix from #16396 --- agent/grpc-external/services/peerstream/stream_test.go | 2 +- agent/grpc-external/services/peerstream/testing.go | 10 ++++++++++ 2 files changed, 11 insertions(+), 1 deletion(-) diff --git a/agent/grpc-external/services/peerstream/stream_test.go b/agent/grpc-external/services/peerstream/stream_test.go index bdca91be658..b2afd3d64a5 100644 --- a/agent/grpc-external/services/peerstream/stream_test.go +++ b/agent/grpc-external/services/peerstream/stream_test.go @@ -1252,8 +1252,8 @@ func TestStreamResources_Server_DisconnectsOnHeartbeatTimeout(t *testing.T) { }) testutil.RunStep(t, "stream is disconnected due to heartbeat timeout", func(t *testing.T) { - disconnectTime := it.FutureNow(1) retry.Run(t, func(r *retry.R) { + disconnectTime := it.StaticNow() status, ok := srv.StreamStatus(testPeerID) require.True(r, ok) require.False(r, status.Connected) diff --git a/agent/grpc-external/services/peerstream/testing.go b/agent/grpc-external/services/peerstream/testing.go index 4f0297a6c52..5eb575c06aa 100644 --- a/agent/grpc-external/services/peerstream/testing.go +++ b/agent/grpc-external/services/peerstream/testing.go @@ -150,6 +150,16 @@ func (t *incrementalTime) Now() time.Time { return t.base.Add(dur) } +// StaticNow returns the current internal clock without advancing it. +func (t *incrementalTime) StaticNow() time.Time { + t.mu.Lock() + defer t.mu.Unlock() + + dur := time.Duration(t.next) * time.Second + + return t.base.Add(dur) +} + // FutureNow will return a given future value of the Now() function. // The numerical argument indicates which future Now value you wanted. The // value must be > 0. 
From ef813cbb8bdd5fb4cbb0435dac327caabac75b20 Mon Sep 17 00:00:00 2001 From: "R.B. Boyer" Date: Wed, 10 May 2023 11:20:38 -0500 Subject: [PATCH 4/6] add changelog --- .changelog/17270.txt | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 .changelog/17270.txt diff --git a/.changelog/17270.txt b/.changelog/17270.txt new file mode 100644 index 00000000000..b9bd52888e4 --- /dev/null +++ b/.changelog/17270.txt @@ -0,0 +1,3 @@ +```release-note:bug +grpc: ensure grpc resolver correctly uses lan/wan addresses on servers +``` From bbcc73c9c1632ebcecf6507e815a30b5eb46ad81 Mon Sep 17 00:00:00 2001 From: "R.B. Boyer" Date: Wed, 10 May 2023 11:43:48 -0500 Subject: [PATCH 5/6] add comment --- agent/grpc-internal/resolver/resolver.go | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/agent/grpc-internal/resolver/resolver.go b/agent/grpc-internal/resolver/resolver.go index 683b8538972..64ea70881ab 100644 --- a/agent/grpc-internal/resolver/resolver.go +++ b/agent/grpc-internal/resolver/resolver.go @@ -237,6 +237,19 @@ func (s *ServerResolverBuilder) RemoveServer(areaID types.AreaID, server *metada } } +// shouldIgnoreServer is used to contextually decide if a particular kind of +// server should be accepted into a given area. +// +// On client agents it's pretty easy: clients only participate in the standard +// LAN, so we only accept servers from the LAN. +// +// On server agents it's a little less obvious. This resolver is ultimately +// used to have servers dial other servers. If a server is going to cross +// between datacenters (using traditional federation) then we want to use the +// WAN addresses for them, but if a server is going to dial a sibling server in +// the same datacenter we want it to use the LAN addresses always. To achieve +// that here we simply never allow WAN servers for our current datacenter to be +// added into the resolver, letting only the LAN instances through. func (s *ServerResolverBuilder) shouldIgnoreServer(areaID types.AreaID, server *metadata.Server) bool { if s.cfg.AgentType == "client" && areaID != types.AreaLAN { return true From b4dcecac7f0c9d4ca226e2c31ccdc35068ad27b7 Mon Sep 17 00:00:00 2001 From: "R.B. Boyer" Date: Wed, 10 May 2023 11:54:55 -0500 Subject: [PATCH 6/6] simplify fix --- agent/grpc-internal/resolver/resolver.go | 32 ++---------------------- 1 file changed, 2 insertions(+), 30 deletions(-) diff --git a/agent/grpc-internal/resolver/resolver.go b/agent/grpc-internal/resolver/resolver.go index 64ea70881ab..9e4f73ff67d 100644 --- a/agent/grpc-internal/resolver/resolver.go +++ b/agent/grpc-internal/resolver/resolver.go @@ -179,10 +179,6 @@ func (s *ServerResolverBuilder) AddServer(areaID types.AreaID, server *metadata. 
areaServers[uniqueID(server)] = server - if areaID == types.AreaLAN || s.cfg.Datacenter == server.Datacenter { - s.leaderResolver.updateClientConn() - } - addrs := s.getDCAddrs(server.Datacenter) for _, resolver := range s.resolvers { if resolver.datacenter == server.Datacenter { @@ -225,10 +221,6 @@ func (s *ServerResolverBuilder) RemoveServer(areaID types.AreaID, server *metada delete(s.servers, areaID) } - if areaID == types.AreaLAN || s.cfg.Datacenter == server.Datacenter { - s.leaderResolver.updateClientConn() - } - addrs := s.getDCAddrs(server.Datacenter) for _, resolver := range s.resolvers { if resolver.datacenter == server.Datacenter { @@ -305,28 +297,8 @@ func (s *ServerResolverBuilder) UpdateLeaderAddr(datacenter, addr string) { s.lock.Lock() defer s.lock.Unlock() - lanAddr := DCPrefix(datacenter, addr) - - s.leaderResolver.globalAddr = lanAddr - - if s.lanHasAddrLocked(lanAddr) { - s.leaderResolver.updateClientConn() - } -} - -func (s *ServerResolverBuilder) lanHasAddrLocked(lanAddr string) bool { - areaServers, ok := s.servers[types.AreaLAN] - if !ok { - return false - } - - for _, server := range areaServers { - if DCPrefix(server.Datacenter, server.Addr.String()) == lanAddr { - return true - } - } - - return false + s.leaderResolver.globalAddr = DCPrefix(datacenter, addr) + s.leaderResolver.updateClientConn() } // serverResolver is a grpc Resolver that will keep a grpc.ClientConn up to date