From 9eb5fd38cba07a86c7c830c8b7f03b4be41e6285 Mon Sep 17 00:00:00 2001 From: Iryna Shustava Date: Fri, 11 Aug 2023 12:34:30 -0600 Subject: [PATCH 01/11] bimapper: allow to untrack links and support reference or id Also, add more test coverage --- .../resource/mappers/bimapper/bimapper.go | 24 +++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/internal/resource/mappers/bimapper/bimapper.go b/internal/resource/mappers/bimapper/bimapper.go index b1738130673..fd47f0beedc 100644 --- a/internal/resource/mappers/bimapper/bimapper.go +++ b/internal/resource/mappers/bimapper/bimapper.go @@ -215,6 +215,30 @@ func (m *Mapper) LinkIDsForItem(item *pbresource.ID) []*pbresource.ID { return out } +// LinkIDsForItem returns IDs to links related to the requested item. +func (m *Mapper) LinkIDsForItem(item *pbresource.ID) []*pbresource.ID { + if !resource.EqualType(item.Type, m.itemType) { + panic(fmt.Sprintf("expected item type %q got %q", + resource.TypeToString(m.itemType), + resource.TypeToString(item.Type), + )) + } + + m.lock.Lock() + defer m.lock.Unlock() + + links, ok := m.itemToLink[resource.NewReferenceKey(item)] + if !ok { + return nil + } + + out := make([]*pbresource.ID, 0, len(links)) + for l := range links { + out = append(out, l.ToID()) + } + return out +} + // ItemsForLink returns item ids for items related to the provided link. 
// Deprecated: use ItemIDsForLink func (m *Mapper) ItemsForLink(link *pbresource.ID) []*pbresource.ID { From 86beeae8103bc8f1bb5b40682335c6d75f0c2510 Mon Sep 17 00:00:00 2001 From: Iryna Shustava Date: Sun, 23 Jul 2023 13:18:35 -0600 Subject: [PATCH 02/11] mesh-controller: handle L4 protocols for a proxy without upstreams --- agent/consul/server.go | 3 + .../controllers/endpoints/controller.go | 2 +- .../controllers/endpoints/controller_test.go | 31 +++ internal/mesh/exports.go | 2 +- .../controllers/mesh/builder/builder.go | 169 ++++++++++++ .../controllers/mesh/builder/builder_test.go | 238 ++++++++++++++++ .../internal/controllers/mesh/controller.go | 159 +++++++++++ .../controllers/mesh/controller_test.go | 255 ++++++++++++++++++ .../mesh/mappers/service_endpoints.go | 40 +++ .../mesh/mappers/service_endpoints_test.go | 44 +++ internal/resource/resourcetest/builder.go | 2 +- 11 files changed, 942 insertions(+), 3 deletions(-) create mode 100644 internal/mesh/internal/controllers/mesh/builder/builder.go create mode 100644 internal/mesh/internal/controllers/mesh/builder/builder_test.go create mode 100644 internal/mesh/internal/controllers/mesh/controller.go create mode 100644 internal/mesh/internal/controllers/mesh/controller_test.go create mode 100644 internal/mesh/internal/controllers/mesh/mappers/service_endpoints.go create mode 100644 internal/mesh/internal/controllers/mesh/mappers/service_endpoints_test.go diff --git a/agent/consul/server.go b/agent/consul/server.go index eccd758d18d..c99e1cf21ed 100644 --- a/agent/consul/server.go +++ b/agent/consul/server.go @@ -19,6 +19,9 @@ import ( "sync/atomic" "time" + "github.com/hashicorp/consul/internal/mesh" + "github.com/hashicorp/consul/internal/resource" + "github.com/armon/go-metrics" "github.com/hashicorp/consul-net-rpc/net/rpc" "github.com/hashicorp/go-connlimit" diff --git a/internal/catalog/internal/controllers/endpoints/controller.go b/internal/catalog/internal/controllers/endpoints/controller.go index 
af10d40d3ba..641e69086e4 100644 --- a/internal/catalog/internal/controllers/endpoints/controller.go +++ b/internal/catalog/internal/controllers/endpoints/controller.go @@ -117,7 +117,7 @@ func (r *serviceEndpointsReconciler) Reconcile(ctx context.Context, rt controlle // cause this service to be rereconciled. r.workloadMap.TrackIDForSelector(req.ID, serviceData.service.GetWorkloads()) - // Now read and umarshal all workloads selected by the service. It is imperative + // Now read and unmarshal all workloads selected by the service. It is imperative // that this happens after we notify the selection tracker to be tracking that // selection criteria. If the order were reversed we could potentially miss // workload creations that should be selected if they happen after gathering diff --git a/internal/catalog/internal/controllers/endpoints/controller_test.go b/internal/catalog/internal/controllers/endpoints/controller_test.go index 646bb288d35..8d9b864ca36 100644 --- a/internal/catalog/internal/controllers/endpoints/controller_test.go +++ b/internal/catalog/internal/controllers/endpoints/controller_test.go @@ -611,6 +611,7 @@ func (suite *controllerSuite) TestController() { Addresses: []*pbcatalog.WorkloadAddress{{Host: "127.0.0.1"}}, Ports: map[string]*pbcatalog.WorkloadPort{ "http": {Port: 8080, Protocol: pbcatalog.Protocol_PROTOCOL_HTTP}, + "grpc": {Port: 8081, Protocol: pbcatalog.Protocol_PROTOCOL_GRPC}, }, Identity: "api", }). @@ -683,6 +684,36 @@ func (suite *controllerSuite) TestController() { // Verify that the endpoints were not regenerated suite.client.RequireVersionUnchanged(suite.T(), endpointsID, endpoints.Version) + // Update the service. + updatedService := rtest.Resource(types.ServiceType, "api"). 
+ WithData(suite.T(), &pbcatalog.Service{ + Workloads: &pbcatalog.WorkloadSelector{ + Prefixes: []string{"api-"}, + }, + Ports: []*pbcatalog.ServicePort{ + {TargetPort: "http", Protocol: pbcatalog.Protocol_PROTOCOL_HTTP}, + {TargetPort: "grpc", Protocol: pbcatalog.Protocol_PROTOCOL_GRPC}, + }, + }). + Write(suite.T(), suite.client) + + // Wait for the endpoints to be regenerated + endpoints = suite.client.WaitForNewVersion(suite.T(), endpointsID, endpoints.Version) + rtest.RequireOwner(suite.T(), endpoints, updatedService.Id, false) + + // ensure the endpoint was put into the passing state + suite.requireEndpoints(endpoints, &pbcatalog.Endpoint{ + TargetRef: workload.Id, + Addresses: []*pbcatalog.WorkloadAddress{ + {Host: "127.0.0.1", Ports: []string{"grpc", "http"}}, + }, + Ports: map[string]*pbcatalog.WorkloadPort{ + "http": {Port: 8080, Protocol: pbcatalog.Protocol_PROTOCOL_HTTP}, + "grpc": {Port: 8081, Protocol: pbcatalog.Protocol_PROTOCOL_GRPC}, + }, + HealthStatus: pbcatalog.Health_HEALTH_PASSING, + }) + // Delete the endpoints. 
The controller should bring these back momentarily suite.client.Delete(suite.ctx, &pbresource.DeleteRequest{Id: endpointsID}) diff --git a/internal/mesh/exports.go b/internal/mesh/exports.go index 6a6f97221f3..5df51439d16 100644 --- a/internal/mesh/exports.go +++ b/internal/mesh/exports.go @@ -54,7 +54,7 @@ var ( ComputedRoutesType = types.ComputedRoutesType ) -// RegisterTypes adds all resource types within the "catalog" API group +// RegisterTypes adds all resource types within the "mesh" API group // to the given type registry func RegisterTypes(r resource.Registry) { types.Register(r) diff --git a/internal/mesh/internal/controllers/mesh/builder/builder.go b/internal/mesh/internal/controllers/mesh/builder/builder.go new file mode 100644 index 00000000000..b367ef9f84f --- /dev/null +++ b/internal/mesh/internal/controllers/mesh/builder/builder.go @@ -0,0 +1,169 @@ +package builder + +import ( + "fmt" + + "github.com/hashicorp/consul/envoyextensions/xdscommon" + pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v1alpha1" + pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v1alpha1" + "github.com/hashicorp/consul/proto-public/pbresource" +) + +type Builder struct { + id *pbresource.ID + proxyStateTemplate *pbmesh.ProxyStateTemplate + + lastBuiltListener lastListenerData +} + +type lastListenerData struct { + index int +} + +func New(id *pbresource.ID, identity *pbresource.Reference) *Builder { + return &Builder{ + id: id, + proxyStateTemplate: &pbmesh.ProxyStateTemplate{ + ProxyState: &pbmesh.ProxyState{ + Identity: identity, + Clusters: make(map[string]*pbmesh.Cluster), + Endpoints: make(map[string]*pbmesh.Endpoints), + }, + RequiredEndpoints: make(map[string]*pbmesh.EndpointRef), + RequiredLeafCertificates: make(map[string]*pbmesh.LeafCertificateRef), + RequiredTrustBundles: make(map[string]*pbmesh.TrustBundleRef), + }, + } +} + +func (b *Builder) Build() *pbmesh.ProxyStateTemplate { + b.lastBuiltListener = lastListenerData{} + return 
b.proxyStateTemplate +} + +func (b *Builder) AddInboundListener(name string, workload *pbcatalog.Workload) *Builder { + listener := &pbmesh.Listener{ + Name: name, + Direction: pbmesh.Direction_DIRECTION_INBOUND, + } + + // We will take listener bind port from the workload for now. + // Find mesh port. + var meshPort string + for portName, port := range workload.Ports { + if port.Protocol == pbcatalog.Protocol_PROTOCOL_MESH { + meshPort = portName + break + } + } + + // Check if the workload has a specific address for the mesh port. + var meshAddress string + for _, address := range workload.Addresses { + for _, port := range address.Ports { + if port == meshPort { + meshAddress = address.Host + } + } + } + // Otherwise, assume the first address in the addresses list. + if meshAddress == "" { + // It is safe to assume that there's at least one address because we validate it when creating the workload. + meshAddress = workload.Addresses[0].Host + } + + listener.BindAddress = &pbmesh.Listener_IpPort{ + IpPort: &pbmesh.IPPortAddress{ + Ip: meshAddress, + Port: workload.Ports[meshPort].Port, + }, + } + + // Track the last added listener. + b.lastBuiltListener.index = len(b.proxyStateTemplate.ProxyState.Listeners) + // Add listener to proxy state template + b.proxyStateTemplate.ProxyState.Listeners = append(b.proxyStateTemplate.ProxyState.Listeners, listener) + + return b +} + +func (b *Builder) AddInboundRouters(workload *pbcatalog.Workload) *Builder { + listener := b.proxyStateTemplate.ProxyState.Listeners[b.lastBuiltListener.index] + + // Go through workload ports and add the first non-mesh port we see. + // todo (ishustava): Note we will need to support multiple ports in the future. + // todo (ishustava): make sure we always iterate through ports in the same order so we don't need to send more updates to envoy. 
+ for portName, port := range workload.Ports { + clusterName := fmt.Sprintf("%s:%s", xdscommon.LocalAppClusterName, portName) + if port.Protocol == pbcatalog.Protocol_PROTOCOL_TCP { + r := &pbmesh.Router{ + Destination: &pbmesh.Router_L4{ + L4: &pbmesh.L4Destination{ + Name: clusterName, + StatPrefix: listener.Name, + }, + }, + } + listener.Routers = append(listener.Routers, r) + + // Make cluster for this router destination. + b.proxyStateTemplate.ProxyState.Clusters[clusterName] = &pbmesh.Cluster{ + Group: &pbmesh.Cluster_EndpointGroup{ + EndpointGroup: &pbmesh.EndpointGroup{ + Group: &pbmesh.EndpointGroup_Static{ + Static: &pbmesh.StaticEndpointGroup{ + Name: clusterName, + }, + }, + }, + }, + } + + // Finally, add static endpoints. We're adding it statically as opposed to creating an endpoint ref + // because this endpoint is less likely to change as we're not tracking the health. + endpoint := &pbmesh.Endpoint{ + Address: &pbmesh.Endpoint_HostPort{ + HostPort: &pbmesh.HostPortAddress{ + Host: "127.0.0.1", + Port: port.Port, + }, + }, + } + b.proxyStateTemplate.ProxyState.Endpoints[clusterName] = &pbmesh.Endpoints{ + Name: clusterName, + Endpoints: []*pbmesh.Endpoint{endpoint}, + } + break + } + } + return b +} + +func (b *Builder) AddInboundTLS() *Builder { + listener := b.proxyStateTemplate.ProxyState.Listeners[b.lastBuiltListener.index] + // For inbound TLS, we want to use this proxy's identity. 
+ workloadIdentity := b.proxyStateTemplate.ProxyState.Identity.Name + + inboundTLS := &pbmesh.TransportSocket{ + ConnectionTls: &pbmesh.TransportSocket_InboundMesh{ + InboundMesh: &pbmesh.InboundMeshMTLS{ + IdentityKey: workloadIdentity, + ValidationContext: &pbmesh.MeshInboundValidationContext{TrustBundlePeerNameKeys: []string{b.id.Tenancy.PeerName}}, + }, + }, + } + b.proxyStateTemplate.RequiredLeafCertificates[workloadIdentity] = &pbmesh.LeafCertificateRef{ + Name: workloadIdentity, + Namespace: b.id.Tenancy.Namespace, + Partition: b.id.Tenancy.Partition, + } + + b.proxyStateTemplate.RequiredTrustBundles[b.id.Tenancy.PeerName] = &pbmesh.TrustBundleRef{ + Peer: b.id.Tenancy.PeerName, + } + + for i := range listener.Routers { + listener.Routers[i].InboundTls = inboundTLS + } + return b +} diff --git a/internal/mesh/internal/controllers/mesh/builder/builder_test.go b/internal/mesh/internal/controllers/mesh/builder/builder_test.go new file mode 100644 index 00000000000..1f0ee988db9 --- /dev/null +++ b/internal/mesh/internal/controllers/mesh/builder/builder_test.go @@ -0,0 +1,238 @@ +package builder + +import ( + "testing" + + "github.com/hashicorp/consul/internal/mesh/internal/types" + "github.com/hashicorp/consul/internal/resource/resourcetest" + pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v1alpha1" + pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v1alpha1" + "github.com/hashicorp/consul/proto-public/pbresource" + "github.com/hashicorp/consul/proto/private/prototest" + "github.com/stretchr/testify/require" +) + +func TestAddInboundListener(t *testing.T) { + listenerName := "test-listener" + + cases := map[string]struct { + workload *pbcatalog.Workload + expListener *pbmesh.Listener + }{ + "single workload address without ports": { + workload: &pbcatalog.Workload{ + Addresses: []*pbcatalog.WorkloadAddress{ + { + Host: "10.0.0.1", + }, + }, + Ports: map[string]*pbcatalog.WorkloadPort{ + "port1": {Port: 8080, Protocol: 
pbcatalog.Protocol_PROTOCOL_TCP}, + "port2": {Port: 20000, Protocol: pbcatalog.Protocol_PROTOCOL_MESH}, + }, + }, + expListener: &pbmesh.Listener{ + Name: listenerName, + Direction: pbmesh.Direction_DIRECTION_INBOUND, + BindAddress: &pbmesh.Listener_IpPort{ + IpPort: &pbmesh.IPPortAddress{ + Ip: "10.0.0.1", + Port: 20000, + }, + }, + }, + }, + "multiple workload addresses without ports: prefer first address": { + workload: &pbcatalog.Workload{ + Addresses: []*pbcatalog.WorkloadAddress{ + { + Host: "10.0.0.1", + }, + { + Host: "10.0.0.2", + }, + }, + Ports: map[string]*pbcatalog.WorkloadPort{ + "port1": {Port: 8080, Protocol: pbcatalog.Protocol_PROTOCOL_TCP}, + "port2": {Port: 20000, Protocol: pbcatalog.Protocol_PROTOCOL_MESH}, + }, + }, + expListener: &pbmesh.Listener{ + Name: listenerName, + Direction: pbmesh.Direction_DIRECTION_INBOUND, + BindAddress: &pbmesh.Listener_IpPort{ + IpPort: &pbmesh.IPPortAddress{ + Ip: "10.0.0.1", + Port: 20000, + }, + }, + }, + }, + "multiple workload addresses with specific ports": { + workload: &pbcatalog.Workload{ + Addresses: []*pbcatalog.WorkloadAddress{ + { + Host: "127.0.0.1", + Ports: []string{"port1"}, + }, + { + Host: "10.0.0.2", + Ports: []string{"port2"}, + }, + }, + Ports: map[string]*pbcatalog.WorkloadPort{ + "port1": {Port: 8080, Protocol: pbcatalog.Protocol_PROTOCOL_TCP}, + "port2": {Port: 20000, Protocol: pbcatalog.Protocol_PROTOCOL_MESH}, + }, + }, + expListener: &pbmesh.Listener{ + Name: listenerName, + Direction: pbmesh.Direction_DIRECTION_INBOUND, + BindAddress: &pbmesh.Listener_IpPort{ + IpPort: &pbmesh.IPPortAddress{ + Ip: "10.0.0.2", + Port: 20000, + }, + }, + }, + }, + } + + for name, c := range cases { + t.Run(name, func(t *testing.T) { + proxyStateTemplateID := testProxyStateTemplateID() + + proxyStateTemplate := New(proxyStateTemplateID, testIdentityRef()).AddInboundListener(listenerName, c.workload).Build() + require.Len(t, proxyStateTemplate.ProxyState.Listeners, 1) + prototest.AssertDeepEqual(t, 
c.expListener, proxyStateTemplate.ProxyState.Listeners[0]) + }) + } +} + +func TestAddInboundRouters(t *testing.T) { + workload := testWorkload() + + // Create new builder + builder := New(testProxyStateTemplateID(), testIdentityRef()). + AddInboundListener("test-listener", workload). + AddInboundRouters(workload) + + clusterName := "local_app:port1" + expRouters := []*pbmesh.Router{ + { + Destination: &pbmesh.Router_L4{ + L4: &pbmesh.L4Destination{ + Name: clusterName, + StatPrefix: "test-listener", + }, + }, + }, + } + expCluster := &pbmesh.Cluster{ + Group: &pbmesh.Cluster_EndpointGroup{ + EndpointGroup: &pbmesh.EndpointGroup{ + Group: &pbmesh.EndpointGroup_Static{ + Static: &pbmesh.StaticEndpointGroup{ + Name: clusterName, + }, + }, + }, + }, + } + + expEndpoints := &pbmesh.Endpoints{ + Name: clusterName, + Endpoints: []*pbmesh.Endpoint{ + { + Address: &pbmesh.Endpoint_HostPort{ + HostPort: &pbmesh.HostPortAddress{ + Host: "127.0.0.1", + Port: 8080, + }, + }, + }, + }, + } + + proxyStateTemplate := builder.Build() + + // Check routers. + require.Len(t, proxyStateTemplate.ProxyState.Listeners, 1) + prototest.AssertDeepEqual(t, expRouters, proxyStateTemplate.ProxyState.Listeners[0].Routers) + + // Check that the cluster exists in the clusters map. + prototest.AssertDeepEqual(t, expCluster, proxyStateTemplate.ProxyState.Clusters[clusterName]) + + // Check that the endpoints exist in the endpoint map for this cluster name. + prototest.AssertDeepEqual(t, expEndpoints, proxyStateTemplate.ProxyState.Endpoints[clusterName]) +} + +func TestAddInboundTLS(t *testing.T) { + id := testProxyStateTemplateID() + workload := testWorkload() + + proxyStateTemplate := New(id, testIdentityRef()). + AddInboundListener("test-listener", workload). + AddInboundRouters(workload). + AddInboundTLS(). 
+ Build() + + expTransportSocket := &pbmesh.TransportSocket{ + ConnectionTls: &pbmesh.TransportSocket_InboundMesh{ + InboundMesh: &pbmesh.InboundMeshMTLS{ + IdentityKey: workload.Identity, + ValidationContext: &pbmesh.MeshInboundValidationContext{ + TrustBundlePeerNameKeys: []string{id.Tenancy.PeerName}}, + }, + }, + } + expLeafCertRef := &pbmesh.LeafCertificateRef{ + Name: workload.Identity, + Namespace: id.Tenancy.Namespace, + Partition: id.Tenancy.Partition, + } + + require.Len(t, proxyStateTemplate.ProxyState.Listeners, 1) + // Check that each router has the same TLS configuration. + for _, router := range proxyStateTemplate.ProxyState.Listeners[0].Routers { + prototest.AssertDeepEqual(t, expTransportSocket, router.InboundTls) + } + + // Check that there's a leaf cert ref added to the map. + prototest.AssertDeepEqual(t, expLeafCertRef, proxyStateTemplate.RequiredLeafCertificates[workload.Identity]) + + // Check that there's trust bundle name added to the trust bundles names. + _, ok := proxyStateTemplate.RequiredTrustBundles[id.Tenancy.PeerName] + require.True(t, ok) +} + +func testProxyStateTemplateID() *pbresource.ID { + return resourcetest.Resource(types.ProxyStateTemplateType, "test").ID() +} + +func testIdentityRef() *pbresource.Reference { + return &pbresource.Reference{ + Name: "test-identity", + Tenancy: &pbresource.Tenancy{ + Namespace: "default", + Partition: "default", + PeerName: "local", + }, + } +} + +func testWorkload() *pbcatalog.Workload { + return &pbcatalog.Workload{ + Identity: "test-identity", + Addresses: []*pbcatalog.WorkloadAddress{ + { + Host: "10.0.0.1", + }, + }, + Ports: map[string]*pbcatalog.WorkloadPort{ + "port1": {Port: 8080, Protocol: pbcatalog.Protocol_PROTOCOL_TCP}, + "port2": {Port: 8081, Protocol: pbcatalog.Protocol_PROTOCOL_HTTP}, + "port3": {Port: 20000, Protocol: pbcatalog.Protocol_PROTOCOL_MESH}, + }, + } +} diff --git a/internal/mesh/internal/controllers/mesh/controller.go 
b/internal/mesh/internal/controllers/mesh/controller.go new file mode 100644 index 00000000000..84a7b6436b7 --- /dev/null +++ b/internal/mesh/internal/controllers/mesh/controller.go @@ -0,0 +1,159 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package mesh + +import ( + "context" + + "github.com/hashicorp/consul/envoyextensions/xdscommon" + "github.com/hashicorp/consul/internal/catalog" + "github.com/hashicorp/consul/internal/controller" + "github.com/hashicorp/consul/internal/mesh/internal/controllers/mesh/builder" + "github.com/hashicorp/consul/internal/mesh/internal/controllers/mesh/mappers" + "github.com/hashicorp/consul/internal/mesh/internal/types" + "github.com/hashicorp/consul/internal/resource" + pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v1alpha1" + pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v1alpha1" + "github.com/hashicorp/consul/proto-public/pbresource" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/types/known/anypb" +) + +// ControllerName is the name for this controller. It's used for logging or status keys. +const ControllerName = "consul.io/mesh-controller" + +func Controller() controller.Controller { + return controller.ForType(types.ProxyStateTemplateType). + WithWatch(catalog.ServiceEndpointsType, mappers.MapServiceEndpointsToProxyStateTemplate). + WithReconciler(&reconciler{}) +} + +type reconciler struct{} + +func (r *reconciler) Reconcile(ctx context.Context, rt controller.Runtime, req controller.Request) error { + rt.Logger = rt.Logger.With("resource-id", req.ID, "controller", ControllerName) + + rt.Logger.Trace("reconciling proxy state template", "id", req.ID) + + // Check if the workload exists. 
+ workloadID := workloadIDFromProxyStateTemplate(req.ID) + rsp, err := rt.Client.Read(ctx, &pbresource.ReadRequest{Id: workloadID}) + + switch { + case status.Code(err) == codes.NotFound: + // If workload has been deleted, then return as ProxyStateTemplate should be cleaned up + // by the garbage collector because of the owner reference. + rt.Logger.Trace("workload doesn't exist; skipping reconciliation", "workload", workloadID) + return nil + case err != nil: + rt.Logger.Error("error reading the associated workload", "error", err) + return err + } + + // Parse the workload data for this proxy. Note that we know that this workload has a service associated with it + // because we only trigger updates off of service endpoints. + workloadRes := rsp.Resource + var workload pbcatalog.Workload + err = workloadRes.Data.UnmarshalTo(&workload) + if err != nil { + rt.Logger.Error("error parsing workload data", "workload", workloadRes.Id) + return resource.NewErrDataParse(&workload, err) + } + + rsp, err = rt.Client.Read(ctx, &pbresource.ReadRequest{Id: req.ID}) + var buildNew bool + switch { + case status.Code(err) == codes.NotFound: + // Nothing to do as this resource may not have been created yet. + rt.Logger.Trace("proxy state template for this workload doesn't yet exist; generating a new one", "id", req.ID) + buildNew = true + case err != nil: + rt.Logger.Error("error reading proxy state template", "error", err) + return err + } + + if !isMeshEnabled(workload.Ports) { + // Skip non-mesh workloads. + + // If there's existing proxy state template, delete it.
+ if !buildNew { + rt.Logger.Trace("deleting existing proxy state template because workload is no longer on the mesh", "id", req.ID) + _, err = rt.Client.Delete(ctx, &pbresource.DeleteRequest{Id: req.ID}) + if err != nil { + rt.Logger.Error("error deleting existing proxy state template", "error", err) + return err + } + } + rt.Logger.Trace("skipping proxy state template generation because workload is not on the mesh", "workload", workloadRes.Id) + return nil + } + + var proxyTemplate pbmesh.ProxyStateTemplate + if !buildNew { + err = rsp.Resource.Data.UnmarshalTo(&proxyTemplate) + if err != nil { + rt.Logger.Error("error parsing proxy state template data", "id", req.ID) + return resource.NewErrDataParse(&proxyTemplate, err) + } + } + + b := builder.New(req.ID, workloadIdentityRefFromWorkload(workloadRes.Id)). + AddInboundListener(xdscommon.PublicListenerName, &workload). + AddInboundRouters(&workload). + AddInboundTLS() + + newProxyTemplate := b.Build() + + same := proto.Equal(&proxyTemplate, newProxyTemplate) + if buildNew || !same { + proxyTemplateData, err := anypb.New(newProxyTemplate) + if err != nil { + rt.Logger.Error("error creating proxy state template data", "error", err) + return err + } + rt.Logger.Trace("updating proxy state template", "id", req.ID) + _, err = rt.Client.Write(ctx, &pbresource.WriteRequest{ + Resource: &pbresource.Resource{ + Id: req.ID, + Owner: workloadRes.Id, + Data: proxyTemplateData, + }, + }) + if err != nil { + rt.Logger.Error("error writing proxy state template", "error", err) + return err + } + } else { + rt.Logger.Trace("proxy state template data has not changed, skipping update", "id", req.ID) + } + return nil +} + +func workloadIDFromProxyStateTemplate(id *pbresource.ID) *pbresource.ID { + return &pbresource.ID{ + Name: id.Name, + Tenancy: id.Tenancy, + Type: catalog.WorkloadType, + } +} + +func workloadIdentityRefFromWorkload(id *pbresource.ID) *pbresource.Reference { + return &pbresource.Reference{ + Name: id.Name, + 
Tenancy: id.Tenancy, + } +} + +// isMeshEnabled returns true if workload or service endpoints port +// contain a port with the "mesh" protocol. +func isMeshEnabled(ports map[string]*pbcatalog.WorkloadPort) bool { + for _, port := range ports { + if port.Protocol == pbcatalog.Protocol_PROTOCOL_MESH { + return true + } + } + return false +} diff --git a/internal/mesh/internal/controllers/mesh/controller_test.go b/internal/mesh/internal/controllers/mesh/controller_test.go new file mode 100644 index 00000000000..cd5f82e3fbc --- /dev/null +++ b/internal/mesh/internal/controllers/mesh/controller_test.go @@ -0,0 +1,255 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package mesh + +import ( + "context" + "testing" + + svctest "github.com/hashicorp/consul/agent/grpc-external/services/resource/testing" + "github.com/hashicorp/consul/envoyextensions/xdscommon" + "github.com/hashicorp/consul/internal/catalog" + "github.com/hashicorp/consul/internal/controller" + "github.com/hashicorp/consul/internal/mesh/internal/controllers/mesh/builder" + "github.com/hashicorp/consul/internal/mesh/internal/types" + "github.com/hashicorp/consul/internal/resource/resourcetest" + pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v1alpha1" + pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v1alpha1" + "github.com/hashicorp/consul/proto-public/pbresource" + "github.com/hashicorp/consul/proto/private/prototest" + "github.com/hashicorp/consul/sdk/testutil" + "github.com/hashicorp/consul/sdk/testutil/retry" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" +) + +type meshControllerTestSuite struct { + suite.Suite + + client *resourcetest.Client + runtime controller.Runtime + + ctl reconciler + ctx context.Context + + workloadID *pbresource.ID + workload *pbcatalog.Workload + proxyStateTemplate *pbmesh.ProxyStateTemplate +} + +func (suite *meshControllerTestSuite) SetupTest() { + resourceClient := 
svctest.RunResourceService(suite.T(), types.Register, catalog.RegisterTypes) + suite.client = resourcetest.NewClient(resourceClient) + suite.runtime = controller.Runtime{Client: resourceClient, Logger: testutil.Logger(suite.T())} + suite.ctx = testutil.TestContext(suite.T()) + + suite.workload = &pbcatalog.Workload{ + Identity: "test-identity", + Addresses: []*pbcatalog.WorkloadAddress{ + { + Host: "10.0.0.1", + }, + }, + Ports: map[string]*pbcatalog.WorkloadPort{ + "tcp": {Port: 8080, Protocol: pbcatalog.Protocol_PROTOCOL_TCP}, + "mesh": {Port: 20000, Protocol: pbcatalog.Protocol_PROTOCOL_MESH}, + }, + } + + suite.workloadID = resourcetest.Resource(catalog.WorkloadType, "test-workload"). + WithData(suite.T(), suite.workload). + Write(suite.T(), resourceClient).Id + + identityRef := &pbresource.Reference{ + Name: suite.workload.Identity, + Tenancy: suite.workloadID.Tenancy, + } + + suite.proxyStateTemplate = builder.New(suite.workloadID, identityRef). + AddInboundListener(xdscommon.PublicListenerName, suite.workload). + AddInboundRouters(suite.workload). + AddInboundTLS(). + Build() +} + +func (suite *meshControllerTestSuite) TestReconcile_NoWorkload() { + // This test ensures that removed workloads are ignored and don't result + // in the creation of the proxy state template. + err := suite.ctl.Reconcile(context.Background(), suite.runtime, controller.Request{ + ID: resourceID(types.ProxyStateTemplateType, "not-found"), + }) + require.NoError(suite.T(), err) + + suite.client.RequireResourceNotFound(suite.T(), resourceID(types.ProxyStateTemplateType, "not-found")) +} + +func (suite *meshControllerTestSuite) TestReconcile_NonMeshWorkload() { + nonMeshWorkload := &pbcatalog.Workload{ + Addresses: []*pbcatalog.WorkloadAddress{ + { + Host: "10.0.0.1", + }, + }, + Ports: map[string]*pbcatalog.WorkloadPort{ + "tcp": {Port: 8080, Protocol: pbcatalog.Protocol_PROTOCOL_TCP}, + }, + } + + resourcetest.Resource(catalog.WorkloadType, "test-non-mesh-workload"). 
+ WithData(suite.T(), nonMeshWorkload). + Write(suite.T(), suite.client.ResourceServiceClient) + + err := suite.ctl.Reconcile(context.Background(), suite.runtime, controller.Request{ + ID: resourceID(types.ProxyStateTemplateType, "test-non-mesh-workload"), + }) + require.NoError(suite.T(), err) + + suite.client.RequireResourceNotFound(suite.T(), resourceID(types.ProxyStateTemplateType, "test-non-mesh-workload")) +} + +func (suite *meshControllerTestSuite) TestReconcile_NoExistingProxyStateTemplate() { + err := suite.ctl.Reconcile(context.Background(), suite.runtime, controller.Request{ + ID: resourceID(types.ProxyStateTemplateType, suite.workloadID.Name), + }) + require.NoError(suite.T(), err) + + res := suite.client.RequireResourceExists(suite.T(), resourceID(types.ProxyStateTemplateType, suite.workloadID.Name)) + require.NoError(suite.T(), err) + require.NotNil(suite.T(), res.Data) + prototest.AssertDeepEqual(suite.T(), suite.workloadID, res.Owner) +} + +func (suite *meshControllerTestSuite) TestReconcile_ExistingProxyStateTemplate_WithUpdates() { + // Write the original. + resourcetest.Resource(types.ProxyStateTemplateType, "test-workload"). + WithData(suite.T(), suite.proxyStateTemplate). + WithOwner(suite.workloadID). + Write(suite.T(), suite.client.ResourceServiceClient) + + // Update the workload. + suite.workload.Ports["mesh"].Port = 21000 + updatedWorkloadID := resourcetest.Resource(catalog.WorkloadType, "test-workload"). + WithData(suite.T(), suite.workload). 
+ Write(suite.T(), suite.client.ResourceServiceClient).Id + + err := suite.ctl.Reconcile(context.Background(), suite.runtime, controller.Request{ + ID: resourceID(types.ProxyStateTemplateType, updatedWorkloadID.Name), + }) + require.NoError(suite.T(), err) + + res := suite.client.RequireResourceExists(suite.T(), resourceID(types.ProxyStateTemplateType, updatedWorkloadID.Name)) + require.NoError(suite.T(), err) + require.NotNil(suite.T(), res.Data) + prototest.AssertDeepEqual(suite.T(), updatedWorkloadID, res.Owner) + + var updatedProxyStateTemplate pbmesh.ProxyStateTemplate + err = res.Data.UnmarshalTo(&updatedProxyStateTemplate) + require.NoError(suite.T(), err) + + // Check that our value is updated. + inboundListenerPort := updatedProxyStateTemplate.ProxyState.Listeners[0].BindAddress.(*pbmesh.Listener_IpPort).IpPort.Port + require.Equal(suite.T(), uint32(21000), inboundListenerPort) +} + +func (suite *meshControllerTestSuite) TestReconcile_ExistingProxyStateTemplate_NoUpdates() { + // Write the original + originalProxyState := resourcetest.Resource(types.ProxyStateTemplateType, "test-workload"). + WithData(suite.T(), suite.proxyStateTemplate). + WithOwner(suite.workloadID). + Write(suite.T(), suite.client.ResourceServiceClient) + + // Update the metadata on the workload which should result in no changes. + updatedWorkloadID := resourcetest.Resource(catalog.WorkloadType, "test-workload"). + WithData(suite.T(), suite.workload). + WithMeta("some", "meta"). 
+ Write(suite.T(), suite.client.ResourceServiceClient).Id + + err := suite.ctl.Reconcile(context.Background(), suite.runtime, controller.Request{ + ID: resourceID(types.ProxyStateTemplateType, updatedWorkloadID.Name), + }) + require.NoError(suite.T(), err) + + updatedProxyState := suite.client.RequireResourceExists(suite.T(), resourceID(types.ProxyStateTemplateType, suite.workloadID.Name)) + resourcetest.RequireVersionUnchanged(suite.T(), updatedProxyState, originalProxyState.Version) +} + +// delete the workload, check that proxy state gets deleted (?can we check that?) +func (suite *meshControllerTestSuite) TestController() { + // Run the controller manager + mgr := controller.NewManager(suite.client, suite.runtime.Logger) + mgr.Register(Controller()) + mgr.SetRaftLeader(true) + go mgr.Run(suite.ctx) + + proxyStateTemplateID := resourcetest.Resource(types.ProxyStateTemplateType, "test-workload").ID() + // Add a mesh workload and check that it gets reconciled. + resourcetest.Resource(catalog.WorkloadType, "test-workload"). + WithData(suite.T(), suite.workload). + Write(suite.T(), suite.client.ResourceServiceClient) + + resourcetest.Resource(catalog.ServiceType, "test-service"). + WithData(suite.T(), &pbcatalog.Service{ + Workloads: &pbcatalog.WorkloadSelector{Names: []string{"test-workload"}}, + Ports: []*pbcatalog.ServicePort{ + {TargetPort: "http", Protocol: pbcatalog.Protocol_PROTOCOL_HTTP}, + }}). + Write(suite.T(), suite.client.ResourceServiceClient) + + endpoints := &pbcatalog.ServiceEndpoints{ + Endpoints: []*pbcatalog.Endpoint{ + { + TargetRef: suite.workloadID, + Addresses: suite.workload.Addresses, + Ports: suite.workload.Ports, + }, + }, + } + resourcetest.Resource(catalog.ServiceEndpointsType, "test-service"). + WithData(suite.T(), endpoints). + Write(suite.T(), suite.client.ResourceServiceClient) + + // Check that proxy state template resource is generated. 
+ var proxyStateTmpl *pbresource.Resource + retry.Run(suite.T(), func(r *retry.R) { + proxyStateTmpl = suite.client.RequireResourceExists(r, proxyStateTemplateID) + }) + + // Delete the proxy state template resource and check that it gets regenerated. + _, err := suite.client.Delete(suite.ctx, &pbresource.DeleteRequest{Id: proxyStateTemplateID}) + require.NoError(suite.T(), err) + + suite.client.WaitForNewVersion(suite.T(), proxyStateTemplateID, proxyStateTmpl.Version) + + // Update workload and service endpoints to not be on the mesh anymore + // and check that the proxy state template is deleted. + delete(suite.workload.Ports, "mesh") + resourcetest.Resource(catalog.WorkloadType, "test-workload"). + WithData(suite.T(), suite.workload). + Write(suite.T(), suite.client.ResourceServiceClient) + + delete(endpoints.Endpoints[0].Ports, "mesh") + resourcetest.Resource(catalog.ServiceEndpointsType, "test-service"). + WithData(suite.T(), endpoints). + Write(suite.T(), suite.client.ResourceServiceClient) + + retry.Run(suite.T(), func(r *retry.R) { + suite.client.RequireResourceNotFound(r, proxyStateTemplateID) + }) +} + +func TestMeshController(t *testing.T) { + suite.Run(t, new(meshControllerTestSuite)) +} + +func resourceID(rtype *pbresource.Type, name string) *pbresource.ID { + return &pbresource.ID{ + Type: rtype, + Tenancy: &pbresource.Tenancy{ + Partition: "default", + Namespace: "default", + PeerName: "local", + }, + Name: name, + } +} diff --git a/internal/mesh/internal/controllers/mesh/mappers/service_endpoints.go b/internal/mesh/internal/controllers/mesh/mappers/service_endpoints.go new file mode 100644 index 00000000000..9453691532b --- /dev/null +++ b/internal/mesh/internal/controllers/mesh/mappers/service_endpoints.go @@ -0,0 +1,40 @@ +package mappers + +import ( + "context" + + "github.com/hashicorp/consul/internal/controller" + "github.com/hashicorp/consul/internal/mesh/internal/types" + pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v1alpha1" 
+ "github.com/hashicorp/consul/proto-public/pbresource" +) + +// MapServiceEndpointsToProxyStateTemplate maps catalog.ServiceEndpoints objects to the IDs of +// ProxyStateTemplate. +// For a downstream proxy, we only need to generate requests from workloads this endpoints points to +// If this service endpoints is an upstream for some proxies, we need to generate requests for those proxies as well. +// so we need to have a map from service endpoints to downstream proxy Ids +func MapServiceEndpointsToProxyStateTemplate(_ context.Context, _ controller.Runtime, res *pbresource.Resource) ([]controller.Request, error) { + // This mapper needs to look up workload IDs from service endpoints and replace them with proxystatetemplatetype. + var serviceEndpoints pbcatalog.ServiceEndpoints + err := res.Data.UnmarshalTo(&serviceEndpoints) + if err != nil { + return nil, err + } + + var result []controller.Request + + for _, endpoint := range serviceEndpoints.Endpoints { + // Convert the reference to a workload to a ProxyStateTemplate ID. + // Because these resources are name and tenancy aligned, we only need to change the type. 
+ result = append(result, controller.Request{ + ID: &pbresource.ID{ + Name: endpoint.TargetRef.Name, + Tenancy: endpoint.TargetRef.Tenancy, + Type: types.ProxyStateTemplateType, + }, + }) + } + + return result, err +} diff --git a/internal/mesh/internal/controllers/mesh/mappers/service_endpoints_test.go b/internal/mesh/internal/controllers/mesh/mappers/service_endpoints_test.go new file mode 100644 index 00000000000..03e73150e9f --- /dev/null +++ b/internal/mesh/internal/controllers/mesh/mappers/service_endpoints_test.go @@ -0,0 +1,44 @@ +package mappers + +import ( + "context" + "testing" + + "github.com/hashicorp/consul/internal/catalog" + "github.com/hashicorp/consul/internal/controller" + "github.com/hashicorp/consul/internal/mesh/internal/types" + "github.com/hashicorp/consul/internal/resource/resourcetest" + pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v1alpha1" + "github.com/stretchr/testify/require" +) + +func TestMapServiceEndpointsToProxyStateTemplate(t *testing.T) { + workload1 := resourcetest.Resource(catalog.WorkloadType, "workload-1").Build() + workload2 := resourcetest.Resource(catalog.WorkloadType, "workload-2").Build() + serviceEndpoints := resourcetest.Resource(catalog.ServiceEndpointsType, "service"). 
+ WithData(t, &pbcatalog.ServiceEndpoints{ + Endpoints: []*pbcatalog.Endpoint{ + { + TargetRef: workload1.Id, + }, + { + TargetRef: workload2.Id, + }, + }, + }).Build() + proxyTmpl1ID := resourcetest.Resource(types.ProxyStateTemplateType, "workload-1").ID() + proxyTmpl2ID := resourcetest.Resource(types.ProxyStateTemplateType, "workload-2").ID() + + expRequests := []controller.Request{ + { + ID: proxyTmpl1ID, + }, + { + ID: proxyTmpl2ID, + }, + } + + requests, err := MapServiceEndpointsToProxyStateTemplate(context.Background(), controller.Runtime{}, serviceEndpoints) + require.NoError(t, err) + require.ElementsMatch(t, expRequests, requests) +} diff --git a/internal/resource/resourcetest/builder.go b/internal/resource/resourcetest/builder.go index 1256b961c7e..2e0f5991e28 100644 --- a/internal/resource/resourcetest/builder.go +++ b/internal/resource/resourcetest/builder.go @@ -144,7 +144,7 @@ func (b *resourceBuilder) Write(t T, client pbresource.ResourceServiceClient) *p Resource: res, }) - if err == nil || res.Id.Uid != "" || status.Code(err) != codes.FailedPrecondition { + if err == nil || res.Id.Uid != "" || status.Code(err) == codes.FailedPrecondition { if err != nil { t.Logf("write saw error: %v", err) } From 89c106e2434fe0cc06b30dbc07879305de794189 Mon Sep 17 00:00:00 2001 From: Iryna Shustava Date: Fri, 28 Jul 2023 12:35:21 -0600 Subject: [PATCH 03/11] sidecar-controller: Support explicit destinations for L4 protocols and single ports. * This controller generates and saves ProxyStateTemplate for sidecar proxies. * It currently supports single-port L4 ports only. * It keeps a cache of all destinations to make it easier to compute and retrieve destinations. * It will update the status of the pbmesh.Upstreams resource if anything is invalid. 
--- agent/connect/uri_service.go | 31 + agent/consul/server.go | 16 + .../controllers/endpoints/controller.go | 4 + internal/mesh/exports.go | 40 +- .../controllers/mesh/builder/builder.go | 169 ------ .../controllers/mesh/builder/builder_test.go | 238 -------- .../internal/controllers/mesh/controller.go | 159 ----- .../controllers/mesh/controller_test.go | 255 -------- .../mesh/mappers/service_endpoints.go | 40 -- .../mesh/mappers/service_endpoints_test.go | 44 -- .../mesh/internal/controllers/register.go | 7 + .../sidecar-proxy/builder/builder.go | 42 ++ .../sidecar-proxy/builder/builder_test.go | 47 ++ .../builder/destination_builder.go | 132 +++++ .../builder/destination_builder_test.go | 106 ++++ .../sidecar-proxy/builder/local_app.go | 139 +++++ .../sidecar-proxy/builder/local_app_test.go | 91 +++ .../sidecar-proxy/builder/naming.go | 33 ++ .../testdata/l4-multi-destination.golden | 122 ++++ ...kload-addresses-with-specific-ports.golden | 71 +++ ...le-workload-addresses-without-ports.golden | 71 +++ ...le-destination-ip-port-bind-address.golden | 70 +++ ...estination-unix-socket-bind-address.golden | 70 +++ ...ngle-workload-address-without-ports.golden | 71 +++ .../controllers/sidecar-proxy/cache/cache.go | 144 +++++ .../sidecar-proxy/cache/cache_test.go | 168 ++++++ .../controllers/sidecar-proxy/controller.go | 168 ++++++ .../sidecar-proxy/controller_test.go | 370 ++++++++++++ .../sidecar-proxy/fetcher/data_fetcher.go | 244 ++++++++ .../fetcher/data_fetcher_test.go | 561 ++++++++++++++++++ .../mapper/destinations_mapper.go | 69 +++ .../mapper/destinations_mapper_test.go | 99 ++++ .../mapper/service_endpoints_mapper.go | 76 +++ .../mapper/service_endpoints_mapper_test.go | 80 +++ .../sidecar-proxy/status/status.go | 55 ++ .../mesh/internal/types/intermediate/types.go | 56 ++ internal/mesh/internal/types/upstreams.go | 2 +- internal/resource/reference.go | 8 + internal/resource/resourcetest/builder.go | 4 + .../v1alpha1/service_endpoints.pb.go | 63 +- 
.../v1alpha1/service_endpoints.proto | 3 + 41 files changed, 3297 insertions(+), 941 deletions(-) delete mode 100644 internal/mesh/internal/controllers/mesh/builder/builder.go delete mode 100644 internal/mesh/internal/controllers/mesh/builder/builder_test.go delete mode 100644 internal/mesh/internal/controllers/mesh/controller.go delete mode 100644 internal/mesh/internal/controllers/mesh/controller_test.go delete mode 100644 internal/mesh/internal/controllers/mesh/mappers/service_endpoints.go delete mode 100644 internal/mesh/internal/controllers/mesh/mappers/service_endpoints_test.go create mode 100644 internal/mesh/internal/controllers/sidecar-proxy/builder/builder.go create mode 100644 internal/mesh/internal/controllers/sidecar-proxy/builder/builder_test.go create mode 100644 internal/mesh/internal/controllers/sidecar-proxy/builder/destination_builder.go create mode 100644 internal/mesh/internal/controllers/sidecar-proxy/builder/destination_builder_test.go create mode 100644 internal/mesh/internal/controllers/sidecar-proxy/builder/local_app.go create mode 100644 internal/mesh/internal/controllers/sidecar-proxy/builder/local_app_test.go create mode 100644 internal/mesh/internal/controllers/sidecar-proxy/builder/naming.go create mode 100644 internal/mesh/internal/controllers/sidecar-proxy/builder/testdata/l4-multi-destination.golden create mode 100644 internal/mesh/internal/controllers/sidecar-proxy/builder/testdata/l4-multiple-workload-addresses-with-specific-ports.golden create mode 100644 internal/mesh/internal/controllers/sidecar-proxy/builder/testdata/l4-multiple-workload-addresses-without-ports.golden create mode 100644 internal/mesh/internal/controllers/sidecar-proxy/builder/testdata/l4-single-destination-ip-port-bind-address.golden create mode 100644 internal/mesh/internal/controllers/sidecar-proxy/builder/testdata/l4-single-destination-unix-socket-bind-address.golden create mode 100644 
internal/mesh/internal/controllers/sidecar-proxy/builder/testdata/l4-single-workload-address-without-ports.golden create mode 100644 internal/mesh/internal/controllers/sidecar-proxy/cache/cache.go create mode 100644 internal/mesh/internal/controllers/sidecar-proxy/cache/cache_test.go create mode 100644 internal/mesh/internal/controllers/sidecar-proxy/controller.go create mode 100644 internal/mesh/internal/controllers/sidecar-proxy/controller_test.go create mode 100644 internal/mesh/internal/controllers/sidecar-proxy/fetcher/data_fetcher.go create mode 100644 internal/mesh/internal/controllers/sidecar-proxy/fetcher/data_fetcher_test.go create mode 100644 internal/mesh/internal/controllers/sidecar-proxy/mapper/destinations_mapper.go create mode 100644 internal/mesh/internal/controllers/sidecar-proxy/mapper/destinations_mapper_test.go create mode 100644 internal/mesh/internal/controllers/sidecar-proxy/mapper/service_endpoints_mapper.go create mode 100644 internal/mesh/internal/controllers/sidecar-proxy/mapper/service_endpoints_mapper_test.go create mode 100644 internal/mesh/internal/controllers/sidecar-proxy/status/status.go create mode 100644 internal/mesh/internal/types/intermediate/types.go diff --git a/agent/connect/uri_service.go b/agent/connect/uri_service.go index 3be7cf4797a..f02310e2623 100644 --- a/agent/connect/uri_service.go +++ b/agent/connect/uri_service.go @@ -8,6 +8,7 @@ import ( "net/url" "github.com/hashicorp/consul/acl" + "github.com/hashicorp/consul/proto-public/pbresource" ) // SpiffeIDService is the structure to represent the SPIFFE ID for a service. @@ -52,3 +53,33 @@ func (id SpiffeIDService) uriPath() string { } return path } + +// SpiffeIDIdentity is the structure to represent the SPIFFE ID for an identity. 
+type SpiffeIDIdentity struct { + Host string + Partition string + Namespace string + Identity string +} + +func (id SpiffeIDIdentity) URI() *url.URL { + var result url.URL + result.Scheme = "spiffe" + result.Host = id.Host + result.Path = fmt.Sprintf("/ap/%s/ns/%s/identity/%s", + id.Partition, + id.Namespace, + id.Identity, + ) + return &result +} + +// SpiffeIDFromIdentityRef creates the SIFFE ID from an identity. +func SpiffeIDFromIdentityRef(trustDomain string, ref *pbresource.Reference) string { + return SpiffeIDIdentity{ + Host: trustDomain, + Partition: ref.Tenancy.Partition, + Namespace: ref.Tenancy.Namespace, + Identity: ref.Name, + }.URI().String() +} diff --git a/agent/consul/server.go b/agent/consul/server.go index c99e1cf21ed..50bf0f0d1c0 100644 --- a/agent/consul/server.go +++ b/agent/consul/server.go @@ -19,6 +19,7 @@ import ( "sync/atomic" "time" + "github.com/hashicorp/consul/agent/connect" "github.com/hashicorp/consul/internal/mesh" "github.com/hashicorp/consul/internal/resource" @@ -910,6 +911,21 @@ func (s *Server) registerControllers(deps Deps, proxyUpdater ProxyUpdater) { return &bundle, nil }, ProxyUpdater: proxyUpdater, + TrustDomainFetcher: func() (string, error) { + if s.config.CAConfig == nil || s.config.CAConfig.ClusterID == "" { + return "", fmt.Errorf("CA has not finished initializing") + } + + // Build TrustDomain based on the ClusterID stored. + signingID := connect.SpiffeIDSigningForCluster(s.config.CAConfig.ClusterID) + if signingID == nil { + // If CA is bootstrapped at all then this should never happen but be + // defensive. 
+ return "", fmt.Errorf("no cluster trust domain setup") + } + + return signingID.Host(), nil + }, }) } diff --git a/internal/catalog/internal/controllers/endpoints/controller.go b/internal/catalog/internal/controllers/endpoints/controller.go index 641e69086e4..5a3e65d0a37 100644 --- a/internal/catalog/internal/controllers/endpoints/controller.go +++ b/internal/catalog/internal/controllers/endpoints/controller.go @@ -5,6 +5,7 @@ package endpoints import ( "context" + "fmt" "sort" "github.com/hashicorp/consul/internal/catalog/internal/controllers/workloadhealth" @@ -375,6 +376,9 @@ func workloadToEndpoint(svc *pbcatalog.Service, data *workloadData) *pbcatalog.E return nil } + if data.resource.Id == nil { + fmt.Println("-------------------iryna: workload id is nil") + } return &pbcatalog.Endpoint{ TargetRef: data.resource.Id, HealthStatus: health, diff --git a/internal/mesh/exports.go b/internal/mesh/exports.go index 5df51439d16..e406f58c16c 100644 --- a/internal/mesh/exports.go +++ b/internal/mesh/exports.go @@ -6,6 +6,8 @@ package mesh import ( "github.com/hashicorp/consul/internal/controller" "github.com/hashicorp/consul/internal/mesh/internal/controllers" + "github.com/hashicorp/consul/internal/mesh/internal/controllers/sidecar-proxy" + "github.com/hashicorp/consul/internal/mesh/internal/controllers/sidecar-proxy/status" "github.com/hashicorp/consul/internal/mesh/internal/types" "github.com/hashicorp/consul/internal/resource" ) @@ -19,15 +21,16 @@ var ( // Resource Kind Names. 
- ProxyConfigurationKind = types.ProxyConfigurationKind - UpstreamsKind = types.UpstreamsKind - UpstreamsConfigurationKind = types.UpstreamsConfigurationKind - ProxyStateKind = types.ProxyStateTemplateKind - HTTPRouteKind = types.HTTPRouteKind - GRPCRouteKind = types.GRPCRouteKind - TCPRouteKind = types.TCPRouteKind - DestinationPolicyKind = types.DestinationPolicyKind - ComputedRoutesKind = types.ComputedRoutesKind + ProxyConfigurationKind = types.ProxyConfigurationKind + UpstreamsKind = types.UpstreamsKind + UpstreamsConfigurationKind = types.UpstreamsConfigurationKind + ProxyStateKind = types.ProxyStateTemplateKind + HTTPRouteKind = types.HTTPRouteKind + GRPCRouteKind = types.GRPCRouteKind + TCPRouteKind = types.TCPRouteKind + DestinationPolicyKind = types.DestinationPolicyKind + ComputedRoutesKind = types.ComputedRoutesKind + ProxyStateTemplateV1Alpha1Type = types.ProxyStateTemplateV1Alpha1Type // Resource Types for the v1alpha1 version. @@ -40,6 +43,7 @@ var ( TCPRouteV1Alpha1Type = types.TCPRouteV1Alpha1Type DestinationPolicyV1Alpha1Type = types.DestinationPolicyV1Alpha1Type ComputedRoutesV1Alpha1Type = types.ComputedRoutesV1Alpha1Type + ProxyStateTemplateType = types.ProxyStateTemplateV1Alpha1Type // Resource Types for the latest version. @@ -52,6 +56,22 @@ var ( TCPRouteType = types.TCPRouteType DestinationPolicyType = types.DestinationPolicyType ComputedRoutesType = types.ComputedRoutesType + + // Controller statuses. + + // Sidecar-proxy controller. 
+ + SidecarProxyStatusKey = sidecar_proxy.ControllerName + + SidecarProxyStatusConditionMeshDestination = status.StatusConditionMeshDestination + + SidecarProxyStatusReasonNonMeshDestination = status.StatusReasonNonMeshDestination + SidecarProxyStatusReasonMeshDestination = status.StatusReasonMeshDestination + + SidecarProxyStatusConditionDestinationExists = status.StatusConditionDestinationExists + + SidecarProxyStatusReasonDestinationServiceNotFound = status.StatusReasonDestinationServiceNotFound + SidecarProxyStatusReasonDestinationServiceFound = status.StatusReasonDestinationServiceFound ) // RegisterTypes adds all resource types within the "mesh" API group @@ -66,4 +86,6 @@ func RegisterControllers(mgr *controller.Manager, deps ControllerDependencies) { controllers.Register(mgr, deps) } +type TrustDomainFetcher = sidecar_proxy.TrustDomainFetcher + type ControllerDependencies = controllers.Dependencies diff --git a/internal/mesh/internal/controllers/mesh/builder/builder.go b/internal/mesh/internal/controllers/mesh/builder/builder.go deleted file mode 100644 index b367ef9f84f..00000000000 --- a/internal/mesh/internal/controllers/mesh/builder/builder.go +++ /dev/null @@ -1,169 +0,0 @@ -package builder - -import ( - "fmt" - - "github.com/hashicorp/consul/envoyextensions/xdscommon" - pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v1alpha1" - pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v1alpha1" - "github.com/hashicorp/consul/proto-public/pbresource" -) - -type Builder struct { - id *pbresource.ID - proxyStateTemplate *pbmesh.ProxyStateTemplate - - lastBuiltListener lastListenerData -} - -type lastListenerData struct { - index int -} - -func New(id *pbresource.ID, identity *pbresource.Reference) *Builder { - return &Builder{ - id: id, - proxyStateTemplate: &pbmesh.ProxyStateTemplate{ - ProxyState: &pbmesh.ProxyState{ - Identity: identity, - Clusters: make(map[string]*pbmesh.Cluster), - Endpoints: make(map[string]*pbmesh.Endpoints), - }, - 
RequiredEndpoints: make(map[string]*pbmesh.EndpointRef), - RequiredLeafCertificates: make(map[string]*pbmesh.LeafCertificateRef), - RequiredTrustBundles: make(map[string]*pbmesh.TrustBundleRef), - }, - } -} - -func (b *Builder) Build() *pbmesh.ProxyStateTemplate { - b.lastBuiltListener = lastListenerData{} - return b.proxyStateTemplate -} - -func (b *Builder) AddInboundListener(name string, workload *pbcatalog.Workload) *Builder { - listener := &pbmesh.Listener{ - Name: name, - Direction: pbmesh.Direction_DIRECTION_INBOUND, - } - - // We will take listener bind port from the workload for now. - // Find mesh port. - var meshPort string - for portName, port := range workload.Ports { - if port.Protocol == pbcatalog.Protocol_PROTOCOL_MESH { - meshPort = portName - break - } - } - - // Check if the workload has a specific address for the mesh port. - var meshAddress string - for _, address := range workload.Addresses { - for _, port := range address.Ports { - if port == meshPort { - meshAddress = address.Host - } - } - } - // Otherwise, assume the first address in the addresses list. - if meshAddress == "" { - // It is safe to assume that there's at least one address because we validate it when creating the workload. - meshAddress = workload.Addresses[0].Host - } - - listener.BindAddress = &pbmesh.Listener_IpPort{ - IpPort: &pbmesh.IPPortAddress{ - Ip: meshAddress, - Port: workload.Ports[meshPort].Port, - }, - } - - // Track the last added listener. - b.lastBuiltListener.index = len(b.proxyStateTemplate.ProxyState.Listeners) - // Add listener to proxy state template - b.proxyStateTemplate.ProxyState.Listeners = append(b.proxyStateTemplate.ProxyState.Listeners, listener) - - return b -} - -func (b *Builder) AddInboundRouters(workload *pbcatalog.Workload) *Builder { - listener := b.proxyStateTemplate.ProxyState.Listeners[b.lastBuiltListener.index] - - // Go through workload ports and add the first non-mesh port we see. 
- // todo (ishustava): Note we will need to support multiple ports in the future. - // todo (ishustava): make sure we always iterate through ports in the same order so we don't need to send more updates to envoy. - for portName, port := range workload.Ports { - clusterName := fmt.Sprintf("%s:%s", xdscommon.LocalAppClusterName, portName) - if port.Protocol == pbcatalog.Protocol_PROTOCOL_TCP { - r := &pbmesh.Router{ - Destination: &pbmesh.Router_L4{ - L4: &pbmesh.L4Destination{ - Name: clusterName, - StatPrefix: listener.Name, - }, - }, - } - listener.Routers = append(listener.Routers, r) - - // Make cluster for this router destination. - b.proxyStateTemplate.ProxyState.Clusters[clusterName] = &pbmesh.Cluster{ - Group: &pbmesh.Cluster_EndpointGroup{ - EndpointGroup: &pbmesh.EndpointGroup{ - Group: &pbmesh.EndpointGroup_Static{ - Static: &pbmesh.StaticEndpointGroup{ - Name: clusterName, - }, - }, - }, - }, - } - - // Finally, add static endpoints. We're adding it statically as opposed to creating an endpoint ref - // because this endpoint is less likely to change as we're not tracking the health. - endpoint := &pbmesh.Endpoint{ - Address: &pbmesh.Endpoint_HostPort{ - HostPort: &pbmesh.HostPortAddress{ - Host: "127.0.0.1", - Port: port.Port, - }, - }, - } - b.proxyStateTemplate.ProxyState.Endpoints[clusterName] = &pbmesh.Endpoints{ - Name: clusterName, - Endpoints: []*pbmesh.Endpoint{endpoint}, - } - break - } - } - return b -} - -func (b *Builder) AddInboundTLS() *Builder { - listener := b.proxyStateTemplate.ProxyState.Listeners[b.lastBuiltListener.index] - // For inbound TLS, we want to use this proxy's identity. 
- workloadIdentity := b.proxyStateTemplate.ProxyState.Identity.Name - - inboundTLS := &pbmesh.TransportSocket{ - ConnectionTls: &pbmesh.TransportSocket_InboundMesh{ - InboundMesh: &pbmesh.InboundMeshMTLS{ - IdentityKey: workloadIdentity, - ValidationContext: &pbmesh.MeshInboundValidationContext{TrustBundlePeerNameKeys: []string{b.id.Tenancy.PeerName}}, - }, - }, - } - b.proxyStateTemplate.RequiredLeafCertificates[workloadIdentity] = &pbmesh.LeafCertificateRef{ - Name: workloadIdentity, - Namespace: b.id.Tenancy.Namespace, - Partition: b.id.Tenancy.Partition, - } - - b.proxyStateTemplate.RequiredTrustBundles[b.id.Tenancy.PeerName] = &pbmesh.TrustBundleRef{ - Peer: b.id.Tenancy.PeerName, - } - - for i := range listener.Routers { - listener.Routers[i].InboundTls = inboundTLS - } - return b -} diff --git a/internal/mesh/internal/controllers/mesh/builder/builder_test.go b/internal/mesh/internal/controllers/mesh/builder/builder_test.go deleted file mode 100644 index 1f0ee988db9..00000000000 --- a/internal/mesh/internal/controllers/mesh/builder/builder_test.go +++ /dev/null @@ -1,238 +0,0 @@ -package builder - -import ( - "testing" - - "github.com/hashicorp/consul/internal/mesh/internal/types" - "github.com/hashicorp/consul/internal/resource/resourcetest" - pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v1alpha1" - pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v1alpha1" - "github.com/hashicorp/consul/proto-public/pbresource" - "github.com/hashicorp/consul/proto/private/prototest" - "github.com/stretchr/testify/require" -) - -func TestAddInboundListener(t *testing.T) { - listenerName := "test-listener" - - cases := map[string]struct { - workload *pbcatalog.Workload - expListener *pbmesh.Listener - }{ - "single workload address without ports": { - workload: &pbcatalog.Workload{ - Addresses: []*pbcatalog.WorkloadAddress{ - { - Host: "10.0.0.1", - }, - }, - Ports: map[string]*pbcatalog.WorkloadPort{ - "port1": {Port: 8080, Protocol: 
pbcatalog.Protocol_PROTOCOL_TCP}, - "port2": {Port: 20000, Protocol: pbcatalog.Protocol_PROTOCOL_MESH}, - }, - }, - expListener: &pbmesh.Listener{ - Name: listenerName, - Direction: pbmesh.Direction_DIRECTION_INBOUND, - BindAddress: &pbmesh.Listener_IpPort{ - IpPort: &pbmesh.IPPortAddress{ - Ip: "10.0.0.1", - Port: 20000, - }, - }, - }, - }, - "multiple workload addresses without ports: prefer first address": { - workload: &pbcatalog.Workload{ - Addresses: []*pbcatalog.WorkloadAddress{ - { - Host: "10.0.0.1", - }, - { - Host: "10.0.0.2", - }, - }, - Ports: map[string]*pbcatalog.WorkloadPort{ - "port1": {Port: 8080, Protocol: pbcatalog.Protocol_PROTOCOL_TCP}, - "port2": {Port: 20000, Protocol: pbcatalog.Protocol_PROTOCOL_MESH}, - }, - }, - expListener: &pbmesh.Listener{ - Name: listenerName, - Direction: pbmesh.Direction_DIRECTION_INBOUND, - BindAddress: &pbmesh.Listener_IpPort{ - IpPort: &pbmesh.IPPortAddress{ - Ip: "10.0.0.1", - Port: 20000, - }, - }, - }, - }, - "multiple workload addresses with specific ports": { - workload: &pbcatalog.Workload{ - Addresses: []*pbcatalog.WorkloadAddress{ - { - Host: "127.0.0.1", - Ports: []string{"port1"}, - }, - { - Host: "10.0.0.2", - Ports: []string{"port2"}, - }, - }, - Ports: map[string]*pbcatalog.WorkloadPort{ - "port1": {Port: 8080, Protocol: pbcatalog.Protocol_PROTOCOL_TCP}, - "port2": {Port: 20000, Protocol: pbcatalog.Protocol_PROTOCOL_MESH}, - }, - }, - expListener: &pbmesh.Listener{ - Name: listenerName, - Direction: pbmesh.Direction_DIRECTION_INBOUND, - BindAddress: &pbmesh.Listener_IpPort{ - IpPort: &pbmesh.IPPortAddress{ - Ip: "10.0.0.2", - Port: 20000, - }, - }, - }, - }, - } - - for name, c := range cases { - t.Run(name, func(t *testing.T) { - proxyStateTemplateID := testProxyStateTemplateID() - - proxyStateTemplate := New(proxyStateTemplateID, testIdentityRef()).AddInboundListener(listenerName, c.workload).Build() - require.Len(t, proxyStateTemplate.ProxyState.Listeners, 1) - prototest.AssertDeepEqual(t, 
c.expListener, proxyStateTemplate.ProxyState.Listeners[0]) - }) - } -} - -func TestAddInboundRouters(t *testing.T) { - workload := testWorkload() - - // Create new builder - builder := New(testProxyStateTemplateID(), testIdentityRef()). - AddInboundListener("test-listener", workload). - AddInboundRouters(workload) - - clusterName := "local_app:port1" - expRouters := []*pbmesh.Router{ - { - Destination: &pbmesh.Router_L4{ - L4: &pbmesh.L4Destination{ - Name: clusterName, - StatPrefix: "test-listener", - }, - }, - }, - } - expCluster := &pbmesh.Cluster{ - Group: &pbmesh.Cluster_EndpointGroup{ - EndpointGroup: &pbmesh.EndpointGroup{ - Group: &pbmesh.EndpointGroup_Static{ - Static: &pbmesh.StaticEndpointGroup{ - Name: clusterName, - }, - }, - }, - }, - } - - expEndpoints := &pbmesh.Endpoints{ - Name: clusterName, - Endpoints: []*pbmesh.Endpoint{ - { - Address: &pbmesh.Endpoint_HostPort{ - HostPort: &pbmesh.HostPortAddress{ - Host: "127.0.0.1", - Port: 8080, - }, - }, - }, - }, - } - - proxyStateTemplate := builder.Build() - - // Check routers. - require.Len(t, proxyStateTemplate.ProxyState.Listeners, 1) - prototest.AssertDeepEqual(t, expRouters, proxyStateTemplate.ProxyState.Listeners[0].Routers) - - // Check that the cluster exists in the clusters map. - prototest.AssertDeepEqual(t, expCluster, proxyStateTemplate.ProxyState.Clusters[clusterName]) - - // Check that the endpoints exist in the endpoint map for this cluster name. - prototest.AssertDeepEqual(t, expEndpoints, proxyStateTemplate.ProxyState.Endpoints[clusterName]) -} - -func TestAddInboundTLS(t *testing.T) { - id := testProxyStateTemplateID() - workload := testWorkload() - - proxyStateTemplate := New(id, testIdentityRef()). - AddInboundListener("test-listener", workload). - AddInboundRouters(workload). - AddInboundTLS(). 
- Build() - - expTransportSocket := &pbmesh.TransportSocket{ - ConnectionTls: &pbmesh.TransportSocket_InboundMesh{ - InboundMesh: &pbmesh.InboundMeshMTLS{ - IdentityKey: workload.Identity, - ValidationContext: &pbmesh.MeshInboundValidationContext{ - TrustBundlePeerNameKeys: []string{id.Tenancy.PeerName}}, - }, - }, - } - expLeafCertRef := &pbmesh.LeafCertificateRef{ - Name: workload.Identity, - Namespace: id.Tenancy.Namespace, - Partition: id.Tenancy.Partition, - } - - require.Len(t, proxyStateTemplate.ProxyState.Listeners, 1) - // Check that each router has the same TLS configuration. - for _, router := range proxyStateTemplate.ProxyState.Listeners[0].Routers { - prototest.AssertDeepEqual(t, expTransportSocket, router.InboundTls) - } - - // Check that there's a leaf cert ref added to the map. - prototest.AssertDeepEqual(t, expLeafCertRef, proxyStateTemplate.RequiredLeafCertificates[workload.Identity]) - - // Check that there's trust bundle name added to the trust bundles names. - _, ok := proxyStateTemplate.RequiredTrustBundles[id.Tenancy.PeerName] - require.True(t, ok) -} - -func testProxyStateTemplateID() *pbresource.ID { - return resourcetest.Resource(types.ProxyStateTemplateType, "test").ID() -} - -func testIdentityRef() *pbresource.Reference { - return &pbresource.Reference{ - Name: "test-identity", - Tenancy: &pbresource.Tenancy{ - Namespace: "default", - Partition: "default", - PeerName: "local", - }, - } -} - -func testWorkload() *pbcatalog.Workload { - return &pbcatalog.Workload{ - Identity: "test-identity", - Addresses: []*pbcatalog.WorkloadAddress{ - { - Host: "10.0.0.1", - }, - }, - Ports: map[string]*pbcatalog.WorkloadPort{ - "port1": {Port: 8080, Protocol: pbcatalog.Protocol_PROTOCOL_TCP}, - "port2": {Port: 8081, Protocol: pbcatalog.Protocol_PROTOCOL_HTTP}, - "port3": {Port: 20000, Protocol: pbcatalog.Protocol_PROTOCOL_MESH}, - }, - } -} diff --git a/internal/mesh/internal/controllers/mesh/controller.go 
b/internal/mesh/internal/controllers/mesh/controller.go deleted file mode 100644 index 84a7b6436b7..00000000000 --- a/internal/mesh/internal/controllers/mesh/controller.go +++ /dev/null @@ -1,159 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package mesh - -import ( - "context" - - "github.com/hashicorp/consul/envoyextensions/xdscommon" - "github.com/hashicorp/consul/internal/catalog" - "github.com/hashicorp/consul/internal/controller" - "github.com/hashicorp/consul/internal/mesh/internal/controllers/mesh/builder" - "github.com/hashicorp/consul/internal/mesh/internal/controllers/mesh/mappers" - "github.com/hashicorp/consul/internal/mesh/internal/types" - "github.com/hashicorp/consul/internal/resource" - pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v1alpha1" - pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v1alpha1" - "github.com/hashicorp/consul/proto-public/pbresource" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" - "google.golang.org/protobuf/proto" - "google.golang.org/protobuf/types/known/anypb" -) - -// ControllerName is the name for this controller. It's used for logging or status keys. -const ControllerName = "consul.io/mesh-controller" - -func Controller() controller.Controller { - return controller.ForType(types.ProxyStateTemplateType). - WithWatch(catalog.ServiceEndpointsType, mappers.MapServiceEndpointsToProxyStateTemplate). - WithReconciler(&reconciler{}) -} - -type reconciler struct{} - -func (r *reconciler) Reconcile(ctx context.Context, rt controller.Runtime, req controller.Request) error { - rt.Logger = rt.Logger.With("resource-id", req.ID, "controller", ControllerName) - - rt.Logger.Trace("reconciling proxy state template", "id", req.ID) - - // Check if the workload exists. 
- workloadID := workloadIDFromProxyStateTemplate(req.ID) - rsp, err := rt.Client.Read(ctx, &pbresource.ReadRequest{Id: workloadID}) - - switch { - case status.Code(err) == codes.NotFound: - // If workload has been deleted, then return as ProxyStateTemplate should be cleaned up - // by the garbage collector because of the owner reference. - rt.Logger.Trace("workload doesn't exist; skipping reconciliation", "workload", workloadID) - return nil - case err != nil: - rt.Logger.Error("error reading the associated workload", "error", err) - return err - } - - // Parse the workload data for this proxy. Note that we know that this workload has a service associated with it - // because we only trigger updates off of service endpoints. - workloadRes := rsp.Resource - var workload pbcatalog.Workload - err = workloadRes.Data.UnmarshalTo(&workload) - if err != nil { - rt.Logger.Error("error parsing workload data", "workload", workloadRes.Id) - return resource.NewErrDataParse(&workload, err) - } - - rsp, err = rt.Client.Read(ctx, &pbresource.ReadRequest{Id: req.ID}) - var buildNew bool - switch { - case status.Code(err) == codes.NotFound: - // Nothing to do as this resource may not have been created yet. - rt.Logger.Trace("proxy state template for this workload doesn't yet exist; generating a new one", "id", req.ID) - buildNew = true - case err != nil: - rt.Logger.Error("error reading proxy state template", "error", err) - return nil - } - - if !isMeshEnabled(workload.Ports) { - // Skip non-mesh workloads. - - // If there's existing proxy state template, delete it. 
- if !buildNew { - rt.Logger.Trace("deleting existing proxy state template because workload is no longer on the mesh", "id", req.ID) - _, err = rt.Client.Delete(ctx, &pbresource.DeleteRequest{Id: req.ID}) - if err != nil { - rt.Logger.Error("error deleting existing proxy state template", "error", err) - return err - } - } - rt.Logger.Trace("skipping proxy state template generation because workload is not on the mesh", "workload", workloadRes.Id) - return nil - } - - var proxyTemplate pbmesh.ProxyStateTemplate - if !buildNew { - err = rsp.Resource.Data.UnmarshalTo(&proxyTemplate) - if err != nil { - rt.Logger.Error("error parsing proxy state template data", "id", req.ID) - return resource.NewErrDataParse(&proxyTemplate, err) - } - } - - b := builder.New(req.ID, workloadIdentityRefFromWorkload(workloadRes.Id)). - AddInboundListener(xdscommon.PublicListenerName, &workload). - AddInboundRouters(&workload). - AddInboundTLS() - - newProxyTemplate := b.Build() - - same := proto.Equal(&proxyTemplate, newProxyTemplate) - if buildNew || !same { - proxyTemplateData, err := anypb.New(newProxyTemplate) - if err != nil { - rt.Logger.Error("error creating proxy state template data", "error", err) - return err - } - rt.Logger.Trace("updating proxy state template", "id", req.ID) - _, err = rt.Client.Write(ctx, &pbresource.WriteRequest{ - Resource: &pbresource.Resource{ - Id: req.ID, - Owner: workloadRes.Id, - Data: proxyTemplateData, - }, - }) - if err != nil { - rt.Logger.Error("error writing proxy state template", "error", err) - return err - } - } else { - rt.Logger.Trace("proxy state template data has not changed, skipping update", "id", req.ID) - } - return nil -} - -func workloadIDFromProxyStateTemplate(id *pbresource.ID) *pbresource.ID { - return &pbresource.ID{ - Name: id.Name, - Tenancy: id.Tenancy, - Type: catalog.WorkloadType, - } -} - -func workloadIdentityRefFromWorkload(id *pbresource.ID) *pbresource.Reference { - return &pbresource.Reference{ - Name: id.Name, - 
Tenancy: id.Tenancy, - } -} - -// isMeshEnabled returns true if workload or service endpoints port -// contain a port with the "mesh" protocol. -func isMeshEnabled(ports map[string]*pbcatalog.WorkloadPort) bool { - for _, port := range ports { - if port.Protocol == pbcatalog.Protocol_PROTOCOL_MESH { - return true - } - } - return false -} diff --git a/internal/mesh/internal/controllers/mesh/controller_test.go b/internal/mesh/internal/controllers/mesh/controller_test.go deleted file mode 100644 index cd5f82e3fbc..00000000000 --- a/internal/mesh/internal/controllers/mesh/controller_test.go +++ /dev/null @@ -1,255 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package mesh - -import ( - "context" - "testing" - - svctest "github.com/hashicorp/consul/agent/grpc-external/services/resource/testing" - "github.com/hashicorp/consul/envoyextensions/xdscommon" - "github.com/hashicorp/consul/internal/catalog" - "github.com/hashicorp/consul/internal/controller" - "github.com/hashicorp/consul/internal/mesh/internal/controllers/mesh/builder" - "github.com/hashicorp/consul/internal/mesh/internal/types" - "github.com/hashicorp/consul/internal/resource/resourcetest" - pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v1alpha1" - pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v1alpha1" - "github.com/hashicorp/consul/proto-public/pbresource" - "github.com/hashicorp/consul/proto/private/prototest" - "github.com/hashicorp/consul/sdk/testutil" - "github.com/hashicorp/consul/sdk/testutil/retry" - "github.com/stretchr/testify/require" - "github.com/stretchr/testify/suite" -) - -type meshControllerTestSuite struct { - suite.Suite - - client *resourcetest.Client - runtime controller.Runtime - - ctl reconciler - ctx context.Context - - workloadID *pbresource.ID - workload *pbcatalog.Workload - proxyStateTemplate *pbmesh.ProxyStateTemplate -} - -func (suite *meshControllerTestSuite) SetupTest() { - resourceClient := 
svctest.RunResourceService(suite.T(), types.Register, catalog.RegisterTypes) - suite.client = resourcetest.NewClient(resourceClient) - suite.runtime = controller.Runtime{Client: resourceClient, Logger: testutil.Logger(suite.T())} - suite.ctx = testutil.TestContext(suite.T()) - - suite.workload = &pbcatalog.Workload{ - Identity: "test-identity", - Addresses: []*pbcatalog.WorkloadAddress{ - { - Host: "10.0.0.1", - }, - }, - Ports: map[string]*pbcatalog.WorkloadPort{ - "tcp": {Port: 8080, Protocol: pbcatalog.Protocol_PROTOCOL_TCP}, - "mesh": {Port: 20000, Protocol: pbcatalog.Protocol_PROTOCOL_MESH}, - }, - } - - suite.workloadID = resourcetest.Resource(catalog.WorkloadType, "test-workload"). - WithData(suite.T(), suite.workload). - Write(suite.T(), resourceClient).Id - - identityRef := &pbresource.Reference{ - Name: suite.workload.Identity, - Tenancy: suite.workloadID.Tenancy, - } - - suite.proxyStateTemplate = builder.New(suite.workloadID, identityRef). - AddInboundListener(xdscommon.PublicListenerName, suite.workload). - AddInboundRouters(suite.workload). - AddInboundTLS(). - Build() -} - -func (suite *meshControllerTestSuite) TestReconcile_NoWorkload() { - // This test ensures that removed workloads are ignored and don't result - // in the creation of the proxy state template. - err := suite.ctl.Reconcile(context.Background(), suite.runtime, controller.Request{ - ID: resourceID(types.ProxyStateTemplateType, "not-found"), - }) - require.NoError(suite.T(), err) - - suite.client.RequireResourceNotFound(suite.T(), resourceID(types.ProxyStateTemplateType, "not-found")) -} - -func (suite *meshControllerTestSuite) TestReconcile_NonMeshWorkload() { - nonMeshWorkload := &pbcatalog.Workload{ - Addresses: []*pbcatalog.WorkloadAddress{ - { - Host: "10.0.0.1", - }, - }, - Ports: map[string]*pbcatalog.WorkloadPort{ - "tcp": {Port: 8080, Protocol: pbcatalog.Protocol_PROTOCOL_TCP}, - }, - } - - resourcetest.Resource(catalog.WorkloadType, "test-non-mesh-workload"). 
- WithData(suite.T(), nonMeshWorkload). - Write(suite.T(), suite.client.ResourceServiceClient) - - err := suite.ctl.Reconcile(context.Background(), suite.runtime, controller.Request{ - ID: resourceID(types.ProxyStateTemplateType, "test-non-mesh-workload"), - }) - require.NoError(suite.T(), err) - - suite.client.RequireResourceNotFound(suite.T(), resourceID(types.ProxyStateTemplateType, "test-non-mesh-workload")) -} - -func (suite *meshControllerTestSuite) TestReconcile_NoExistingProxyStateTemplate() { - err := suite.ctl.Reconcile(context.Background(), suite.runtime, controller.Request{ - ID: resourceID(types.ProxyStateTemplateType, suite.workloadID.Name), - }) - require.NoError(suite.T(), err) - - res := suite.client.RequireResourceExists(suite.T(), resourceID(types.ProxyStateTemplateType, suite.workloadID.Name)) - require.NoError(suite.T(), err) - require.NotNil(suite.T(), res.Data) - prototest.AssertDeepEqual(suite.T(), suite.workloadID, res.Owner) -} - -func (suite *meshControllerTestSuite) TestReconcile_ExistingProxyStateTemplate_WithUpdates() { - // Write the original. - resourcetest.Resource(types.ProxyStateTemplateType, "test-workload"). - WithData(suite.T(), suite.proxyStateTemplate). - WithOwner(suite.workloadID). - Write(suite.T(), suite.client.ResourceServiceClient) - - // Update the workload. - suite.workload.Ports["mesh"].Port = 21000 - updatedWorkloadID := resourcetest.Resource(catalog.WorkloadType, "test-workload"). - WithData(suite.T(), suite.workload). 
- Write(suite.T(), suite.client.ResourceServiceClient).Id - - err := suite.ctl.Reconcile(context.Background(), suite.runtime, controller.Request{ - ID: resourceID(types.ProxyStateTemplateType, updatedWorkloadID.Name), - }) - require.NoError(suite.T(), err) - - res := suite.client.RequireResourceExists(suite.T(), resourceID(types.ProxyStateTemplateType, updatedWorkloadID.Name)) - require.NoError(suite.T(), err) - require.NotNil(suite.T(), res.Data) - prototest.AssertDeepEqual(suite.T(), updatedWorkloadID, res.Owner) - - var updatedProxyStateTemplate pbmesh.ProxyStateTemplate - err = res.Data.UnmarshalTo(&updatedProxyStateTemplate) - require.NoError(suite.T(), err) - - // Check that our value is updated. - inboundListenerPort := updatedProxyStateTemplate.ProxyState.Listeners[0].BindAddress.(*pbmesh.Listener_IpPort).IpPort.Port - require.Equal(suite.T(), uint32(21000), inboundListenerPort) -} - -func (suite *meshControllerTestSuite) TestReconcile_ExistingProxyStateTemplate_NoUpdates() { - // Write the original - originalProxyState := resourcetest.Resource(types.ProxyStateTemplateType, "test-workload"). - WithData(suite.T(), suite.proxyStateTemplate). - WithOwner(suite.workloadID). - Write(suite.T(), suite.client.ResourceServiceClient) - - // Update the metadata on the workload which should result in no changes. - updatedWorkloadID := resourcetest.Resource(catalog.WorkloadType, "test-workload"). - WithData(suite.T(), suite.workload). - WithMeta("some", "meta"). 
- Write(suite.T(), suite.client.ResourceServiceClient).Id - - err := suite.ctl.Reconcile(context.Background(), suite.runtime, controller.Request{ - ID: resourceID(types.ProxyStateTemplateType, updatedWorkloadID.Name), - }) - require.NoError(suite.T(), err) - - updatedProxyState := suite.client.RequireResourceExists(suite.T(), resourceID(types.ProxyStateTemplateType, suite.workloadID.Name)) - resourcetest.RequireVersionUnchanged(suite.T(), updatedProxyState, originalProxyState.Version) -} - -// delete the workload, check that proxy state gets deleted (?can we check that?) -func (suite *meshControllerTestSuite) TestController() { - // Run the controller manager - mgr := controller.NewManager(suite.client, suite.runtime.Logger) - mgr.Register(Controller()) - mgr.SetRaftLeader(true) - go mgr.Run(suite.ctx) - - proxyStateTemplateID := resourcetest.Resource(types.ProxyStateTemplateType, "test-workload").ID() - // Add a mesh workload and check that it gets reconciled. - resourcetest.Resource(catalog.WorkloadType, "test-workload"). - WithData(suite.T(), suite.workload). - Write(suite.T(), suite.client.ResourceServiceClient) - - resourcetest.Resource(catalog.ServiceType, "test-service"). - WithData(suite.T(), &pbcatalog.Service{ - Workloads: &pbcatalog.WorkloadSelector{Names: []string{"test-workload"}}, - Ports: []*pbcatalog.ServicePort{ - {TargetPort: "http", Protocol: pbcatalog.Protocol_PROTOCOL_HTTP}, - }}). - Write(suite.T(), suite.client.ResourceServiceClient) - - endpoints := &pbcatalog.ServiceEndpoints{ - Endpoints: []*pbcatalog.Endpoint{ - { - TargetRef: suite.workloadID, - Addresses: suite.workload.Addresses, - Ports: suite.workload.Ports, - }, - }, - } - resourcetest.Resource(catalog.ServiceEndpointsType, "test-service"). - WithData(suite.T(), endpoints). - Write(suite.T(), suite.client.ResourceServiceClient) - - // Check that proxy state template resource is generated. 
- var proxyStateTmpl *pbresource.Resource - retry.Run(suite.T(), func(r *retry.R) { - proxyStateTmpl = suite.client.RequireResourceExists(r, proxyStateTemplateID) - }) - - // Delete the proxy state template resource and check that it gets regenerated. - _, err := suite.client.Delete(suite.ctx, &pbresource.DeleteRequest{Id: proxyStateTemplateID}) - require.NoError(suite.T(), err) - - suite.client.WaitForNewVersion(suite.T(), proxyStateTemplateID, proxyStateTmpl.Version) - - // Update workload and service endpoints to not be on the mesh anymore - // and check that the proxy state template is deleted. - delete(suite.workload.Ports, "mesh") - resourcetest.Resource(catalog.WorkloadType, "test-workload"). - WithData(suite.T(), suite.workload). - Write(suite.T(), suite.client.ResourceServiceClient) - - delete(endpoints.Endpoints[0].Ports, "mesh") - resourcetest.Resource(catalog.ServiceEndpointsType, "test-service"). - WithData(suite.T(), endpoints). - Write(suite.T(), suite.client.ResourceServiceClient) - - retry.Run(suite.T(), func(r *retry.R) { - suite.client.RequireResourceNotFound(r, proxyStateTemplateID) - }) -} - -func TestMeshController(t *testing.T) { - suite.Run(t, new(meshControllerTestSuite)) -} - -func resourceID(rtype *pbresource.Type, name string) *pbresource.ID { - return &pbresource.ID{ - Type: rtype, - Tenancy: &pbresource.Tenancy{ - Partition: "default", - Namespace: "default", - PeerName: "local", - }, - Name: name, - } -} diff --git a/internal/mesh/internal/controllers/mesh/mappers/service_endpoints.go b/internal/mesh/internal/controllers/mesh/mappers/service_endpoints.go deleted file mode 100644 index 9453691532b..00000000000 --- a/internal/mesh/internal/controllers/mesh/mappers/service_endpoints.go +++ /dev/null @@ -1,40 +0,0 @@ -package mappers - -import ( - "context" - - "github.com/hashicorp/consul/internal/controller" - "github.com/hashicorp/consul/internal/mesh/internal/types" - pbcatalog 
"github.com/hashicorp/consul/proto-public/pbcatalog/v1alpha1" - "github.com/hashicorp/consul/proto-public/pbresource" -) - -// MapServiceEndpointsToProxyStateTemplate maps catalog.ServiceEndpoints objects to the IDs of -// ProxyStateTemplate. -// For a downstream proxy, we only need to generate requests from workloads this endpoints points to -// If this service endpoints is an upstream for some proxies, we need to generate requests for those proxies as well. -// so we need to have a map from service endpoints to downstream proxy Ids -func MapServiceEndpointsToProxyStateTemplate(_ context.Context, _ controller.Runtime, res *pbresource.Resource) ([]controller.Request, error) { - // This mapper needs to look up workload IDs from service endpoints and replace them with proxystatetemplatetype. - var serviceEndpoints pbcatalog.ServiceEndpoints - err := res.Data.UnmarshalTo(&serviceEndpoints) - if err != nil { - return nil, err - } - - var result []controller.Request - - for _, endpoint := range serviceEndpoints.Endpoints { - // Convert the reference to a workload to a ProxyStateTemplate ID. - // Because these resources are name and tenancy aligned, we only need to change the type. 
- result = append(result, controller.Request{ - ID: &pbresource.ID{ - Name: endpoint.TargetRef.Name, - Tenancy: endpoint.TargetRef.Tenancy, - Type: types.ProxyStateTemplateType, - }, - }) - } - - return result, err -} diff --git a/internal/mesh/internal/controllers/mesh/mappers/service_endpoints_test.go b/internal/mesh/internal/controllers/mesh/mappers/service_endpoints_test.go deleted file mode 100644 index 03e73150e9f..00000000000 --- a/internal/mesh/internal/controllers/mesh/mappers/service_endpoints_test.go +++ /dev/null @@ -1,44 +0,0 @@ -package mappers - -import ( - "context" - "testing" - - "github.com/hashicorp/consul/internal/catalog" - "github.com/hashicorp/consul/internal/controller" - "github.com/hashicorp/consul/internal/mesh/internal/types" - "github.com/hashicorp/consul/internal/resource/resourcetest" - pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v1alpha1" - "github.com/stretchr/testify/require" -) - -func TestMapServiceEndpointsToProxyStateTemplate(t *testing.T) { - workload1 := resourcetest.Resource(catalog.WorkloadType, "workload-1").Build() - workload2 := resourcetest.Resource(catalog.WorkloadType, "workload-2").Build() - serviceEndpoints := resourcetest.Resource(catalog.ServiceEndpointsType, "service"). 
- WithData(t, &pbcatalog.ServiceEndpoints{ - Endpoints: []*pbcatalog.Endpoint{ - { - TargetRef: workload1.Id, - }, - { - TargetRef: workload2.Id, - }, - }, - }).Build() - proxyTmpl1ID := resourcetest.Resource(types.ProxyStateTemplateType, "workload-1").ID() - proxyTmpl2ID := resourcetest.Resource(types.ProxyStateTemplateType, "workload-2").ID() - - expRequests := []controller.Request{ - { - ID: proxyTmpl1ID, - }, - { - ID: proxyTmpl2ID, - }, - } - - requests, err := MapServiceEndpointsToProxyStateTemplate(context.Background(), controller.Runtime{}, serviceEndpoints) - require.NoError(t, err) - require.ElementsMatch(t, expRequests, requests) -} diff --git a/internal/mesh/internal/controllers/register.go b/internal/mesh/internal/controllers/register.go index adfdd5c8afc..23a9d507fd7 100644 --- a/internal/mesh/internal/controllers/register.go +++ b/internal/mesh/internal/controllers/register.go @@ -9,14 +9,21 @@ import ( "github.com/hashicorp/consul/internal/mesh/internal/controllers/xds" "github.com/hashicorp/consul/internal/mesh/internal/types" "github.com/hashicorp/consul/internal/resource/mappers/bimapper" + "github.com/hashicorp/consul/internal/mesh/internal/controllers/sidecar-proxy" + "github.com/hashicorp/consul/internal/mesh/internal/controllers/sidecar-proxy/cache" + "github.com/hashicorp/consul/internal/mesh/internal/controllers/sidecar-proxy/mapper" ) type Dependencies struct { + TrustDomainFetcher sidecar_proxy.TrustDomainFetcher TrustBundleFetcher xds.TrustBundleFetcher ProxyUpdater xds.ProxyUpdater } func Register(mgr *controller.Manager, deps Dependencies) { + c := cache.New() + m := mapper.New(c) mapper := bimapper.New(types.ProxyStateTemplateType, catalog.ServiceEndpointsType) mgr.Register(xds.Controller(mapper, deps.ProxyUpdater, deps.TrustBundleFetcher)) + mgr.Register(sidecar_proxy.Controller(c, m, deps.TrustDomainFetcher)) } diff --git a/internal/mesh/internal/controllers/sidecar-proxy/builder/builder.go 
b/internal/mesh/internal/controllers/sidecar-proxy/builder/builder.go new file mode 100644 index 00000000000..d675dbec7d6 --- /dev/null +++ b/internal/mesh/internal/controllers/sidecar-proxy/builder/builder.go @@ -0,0 +1,42 @@ +package builder + +import ( + pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v1alpha1" + "github.com/hashicorp/consul/proto-public/pbmesh/v1alpha1/pbproxystate" + "github.com/hashicorp/consul/proto-public/pbresource" +) + +// Builder builds a ProxyStateTemplate. +type Builder struct { + id *pbresource.ID + proxyStateTemplate *pbmesh.ProxyStateTemplate + trustDomain string +} + +func New(id *pbresource.ID, identity *pbresource.Reference, trustDomain string) *Builder { + return &Builder{ + id: id, + trustDomain: trustDomain, + proxyStateTemplate: &pbmesh.ProxyStateTemplate{ + ProxyState: &pbmesh.ProxyState{ + Identity: identity, + Clusters: make(map[string]*pbproxystate.Cluster), + Endpoints: make(map[string]*pbproxystate.Endpoints), + }, + RequiredEndpoints: make(map[string]*pbproxystate.EndpointRef), + RequiredLeafCertificates: make(map[string]*pbproxystate.LeafCertificateRef), + RequiredTrustBundles: make(map[string]*pbproxystate.TrustBundleRef), + }, + } +} + +func (b *Builder) Build() *pbmesh.ProxyStateTemplate { + return b.proxyStateTemplate +} + +func (b *Builder) addListener(l *pbproxystate.Listener) *Builder { + // Add listener to proxy state template + b.proxyStateTemplate.ProxyState.Listeners = append(b.proxyStateTemplate.ProxyState.Listeners, l) + + return b +} diff --git a/internal/mesh/internal/controllers/sidecar-proxy/builder/builder_test.go b/internal/mesh/internal/controllers/sidecar-proxy/builder/builder_test.go new file mode 100644 index 00000000000..3d36c446ba1 --- /dev/null +++ b/internal/mesh/internal/controllers/sidecar-proxy/builder/builder_test.go @@ -0,0 +1,47 @@ +package builder + +import ( + "flag" + "os" + "path/filepath" + "testing" + + "github.com/stretchr/testify/require" + 
"google.golang.org/protobuf/encoding/protojson" + "google.golang.org/protobuf/proto" +) + +var ( + update = flag.Bool("update", false, "update the golden files of this test") +) + +func TestMain(m *testing.M) { + flag.Parse() + os.Exit(m.Run()) +} + +func protoToJSON(t *testing.T, pb proto.Message) string { + t.Helper() + m := protojson.MarshalOptions{ + Multiline: true, + } + gotJSON, err := m.Marshal(pb) + require.NoError(t, err) + return string(gotJSON) +} + +func goldenValue(t *testing.T, goldenFile string, actual string, update bool) string { + t.Helper() + goldenPath := filepath.Join("testdata", goldenFile) + ".golden" + + if update { + err := os.WriteFile(goldenPath, []byte(actual), 0644) + require.NoError(t, err) + + return actual + } + + content, err := os.ReadFile(goldenPath) + require.NoError(t, err) + return string(content) +} diff --git a/internal/mesh/internal/controllers/sidecar-proxy/builder/destination_builder.go b/internal/mesh/internal/controllers/sidecar-proxy/builder/destination_builder.go new file mode 100644 index 00000000000..062ceb02f1e --- /dev/null +++ b/internal/mesh/internal/controllers/sidecar-proxy/builder/destination_builder.go @@ -0,0 +1,132 @@ +package builder + +import ( + "github.com/hashicorp/consul/agent/connect" + "github.com/hashicorp/consul/internal/mesh/internal/types/intermediate" + pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v1alpha1" + pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v1alpha1" + "github.com/hashicorp/consul/proto-public/pbmesh/v1alpha1/pbproxystate" + "github.com/hashicorp/consul/proto-public/pbresource" +) + +func (b *Builder) BuildDestinations(destinations []*intermediate.Destination) *Builder { + for _, destination := range destinations { + if destination.Explicit != nil { + b.buildExplicitDestination(destination) + } + } + + return b +} + +func (b *Builder) buildExplicitDestination(destination *intermediate.Destination) *Builder { + clusterName := 
DestinationClusterName(destination.Explicit.DestinationRef, destination.Explicit.Datacenter, b.trustDomain) + statPrefix := DestinationStatPrefix(destination.Explicit.DestinationRef, destination.Explicit.Datacenter) + + // We assume that all endpoints have the same port. Later, we will change service endpoints to + // have the global ports map rather than per address. + destPort := destination.ServiceEndpoints.Endpoints.Endpoints[0].Ports[destination.Explicit.DestinationPort] + + if destPort != nil { + return b.addOutboundDestinationListener(destination.Explicit). + addRouter(clusterName, statPrefix, destPort.Protocol). + addCluster(clusterName, destination.Identities). + addEndpointsRef(clusterName, destination.ServiceEndpoints.Resource.Id, destination.Explicit.DestinationPort) + } + + return b +} + +func (b *Builder) addOutboundDestinationListener(explicit *pbmesh.Upstream) *Builder { + listener := &pbproxystate.Listener{ + Direction: pbproxystate.Direction_DIRECTION_OUTBOUND, + } + + // Create outbound listener address. 
+ switch explicit.ListenAddr.(type) { + case *pbmesh.Upstream_IpPort: + destinationAddr := explicit.ListenAddr.(*pbmesh.Upstream_IpPort) + listener.BindAddress = &pbproxystate.Listener_HostPort{ + HostPort: &pbproxystate.HostPortAddress{ + Host: destinationAddr.IpPort.Ip, + Port: destinationAddr.IpPort.Port, + }, + } + listener.Name = DestinationListenerName(explicit.DestinationRef.Name, explicit.DestinationPort, destinationAddr.IpPort.Ip, destinationAddr.IpPort.Port) + case *pbmesh.Upstream_Unix: + destinationAddr := explicit.ListenAddr.(*pbmesh.Upstream_Unix) + listener.BindAddress = &pbproxystate.Listener_UnixSocket{ + UnixSocket: &pbproxystate.UnixSocketAddress{ + Path: destinationAddr.Unix.Path, + Mode: destinationAddr.Unix.Mode, + }, + } + listener.Name = DestinationListenerName(explicit.DestinationRef.Name, explicit.DestinationPort, destinationAddr.Unix.Path, 0) + } + + return b.addListener(listener) +} + +// for explicit destinations, we have no filter chain match, and filters based on port protocol +func (b *Builder) addRouter(clusterName, statPrefix string, protocol pbcatalog.Protocol) *Builder { + listener := b.getLastBuiltListener() + + switch protocol { + case pbcatalog.Protocol_PROTOCOL_TCP: + router := &pbproxystate.Router{ + Destination: &pbproxystate.Router_L4{ + L4: &pbproxystate.L4Destination{ + Name: clusterName, + StatPrefix: statPrefix, + }, + }, + } + listener.Routers = append(listener.Routers, router) + } + return b +} + +func (b *Builder) addCluster(clusterName string, destinationIdentities []*pbresource.Reference) *Builder { + var spiffeIDs []string + for _, identity := range destinationIdentities { + spiffeIDs = append(spiffeIDs, connect.SpiffeIDFromIdentityRef(b.trustDomain, identity)) + } + + // Create destination cluster + cluster := &pbproxystate.Cluster{ + Group: &pbproxystate.Cluster_EndpointGroup{ + EndpointGroup: &pbproxystate.EndpointGroup{ + Group: &pbproxystate.EndpointGroup_Dynamic{ + Dynamic: 
&pbproxystate.DynamicEndpointGroup{ + Config: &pbproxystate.DynamicEndpointGroupConfig{ + DisablePanicThreshold: true, + }, + OutboundTls: &pbproxystate.TransportSocket{ + ConnectionTls: &pbproxystate.TransportSocket_OutboundMesh{ + OutboundMesh: &pbproxystate.OutboundMeshMTLS{ + IdentityKey: b.proxyStateTemplate.ProxyState.Identity.Name, + ValidationContext: &pbproxystate.MeshOutboundValidationContext{ + SpiffeIds: spiffeIDs, + }, + Sni: clusterName, + }, + }, + }, + }, + }, + }, + }, + } + + b.proxyStateTemplate.ProxyState.Clusters[clusterName] = cluster + return b +} + +func (b *Builder) addEndpointsRef(clusterName string, serviceEndpointsID *pbresource.ID, destinationPort string) *Builder { + // Finally, add endpoints references. + b.proxyStateTemplate.RequiredEndpoints[clusterName] = &pbproxystate.EndpointRef{ + Id: serviceEndpointsID, + Port: destinationPort, + } + return b +} diff --git a/internal/mesh/internal/controllers/sidecar-proxy/builder/destination_builder_test.go b/internal/mesh/internal/controllers/sidecar-proxy/builder/destination_builder_test.go new file mode 100644 index 00000000000..6d8371662fd --- /dev/null +++ b/internal/mesh/internal/controllers/sidecar-proxy/builder/destination_builder_test.go @@ -0,0 +1,106 @@ +package builder + +import ( + "testing" + + "github.com/hashicorp/consul/internal/catalog" + "github.com/hashicorp/consul/internal/mesh/internal/types/intermediate" + "github.com/hashicorp/consul/internal/resource" + "github.com/hashicorp/consul/internal/resource/resourcetest" + pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v1alpha1" + pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v1alpha1" + "github.com/hashicorp/consul/proto-public/pbresource" + "github.com/stretchr/testify/require" +) + +var ( + endpointsData = &pbcatalog.ServiceEndpoints{ + Endpoints: []*pbcatalog.Endpoint{ + { + Addresses: []*pbcatalog.WorkloadAddress{ + {Host: "10.0.0.1"}, + }, + Ports: map[string]*pbcatalog.WorkloadPort{ + "tcp": 
{Port: 8080, Protocol: pbcatalog.Protocol_PROTOCOL_TCP}, + "http": {Port: 8080, Protocol: pbcatalog.Protocol_PROTOCOL_HTTP}, + }, + }, + }, + } +) + +func TestBuildExplicitDestinations(t *testing.T) { + api1Endpoints := resourcetest.Resource(catalog.ServiceEndpointsType, "api-1"). + WithData(t, endpointsData).Build() + + api2Endpoints := resourcetest.Resource(catalog.ServiceEndpointsType, "api-2"). + WithData(t, endpointsData).Build() + + api1Identity := &pbresource.Reference{ + Name: "api1-identity", + Tenancy: api1Endpoints.Id.Tenancy, + } + + api2Identity := &pbresource.Reference{ + Name: "api2-identity", + Tenancy: api2Endpoints.Id.Tenancy, + } + + destinationIpPort := &intermediate.Destination{ + Explicit: &pbmesh.Upstream{ + DestinationRef: resource.Reference(api1Endpoints.Id, ""), + DestinationPort: "tcp", + Datacenter: "dc1", + ListenAddr: &pbmesh.Upstream_IpPort{ + IpPort: &pbmesh.IPPortAddress{Ip: "1.1.1.1", Port: 1234}, + }, + }, + ServiceEndpoints: &intermediate.ServiceEndpoints{ + Resource: api1Endpoints, + Endpoints: endpointsData, + }, + Identities: []*pbresource.Reference{api1Identity}, + } + + destinationUnix := &intermediate.Destination{ + Explicit: &pbmesh.Upstream{ + DestinationRef: resource.Reference(api2Endpoints.Id, ""), + DestinationPort: "tcp", + Datacenter: "dc1", + ListenAddr: &pbmesh.Upstream_Unix{ + Unix: &pbmesh.UnixSocketAddress{Path: "/path/to/socket", Mode: "0666"}, + }, + }, + ServiceEndpoints: &intermediate.ServiceEndpoints{ + Resource: api2Endpoints, + Endpoints: endpointsData, + }, + Identities: []*pbresource.Reference{api2Identity}, + } + + cases := map[string]struct { + destinations []*intermediate.Destination + }{ + "l4-single-destination-ip-port-bind-address": { + destinations: []*intermediate.Destination{destinationIpPort}, + }, + "l4-single-destination-unix-socket-bind-address": { + destinations: []*intermediate.Destination{destinationUnix}, + }, + "l4-multi-destination": { + destinations: 
[]*intermediate.Destination{destinationIpPort, destinationUnix}, + }, + } + + for name, c := range cases { + proxyTmpl := New(testProxyStateTemplateID(), testIdentityRef(), "foo.consul"). + BuildDestinations(c.destinations). + Build() + + actual := protoToJSON(t, proxyTmpl) + expected := goldenValue(t, name, actual, *update) + + require.Equal(t, expected, actual) + } + +} diff --git a/internal/mesh/internal/controllers/sidecar-proxy/builder/local_app.go b/internal/mesh/internal/controllers/sidecar-proxy/builder/local_app.go new file mode 100644 index 00000000000..5082ce298ee --- /dev/null +++ b/internal/mesh/internal/controllers/sidecar-proxy/builder/local_app.go @@ -0,0 +1,139 @@ +package builder + +import ( + "fmt" + + "github.com/hashicorp/consul/envoyextensions/xdscommon" + pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v1alpha1" + "github.com/hashicorp/consul/proto-public/pbmesh/v1alpha1/pbproxystate" +) + +func (b *Builder) BuildLocalApp(workload *pbcatalog.Workload) *Builder { + return b.addInboundListener(xdscommon.PublicListenerName, workload). + addInboundRouters(workload). + addInboundTLS() +} + +func (b *Builder) getLastBuiltListener() *pbproxystate.Listener { + lastBuiltIndex := len(b.proxyStateTemplate.ProxyState.Listeners) - 1 + return b.proxyStateTemplate.ProxyState.Listeners[lastBuiltIndex] +} + +func (b *Builder) addInboundListener(name string, workload *pbcatalog.Workload) *Builder { + listener := &pbproxystate.Listener{ + Name: name, + Direction: pbproxystate.Direction_DIRECTION_INBOUND, + } + + // We will take listener bind port from the workload for now. + // Find mesh port. + var meshPort string + for portName, port := range workload.Ports { + if port.Protocol == pbcatalog.Protocol_PROTOCOL_MESH { + meshPort = portName + break + } + } + + // Check if the workload has a specific address for the mesh port. 
+ var meshAddress string + for _, address := range workload.Addresses { + for _, port := range address.Ports { + if port == meshPort { + meshAddress = address.Host + } + } + } + // Otherwise, assume the first address in the addresses list. + if meshAddress == "" { + // It is safe to assume that there's at least one address because we validate it when creating the workload. + meshAddress = workload.Addresses[0].Host + } + + listener.BindAddress = &pbproxystate.Listener_HostPort{ + HostPort: &pbproxystate.HostPortAddress{ + Host: meshAddress, + Port: workload.Ports[meshPort].Port, + }, + } + + return b.addListener(listener) +} + +func (b *Builder) addInboundRouters(workload *pbcatalog.Workload) *Builder { + listener := b.getLastBuiltListener() + + // Go through workload ports and add the first non-mesh port we see. + // todo (ishustava): Note we will need to support multiple ports in the future. + // todo (ishustava): make sure we always iterate through ports in the same order so we don't need to send more updates to envoy. + for portName, port := range workload.Ports { + clusterName := fmt.Sprintf("%s:%s", xdscommon.LocalAppClusterName, portName) + if port.Protocol == pbcatalog.Protocol_PROTOCOL_TCP { + r := &pbproxystate.Router{ + Destination: &pbproxystate.Router_L4{ + L4: &pbproxystate.L4Destination{ + Name: clusterName, + StatPrefix: listener.Name, + }, + }, + } + listener.Routers = append(listener.Routers, r) + + // Make cluster for this router destination. + b.proxyStateTemplate.ProxyState.Clusters[clusterName] = &pbproxystate.Cluster{ + Group: &pbproxystate.Cluster_EndpointGroup{ + EndpointGroup: &pbproxystate.EndpointGroup{ + Group: &pbproxystate.EndpointGroup_Static{ + Static: &pbproxystate.StaticEndpointGroup{}, + }, + }, + }, + } + + // Finally, add static endpoints. We're adding it statically as opposed to creating an endpoint ref + // because this endpoint is less likely to change as we're not tracking the health. 
+ endpoint := &pbproxystate.Endpoint{ + Address: &pbproxystate.Endpoint_HostPort{ + HostPort: &pbproxystate.HostPortAddress{ + Host: "127.0.0.1", + Port: port.Port, + }, + }, + } + b.proxyStateTemplate.ProxyState.Endpoints[clusterName] = &pbproxystate.Endpoints{ + Endpoints: []*pbproxystate.Endpoint{endpoint}, + } + break + } + } + return b +} + +func (b *Builder) addInboundTLS() *Builder { + listener := b.getLastBuiltListener() + // For inbound TLS, we want to use this proxy's identity. + workloadIdentity := b.proxyStateTemplate.ProxyState.Identity.Name + + inboundTLS := &pbproxystate.TransportSocket{ + ConnectionTls: &pbproxystate.TransportSocket_InboundMesh{ + InboundMesh: &pbproxystate.InboundMeshMTLS{ + IdentityKey: workloadIdentity, + ValidationContext: &pbproxystate.MeshInboundValidationContext{TrustBundlePeerNameKeys: []string{b.id.Tenancy.PeerName}}, + }, + }, + } + b.proxyStateTemplate.RequiredLeafCertificates[workloadIdentity] = &pbproxystate.LeafCertificateRef{ + Name: workloadIdentity, + Namespace: b.id.Tenancy.Namespace, + Partition: b.id.Tenancy.Partition, + } + + b.proxyStateTemplate.RequiredTrustBundles[b.id.Tenancy.PeerName] = &pbproxystate.TrustBundleRef{ + Peer: b.id.Tenancy.PeerName, + } + + for i := range listener.Routers { + listener.Routers[i].InboundTls = inboundTLS + } + return b +} diff --git a/internal/mesh/internal/controllers/sidecar-proxy/builder/local_app_test.go b/internal/mesh/internal/controllers/sidecar-proxy/builder/local_app_test.go new file mode 100644 index 00000000000..5a71e282a66 --- /dev/null +++ b/internal/mesh/internal/controllers/sidecar-proxy/builder/local_app_test.go @@ -0,0 +1,91 @@ +package builder + +import ( + "testing" + + "github.com/hashicorp/consul/internal/mesh/internal/types" + "github.com/hashicorp/consul/internal/resource/resourcetest" + pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v1alpha1" + "github.com/hashicorp/consul/proto-public/pbresource" + "github.com/stretchr/testify/require" +) 
+ +func TestBuildLocalApp(t *testing.T) { + cases := map[string]struct { + workload *pbcatalog.Workload + }{ + "l4-single-workload-address-without-ports": { + workload: &pbcatalog.Workload{ + Addresses: []*pbcatalog.WorkloadAddress{ + { + Host: "10.0.0.1", + }, + }, + Ports: map[string]*pbcatalog.WorkloadPort{ + "port1": {Port: 8080, Protocol: pbcatalog.Protocol_PROTOCOL_TCP}, + "port2": {Port: 20000, Protocol: pbcatalog.Protocol_PROTOCOL_MESH}, + }, + }, + }, + "l4-multiple-workload-addresses-without-ports": { + workload: &pbcatalog.Workload{ + Addresses: []*pbcatalog.WorkloadAddress{ + { + Host: "10.0.0.1", + }, + { + Host: "10.0.0.2", + }, + }, + Ports: map[string]*pbcatalog.WorkloadPort{ + "port1": {Port: 8080, Protocol: pbcatalog.Protocol_PROTOCOL_TCP}, + "port2": {Port: 20000, Protocol: pbcatalog.Protocol_PROTOCOL_MESH}, + }, + }, + }, + "l4-multiple-workload-addresses-with-specific-ports": { + workload: &pbcatalog.Workload{ + Addresses: []*pbcatalog.WorkloadAddress{ + { + Host: "127.0.0.1", + Ports: []string{"port1"}, + }, + { + Host: "10.0.0.2", + Ports: []string{"port2"}, + }, + }, + Ports: map[string]*pbcatalog.WorkloadPort{ + "port1": {Port: 8080, Protocol: pbcatalog.Protocol_PROTOCOL_TCP}, + "port2": {Port: 20000, Protocol: pbcatalog.Protocol_PROTOCOL_MESH}, + }, + }, + }, + } + + for name, c := range cases { + t.Run(name, func(t *testing.T) { + proxyTmpl := New(testProxyStateTemplateID(), testIdentityRef(), "foo.consul").BuildLocalApp(c.workload). 
+ Build() + actual := protoToJSON(t, proxyTmpl) + expected := goldenValue(t, name, actual, *update) + + require.Equal(t, expected, actual) + }) + } +} + +func testProxyStateTemplateID() *pbresource.ID { + return resourcetest.Resource(types.ProxyStateTemplateType, "test").ID() +} + +func testIdentityRef() *pbresource.Reference { + return &pbresource.Reference{ + Name: "test-identity", + Tenancy: &pbresource.Tenancy{ + Namespace: "default", + Partition: "default", + PeerName: "local", + }, + } +} diff --git a/internal/mesh/internal/controllers/sidecar-proxy/builder/naming.go b/internal/mesh/internal/controllers/sidecar-proxy/builder/naming.go new file mode 100644 index 00000000000..0186dc8c0b2 --- /dev/null +++ b/internal/mesh/internal/controllers/sidecar-proxy/builder/naming.go @@ -0,0 +1,33 @@ +package builder + +import ( + "fmt" + + "github.com/hashicorp/consul/agent/connect" + "github.com/hashicorp/consul/proto-public/pbresource" +) + +func DestinationClusterName(serviceRef *pbresource.Reference, datacenter, trustDomain string) string { + return connect.ServiceSNI(serviceRef.Name, + "", + serviceRef.Tenancy.Namespace, + serviceRef.Tenancy.Partition, + datacenter, + trustDomain) +} + +func DestinationStatPrefix(serviceRef *pbresource.Reference, datacenter string) string { + return fmt.Sprintf("upstream.%s.%s.%s.%s", + serviceRef.Name, + serviceRef.Tenancy.Namespace, + serviceRef.Tenancy.Partition, + datacenter) +} + +func DestinationListenerName(name, portName string, address string, port uint32) string { + if port != 0 { + return fmt.Sprintf("%s:%s:%s:%d", name, portName, address, port) + } + + return fmt.Sprintf("%s:%s:%s", name, portName, address) +} diff --git a/internal/mesh/internal/controllers/sidecar-proxy/builder/testdata/l4-multi-destination.golden b/internal/mesh/internal/controllers/sidecar-proxy/builder/testdata/l4-multi-destination.golden new file mode 100644 index 00000000000..dae364f5c07 --- /dev/null +++ 
b/internal/mesh/internal/controllers/sidecar-proxy/builder/testdata/l4-multi-destination.golden @@ -0,0 +1,122 @@ +{ + "proxyState": { + "identity": { + "tenancy": { + "partition": "default", + "namespace": "default", + "peerName": "local" + }, + "name": "test-identity" + }, + "listeners": [ + { + "name": "api-1:tcp:1.1.1.1:1234", + "direction": "DIRECTION_OUTBOUND", + "hostPort": { + "host": "1.1.1.1", + "port": 1234 + }, + "routers": [ + { + "l4": { + "name": "api-1.default.dc1.internal.foo.consul", + "statPrefix": "upstream.api-1.default.default.dc1" + } + } + ] + }, + { + "name": "api-2:tcp:/path/to/socket", + "direction": "DIRECTION_OUTBOUND", + "unixSocket": { + "path": "/path/to/socket", + "mode": "0666" + }, + "routers": [ + { + "l4": { + "name": "api-2.default.dc1.internal.foo.consul", + "statPrefix": "upstream.api-2.default.default.dc1" + } + } + ] + } + ], + "clusters": { + "api-1.default.dc1.internal.foo.consul": { + "endpointGroup": { + "dynamic": { + "config": { + "disablePanicThreshold": true + }, + "outboundTls": { + "outboundMesh": { + "identityKey": "test-identity", + "validationContext": { + "spiffeIds": [ + "spiffe://foo.consul/ap/default/ns/default/identity/api1-identity" + ] + }, + "sni": "api-1.default.dc1.internal.foo.consul" + } + } + } + } + }, + "api-2.default.dc1.internal.foo.consul": { + "endpointGroup": { + "dynamic": { + "config": { + "disablePanicThreshold": true + }, + "outboundTls": { + "outboundMesh": { + "identityKey": "test-identity", + "validationContext": { + "spiffeIds": [ + "spiffe://foo.consul/ap/default/ns/default/identity/api2-identity" + ] + }, + "sni": "api-2.default.dc1.internal.foo.consul" + } + } + } + } + } + } + }, + "requiredEndpoints": { + "api-1.default.dc1.internal.foo.consul": { + "id": { + "name": "api-1", + "type": { + "group": "catalog", + "groupVersion": "v1alpha1", + "kind": "ServiceEndpoints" + }, + "tenancy": { + "partition": "default", + "namespace": "default", + "peerName": "local" + } + }, + "port": 
"tcp" + }, + "api-2.default.dc1.internal.foo.consul": { + "id": { + "name": "api-2", + "type": { + "group": "catalog", + "groupVersion": "v1alpha1", + "kind": "ServiceEndpoints" + }, + "tenancy": { + "partition": "default", + "namespace": "default", + "peerName": "local" + } + }, + "port": "tcp" + } + } +} \ No newline at end of file diff --git a/internal/mesh/internal/controllers/sidecar-proxy/builder/testdata/l4-multiple-workload-addresses-with-specific-ports.golden b/internal/mesh/internal/controllers/sidecar-proxy/builder/testdata/l4-multiple-workload-addresses-with-specific-ports.golden new file mode 100644 index 00000000000..ff3d8ef0c07 --- /dev/null +++ b/internal/mesh/internal/controllers/sidecar-proxy/builder/testdata/l4-multiple-workload-addresses-with-specific-ports.golden @@ -0,0 +1,71 @@ +{ + "proxyState": { + "identity": { + "tenancy": { + "partition": "default", + "namespace": "default", + "peerName": "local" + }, + "name": "test-identity" + }, + "listeners": [ + { + "name": "public_listener", + "direction": "DIRECTION_INBOUND", + "hostPort": { + "host": "10.0.0.2", + "port": 20000 + }, + "routers": [ + { + "l4": { + "name": "local_app:port1", + "statPrefix": "public_listener" + }, + "inboundTls": { + "inboundMesh": { + "identityKey": "test-identity", + "validationContext": { + "trustBundlePeerNameKeys": [ + "local" + ] + } + } + } + } + ] + } + ], + "clusters": { + "local_app:port1": { + "endpointGroup": { + "static": {} + } + } + }, + "endpoints": { + "local_app:port1": { + "endpoints": [ + { + "hostPort": { + "host": "127.0.0.1", + "port": 8080 + } + } + ] + } + } + }, + "requiredLeafCertificates": { + "test-identity": { + "name": "test-identity", + "namespace": "default", + "partition": "default" + } + }, + "requiredTrustBundles": { + "local": { + "peer": "local" + } + } +} \ No newline at end of file diff --git a/internal/mesh/internal/controllers/sidecar-proxy/builder/testdata/l4-multiple-workload-addresses-without-ports.golden 
b/internal/mesh/internal/controllers/sidecar-proxy/builder/testdata/l4-multiple-workload-addresses-without-ports.golden new file mode 100644 index 00000000000..9c22e94d597 --- /dev/null +++ b/internal/mesh/internal/controllers/sidecar-proxy/builder/testdata/l4-multiple-workload-addresses-without-ports.golden @@ -0,0 +1,71 @@ +{ + "proxyState": { + "identity": { + "tenancy": { + "partition": "default", + "namespace": "default", + "peerName": "local" + }, + "name": "test-identity" + }, + "listeners": [ + { + "name": "public_listener", + "direction": "DIRECTION_INBOUND", + "hostPort": { + "host": "10.0.0.1", + "port": 20000 + }, + "routers": [ + { + "l4": { + "name": "local_app:port1", + "statPrefix": "public_listener" + }, + "inboundTls": { + "inboundMesh": { + "identityKey": "test-identity", + "validationContext": { + "trustBundlePeerNameKeys": [ + "local" + ] + } + } + } + } + ] + } + ], + "clusters": { + "local_app:port1": { + "endpointGroup": { + "static": {} + } + } + }, + "endpoints": { + "local_app:port1": { + "endpoints": [ + { + "hostPort": { + "host": "127.0.0.1", + "port": 8080 + } + } + ] + } + } + }, + "requiredLeafCertificates": { + "test-identity": { + "name": "test-identity", + "namespace": "default", + "partition": "default" + } + }, + "requiredTrustBundles": { + "local": { + "peer": "local" + } + } +} \ No newline at end of file diff --git a/internal/mesh/internal/controllers/sidecar-proxy/builder/testdata/l4-single-destination-ip-port-bind-address.golden b/internal/mesh/internal/controllers/sidecar-proxy/builder/testdata/l4-single-destination-ip-port-bind-address.golden new file mode 100644 index 00000000000..44c97ca76f5 --- /dev/null +++ b/internal/mesh/internal/controllers/sidecar-proxy/builder/testdata/l4-single-destination-ip-port-bind-address.golden @@ -0,0 +1,70 @@ +{ + "proxyState": { + "identity": { + "tenancy": { + "partition": "default", + "namespace": "default", + "peerName": "local" + }, + "name": "test-identity" + }, + "listeners": [ + 
{ + "name": "api-1:tcp:1.1.1.1:1234", + "direction": "DIRECTION_OUTBOUND", + "hostPort": { + "host": "1.1.1.1", + "port": 1234 + }, + "routers": [ + { + "l4": { + "name": "api-1.default.dc1.internal.foo.consul", + "statPrefix": "upstream.api-1.default.default.dc1" + } + } + ] + } + ], + "clusters": { + "api-1.default.dc1.internal.foo.consul": { + "endpointGroup": { + "dynamic": { + "config": { + "disablePanicThreshold": true + }, + "outboundTls": { + "outboundMesh": { + "identityKey": "test-identity", + "validationContext": { + "spiffeIds": [ + "spiffe://foo.consul/ap/default/ns/default/identity/api1-identity" + ] + }, + "sni": "api-1.default.dc1.internal.foo.consul" + } + } + } + } + } + } + }, + "requiredEndpoints": { + "api-1.default.dc1.internal.foo.consul": { + "id": { + "name": "api-1", + "type": { + "group": "catalog", + "groupVersion": "v1alpha1", + "kind": "ServiceEndpoints" + }, + "tenancy": { + "partition": "default", + "namespace": "default", + "peerName": "local" + } + }, + "port": "tcp" + } + } +} \ No newline at end of file diff --git a/internal/mesh/internal/controllers/sidecar-proxy/builder/testdata/l4-single-destination-unix-socket-bind-address.golden b/internal/mesh/internal/controllers/sidecar-proxy/builder/testdata/l4-single-destination-unix-socket-bind-address.golden new file mode 100644 index 00000000000..2dbaa61a1ff --- /dev/null +++ b/internal/mesh/internal/controllers/sidecar-proxy/builder/testdata/l4-single-destination-unix-socket-bind-address.golden @@ -0,0 +1,70 @@ +{ + "proxyState": { + "identity": { + "tenancy": { + "partition": "default", + "namespace": "default", + "peerName": "local" + }, + "name": "test-identity" + }, + "listeners": [ + { + "name": "api-2:tcp:/path/to/socket", + "direction": "DIRECTION_OUTBOUND", + "unixSocket": { + "path": "/path/to/socket", + "mode": "0666" + }, + "routers": [ + { + "l4": { + "name": "api-2.default.dc1.internal.foo.consul", + "statPrefix": "upstream.api-2.default.default.dc1" + } + } + ] + } + 
], + "clusters": { + "api-2.default.dc1.internal.foo.consul": { + "endpointGroup": { + "dynamic": { + "config": { + "disablePanicThreshold": true + }, + "outboundTls": { + "outboundMesh": { + "identityKey": "test-identity", + "validationContext": { + "spiffeIds": [ + "spiffe://foo.consul/ap/default/ns/default/identity/api2-identity" + ] + }, + "sni": "api-2.default.dc1.internal.foo.consul" + } + } + } + } + } + } + }, + "requiredEndpoints": { + "api-2.default.dc1.internal.foo.consul": { + "id": { + "name": "api-2", + "type": { + "group": "catalog", + "groupVersion": "v1alpha1", + "kind": "ServiceEndpoints" + }, + "tenancy": { + "partition": "default", + "namespace": "default", + "peerName": "local" + } + }, + "port": "tcp" + } + } +} \ No newline at end of file diff --git a/internal/mesh/internal/controllers/sidecar-proxy/builder/testdata/l4-single-workload-address-without-ports.golden b/internal/mesh/internal/controllers/sidecar-proxy/builder/testdata/l4-single-workload-address-without-ports.golden new file mode 100644 index 00000000000..9c22e94d597 --- /dev/null +++ b/internal/mesh/internal/controllers/sidecar-proxy/builder/testdata/l4-single-workload-address-without-ports.golden @@ -0,0 +1,71 @@ +{ + "proxyState": { + "identity": { + "tenancy": { + "partition": "default", + "namespace": "default", + "peerName": "local" + }, + "name": "test-identity" + }, + "listeners": [ + { + "name": "public_listener", + "direction": "DIRECTION_INBOUND", + "hostPort": { + "host": "10.0.0.1", + "port": 20000 + }, + "routers": [ + { + "l4": { + "name": "local_app:port1", + "statPrefix": "public_listener" + }, + "inboundTls": { + "inboundMesh": { + "identityKey": "test-identity", + "validationContext": { + "trustBundlePeerNameKeys": [ + "local" + ] + } + } + } + } + ] + } + ], + "clusters": { + "local_app:port1": { + "endpointGroup": { + "static": {} + } + } + }, + "endpoints": { + "local_app:port1": { + "endpoints": [ + { + "hostPort": { + "host": "127.0.0.1", + "port": 8080 + } 
+ } + ] + } + } + }, + "requiredLeafCertificates": { + "test-identity": { + "name": "test-identity", + "namespace": "default", + "partition": "default" + } + }, + "requiredTrustBundles": { + "local": { + "peer": "local" + } + } +} \ No newline at end of file diff --git a/internal/mesh/internal/controllers/sidecar-proxy/cache/cache.go b/internal/mesh/internal/controllers/sidecar-proxy/cache/cache.go new file mode 100644 index 00000000000..5e8dd7d8b12 --- /dev/null +++ b/internal/mesh/internal/controllers/sidecar-proxy/cache/cache.go @@ -0,0 +1,144 @@ +package cache + +import ( + "fmt" + "sync" + + "github.com/hashicorp/consul/internal/mesh/internal/types/intermediate" + "github.com/hashicorp/consul/internal/resource" + "github.com/hashicorp/consul/proto-public/pbresource" +) + +// Cache stores information needed for the mesh controller to reconcile efficiently. +// This currently means storing a list of all destinations for easy look up +// as well as indices of source proxies where those destinations are referenced. +// +// It is the responsibility of controller and its subcomponents (like mapper and data fetcher) +// to keep this cache up-to-date as we're observing new data. +type Cache struct { + lock sync.RWMutex + + // store is a map from destination service reference and port as a string ID + // to the object representing destination reference. + store map[string]*intermediate.CombinedDestinationRef + + // sourceProxiesIndex stores a map from a string representation of source proxy ID + // to the keys in the store map. 
+ sourceProxiesIndex map[string]storeKeys +} + +type storeKeys map[string]struct{} + +func New() *Cache { + return &Cache{ + store: make(map[string]*intermediate.CombinedDestinationRef), + sourceProxiesIndex: make(map[string]storeKeys), + } +} + +func KeyFromID(id *pbresource.ID) string { + return fmt.Sprintf("%s/%s/%s", + resource.ToGVK(id.Type), + resource.TenancyToString(id.Tenancy), + id.Name) +} + +func KeyFromRefAndPort(ref *pbresource.Reference, port string) string { + return fmt.Sprintf("%s:%s", + resource.ReferenceToString(ref), + port) +} + +func (c *Cache) Write(d *intermediate.CombinedDestinationRef) { + c.lock.Lock() + defer c.lock.Unlock() + + key := KeyFromRefAndPort(d.ServiceRef, d.Port) + + c.store[key] = d + + // Update source proxies index. + for _, proxyID := range d.SourceProxies { + proxyIDKey := KeyFromID(proxyID) + + _, ok := c.sourceProxiesIndex[proxyIDKey] + if !ok { + c.sourceProxiesIndex[proxyIDKey] = make(storeKeys) + } + + c.sourceProxiesIndex[proxyIDKey][key] = struct{}{} + } +} + +func (c *Cache) Delete(ref *pbresource.Reference, port string) { + c.lock.Lock() + defer c.lock.Unlock() + + key := KeyFromRefAndPort(ref, port) + + // First get it from the store. + dest, ok := c.store[key] + if !ok { + // If it's not there, return as there's nothing for us to. + return + } + + // Update source proxies indices. + for _, proxyID := range dest.SourceProxies { + proxyIDKey := KeyFromID(proxyID) + + // Delete our destination key from this source proxy. + delete(c.sourceProxiesIndex[proxyIDKey], key) + } + + // Finally, delete this destination from the store. + delete(c.store, key) +} + +func (c *Cache) DeleteSourceProxy(id *pbresource.ID) { + c.lock.Lock() + defer c.lock.Unlock() + + proxyIDKey := KeyFromID(id) + + // Get all destination keys. + destKeys := c.sourceProxiesIndex[proxyIDKey] + + for destKey := range destKeys { + // Read destination. 
+ dest, ok := c.store[destKey] + if !ok { + // If there's no destination with that key, skip it as there's nothing for us to do. + continue + } + + // Delete the source proxy ID. + delete(dest.SourceProxies, proxyIDKey) + } + + // Finally, delete the index for this proxy. + delete(c.sourceProxiesIndex, proxyIDKey) +} + +func (c *Cache) ReadDestination(ref *pbresource.Reference, port string) *intermediate.CombinedDestinationRef { + c.lock.RLock() + defer c.lock.RUnlock() + + key := KeyFromRefAndPort(ref, port) + return c.store[key] +} + +func (c *Cache) DestinationsBySourceProxy(id *pbresource.ID) []*intermediate.CombinedDestinationRef { + c.lock.RLock() + defer c.lock.RUnlock() + + var destinations []*intermediate.CombinedDestinationRef + + proxyIDKey := KeyFromID(id) + + for destKey := range c.sourceProxiesIndex[proxyIDKey] { + destinations = append(destinations, c.store[destKey]) + } + + return destinations +} diff --git a/internal/mesh/internal/controllers/sidecar-proxy/cache/cache_test.go b/internal/mesh/internal/controllers/sidecar-proxy/cache/cache_test.go new file mode 100644 index 00000000000..7f0dd4b437d --- /dev/null +++ b/internal/mesh/internal/controllers/sidecar-proxy/cache/cache_test.go @@ -0,0 +1,168 @@ +package cache + +import ( + "testing" + + "github.com/hashicorp/consul/internal/catalog" + "github.com/hashicorp/consul/internal/mesh/internal/types" + "github.com/hashicorp/consul/internal/mesh/internal/types/intermediate" + "github.com/hashicorp/consul/internal/resource/resourcetest" + "github.com/hashicorp/consul/proto-public/pbresource" + "github.com/stretchr/testify/require" +) + +func TestWrite_Create(t *testing.T) { + cache := New() + + proxyID := resourcetest.Resource(types.ProxyStateTemplateType, "service-workload-abc").ID() + destination := testDestination(proxyID) + cache.Write(destination) + + destKey := KeyFromRefAndPort(destination.ServiceRef, destination.Port) + require.Equal(t, destination, cache.store[destKey]) + actualSourceProxies 
:= cache.sourceProxiesIndex + expectedSourceProxies := map[string]storeKeys{ + KeyFromID(proxyID): {destKey: struct{}{}}, + } + require.Equal(t, expectedSourceProxies, actualSourceProxies) + + // Check that we can read back the destination successfully. + require.Equal(t, destination, cache.ReadDestination(destination.ServiceRef, destination.Port)) +} + +func TestWrite_Update(t *testing.T) { + cache := New() + + proxyID := resourcetest.Resource(types.ProxyStateTemplateType, "service-workload-abc").ID() + destination1 := testDestination(proxyID) + cache.Write(destination1) + + // Add another destination for the same proxy ID. + destination2 := testDestination(proxyID) + destination2.ServiceRef = resourcetest.Resource(catalog.ServiceType, "test-service-2").ReferenceNoSection() + cache.Write(destination2) + + // Check that the source proxies are updated. + actualSourceProxies := cache.sourceProxiesIndex + expectedSourceProxies := map[string]storeKeys{ + KeyFromID(proxyID): { + KeyFromRefAndPort(destination1.ServiceRef, destination1.Port): struct{}{}, + KeyFromRefAndPort(destination2.ServiceRef, destination2.Port): struct{}{}, + }, + } + require.Equal(t, expectedSourceProxies, actualSourceProxies) + + // Add another destination for a different proxy. 
+ anotherProxyID := resourcetest.Resource(types.ProxyStateTemplateType, "service-workload-def").ID() + destination3 := testDestination(anotherProxyID) + destination3.ServiceRef = resourcetest.Resource(catalog.ServiceType, "test-service-3").ReferenceNoSection() + cache.Write(destination3) + + actualSourceProxies = cache.sourceProxiesIndex + expectedSourceProxies = map[string]storeKeys{ + KeyFromID(proxyID): { + KeyFromRefAndPort(destination1.ServiceRef, destination1.Port): struct{}{}, + KeyFromRefAndPort(destination2.ServiceRef, destination2.Port): struct{}{}, + }, + KeyFromID(anotherProxyID): { + KeyFromRefAndPort(destination3.ServiceRef, destination3.Port): struct{}{}, + }, + } + require.Equal(t, expectedSourceProxies, actualSourceProxies) +} + +func TestWrite_Delete(t *testing.T) { + cache := New() + + proxyID := resourcetest.Resource(types.ProxyStateTemplateType, "service-workload-abc").ID() + destination1 := testDestination(proxyID) + cache.Write(destination1) + + // Add another destination for the same proxy ID. + destination2 := testDestination(proxyID) + destination2.ServiceRef = resourcetest.Resource(catalog.ServiceType, "test-service-2").ReferenceNoSection() + cache.Write(destination2) + + cache.Delete(destination1.ServiceRef, destination1.Port) + + require.NotContains(t, cache.store, KeyFromRefAndPort(destination1.ServiceRef, destination1.Port)) + + // Check that the source proxies are updated. + actualSourceProxies := cache.sourceProxiesIndex + expectedSourceProxies := map[string]storeKeys{ + KeyFromID(proxyID): { + KeyFromRefAndPort(destination2.ServiceRef, destination2.Port): struct{}{}, + }, + } + require.Equal(t, expectedSourceProxies, actualSourceProxies) + + // Try to delete non-existing destination and check that nothing has changed.. 
+ cache.Delete( + resourcetest.Resource(catalog.ServiceType, "does-not-exist").ReferenceNoSection(), + "doesn't-matter") + + require.Contains(t, cache.store, KeyFromRefAndPort(destination2.ServiceRef, destination2.Port)) + require.Equal(t, expectedSourceProxies, cache.sourceProxiesIndex) +} + +func TestDeleteSourceProxy(t *testing.T) { + cache := New() + + proxyID := resourcetest.Resource(types.ProxyStateTemplateType, "service-workload-abc").ID() + destination1 := testDestination(proxyID) + cache.Write(destination1) + + // Add another destination for the same proxy ID. + destination2 := testDestination(proxyID) + destination2.ServiceRef = resourcetest.Resource(catalog.ServiceType, "test-service-2").ReferenceNoSection() + cache.Write(destination2) + + cache.DeleteSourceProxy(proxyID) + + // Check that source proxy index is gone. + proxyKey := KeyFromID(proxyID) + require.NotContains(t, cache.sourceProxiesIndex, proxyKey) + + // Check that the destinations no longer have this proxy as the source. + require.NotContains(t, destination1.SourceProxies, proxyKey) + require.NotContains(t, destination2.SourceProxies, proxyKey) + + // Try to add a non-existent key to source proxy index + cache.sourceProxiesIndex[proxyKey] = map[string]struct{}{"doesn't-exist": {}} + cache.DeleteSourceProxy(proxyID) + + // Check that source proxy index is gone. + require.NotContains(t, cache.sourceProxiesIndex, proxyKey) + + // Check that the destinations no longer have this proxy as the source. + require.NotContains(t, destination1.SourceProxies, proxyKey) + require.NotContains(t, destination2.SourceProxies, proxyKey) +} + +func TestDestinationsBySourceProxy(t *testing.T) { + cache := New() + + proxyID := resourcetest.Resource(types.ProxyStateTemplateType, "service-workload-abc").ID() + destination1 := testDestination(proxyID) + cache.Write(destination1) + + // Add another destination for the same proxy ID. 
+ destination2 := testDestination(proxyID) + destination2.ServiceRef = resourcetest.Resource(catalog.ServiceType, "test-service-2").ReferenceNoSection() + cache.Write(destination2) + + actualDestinations := cache.DestinationsBySourceProxy(proxyID) + expectedDestinations := []*intermediate.CombinedDestinationRef{destination1, destination2} + require.ElementsMatch(t, expectedDestinations, actualDestinations) +} + +func testDestination(proxyID *pbresource.ID) *intermediate.CombinedDestinationRef { + return &intermediate.CombinedDestinationRef{ + ServiceRef: resourcetest.Resource(catalog.ServiceType, "test-service").ReferenceNoSection(), + Port: "tcp", + ExplicitDestinationsID: resourcetest.Resource(types.UpstreamsType, "test-servicedestinations").ID(), + SourceProxies: map[string]*pbresource.ID{ + KeyFromID(proxyID): proxyID, + }, + } +} diff --git a/internal/mesh/internal/controllers/sidecar-proxy/controller.go b/internal/mesh/internal/controllers/sidecar-proxy/controller.go new file mode 100644 index 00000000000..2f1ea8c9437 --- /dev/null +++ b/internal/mesh/internal/controllers/sidecar-proxy/controller.go @@ -0,0 +1,168 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package sidecar_proxy + +import ( + "context" + + "github.com/hashicorp/consul/internal/catalog" + "github.com/hashicorp/consul/internal/controller" + "github.com/hashicorp/consul/internal/mesh/internal/controllers/sidecar-proxy/builder" + "github.com/hashicorp/consul/internal/mesh/internal/controllers/sidecar-proxy/cache" + "github.com/hashicorp/consul/internal/mesh/internal/controllers/sidecar-proxy/fetcher" + "github.com/hashicorp/consul/internal/mesh/internal/controllers/sidecar-proxy/mapper" + "github.com/hashicorp/consul/internal/mesh/internal/types" + "github.com/hashicorp/consul/internal/mesh/internal/types/intermediate" + "github.com/hashicorp/consul/internal/resource" + "github.com/hashicorp/consul/proto-public/pbresource" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/types/known/anypb" +) + +// ControllerName is the name for this controller. It's used for logging or status keys. +const ControllerName = "consul.io/sidecar-proxy-controller" + +type TrustDomainFetcher func() (string, error) + +func Controller(cache *cache.Cache, mapper *mapper.Mapper, trustDomainFetcher TrustDomainFetcher) controller.Controller { + if cache == nil || mapper == nil || trustDomainFetcher == nil { + panic("cache, mapper and trust domain fetcher are required") + } + + return controller.ForType(types.ProxyStateTemplateType). + WithWatch(catalog.ServiceEndpointsType, mapper.MapServiceEndpointsToProxyStateTemplate). + WithWatch(types.UpstreamsType, mapper.MapDestinationsToProxyStateTemplate). 
+ WithReconciler(&reconciler{cache: cache, getTrustDomain: trustDomainFetcher}) +} + +type reconciler struct { + cache *cache.Cache + getTrustDomain TrustDomainFetcher +} + +func (r *reconciler) Reconcile(ctx context.Context, rt controller.Runtime, req controller.Request) error { + rt.Logger = rt.Logger.With("resource-id", req.ID, "controller", ControllerName) + + rt.Logger.Trace("reconciling proxy state template", "id", req.ID) + + // Instantiate a data fetcher to fetch all reconciliation data. + dataFetcher := fetcher.Fetcher{Client: rt.Client, Cache: r.cache} + + // Check if the apiWorkload exists. + workloadID := resource.ReplaceType(catalog.WorkloadType, req.ID) + workload, err := dataFetcher.FetchWorkload(ctx, resource.ReplaceType(catalog.WorkloadType, req.ID)) + if err != nil { + rt.Logger.Error("error reading the associated workload", "error", err) + return err + } + if workload == nil { + // If apiWorkload has been deleted, then return as ProxyStateTemplate should be cleaned up + // by the garbage collector because of the owner reference. + rt.Logger.Trace("workload doesn't exist; skipping reconciliation", "workload", workloadID) + return nil + } + + proxyStateTemplate, err := dataFetcher.FetchProxyStateTemplate(ctx, req.ID) + if err != nil { + rt.Logger.Error("error reading proxy state template", "error", err) + return nil + } + + if proxyStateTemplate == nil { + // If proxy state template has been deleted + rt.Logger.Trace("proxy state template for this workload doesn't yet exist; generating a new one", "id", req.ID) + } + + if !fetcher.IsMeshEnabled(workload.Workload.Ports) { + // Skip non-mesh workloads. + + // If there's existing proxy state template, delete it. 
+ if proxyStateTemplate != nil { + rt.Logger.Trace("deleting existing proxy state template because workload is no longer on the mesh", "id", req.ID) + _, err = rt.Client.Delete(ctx, &pbresource.DeleteRequest{Id: req.ID}) + if err != nil { + rt.Logger.Error("error deleting existing proxy state template", "error", err) + return err + } + + // Remove it from cache. + r.cache.DeleteSourceProxy(req.ID) + } + rt.Logger.Trace("skipping proxy state template generation because workload is not on the mesh", "workload", workload.Resource.Id) + return nil + } + + // First get the trust domain. + trustDomain, err := r.getTrustDomain() + if err != nil { + rt.Logger.Error("error fetching trust domain to compute proxy state template", "error", err) + return err + } + + b := builder.New(req.ID, workloadIdentityRefFromWorkload(workload), trustDomain). + BuildLocalApp(workload.Workload) + + // Get all destinationsData. + destinationsRefs := r.cache.DestinationsBySourceProxy(req.ID) + destinationsData, statuses, err := dataFetcher.FetchDestinationsData(ctx, destinationsRefs) + if err != nil { + rt.Logger.Error("error fetching destinations for this proxy", "id", req.ID, "error", err) + return err + } + + b.BuildDestinations(destinationsData) + + newProxyTemplate := b.Build() + + if proxyStateTemplate == nil || !proto.Equal(proxyStateTemplate.Tmpl, newProxyTemplate) { + proxyTemplateData, err := anypb.New(newProxyTemplate) + if err != nil { + rt.Logger.Error("error creating proxy state template data", "error", err) + return err + } + rt.Logger.Trace("updating proxy state template", "id", req.ID) + _, err = rt.Client.Write(ctx, &pbresource.WriteRequest{ + Resource: &pbresource.Resource{ + Id: req.ID, + Owner: workload.Resource.Id, + Data: proxyTemplateData, + }, + }) + if err != nil { + rt.Logger.Error("error writing proxy state template", "error", err) + return err + } + } else { + rt.Logger.Trace("proxy state template data has not changed, skipping update", "id", req.ID) + } + + // 
Update any statuses. + for _, status := range statuses { + updatedStatus := &pbresource.Status{ + ObservedGeneration: status.Generation, + } + updatedStatus.Conditions = status.Conditions + // If the status is unchanged then we should return and avoid the unnecessary write + if !resource.EqualStatus(status.OldStatus[ControllerName], updatedStatus, false) { + rt.Logger.Trace("updating status", "id", status.ID) + _, err = rt.Client.WriteStatus(ctx, &pbresource.WriteStatusRequest{ + Id: status.ID, + Key: ControllerName, + Status: updatedStatus, + }) + if err != nil { + rt.Logger.Error("error writing new status", "id", status.ID, "error", err) + return err + } + } + } + return nil +} + +func workloadIdentityRefFromWorkload(w *intermediate.Workload) *pbresource.Reference { + return &pbresource.Reference{ + Name: w.Workload.Identity, + Tenancy: w.Resource.Id.Tenancy, + } +} diff --git a/internal/mesh/internal/controllers/sidecar-proxy/controller_test.go b/internal/mesh/internal/controllers/sidecar-proxy/controller_test.go new file mode 100644 index 00000000000..74e56de9f38 --- /dev/null +++ b/internal/mesh/internal/controllers/sidecar-proxy/controller_test.go @@ -0,0 +1,370 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package sidecar_proxy + +import ( + "context" + "testing" + + svctest "github.com/hashicorp/consul/agent/grpc-external/services/resource/testing" + "github.com/hashicorp/consul/internal/catalog" + "github.com/hashicorp/consul/internal/controller" + "github.com/hashicorp/consul/internal/mesh/internal/controllers/sidecar-proxy/builder" + "github.com/hashicorp/consul/internal/mesh/internal/controllers/sidecar-proxy/cache" + "github.com/hashicorp/consul/internal/mesh/internal/controllers/sidecar-proxy/mapper" + "github.com/hashicorp/consul/internal/mesh/internal/controllers/sidecar-proxy/status" + "github.com/hashicorp/consul/internal/mesh/internal/types" + "github.com/hashicorp/consul/internal/resource" + "github.com/hashicorp/consul/internal/resource/resourcetest" + pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v1alpha1" + pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v1alpha1" + "github.com/hashicorp/consul/proto-public/pbmesh/v1alpha1/pbproxystate" + "github.com/hashicorp/consul/proto-public/pbresource" + "github.com/hashicorp/consul/proto/private/prototest" + "github.com/hashicorp/consul/sdk/testutil" + "github.com/hashicorp/consul/sdk/testutil/retry" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" +) + +type meshControllerTestSuite struct { + suite.Suite + + client *resourcetest.Client + runtime controller.Runtime + + ctl *reconciler + ctx context.Context + + apiWorkloadID *pbresource.ID + apiWorkload *pbcatalog.Workload + apiService *pbresource.Resource + apiEndpoints *pbresource.Resource + apiEndpointsData *pbcatalog.ServiceEndpoints + webWorkload *pbresource.Resource + proxyStateTemplate *pbmesh.ProxyStateTemplate +} + +func (suite *meshControllerTestSuite) SetupTest() { + resourceClient := svctest.RunResourceService(suite.T(), types.Register, catalog.RegisterTypes) + suite.client = resourcetest.NewClient(resourceClient) + suite.runtime = controller.Runtime{Client: 
resourceClient, Logger: testutil.Logger(suite.T())} + suite.ctx = testutil.TestContext(suite.T()) + + suite.ctl = &reconciler{ + cache: cache.New(), + getTrustDomain: func() (string, error) { + return "test.consul", nil + }, + } + + suite.apiWorkload = &pbcatalog.Workload{ + Identity: "api-identity", + Addresses: []*pbcatalog.WorkloadAddress{ + { + Host: "10.0.0.1", + }, + }, + Ports: map[string]*pbcatalog.WorkloadPort{ + "tcp": {Port: 8080, Protocol: pbcatalog.Protocol_PROTOCOL_TCP}, + "mesh": {Port: 20000, Protocol: pbcatalog.Protocol_PROTOCOL_MESH}, + }, + } + + suite.apiWorkloadID = resourcetest.Resource(catalog.WorkloadType, "api-abc"). + WithData(suite.T(), suite.apiWorkload). + Write(suite.T(), resourceClient).Id + + suite.apiService = resourcetest.Resource(catalog.ServiceType, "api-service"). + WithData(suite.T(), &pbcatalog.Service{ + Workloads: &pbcatalog.WorkloadSelector{Names: []string{"api-abc"}}, + Ports: []*pbcatalog.ServicePort{ + {TargetPort: "tcp", Protocol: pbcatalog.Protocol_PROTOCOL_TCP}, + }}). + Write(suite.T(), suite.client.ResourceServiceClient) + + suite.apiEndpointsData = &pbcatalog.ServiceEndpoints{ + Endpoints: []*pbcatalog.Endpoint{ + { + TargetRef: suite.apiWorkloadID, + Addresses: suite.apiWorkload.Addresses, + Ports: suite.apiWorkload.Ports, + Identity: "api-identity", + }, + }, + } + suite.apiEndpoints = resourcetest.Resource(catalog.ServiceEndpointsType, "api-service"). + WithData(suite.T(), suite.apiEndpointsData). + Write(suite.T(), suite.client.ResourceServiceClient) + + webWorkloadData := &pbcatalog.Workload{ + Identity: "web-identity", + Addresses: []*pbcatalog.WorkloadAddress{ + { + Host: "10.0.0.2", + }, + }, + Ports: map[string]*pbcatalog.WorkloadPort{ + "tcp": {Port: 8081, Protocol: pbcatalog.Protocol_PROTOCOL_TCP}, + "mesh": {Port: 20000, Protocol: pbcatalog.Protocol_PROTOCOL_MESH}, + }, + } + suite.webWorkload = resourcetest.Resource(catalog.WorkloadType, "web-def"). + WithData(suite.T(), webWorkloadData). 
+ Write(suite.T(), suite.client) + + resourcetest.Resource(catalog.ServiceType, "web"). + WithData(suite.T(), &pbcatalog.Service{ + Workloads: &pbcatalog.WorkloadSelector{Names: []string{"web-def"}}, + Ports: []*pbcatalog.ServicePort{ + {TargetPort: "tcp", Protocol: pbcatalog.Protocol_PROTOCOL_TCP}, + {TargetPort: "mesh", Protocol: pbcatalog.Protocol_PROTOCOL_MESH}, + }}). + Write(suite.T(), suite.client) + + resourcetest.Resource(catalog.ServiceEndpointsType, "web"). + WithData(suite.T(), &pbcatalog.ServiceEndpoints{ + Endpoints: []*pbcatalog.Endpoint{ + { + TargetRef: suite.webWorkload.Id, + Addresses: webWorkloadData.Addresses, + Ports: webWorkloadData.Ports, + Identity: "web-identity", + }, + }, + }).Write(suite.T(), suite.client) + + identityRef := &pbresource.Reference{ + Name: suite.apiWorkload.Identity, + Tenancy: suite.apiWorkloadID.Tenancy, + } + + suite.proxyStateTemplate = builder.New(suite.apiWorkloadID, identityRef, "test.consul"). + BuildLocalApp(suite.apiWorkload). + Build() +} + +func (suite *meshControllerTestSuite) TestReconcile_NoWorkload() { + // This test ensures that removed workloads are ignored and don't result + // in the creation of the proxy state template. + err := suite.ctl.Reconcile(context.Background(), suite.runtime, controller.Request{ + ID: resourceID(types.ProxyStateTemplateType, "not-found"), + }) + require.NoError(suite.T(), err) + + suite.client.RequireResourceNotFound(suite.T(), resourceID(types.ProxyStateTemplateType, "not-found")) +} + +func (suite *meshControllerTestSuite) TestReconcile_NonMeshWorkload() { + // This test ensures that non-mesh workloads are ignored by the controller. + + nonMeshWorkload := &pbcatalog.Workload{ + Addresses: []*pbcatalog.WorkloadAddress{ + { + Host: "10.0.0.1", + }, + }, + Ports: map[string]*pbcatalog.WorkloadPort{ + "tcp": {Port: 8080, Protocol: pbcatalog.Protocol_PROTOCOL_TCP}, + }, + } + + resourcetest.Resource(catalog.WorkloadType, "test-non-mesh-api-workload"). 
+ WithData(suite.T(), nonMeshWorkload). + Write(suite.T(), suite.client.ResourceServiceClient) + + err := suite.ctl.Reconcile(context.Background(), suite.runtime, controller.Request{ + ID: resourceID(types.ProxyStateTemplateType, "test-non-mesh-api-workload"), + }) + + require.NoError(suite.T(), err) + suite.client.RequireResourceNotFound(suite.T(), resourceID(types.ProxyStateTemplateType, "test-non-mesh-api-workload")) +} + +func (suite *meshControllerTestSuite) TestReconcile_NoExistingProxyStateTemplate() { + err := suite.ctl.Reconcile(context.Background(), suite.runtime, controller.Request{ + ID: resourceID(types.ProxyStateTemplateType, suite.apiWorkloadID.Name), + }) + require.NoError(suite.T(), err) + + res := suite.client.RequireResourceExists(suite.T(), resourceID(types.ProxyStateTemplateType, suite.apiWorkloadID.Name)) + require.NoError(suite.T(), err) + require.NotNil(suite.T(), res.Data) + prototest.AssertDeepEqual(suite.T(), suite.apiWorkloadID, res.Owner) +} + +func (suite *meshControllerTestSuite) TestReconcile_ExistingProxyStateTemplate_WithUpdates() { + // This test ensures that we write a new proxy state template when there are changes. + + // Write the original. + resourcetest.Resource(types.ProxyStateTemplateType, "api-abc"). + WithData(suite.T(), suite.proxyStateTemplate). + WithOwner(suite.apiWorkloadID). + Write(suite.T(), suite.client.ResourceServiceClient) + + // Update the apiWorkload. + suite.apiWorkload.Ports["mesh"].Port = 21000 + updatedWorkloadID := resourcetest.Resource(catalog.WorkloadType, "api-abc"). + WithData(suite.T(), suite.apiWorkload). 
+ Write(suite.T(), suite.client.ResourceServiceClient).Id + + err := suite.ctl.Reconcile(context.Background(), suite.runtime, controller.Request{ + ID: resourceID(types.ProxyStateTemplateType, updatedWorkloadID.Name), + }) + require.NoError(suite.T(), err) + + res := suite.client.RequireResourceExists(suite.T(), resourceID(types.ProxyStateTemplateType, updatedWorkloadID.Name)) + require.NoError(suite.T(), err) + require.NotNil(suite.T(), res.Data) + prototest.AssertDeepEqual(suite.T(), updatedWorkloadID, res.Owner) + + var updatedProxyStateTemplate pbmesh.ProxyStateTemplate + err = res.Data.UnmarshalTo(&updatedProxyStateTemplate) + require.NoError(suite.T(), err) + + // Check that our value is updated in the proxy state template. + inboundListenerPort := updatedProxyStateTemplate.ProxyState.Listeners[0]. + BindAddress.(*pbproxystate.Listener_HostPort).HostPort.Port + require.Equal(suite.T(), uint32(21000), inboundListenerPort) +} + +func (suite *meshControllerTestSuite) TestReconcile_ExistingProxyStateTemplate_NoUpdates() { + // This test ensures that we skip writing of the proxy state template when there are no changes to it. + + // Write the original. + originalProxyState := resourcetest.Resource(types.ProxyStateTemplateType, "api-abc"). + WithData(suite.T(), suite.proxyStateTemplate). + WithOwner(suite.apiWorkloadID). + Write(suite.T(), suite.client.ResourceServiceClient) + + // Update the metadata on the apiWorkload which should result in no changes. + updatedWorkloadID := resourcetest.Resource(catalog.WorkloadType, "api-abc"). + WithData(suite.T(), suite.apiWorkload). + WithMeta("some", "meta"). 
+ Write(suite.T(), suite.client.ResourceServiceClient).Id + + err := suite.ctl.Reconcile(context.Background(), suite.runtime, controller.Request{ + ID: resourceID(types.ProxyStateTemplateType, updatedWorkloadID.Name), + }) + require.NoError(suite.T(), err) + + updatedProxyState := suite.client.RequireResourceExists(suite.T(), resourceID(types.ProxyStateTemplateType, suite.apiWorkloadID.Name)) + resourcetest.RequireVersionUnchanged(suite.T(), updatedProxyState, originalProxyState.Version) +} + +func (suite *meshControllerTestSuite) TestController() { + // This is a comprehensive test that checks the overall controller behavior as various resources change state. + // This should test interactions between the reconciler, the mappers, and the cache to ensure they work + // together and produce expected result. + + // Run the controller manager + mgr := controller.NewManager(suite.client, suite.runtime.Logger) + c := cache.New() + m := mapper.New(c) + + mgr.Register(Controller(c, m, func() (string, error) { + return "test.consul", nil + })) + mgr.SetRaftLeader(true) + go mgr.Run(suite.ctx) + + // Create proxy state template IDs to check against in this test. + apiProxyStateTemplateID := resourcetest.Resource(types.ProxyStateTemplateType, "api-abc").ID() + webProxyStateTemplateID := resourcetest.Resource(types.ProxyStateTemplateType, "web-def").ID() + + // Check that proxy state template resource is generated for both the api and web workloads. + var webProxyStateTemplate *pbresource.Resource + retry.Run(suite.T(), func(r *retry.R) { + suite.client.RequireResourceExists(r, apiProxyStateTemplateID) + webProxyStateTemplate = suite.client.RequireResourceExists(r, webProxyStateTemplateID) + }) + + // Add a source service and check that a new proxy state is generated. + webDestinations := resourcetest.Resource(types.UpstreamsType, "web-destinations"). 
+ WithData(suite.T(), &pbmesh.Upstreams{ + Workloads: &pbcatalog.WorkloadSelector{Names: []string{"web-def"}}, + Upstreams: []*pbmesh.Upstream{ + { + DestinationRef: resource.Reference(suite.apiService.Id, ""), + DestinationPort: "tcp", + }, + }, + }).Write(suite.T(), suite.client) + webProxyStateTemplate = suite.client.WaitForNewVersion(suite.T(), webProxyStateTemplateID, webProxyStateTemplate.Version) + + // Update destination's service apiEndpoints and workload to be non-mesh + // and check that: + // * api's proxy state template is deleted + // * we get a new web proxy resource re-generated + // * the status on Upstreams resource is updated with a validation error + nonMeshPorts := map[string]*pbcatalog.WorkloadPort{ + "tcp": {Port: 8080, Protocol: pbcatalog.Protocol_PROTOCOL_TCP}, + } + resourcetest.Resource(catalog.ServiceEndpointsType, "api-service"). + WithData(suite.T(), &pbcatalog.ServiceEndpoints{ + Endpoints: []*pbcatalog.Endpoint{ + { + TargetRef: suite.apiWorkloadID, + Addresses: suite.apiWorkload.Addresses, + Ports: nonMeshPorts, + Identity: "api-identity", + }, + }, + }). + Write(suite.T(), suite.client.ResourceServiceClient) + + resourcetest.Resource(catalog.WorkloadType, "api-abc"). + WithData(suite.T(), &pbcatalog.Workload{ + Identity: "api-identity", + Addresses: suite.apiWorkload.Addresses, + Ports: nonMeshPorts}). + Write(suite.T(), suite.client) + + // Check that api proxy template is gone. + retry.Run(suite.T(), func(r *retry.R) { + suite.client.RequireResourceNotFound(r, apiProxyStateTemplateID) + }) + + // Check status on the pbmesh.Upstreams resource. + serviceRef := cache.KeyFromRefAndPort(resource.Reference(suite.apiService.Id, ""), "tcp") + suite.client.WaitForStatusCondition(suite.T(), webDestinations.Id, ControllerName, + status.ConditionNonMeshDestination(serviceRef)) + + // We should get a new web proxy template resource because this destination should be removed. 
+ webProxyStateTemplate = suite.client.WaitForNewVersion(suite.T(), webProxyStateTemplateID, webProxyStateTemplate.Version) + + // Update destination's service apiEndpoints back to mesh and check that we get a new web proxy resource re-generated + // and that the status on Upstreams resource is updated to be empty. + resourcetest.Resource(catalog.ServiceEndpointsType, "api-service"). + WithData(suite.T(), suite.apiEndpointsData). + Write(suite.T(), suite.client.ResourceServiceClient) + + suite.client.WaitForStatusCondition(suite.T(), webDestinations.Id, ControllerName, + status.ConditionMeshDestination(serviceRef)) + + // We should also get a new web proxy template resource as this destination should be added again. + webProxyStateTemplate = suite.client.WaitForNewVersion(suite.T(), webProxyStateTemplateID, webProxyStateTemplate.Version) + + // Delete the proxy state template resource and check that it gets regenerated. + _, err := suite.client.Delete(suite.ctx, &pbresource.DeleteRequest{Id: webProxyStateTemplateID}) + require.NoError(suite.T(), err) + + suite.client.WaitForNewVersion(suite.T(), webProxyStateTemplateID, webProxyStateTemplate.Version) +} + +func TestMeshController(t *testing.T) { + suite.Run(t, new(meshControllerTestSuite)) +} + +func resourceID(rtype *pbresource.Type, name string) *pbresource.ID { + return &pbresource.ID{ + Type: rtype, + Tenancy: &pbresource.Tenancy{ + Partition: "default", + Namespace: "default", + PeerName: "local", + }, + Name: name, + } +} diff --git a/internal/mesh/internal/controllers/sidecar-proxy/fetcher/data_fetcher.go b/internal/mesh/internal/controllers/sidecar-proxy/fetcher/data_fetcher.go new file mode 100644 index 00000000000..ac9e8cc2077 --- /dev/null +++ b/internal/mesh/internal/controllers/sidecar-proxy/fetcher/data_fetcher.go @@ -0,0 +1,244 @@ +package fetcher + +import ( + "context" + + "github.com/hashicorp/consul/internal/catalog" + 
"github.com/hashicorp/consul/internal/mesh/internal/controllers/sidecar-proxy/cache" + ctrlStatus "github.com/hashicorp/consul/internal/mesh/internal/controllers/sidecar-proxy/status" + "github.com/hashicorp/consul/internal/mesh/internal/types" + intermediateTypes "github.com/hashicorp/consul/internal/mesh/internal/types/intermediate" + "github.com/hashicorp/consul/internal/resource" + pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v1alpha1" + pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v1alpha1" + "github.com/hashicorp/consul/proto-public/pbresource" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +type Fetcher struct { + Client pbresource.ResourceServiceClient + Cache *cache.Cache +} + +func (f *Fetcher) FetchWorkload(ctx context.Context, id *pbresource.ID) (*intermediateTypes.Workload, error) { + rsp, err := f.Client.Read(ctx, &pbresource.ReadRequest{Id: id}) + + switch { + case status.Code(err) == codes.NotFound: + // We also need to make sure to delete the associated proxy from cache. + // We are ignoring errors from cache here as this deletion is best effort. 
+ f.Cache.DeleteSourceProxy(resource.ReplaceType(types.ProxyStateTemplateType, id)) + return nil, nil + case err != nil: + return nil, err + } + + w := &intermediateTypes.Workload{ + Resource: rsp.Resource, + } + + var workload pbcatalog.Workload + err = rsp.Resource.Data.UnmarshalTo(&workload) + if err != nil { + return nil, resource.NewErrDataParse(&workload, err) + } + + w.Workload = &workload + return w, nil +} + +func (f *Fetcher) FetchProxyStateTemplate(ctx context.Context, id *pbresource.ID) (*intermediateTypes.ProxyStateTemplate, error) { + rsp, err := f.Client.Read(ctx, &pbresource.ReadRequest{Id: id}) + + switch { + case status.Code(err) == codes.NotFound: + return nil, nil + case err != nil: + return nil, err + } + + p := &intermediateTypes.ProxyStateTemplate{ + Resource: rsp.Resource, + } + + var tmpl pbmesh.ProxyStateTemplate + err = rsp.Resource.Data.UnmarshalTo(&tmpl) + if err != nil { + return nil, resource.NewErrDataParse(&tmpl, err) + } + + p.Tmpl = &tmpl + return p, nil +} + +func (f *Fetcher) FetchServiceEndpoints(ctx context.Context, id *pbresource.ID) (*intermediateTypes.ServiceEndpoints, error) { + rsp, err := f.Client.Read(ctx, &pbresource.ReadRequest{Id: id}) + + switch { + case status.Code(err) == codes.NotFound: + return nil, nil + case err != nil: + return nil, err + } + + se := &intermediateTypes.ServiceEndpoints{ + Resource: rsp.Resource, + } + + var endpoints pbcatalog.ServiceEndpoints + err = rsp.Resource.Data.UnmarshalTo(&endpoints) + if err != nil { + return nil, resource.NewErrDataParse(&endpoints, err) + } + + se.Endpoints = &endpoints + return se, nil +} + +func (f *Fetcher) FetchDestinations(ctx context.Context, id *pbresource.ID) (*intermediateTypes.Destinations, error) { + rsp, err := f.Client.Read(ctx, &pbresource.ReadRequest{Id: id}) + + switch { + case status.Code(err) == codes.NotFound: + return nil, nil + case err != nil: + return nil, err + } + + u := &intermediateTypes.Destinations{ + Resource: rsp.Resource, + } + + 
var destinations pbmesh.Upstreams + err = rsp.Resource.Data.UnmarshalTo(&destinations) + if err != nil { + return nil, resource.NewErrDataParse(&destinations, err) + } + + u.Destinations = &destinations + return u, nil +} + +func (f *Fetcher) FetchDestinationsData( + ctx context.Context, + destinationRefs []*intermediateTypes.CombinedDestinationRef, +) ([]*intermediateTypes.Destination, map[string]*intermediateTypes.Status, error) { + + var destinations []*intermediateTypes.Destination + statuses := make(map[string]*intermediateTypes.Status) + for _, dest := range destinationRefs { + // Fetch Destinations resource if there is one. + us, err := f.FetchDestinations(ctx, dest.ExplicitDestinationsID) + if err != nil { + // If there's an error, return and force another reconcile instead of computing + // partial proxy state. + return nil, statuses, err + } + + if us == nil { + // If the Destinations resource is not found, then we should delete it from cache and continue. + f.Cache.Delete(dest.ServiceRef, dest.Port) + continue + } + + u := &intermediateTypes.Destination{} + // As Destinations resource contains a list of destinations, + // we need to find the one that references our service and port. + u.Explicit = findDestination(dest.ServiceRef, dest.Port, us.Destinations) + + // Fetch ServiceEndpoints. + serviceID := resource.IDFromReference(dest.ServiceRef) + se, err := f.FetchServiceEndpoints(ctx, resource.ReplaceType(catalog.ServiceEndpointsType, serviceID)) + if err != nil { + return nil, statuses, err + } + + serviceRef := cache.KeyFromRefAndPort(dest.ServiceRef, dest.Port) + upstreamsRef := cache.KeyFromID(us.Resource.Id) + if se == nil { + // If the Service Endpoints resource is not found, then we update the status of the Upstreams resource + // but not remove it from cache in case it comes back. 
+ updateStatusCondition(statuses, upstreamsRef, dest.ExplicitDestinationsID, + us.Resource.Status, us.Resource.Generation, ctrlStatus.ConditionDestinationServiceNotFound(serviceRef)) + continue + } else { + updateStatusCondition(statuses, upstreamsRef, dest.ExplicitDestinationsID, + us.Resource.Status, us.Resource.Generation, ctrlStatus.ConditionDestinationServiceFound(serviceRef)) + } + + u.ServiceEndpoints = se + + // Check if this endpoints is mesh-enabled. If not, remove it from cache and return an error. + if !IsMeshEnabled(se.Endpoints.Endpoints[0].Ports) { + // Add invalid status but don't remove from cache. If this state changes, + // we want to be able to detect this change. + updateStatusCondition(statuses, upstreamsRef, dest.ExplicitDestinationsID, + us.Resource.Status, us.Resource.Generation, ctrlStatus.ConditionNonMeshDestination(serviceRef)) + + // This error should not cause the execution to stop, as we want to make sure that this non-mesh destination + // gets removed from the proxy state. + continue + } else { + // If everything was successful, add an empty condition so that we can remove any existing statuses. + updateStatusCondition(statuses, upstreamsRef, dest.ExplicitDestinationsID, + us.Resource.Status, us.Resource.Generation, ctrlStatus.ConditionMeshDestination(serviceRef)) + } + + // Gather all identities. + if se != nil { + var identities []*pbresource.Reference + for _, ep := range se.Endpoints.Endpoints { + identities = append(identities, &pbresource.Reference{ + Name: ep.Identity, + Tenancy: se.Resource.Id.Tenancy, + }) + } + u.Identities = identities + } + + destinations = append(destinations, u) + } + + return destinations, statuses, nil +} + +// IsMeshEnabled returns true if apiWorkload or service endpoints port +// contain a port with the "mesh" protocol. 
+func IsMeshEnabled(ports map[string]*pbcatalog.WorkloadPort) bool { + for _, port := range ports { + if port.Protocol == pbcatalog.Protocol_PROTOCOL_MESH { + return true + } + } + return false +} + +func findDestination(ref *pbresource.Reference, port string, destinations *pbmesh.Upstreams) *pbmesh.Upstream { + for _, destination := range destinations.Upstreams { + if resource.EqualReference(ref, destination.DestinationRef) && + port == destination.DestinationPort { + return destination + } + } + return nil +} + +func updateStatusCondition( + statuses map[string]*intermediateTypes.Status, + key string, + id *pbresource.ID, + oldStatus map[string]*pbresource.Status, + generation string, + condition *pbresource.Condition) { + if _, ok := statuses[key]; ok { + statuses[key].Conditions = append(statuses[key].Conditions, condition) + } else { + statuses[key] = &intermediateTypes.Status{ + ID: id, + Generation: generation, + Conditions: []*pbresource.Condition{condition}, + OldStatus: oldStatus, + } + } +} diff --git a/internal/mesh/internal/controllers/sidecar-proxy/fetcher/data_fetcher_test.go b/internal/mesh/internal/controllers/sidecar-proxy/fetcher/data_fetcher_test.go new file mode 100644 index 00000000000..17eaf1ffa83 --- /dev/null +++ b/internal/mesh/internal/controllers/sidecar-proxy/fetcher/data_fetcher_test.go @@ -0,0 +1,561 @@ +package fetcher + +import ( + "context" + "testing" + + svctest "github.com/hashicorp/consul/agent/grpc-external/services/resource/testing" + "github.com/hashicorp/consul/internal/catalog" + "github.com/hashicorp/consul/internal/controller" + "github.com/hashicorp/consul/internal/mesh/internal/controllers/sidecar-proxy/cache" + meshStatus "github.com/hashicorp/consul/internal/mesh/internal/controllers/sidecar-proxy/status" + "github.com/hashicorp/consul/internal/mesh/internal/types" + "github.com/hashicorp/consul/internal/mesh/internal/types/intermediate" + "github.com/hashicorp/consul/internal/resource" + 
"github.com/hashicorp/consul/internal/resource/resourcetest" + pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v1alpha1" + pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v1alpha1" + "github.com/hashicorp/consul/proto-public/pbresource" + "github.com/hashicorp/consul/proto/private/prototest" + "github.com/hashicorp/consul/sdk/testutil" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +func TestIsMeshEnabled(t *testing.T) { + cases := map[string]struct { + ports map[string]*pbcatalog.WorkloadPort + exp bool + }{ + "nil ports": { + ports: nil, + exp: false, + }, + "empty ports": { + ports: make(map[string]*pbcatalog.WorkloadPort), + exp: false, + }, + "no mesh ports": { + ports: map[string]*pbcatalog.WorkloadPort{ + "p1": {Port: 1000, Protocol: pbcatalog.Protocol_PROTOCOL_HTTP}, + "p2": {Port: 2000, Protocol: pbcatalog.Protocol_PROTOCOL_TCP}, + }, + exp: false, + }, + "one mesh port": { + ports: map[string]*pbcatalog.WorkloadPort{ + "p1": {Port: 1000, Protocol: pbcatalog.Protocol_PROTOCOL_HTTP}, + "p2": {Port: 2000, Protocol: pbcatalog.Protocol_PROTOCOL_TCP}, + "p3": {Port: 3000, Protocol: pbcatalog.Protocol_PROTOCOL_MESH}, + }, + exp: true, + }, + "multiple mesh ports": { + ports: map[string]*pbcatalog.WorkloadPort{ + "p1": {Port: 1000, Protocol: pbcatalog.Protocol_PROTOCOL_HTTP}, + "p2": {Port: 2000, Protocol: pbcatalog.Protocol_PROTOCOL_TCP}, + "p3": {Port: 3000, Protocol: pbcatalog.Protocol_PROTOCOL_MESH}, + "p4": {Port: 4000, Protocol: pbcatalog.Protocol_PROTOCOL_MESH}, + }, + exp: true, + }, + } + + for name, c := range cases { + t.Run(name, func(t *testing.T) { + require.Equal(t, c.exp, IsMeshEnabled(c.ports)) + }) + } +} + +type dataFetcherSuite struct { + suite.Suite + + ctx context.Context + client pbresource.ResourceServiceClient + rt controller.Runtime + + api1Service *pbresource.Resource + api2Service *pbresource.Resource + 
api1ServiceEndpoints *pbresource.Resource + api1ServiceEndpointsData *pbcatalog.ServiceEndpoints + api2ServiceEndpoints *pbresource.Resource + api2ServiceEndpointsData *pbcatalog.ServiceEndpoints + webDestinations *pbresource.Resource + webDestinationsData *pbmesh.Upstreams + webProxy *pbresource.Resource + webWorkload *pbresource.Resource +} + +func (suite *dataFetcherSuite) SetupTest() { + suite.ctx = testutil.TestContext(suite.T()) + suite.client = svctest.RunResourceService(suite.T(), types.Register, catalog.RegisterTypes) + suite.rt = controller.Runtime{ + Client: suite.client, + Logger: testutil.Logger(suite.T()), + } + + suite.api1Service = resourcetest.Resource(catalog.ServiceType, "api-1"). + WithData(suite.T(), &pbcatalog.Service{}). + Write(suite.T(), suite.client) + + suite.api1ServiceEndpointsData = &pbcatalog.ServiceEndpoints{ + Endpoints: []*pbcatalog.Endpoint{ + { + Addresses: []*pbcatalog.WorkloadAddress{{Host: "10.0.0.1"}}, + Ports: map[string]*pbcatalog.WorkloadPort{ + "tcp": {Port: 8080, Protocol: pbcatalog.Protocol_PROTOCOL_TCP}, + "mesh": {Port: 8080, Protocol: pbcatalog.Protocol_PROTOCOL_MESH}, + }, + Identity: "api-1-identity", + }, + }, + } + suite.api1ServiceEndpoints = resourcetest.Resource(catalog.ServiceEndpointsType, "api-1"). + WithData(suite.T(), suite.api1ServiceEndpointsData).Write(suite.T(), suite.client) + + suite.api2Service = resourcetest.Resource(catalog.ServiceType, "api-2"). + WithData(suite.T(), &pbcatalog.Service{}). 
+ Write(suite.T(), suite.client) + + suite.api2ServiceEndpointsData = &pbcatalog.ServiceEndpoints{ + Endpoints: []*pbcatalog.Endpoint{ + { + Addresses: []*pbcatalog.WorkloadAddress{{Host: "10.0.0.2"}}, + Ports: map[string]*pbcatalog.WorkloadPort{ + "tcp1": {Port: 9080, Protocol: pbcatalog.Protocol_PROTOCOL_TCP}, + "tcp2": {Port: 9081, Protocol: pbcatalog.Protocol_PROTOCOL_TCP}, + "mesh": {Port: 20000, Protocol: pbcatalog.Protocol_PROTOCOL_MESH}, + }, + Identity: "api-2-identity", + }, + }, + } + suite.api2ServiceEndpoints = resourcetest.Resource(catalog.ServiceEndpointsType, "api-2"). + WithData(suite.T(), suite.api2ServiceEndpointsData).Write(suite.T(), suite.client) + + suite.webDestinationsData = &pbmesh.Upstreams{ + Upstreams: []*pbmesh.Upstream{ + { + DestinationRef: resource.Reference(suite.api1Service.Id, ""), + DestinationPort: "tcp", + }, + { + DestinationRef: resource.Reference(suite.api2Service.Id, ""), + DestinationPort: "tcp1", + }, + { + DestinationRef: resource.Reference(suite.api2Service.Id, ""), + DestinationPort: "tcp2", + }, + }, + } + + suite.webDestinations = resourcetest.Resource(types.UpstreamsType, "web-destinations"). + WithData(suite.T(), suite.webDestinationsData). + Write(suite.T(), suite.client) + + suite.webProxy = resourcetest.Resource(types.ProxyStateTemplateType, "web-abc"). + WithData(suite.T(), &pbmesh.ProxyStateTemplate{}). + Write(suite.T(), suite.client) + + suite.webWorkload = resourcetest.Resource(catalog.WorkloadType, "web-abc"). + WithData(suite.T(), &pbcatalog.Workload{ + Addresses: []*pbcatalog.WorkloadAddress{{Host: "10.0.0.2"}}, + Ports: map[string]*pbcatalog.WorkloadPort{"tcp": {Port: 8081, Protocol: pbcatalog.Protocol_PROTOCOL_TCP}}, + }). + Write(suite.T(), suite.client) +} + +func (suite *dataFetcherSuite) TestFetcher_FetchWorkload_WorkloadNotFound() { + // Test that when workload is not found, we remove it from cache. 
+ + proxyID := resourcetest.Resource(types.ProxyStateTemplateType, "service-workload-abc").ID() + + // Create cache and pre-populate it. + c := cache.New() + dest1 := &intermediate.CombinedDestinationRef{ + ServiceRef: resourcetest.Resource(catalog.ServiceType, "test-service-1").ReferenceNoSection(), + Port: "tcp", + ExplicitDestinationsID: resourcetest.Resource(types.UpstreamsType, "test-servicedestinations-1").ID(), + SourceProxies: map[string]*pbresource.ID{ + cache.KeyFromID(proxyID): proxyID, + }, + } + dest2 := &intermediate.CombinedDestinationRef{ + ServiceRef: resourcetest.Resource(catalog.ServiceType, "test-service-2").ReferenceNoSection(), + Port: "tcp", + ExplicitDestinationsID: resourcetest.Resource(types.UpstreamsType, "test-servicedestinations-2").ID(), + SourceProxies: map[string]*pbresource.ID{ + cache.KeyFromID(proxyID): proxyID, + }, + } + c.Write(dest1) + c.Write(dest2) + + f := Fetcher{Cache: c, Client: suite.client} + _, err := f.FetchWorkload(context.Background(), proxyID) + require.NoError(suite.T(), err) + + // Check that cache is updated to remove proxy id. + require.Nil(suite.T(), c.DestinationsBySourceProxy(proxyID)) +} + +func (suite *dataFetcherSuite) TestFetcher_NotFound() { + // This test checks that we ignore not found errors for various types we need to fetch. 
+ + f := Fetcher{ + Client: suite.client, + } + + cases := map[string]struct { + typ *pbresource.Type + fetchFunc func(id *pbresource.ID) error + }{ + "proxy state template": { + typ: types.ProxyStateTemplateType, + fetchFunc: func(id *pbresource.ID) error { + _, err := f.FetchProxyStateTemplate(context.Background(), id) + return err + }, + }, + "service endpoints": { + typ: catalog.ServiceEndpointsType, + fetchFunc: func(id *pbresource.ID) error { + _, err := f.FetchServiceEndpoints(context.Background(), id) + return err + }, + }, + "destinations": { + typ: types.UpstreamsType, + fetchFunc: func(id *pbresource.ID) error { + _, err := f.FetchDestinations(context.Background(), id) + return err + }, + }, + } + + for name, c := range cases { + suite.T().Run(name, func(t *testing.T) { + err := c.fetchFunc(resourcetest.Resource(c.typ, "not-found").ID()) + require.NoError(t, err) + }) + } +} + +func (suite *dataFetcherSuite) TestFetcher_FetchErrors() { + f := Fetcher{ + Client: suite.client, + } + + cases := map[string]struct { + name string + fetchFunc func(id *pbresource.ID) error + }{ + "workload": { + name: "web-abc", + fetchFunc: func(id *pbresource.ID) error { + _, err := f.FetchWorkload(context.Background(), id) + return err + }, + }, + "proxy state template": { + name: "web-abc", + fetchFunc: func(id *pbresource.ID) error { + _, err := f.FetchProxyStateTemplate(context.Background(), id) + return err + }, + }, + "service endpoints": { + name: "api-1", + fetchFunc: func(id *pbresource.ID) error { + _, err := f.FetchServiceEndpoints(context.Background(), id) + return err + }, + }, + "destinations": { + name: "web-destinations", + fetchFunc: func(id *pbresource.ID) error { + _, err := f.FetchDestinations(context.Background(), id) + return err + }, + }, + } + + for name, c := range cases { + suite.T().Run(name+"-read", func(t *testing.T) { + badType := &pbresource.Type{ + Group: "not", + Kind: "found", + GroupVersion: "vfake", + } + err := 
c.fetchFunc(resourcetest.Resource(badType, c.name).ID()) + require.Error(t, err) + require.Equal(t, codes.InvalidArgument, status.Code(err)) + }) + + suite.T().Run(name+"-unmarshal", func(t *testing.T) { + // Create a dummy health checks type as it won't be any of the types mesh controller cares about + resourcetest.Resource(catalog.HealthChecksType, c.name). + WithData(suite.T(), &pbcatalog.HealthChecks{ + Workloads: &pbcatalog.WorkloadSelector{Names: []string{"web-abc"}}, + }). + Write(suite.T(), suite.client) + + err := c.fetchFunc(resourcetest.Resource(catalog.HealthChecksType, c.name).ID()) + require.Error(t, err) + var parseErr resource.ErrDataParse + require.ErrorAs(t, err, &parseErr) + }) + } +} + +func (suite *dataFetcherSuite) TestFetcher_FetchDestinationsData() { + destination1 := &intermediate.CombinedDestinationRef{ + ServiceRef: resource.Reference(suite.api1Service.Id, ""), + Port: "tcp", + ExplicitDestinationsID: suite.webDestinations.Id, + SourceProxies: map[string]*pbresource.ID{ + cache.KeyFromID(suite.webProxy.Id): suite.webProxy.Id, + }, + } + destination2 := &intermediate.CombinedDestinationRef{ + ServiceRef: resource.Reference(suite.api2Service.Id, ""), + Port: "tcp1", + ExplicitDestinationsID: suite.webDestinations.Id, + SourceProxies: map[string]*pbresource.ID{ + cache.KeyFromID(suite.webProxy.Id): suite.webProxy.Id, + }, + } + destination3 := &intermediate.CombinedDestinationRef{ + ServiceRef: resource.Reference(suite.api2Service.Id, ""), + Port: "tcp2", + ExplicitDestinationsID: suite.webDestinations.Id, + SourceProxies: map[string]*pbresource.ID{ + cache.KeyFromID(suite.webProxy.Id): suite.webProxy.Id, + }, + } + + c := cache.New() + c.Write(destination1) + c.Write(destination2) + c.Write(destination3) + + f := Fetcher{ + Cache: c, + Client: suite.client, + } + + suite.T().Run("destinations not found", func(t *testing.T) { + destinationRefNoDestinations := &intermediate.CombinedDestinationRef{ + ServiceRef: 
resource.Reference(suite.api1Service.Id, ""), + Port: "tcp", + ExplicitDestinationsID: resourcetest.Resource(types.UpstreamsType, "not-found").ID(), + SourceProxies: map[string]*pbresource.ID{ + cache.KeyFromID(suite.webProxy.Id): suite.webProxy.Id, + }, + } + c.Write(destinationRefNoDestinations) + + destinationRefs := []*intermediate.CombinedDestinationRef{destinationRefNoDestinations} + destinations, _, err := f.FetchDestinationsData(suite.ctx, destinationRefs) + require.NoError(t, err) + require.Nil(t, destinations) + require.Nil(t, c.ReadDestination(destinationRefNoDestinations.ServiceRef, destinationRefNoDestinations.Port)) + }) + + suite.T().Run("service endpoints not found", func(t *testing.T) { + notFoundServiceRef := resourcetest.Resource(catalog.ServiceType, "not-found").ReferenceNoSection() + destinationNoServiceEndpoints := &intermediate.CombinedDestinationRef{ + ServiceRef: notFoundServiceRef, + Port: "tcp", + ExplicitDestinationsID: suite.webDestinations.Id, + SourceProxies: map[string]*pbresource.ID{ + cache.KeyFromID(suite.webProxy.Id): suite.webProxy.Id, + }, + } + c.Write(destinationNoServiceEndpoints) + + destinationRefs := []*intermediate.CombinedDestinationRef{destinationNoServiceEndpoints} + destinations, statuses, err := f.FetchDestinationsData(suite.ctx, destinationRefs) + require.NoError(t, err) + require.Nil(t, destinations) + + destinationRef := cache.KeyFromID(destinationNoServiceEndpoints.ExplicitDestinationsID) + serviceRef := cache.KeyFromRefAndPort(destinationNoServiceEndpoints.ServiceRef, destinationNoServiceEndpoints.Port) + + require.Len(t, statuses[destinationRef].Conditions, 1) + require.Equal(t, statuses[destinationRef].Conditions[0], + meshStatus.ConditionDestinationServiceNotFound(serviceRef)) + + require.NotNil(t, c.ReadDestination(destinationNoServiceEndpoints.ServiceRef, destinationNoServiceEndpoints.Port)) + }) + + suite.T().Run("service endpoints not on mesh", func(t *testing.T) { + apiNonMeshServiceEndpointsData := 
&pbcatalog.ServiceEndpoints{ + Endpoints: []*pbcatalog.Endpoint{ + { + Addresses: []*pbcatalog.WorkloadAddress{{Host: "10.0.0.1"}}, + Ports: map[string]*pbcatalog.WorkloadPort{ + "tcp": {Port: 8080, Protocol: pbcatalog.Protocol_PROTOCOL_TCP}, + }, + Identity: "api-1-identity", + }, + }, + } + apiNonMeshServiceEndpoints := resourcetest.Resource(catalog.ServiceEndpointsType, "api-1"). + WithData(suite.T(), apiNonMeshServiceEndpointsData).Write(suite.T(), suite.client) + destinationNonMeshServiceEndpoints := &intermediate.CombinedDestinationRef{ + ServiceRef: resource.Reference(apiNonMeshServiceEndpoints.Owner, ""), + Port: "tcp", + ExplicitDestinationsID: suite.webDestinations.Id, + SourceProxies: map[string]*pbresource.ID{ + cache.KeyFromID(suite.webProxy.Id): suite.webProxy.Id, + }, + } + c.Write(destinationNonMeshServiceEndpoints) + + destinationRefs := []*intermediate.CombinedDestinationRef{destinationNonMeshServiceEndpoints} + destinations, statuses, err := f.FetchDestinationsData(suite.ctx, destinationRefs) + require.NoError(t, err) + require.Nil(t, destinations) + + destinationRef := cache.KeyFromID(destinationNonMeshServiceEndpoints.ExplicitDestinationsID) + serviceRef := cache.KeyFromRefAndPort(destinationNonMeshServiceEndpoints.ServiceRef, destinationNonMeshServiceEndpoints.Port) + + require.Len(t, statuses[destinationRef].Conditions, 2) + prototest.AssertElementsMatch(t, statuses[destinationRef].Conditions, + []*pbresource.Condition{ + meshStatus.ConditionDestinationServiceFound(serviceRef), + meshStatus.ConditionNonMeshDestination(serviceRef), + }) + + require.NotNil(t, c.ReadDestination(destinationNonMeshServiceEndpoints.ServiceRef, destinationNonMeshServiceEndpoints.Port)) + }) + + suite.T().Run("invalid destinations", func(t *testing.T) { + // Update api1 to no longer be on the mesh. + suite.api1ServiceEndpoints = resourcetest.Resource(catalog.ServiceEndpointsType, "api-1"). 
+ WithData(suite.T(), &pbcatalog.ServiceEndpoints{ + Endpoints: []*pbcatalog.Endpoint{ + { + Addresses: []*pbcatalog.WorkloadAddress{{Host: "10.0.0.1"}}, + Ports: map[string]*pbcatalog.WorkloadPort{ + "tcp": {Port: 8080, Protocol: pbcatalog.Protocol_PROTOCOL_TCP}, + }, + Identity: "api-1-identity", + }, + }, + }).Write(suite.T(), suite.client) + + destinationRefs := []*intermediate.CombinedDestinationRef{destination1} + + destinations, statuses, err := f.FetchDestinationsData(suite.ctx, destinationRefs) + serviceRef := cache.KeyFromRefAndPort(destination1.ServiceRef, destination1.Port) + destinationRef := cache.KeyFromID(destination1.ExplicitDestinationsID) + expectedStatus := &intermediate.Status{ + ID: suite.webDestinations.Id, + Generation: suite.webDestinations.Generation, + Conditions: []*pbresource.Condition{ + meshStatus.ConditionDestinationServiceFound(serviceRef), + meshStatus.ConditionNonMeshDestination(serviceRef), + }, + } + + require.NoError(t, err) + + // Check that the status is generated correctly. + prototest.AssertDeepEqual(t, expectedStatus, statuses[destinationRef]) + + // Check that we didn't return any destinations. + require.Nil(t, destinations) + + // Check that destination service is still in cache because it's still referenced from the pbmesh.Upstreams + // resource. + require.NotNil(t, c.ReadDestination(destination1.ServiceRef, destination1.Port)) + + // Update the endpoints to be mesh enabled again and check that the status is now valid. + suite.api1ServiceEndpoints = resourcetest.Resource(catalog.ServiceEndpointsType, "api-1"). 
+ WithData(suite.T(), suite.api1ServiceEndpointsData).Write(suite.T(), suite.client) + expectedStatus = &intermediate.Status{ + ID: suite.webDestinations.Id, + Generation: suite.webDestinations.Generation, + Conditions: []*pbresource.Condition{ + meshStatus.ConditionDestinationServiceFound(serviceRef), + meshStatus.ConditionMeshDestination(serviceRef), + }, + } + + _, statuses, err = f.FetchDestinationsData(suite.ctx, destinationRefs) + require.NoError(t, err) + prototest.AssertDeepEqual(t, expectedStatus, statuses[destinationRef]) + }) + + suite.T().Run("happy path", func(t *testing.T) { + destinationRefs := []*intermediate.CombinedDestinationRef{destination1, destination2, destination3} + expectedDestinations := []*intermediate.Destination{ + { + Explicit: suite.webDestinationsData.Upstreams[0], + ServiceEndpoints: &intermediate.ServiceEndpoints{ + Resource: suite.api1ServiceEndpoints, + Endpoints: suite.api1ServiceEndpointsData, + }, + Identities: []*pbresource.Reference{ + { + Name: "api-1-identity", + Tenancy: suite.api1Service.Id.Tenancy, + }, + }, + }, + { + Explicit: suite.webDestinationsData.Upstreams[1], + ServiceEndpoints: &intermediate.ServiceEndpoints{ + Resource: suite.api2ServiceEndpoints, + Endpoints: suite.api2ServiceEndpointsData, + }, + Identities: []*pbresource.Reference{ + { + Name: "api-2-identity", + Tenancy: suite.api2Service.Id.Tenancy, + }, + }, + }, + { + Explicit: suite.webDestinationsData.Upstreams[2], + ServiceEndpoints: &intermediate.ServiceEndpoints{ + Resource: suite.api2ServiceEndpoints, + Endpoints: suite.api2ServiceEndpointsData, + }, + Identities: []*pbresource.Reference{ + { + Name: "api-2-identity", + Tenancy: suite.api2Service.Id.Tenancy, + }, + }, + }, + } + var expectedConditions []*pbresource.Condition + for _, d := range destinationRefs { + ref := cache.KeyFromRefAndPort(d.ServiceRef, d.Port) + expectedConditions = append(expectedConditions, + meshStatus.ConditionDestinationServiceFound(ref), + 
meshStatus.ConditionMeshDestination(ref)) + } + + actualDestinations, statuses, err := f.FetchDestinationsData(suite.ctx, destinationRefs) + require.NoError(t, err) + + // Check that all statuses have "happy" conditions. + dref := cache.KeyFromID(destination1.ExplicitDestinationsID) + prototest.AssertElementsMatch(t, expectedConditions, statuses[dref].Conditions) + + // Check that we've computed expanded destinations correctly. + prototest.AssertElementsMatch(t, expectedDestinations, actualDestinations) + }) +} + +func TestDataFetcher(t *testing.T) { + suite.Run(t, new(dataFetcherSuite)) +} diff --git a/internal/mesh/internal/controllers/sidecar-proxy/mapper/destinations_mapper.go b/internal/mesh/internal/controllers/sidecar-proxy/mapper/destinations_mapper.go new file mode 100644 index 00000000000..420725134a6 --- /dev/null +++ b/internal/mesh/internal/controllers/sidecar-proxy/mapper/destinations_mapper.go @@ -0,0 +1,69 @@ +package mapper + +import ( + "context" + + "github.com/hashicorp/consul/internal/catalog" + "github.com/hashicorp/consul/internal/controller" + "github.com/hashicorp/consul/internal/mesh/internal/controllers/sidecar-proxy/cache" + "github.com/hashicorp/consul/internal/mesh/internal/types" + "github.com/hashicorp/consul/internal/mesh/internal/types/intermediate" + "github.com/hashicorp/consul/internal/resource" + pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v1alpha1" + "github.com/hashicorp/consul/proto-public/pbresource" +) + +func (m *Mapper) MapDestinationsToProxyStateTemplate(ctx context.Context, rt controller.Runtime, res *pbresource.Resource) ([]controller.Request, error) { + var destinations pbmesh.Upstreams + err := res.Data.UnmarshalTo(&destinations) + if err != nil { + return nil, err + } + + // Look up workloads for this destinations. 
+ sourceProxyIDs := make(map[string]*pbresource.ID) + var result []controller.Request + for _, prefix := range destinations.Workloads.Prefixes { + resp, err := rt.Client.List(ctx, &pbresource.ListRequest{ + Type: catalog.WorkloadType, + Tenancy: res.Id.Tenancy, + NamePrefix: prefix, + }) + if err != nil { + return nil, err + } + for _, r := range resp.Resources { + proxyID := resource.ReplaceType(types.ProxyStateTemplateType, r.Id) + sourceProxyIDs[cache.KeyFromID(proxyID)] = proxyID + result = append(result, controller.Request{ + ID: proxyID, + }) + } + } + + for _, name := range destinations.Workloads.Names { + id := &pbresource.ID{ + Name: name, + Tenancy: res.Id.Tenancy, + Type: catalog.WorkloadType, + } + proxyID := resource.ReplaceType(types.ProxyStateTemplateType, id) + sourceProxyIDs[cache.KeyFromID(proxyID)] = proxyID + result = append(result, controller.Request{ + ID: proxyID, + }) + } + + // Add this destination to cache. + for _, destination := range destinations.Upstreams { + destinationRef := &intermediate.CombinedDestinationRef{ + ServiceRef: destination.DestinationRef, + Port: destination.DestinationPort, + ExplicitDestinationsID: res.Id, + SourceProxies: sourceProxyIDs, + } + m.cache.Write(destinationRef) + } + + return result, nil +} diff --git a/internal/mesh/internal/controllers/sidecar-proxy/mapper/destinations_mapper_test.go b/internal/mesh/internal/controllers/sidecar-proxy/mapper/destinations_mapper_test.go new file mode 100644 index 00000000000..69186527bdc --- /dev/null +++ b/internal/mesh/internal/controllers/sidecar-proxy/mapper/destinations_mapper_test.go @@ -0,0 +1,99 @@ +package mapper + +import ( + "context" + "testing" + + svctest "github.com/hashicorp/consul/agent/grpc-external/services/resource/testing" + "github.com/hashicorp/consul/internal/catalog" + "github.com/hashicorp/consul/internal/controller" + "github.com/hashicorp/consul/internal/mesh/internal/controllers/sidecar-proxy/cache" + 
"github.com/hashicorp/consul/internal/mesh/internal/types" + "github.com/hashicorp/consul/internal/mesh/internal/types/intermediate" + "github.com/hashicorp/consul/internal/resource" + "github.com/hashicorp/consul/internal/resource/resourcetest" + pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v1alpha1" + pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v1alpha1" + "github.com/hashicorp/consul/proto-public/pbresource" + "github.com/hashicorp/consul/proto/private/prototest" + "github.com/stretchr/testify/require" +) + +func TestMapDestinationsToProxyStateTemplate(t *testing.T) { + client := svctest.RunResourceService(t, types.Register, catalog.RegisterTypes) + webWorkload1 := resourcetest.Resource(catalog.WorkloadType, "web-abc"). + WithData(t, &pbcatalog.Workload{ + Addresses: []*pbcatalog.WorkloadAddress{{Host: "10.0.0.1"}}, + Ports: map[string]*pbcatalog.WorkloadPort{"tcp": {Port: 8081, Protocol: pbcatalog.Protocol_PROTOCOL_TCP}}, + }). + Write(t, client) + webWorkload2 := resourcetest.Resource(catalog.WorkloadType, "web-def"). + WithData(t, &pbcatalog.Workload{ + Addresses: []*pbcatalog.WorkloadAddress{{Host: "10.0.0.2"}}, + Ports: map[string]*pbcatalog.WorkloadPort{"tcp": {Port: 8081, Protocol: pbcatalog.Protocol_PROTOCOL_TCP}}, + }). + Write(t, client) + webWorkload3 := resourcetest.Resource(catalog.WorkloadType, "non-prefix-web"). + WithData(t, &pbcatalog.Workload{ + Addresses: []*pbcatalog.WorkloadAddress{{Host: "10.0.0.3"}}, + Ports: map[string]*pbcatalog.WorkloadPort{"tcp": {Port: 8081, Protocol: pbcatalog.Protocol_PROTOCOL_TCP}}, + }). 
+ Write(t, client) + + webDestinationsData := &pbmesh.Upstreams{ + Workloads: &pbcatalog.WorkloadSelector{ + Names: []string{"non-prefix-web"}, + Prefixes: []string{"web"}, + }, + Upstreams: []*pbmesh.Upstream{ + { + DestinationRef: resourcetest.Resource(catalog.ServiceType, "api-1").ReferenceNoSection(), + DestinationPort: "tcp", + }, + { + DestinationRef: resourcetest.Resource(catalog.ServiceType, "api-2").ReferenceNoSection(), + DestinationPort: "tcp1", + }, + { + DestinationRef: resourcetest.Resource(catalog.ServiceType, "api-2").ReferenceNoSection(), + DestinationPort: "tcp2", + }, + }, + } + + webDestinations := resourcetest.Resource(types.UpstreamsType, "web-destinations"). + WithData(t, webDestinationsData). + Write(t, client) + + c := cache.New() + mapper := &Mapper{cache: c} + + expRequests := []controller.Request{ + {ID: resource.ReplaceType(types.ProxyStateTemplateType, webWorkload1.Id)}, + {ID: resource.ReplaceType(types.ProxyStateTemplateType, webWorkload2.Id)}, + {ID: resource.ReplaceType(types.ProxyStateTemplateType, webWorkload3.Id)}, + } + + requests, err := mapper.MapDestinationsToProxyStateTemplate(context.Background(), controller.Runtime{Client: client}, webDestinations) + require.NoError(t, err) + prototest.AssertElementsMatch(t, expRequests, requests) + + //var expDestinations []*intermediate.CombinedDestinationRef + proxy1ID := resourcetest.Resource(types.ProxyStateTemplateType, webWorkload1.Id.Name).ID() + proxy2ID := resourcetest.Resource(types.ProxyStateTemplateType, webWorkload2.Id.Name).ID() + proxy3ID := resourcetest.Resource(types.ProxyStateTemplateType, webWorkload3.Id.Name).ID() + for _, u := range webDestinationsData.Upstreams { + expDestination := &intermediate.CombinedDestinationRef{ + ServiceRef: u.DestinationRef, + Port: u.DestinationPort, + ExplicitDestinationsID: webDestinations.Id, + SourceProxies: map[string]*pbresource.ID{ + cache.KeyFromID(proxy1ID): proxy1ID, + cache.KeyFromID(proxy2ID): proxy2ID, + 
cache.KeyFromID(proxy3ID): proxy3ID, + }, + } + prototest.AssertDeepEqual(t, expDestination, c.ReadDestination(u.DestinationRef, u.DestinationPort)) + } + +} diff --git a/internal/mesh/internal/controllers/sidecar-proxy/mapper/service_endpoints_mapper.go b/internal/mesh/internal/controllers/sidecar-proxy/mapper/service_endpoints_mapper.go new file mode 100644 index 00000000000..52afab086ff --- /dev/null +++ b/internal/mesh/internal/controllers/sidecar-proxy/mapper/service_endpoints_mapper.go @@ -0,0 +1,76 @@ +package mapper + +import ( + "context" + + "github.com/hashicorp/consul/internal/catalog" + "github.com/hashicorp/consul/internal/controller" + "github.com/hashicorp/consul/internal/mesh/internal/controllers/sidecar-proxy/cache" + "github.com/hashicorp/consul/internal/mesh/internal/types" + "github.com/hashicorp/consul/internal/resource" + pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v1alpha1" + "github.com/hashicorp/consul/proto-public/pbresource" +) + +type Mapper struct { + cache *cache.Cache +} + +func New(c *cache.Cache) *Mapper { + return &Mapper{ + cache: c, + } +} + +// MapServiceEndpointsToProxyStateTemplate maps catalog.ServiceEndpoints objects to the IDs of +// ProxyStateTemplate. +// For a destination proxy, we only need to generate requests from workloads this "endpoints" points to +// so that we can re-generate proxy state for the sidecar proxy. +// If this service endpoints is a source for some proxies, we need to generate requests for those proxies as well. +// so we need to have a map from service endpoints to source proxy Ids. +func (m *Mapper) MapServiceEndpointsToProxyStateTemplate(_ context.Context, _ controller.Runtime, res *pbresource.Resource) ([]controller.Request, error) { + // This mapper needs to look up workload IDs from service endpoints and replace them with ProxyStateTemplate type. 
+ var serviceEndpoints pbcatalog.ServiceEndpoints + err := res.Data.UnmarshalTo(&serviceEndpoints) + if err != nil { + return nil, err + } + + var result []controller.Request + + for _, endpoint := range serviceEndpoints.Endpoints { + // Convert the reference to a workload to a ProxyStateTemplate ID. + // Because these resources are name and tenancy aligned, we only need to change the type. + + // Skip service endpoints without target refs. These resources would typically be created for + // services external to Consul, and we don't need to reconcile those as they don't have + // associated workloads. + if endpoint.TargetRef != nil { + result = append(result, controller.Request{ + ID: &pbresource.ID{ + Name: endpoint.TargetRef.Name, + Tenancy: endpoint.TargetRef.Tenancy, + Type: types.ProxyStateTemplateType, + }, + }) + } + } + + // Look up any source proxies for this service and generate updates. + serviceID := resource.ReplaceType(catalog.ServiceType, res.Id) + + if len(serviceEndpoints.Endpoints) > 0 { + // All port names in the endpoints object should be the same as filter out to ports that are selected + // by the service, and so it's sufficient to check just the first endpoint. 
+ for portName := range serviceEndpoints.Endpoints[0].Ports { + destination := m.cache.ReadDestination(resource.Reference(serviceID, ""), portName) + if destination != nil { + for _, id := range destination.SourceProxies { + result = append(result, controller.Request{ID: id}) + } + } + } + } + + return result, err +} diff --git a/internal/mesh/internal/controllers/sidecar-proxy/mapper/service_endpoints_mapper_test.go b/internal/mesh/internal/controllers/sidecar-proxy/mapper/service_endpoints_mapper_test.go new file mode 100644 index 00000000000..f5c73331a4b --- /dev/null +++ b/internal/mesh/internal/controllers/sidecar-proxy/mapper/service_endpoints_mapper_test.go @@ -0,0 +1,80 @@ +package mapper + +import ( + "context" + "testing" + + "github.com/hashicorp/consul/internal/catalog" + "github.com/hashicorp/consul/internal/controller" + "github.com/hashicorp/consul/internal/mesh/internal/controllers/sidecar-proxy/cache" + "github.com/hashicorp/consul/internal/mesh/internal/types" + "github.com/hashicorp/consul/internal/mesh/internal/types/intermediate" + "github.com/hashicorp/consul/internal/resource/resourcetest" + pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v1alpha1" + "github.com/hashicorp/consul/proto-public/pbresource" + "github.com/hashicorp/consul/proto/private/prototest" + "github.com/stretchr/testify/require" +) + +func TestMapServiceEndpointsToProxyStateTemplate(t *testing.T) { + workload1 := resourcetest.Resource(catalog.WorkloadType, "workload-1").Build() + workload2 := resourcetest.Resource(catalog.WorkloadType, "workload-2").Build() + serviceEndpoints := resourcetest.Resource(catalog.ServiceEndpointsType, "service"). 
+ WithData(t, &pbcatalog.ServiceEndpoints{ + Endpoints: []*pbcatalog.Endpoint{ + { + TargetRef: workload1.Id, + Ports: map[string]*pbcatalog.WorkloadPort{ + "tcp1": {Port: 8080}, + "tcp2": {Port: 8081}, + }, + }, + { + TargetRef: workload2.Id, + Ports: map[string]*pbcatalog.WorkloadPort{ + "tcp1": {Port: 8080}, + "tcp2": {Port: 8081}, + }, + }, + }, + }).Build() + proxyTmpl1ID := resourcetest.Resource(types.ProxyStateTemplateType, "workload-1").ID() + proxyTmpl2ID := resourcetest.Resource(types.ProxyStateTemplateType, "workload-2").ID() + + c := cache.New() + mapper := &Mapper{cache: c} + sourceProxy1 := resourcetest.Resource(types.ProxyStateTemplateType, "workload-3").ID() + sourceProxy2 := resourcetest.Resource(types.ProxyStateTemplateType, "workload-4").ID() + sourceProxy3 := resourcetest.Resource(types.ProxyStateTemplateType, "workload-5").ID() + destination1 := &intermediate.CombinedDestinationRef{ + ServiceRef: resourcetest.Resource(catalog.ServiceType, "service").ReferenceNoSection(), + Port: "tcp1", + SourceProxies: map[string]*pbresource.ID{ + cache.KeyFromID(sourceProxy1): sourceProxy1, + cache.KeyFromID(sourceProxy2): sourceProxy2, + }, + } + destination2 := &intermediate.CombinedDestinationRef{ + ServiceRef: resourcetest.Resource(catalog.ServiceType, "service").ReferenceNoSection(), + Port: "tcp2", + SourceProxies: map[string]*pbresource.ID{ + cache.KeyFromID(sourceProxy1): sourceProxy1, + cache.KeyFromID(sourceProxy3): sourceProxy3, + }, + } + c.Write(destination1) + c.Write(destination2) + + expRequests := []controller.Request{ + {ID: proxyTmpl1ID}, + {ID: proxyTmpl2ID}, + {ID: sourceProxy1}, + {ID: sourceProxy2}, + {ID: sourceProxy1}, + {ID: sourceProxy3}, + } + + requests, err := mapper.MapServiceEndpointsToProxyStateTemplate(context.Background(), controller.Runtime{}, serviceEndpoints) + require.NoError(t, err) + prototest.AssertElementsMatch(t, expRequests, requests) +} diff --git a/internal/mesh/internal/controllers/sidecar-proxy/status/status.go 
b/internal/mesh/internal/controllers/sidecar-proxy/status/status.go new file mode 100644 index 00000000000..0ba125fae63 --- /dev/null +++ b/internal/mesh/internal/controllers/sidecar-proxy/status/status.go @@ -0,0 +1,55 @@ +package status + +import ( + "fmt" + + "github.com/hashicorp/consul/proto-public/pbresource" +) + +const ( + StatusConditionMeshDestination = "MeshDestination" + + StatusReasonNonMeshDestination = "MeshPortProtocolNotFound" + StatusReasonMeshDestination = "MeshPortProtocolFound" + + StatusConditionDestinationExists = "DestinationExists" + + StatusReasonDestinationServiceNotFound = "ServiceNotFound" + StatusReasonDestinationServiceFound = "ServiceFound" +) + +func ConditionNonMeshDestination(serviceRef string) *pbresource.Condition { + return &pbresource.Condition{ + Type: StatusConditionMeshDestination, + State: pbresource.Condition_STATE_FALSE, + Reason: StatusReasonNonMeshDestination, + Message: fmt.Sprintf("service %q cannot be referenced as a Destination because it's not mesh-enabled.", serviceRef), + } +} + +func ConditionMeshDestination(serviceRef string) *pbresource.Condition { + return &pbresource.Condition{ + Type: StatusConditionMeshDestination, + State: pbresource.Condition_STATE_TRUE, + Reason: StatusReasonMeshDestination, + Message: fmt.Sprintf("service %q is on the mesh.", serviceRef), + } +} + +func ConditionDestinationServiceNotFound(serviceRef string) *pbresource.Condition { + return &pbresource.Condition{ + Type: StatusConditionDestinationExists, + State: pbresource.Condition_STATE_FALSE, + Reason: StatusReasonDestinationServiceNotFound, + Message: fmt.Sprintf("service %q does not exist.", serviceRef), + } +} + +func ConditionDestinationServiceFound(serviceRef string) *pbresource.Condition { + return &pbresource.Condition{ + Type: StatusConditionDestinationExists, + State: pbresource.Condition_STATE_TRUE, + Reason: StatusReasonDestinationServiceFound, + Message: fmt.Sprintf("service %q exists.", serviceRef), + } +} diff --git 
a/internal/mesh/internal/types/intermediate/types.go b/internal/mesh/internal/types/intermediate/types.go new file mode 100644 index 00000000000..533017bd36c --- /dev/null +++ b/internal/mesh/internal/types/intermediate/types.go @@ -0,0 +1,56 @@ +package intermediate + +import ( + pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v1alpha1" + pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v1alpha1" + "github.com/hashicorp/consul/proto-public/pbresource" +) + +// todo should it be destination? +// the problem is that it's compiled from different source objects +type CombinedDestinationRef struct { + // ServiceRef is the reference to the destination service for this upstream + ServiceRef *pbresource.Reference + + Port string + + // sourceProxies are the IDs of source proxy state template resources. + SourceProxies map[string]*pbresource.ID + + // explicitUpstreamID is the id of an explicit upstreams resource. For implicit upstreams, + // this should be nil. + ExplicitDestinationsID *pbresource.ID +} + +type ServiceEndpoints struct { + Resource *pbresource.Resource + Endpoints *pbcatalog.ServiceEndpoints +} + +type Destinations struct { + Resource *pbresource.Resource + Destinations *pbmesh.Upstreams +} + +type Workload struct { + Resource *pbresource.Resource + Workload *pbcatalog.Workload +} + +type ProxyStateTemplate struct { + Resource *pbresource.Resource + Tmpl *pbmesh.ProxyStateTemplate +} + +type Destination struct { + Explicit *pbmesh.Upstream + ServiceEndpoints *ServiceEndpoints + Identities []*pbresource.Reference +} + +type Status struct { + ID *pbresource.ID + Generation string + Conditions []*pbresource.Condition + OldStatus map[string]*pbresource.Status +} diff --git a/internal/mesh/internal/types/upstreams.go b/internal/mesh/internal/types/upstreams.go index 1ef73e181e0..238e18d2dea 100644 --- a/internal/mesh/internal/types/upstreams.go +++ b/internal/mesh/internal/types/upstreams.go @@ -10,7 +10,7 @@ import ( ) const ( - 
UpstreamsKind = "Upstreams" + UpstreamsKind = "Destinations" ) var ( diff --git a/internal/resource/reference.go b/internal/resource/reference.go index 47c2a0da2de..d5eafe292c8 100644 --- a/internal/resource/reference.go +++ b/internal/resource/reference.go @@ -37,3 +37,11 @@ var ( _ ReferenceOrID = (*pbresource.ID)(nil) _ ReferenceOrID = (*pbresource.Reference)(nil) ) + +func ReplaceType(typ *pbresource.Type, id *pbresource.ID) *pbresource.ID { + return &pbresource.ID{ + Type: typ, + Name: id.Name, + Tenancy: id.Tenancy, + } +} diff --git a/internal/resource/resourcetest/builder.go b/internal/resource/resourcetest/builder.go index 2e0f5991e28..d32e9651583 100644 --- a/internal/resource/resourcetest/builder.go +++ b/internal/resource/resourcetest/builder.go @@ -127,6 +127,10 @@ func (b *resourceBuilder) Reference(section string) *pbresource.Reference { return resource.Reference(b.ID(), section) } +func (b *resourceBuilder) ReferenceNoSection() *pbresource.Reference { + return resource.Reference(b.ID(), "") +} + func (b *resourceBuilder) Write(t T, client pbresource.ResourceServiceClient) *pbresource.Resource { t.Helper() diff --git a/proto-public/pbcatalog/v1alpha1/service_endpoints.pb.go b/proto-public/pbcatalog/v1alpha1/service_endpoints.pb.go index 903de0706bf..436d8d0c9f0 100644 --- a/proto-public/pbcatalog/v1alpha1/service_endpoints.pb.go +++ b/proto-public/pbcatalog/v1alpha1/service_endpoints.pb.go @@ -88,6 +88,8 @@ type Endpoint struct { Ports map[string]*WorkloadPort `protobuf:"bytes,3,rep,name=ports,proto3" json:"ports,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` // health_status is the aggregated health status of this endpoint. HealthStatus Health `protobuf:"varint,4,opt,name=health_status,json=healthStatus,proto3,enum=hashicorp.consul.catalog.v1alpha1.Health" json:"health_status,omitempty"` + // identity is the name of the workload identity for this endpoint. 
+ Identity string `protobuf:"bytes,5,opt,name=identity,proto3" json:"identity,omitempty"` } func (x *Endpoint) Reset() { @@ -150,6 +152,13 @@ func (x *Endpoint) GetHealthStatus() Health { return Health_HEALTH_ANY } +func (x *Endpoint) GetIdentity() string { + if x != nil { + return x.Identity + } + return "" +} + var File_pbcatalog_v1alpha1_service_endpoints_proto protoreflect.FileDescriptor var file_pbcatalog_v1alpha1_service_endpoints_proto_rawDesc = []byte{ @@ -169,7 +178,7 @@ var file_pbcatalog_v1alpha1_service_endpoints_proto_rawDesc = []byte{ 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x63, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, - 0x6e, 0x74, 0x52, 0x09, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x73, 0x22, 0xa3, 0x03, + 0x6e, 0x74, 0x52, 0x09, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x73, 0x22, 0xbf, 0x03, 0x0a, 0x08, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x3c, 0x0a, 0x0a, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x72, 0x65, 0x66, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, @@ -189,33 +198,35 @@ var file_pbcatalog_v1alpha1_service_endpoints_proto_rawDesc = []byte{ 0x29, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x63, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x52, 0x0c, 0x68, 0x65, 0x61, 0x6c, - 0x74, 0x68, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x1a, 0x69, 0x0a, 0x0a, 0x50, 0x6f, 0x72, 0x74, - 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x45, 0x0a, 0x05, 0x76, 0x61, 0x6c, 
0x75, - 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, - 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x63, 0x61, 0x74, 0x61, 0x6c, - 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x57, 0x6f, 0x72, 0x6b, - 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x6f, 0x72, 0x74, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, - 0x02, 0x38, 0x01, 0x42, 0xb2, 0x02, 0x0a, 0x25, 0x63, 0x6f, 0x6d, 0x2e, 0x68, 0x61, 0x73, 0x68, - 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x63, 0x61, 0x74, - 0x61, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x42, 0x15, 0x53, - 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x73, 0x50, - 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x4b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, - 0x6f, 0x6d, 0x2f, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2f, 0x63, 0x6f, 0x6e, - 0x73, 0x75, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2d, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, - 0x2f, 0x70, 0x62, 0x63, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, - 0x68, 0x61, 0x31, 0x3b, 0x63, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x76, 0x31, 0x61, 0x6c, 0x70, - 0x68, 0x61, 0x31, 0xa2, 0x02, 0x03, 0x48, 0x43, 0x43, 0xaa, 0x02, 0x21, 0x48, 0x61, 0x73, 0x68, - 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x43, 0x61, 0x74, - 0x61, 0x6c, 0x6f, 0x67, 0x2e, 0x56, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0xca, 0x02, 0x21, + 0x74, 0x68, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x69, 0x64, 0x65, 0x6e, + 0x74, 0x69, 0x74, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x69, 0x64, 0x65, 0x6e, + 0x74, 0x69, 0x74, 0x79, 0x1a, 0x69, 0x0a, 0x0a, 0x50, 0x6f, 0x72, 0x74, 0x73, 0x45, 0x6e, 0x74, + 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x03, 0x6b, 0x65, 0x79, 0x12, 
0x45, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, + 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x63, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x2e, 0x76, + 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, + 0x50, 0x6f, 0x72, 0x74, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, + 0xb2, 0x02, 0x0a, 0x25, 0x63, 0x6f, 0x6d, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, + 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x63, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, + 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x42, 0x15, 0x53, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, + 0x50, 0x01, 0x5a, 0x4b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x68, + 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2f, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2f, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2d, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x2f, 0x70, 0x62, 0x63, + 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x3b, + 0x63, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0xa2, + 0x02, 0x03, 0x48, 0x43, 0x43, 0xaa, 0x02, 0x21, 0x48, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, + 0x70, 0x2e, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x43, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, + 0x2e, 0x56, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0xca, 0x02, 0x21, 0x48, 0x61, 0x73, 0x68, + 0x69, 0x63, 0x6f, 0x72, 0x70, 0x5c, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x5c, 0x43, 0x61, 0x74, + 0x61, 0x6c, 0x6f, 0x67, 0x5c, 0x56, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0xe2, 0x02, 0x2d, 0x48, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x5c, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x5c, 0x43, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x5c, 0x56, 0x31, 0x61, 
0x6c, 0x70, 0x68, 0x61, - 0x31, 0xe2, 0x02, 0x2d, 0x48, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x5c, 0x43, 0x6f, - 0x6e, 0x73, 0x75, 0x6c, 0x5c, 0x43, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x5c, 0x56, 0x31, 0x61, - 0x6c, 0x70, 0x68, 0x61, 0x31, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, - 0x61, 0xea, 0x02, 0x24, 0x48, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x3a, 0x3a, 0x43, - 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x3a, 0x3a, 0x43, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x3a, 0x3a, - 0x56, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x31, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x24, + 0x48, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x3a, 0x3a, 0x43, 0x6f, 0x6e, 0x73, 0x75, + 0x6c, 0x3a, 0x3a, 0x43, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x3a, 0x3a, 0x56, 0x31, 0x61, 0x6c, + 0x70, 0x68, 0x61, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git a/proto-public/pbcatalog/v1alpha1/service_endpoints.proto b/proto-public/pbcatalog/v1alpha1/service_endpoints.proto index df3c70e0312..2b5a8740258 100644 --- a/proto-public/pbcatalog/v1alpha1/service_endpoints.proto +++ b/proto-public/pbcatalog/v1alpha1/service_endpoints.proto @@ -29,4 +29,7 @@ message Endpoint { // health_status is the aggregated health status of this endpoint. Health health_status = 4; + + // identity is the name of the workload identity for this endpoint. 
+ string identity = 5; } From d44c715c35dfda179f2c7354491357a03c85e821 Mon Sep 17 00:00:00 2001 From: Iryna Shustava Date: Thu, 3 Aug 2023 17:19:43 -0600 Subject: [PATCH 04/11] endpoints-controller: add workload identity to the service endpoints resource --- agent/connect/uri_service.go | 2 +- .../catalogtest/test_integration_v1alpha1.go | 41 +++++++++++++++++++ .../catalogtest/test_lifecycle_v1alpha1.go | 8 ++++ .../controllers/endpoints/controller.go | 1 + .../controllers/endpoints/controller_test.go | 5 +++ .../mesh/internal/types/intermediate/types.go | 5 +++ internal/resource/resourcetest/builder.go | 2 +- 7 files changed, 62 insertions(+), 2 deletions(-) diff --git a/agent/connect/uri_service.go b/agent/connect/uri_service.go index f02310e2623..833beb8d2d0 100644 --- a/agent/connect/uri_service.go +++ b/agent/connect/uri_service.go @@ -74,7 +74,7 @@ func (id SpiffeIDIdentity) URI() *url.URL { return &result } -// SpiffeIDFromIdentityRef creates the SIFFE ID from an identity. +// SpiffeIDFromIdentityRef creates the SPIFFE ID from an identity. 
func SpiffeIDFromIdentityRef(trustDomain string, ref *pbresource.Reference) string { return SpiffeIDIdentity{ Host: trustDomain, diff --git a/internal/catalog/catalogtest/test_integration_v1alpha1.go b/internal/catalog/catalogtest/test_integration_v1alpha1.go index fea86c15a5d..a499575c3e3 100644 --- a/internal/catalog/catalogtest/test_integration_v1alpha1.go +++ b/internal/catalog/catalogtest/test_integration_v1alpha1.go @@ -165,6 +165,7 @@ func expectedApiServiceEndpoints(t *testing.T, c *rtest.Client) *pbcatalog.Servi "mesh": {Port: 10000, Protocol: pbcatalog.Protocol_PROTOCOL_MESH}, }, HealthStatus: pbcatalog.Health_HEALTH_PASSING, + Identity: "api", }, // api-2 { @@ -179,6 +180,7 @@ func expectedApiServiceEndpoints(t *testing.T, c *rtest.Client) *pbcatalog.Servi "mesh": {Port: 10000, Protocol: pbcatalog.Protocol_PROTOCOL_MESH}, }, HealthStatus: pbcatalog.Health_HEALTH_WARNING, + Identity: "api", }, // api-3 { @@ -193,6 +195,7 @@ func expectedApiServiceEndpoints(t *testing.T, c *rtest.Client) *pbcatalog.Servi "mesh": {Port: 10000, Protocol: pbcatalog.Protocol_PROTOCOL_MESH}, }, HealthStatus: pbcatalog.Health_HEALTH_CRITICAL, + Identity: "api", }, // api-4 { @@ -207,6 +210,7 @@ func expectedApiServiceEndpoints(t *testing.T, c *rtest.Client) *pbcatalog.Servi "mesh": {Port: 10000, Protocol: pbcatalog.Protocol_PROTOCOL_MESH}, }, HealthStatus: pbcatalog.Health_HEALTH_MAINTENANCE, + Identity: "api", }, // api-5 { @@ -221,6 +225,7 @@ func expectedApiServiceEndpoints(t *testing.T, c *rtest.Client) *pbcatalog.Servi "mesh": {Port: 10000, Protocol: pbcatalog.Protocol_PROTOCOL_MESH}, }, HealthStatus: pbcatalog.Health_HEALTH_WARNING, + Identity: "api", }, // api-6 { @@ -235,6 +240,7 @@ func expectedApiServiceEndpoints(t *testing.T, c *rtest.Client) *pbcatalog.Servi "mesh": {Port: 10000, Protocol: pbcatalog.Protocol_PROTOCOL_MESH}, }, HealthStatus: pbcatalog.Health_HEALTH_WARNING, + Identity: "api", }, // api-7 { @@ -249,6 +255,7 @@ func expectedApiServiceEndpoints(t 
*testing.T, c *rtest.Client) *pbcatalog.Servi "mesh": {Port: 10000, Protocol: pbcatalog.Protocol_PROTOCOL_MESH}, }, HealthStatus: pbcatalog.Health_HEALTH_CRITICAL, + Identity: "api", }, // api-8 { @@ -263,6 +270,7 @@ func expectedApiServiceEndpoints(t *testing.T, c *rtest.Client) *pbcatalog.Servi "mesh": {Port: 10000, Protocol: pbcatalog.Protocol_PROTOCOL_MESH}, }, HealthStatus: pbcatalog.Health_HEALTH_MAINTENANCE, + Identity: "api", }, // api-9 { @@ -277,6 +285,7 @@ func expectedApiServiceEndpoints(t *testing.T, c *rtest.Client) *pbcatalog.Servi "mesh": {Port: 10000, Protocol: pbcatalog.Protocol_PROTOCOL_MESH}, }, HealthStatus: pbcatalog.Health_HEALTH_CRITICAL, + Identity: "api", }, // api-10 { @@ -291,6 +300,7 @@ func expectedApiServiceEndpoints(t *testing.T, c *rtest.Client) *pbcatalog.Servi "mesh": {Port: 10000, Protocol: pbcatalog.Protocol_PROTOCOL_MESH}, }, HealthStatus: pbcatalog.Health_HEALTH_CRITICAL, + Identity: "api", }, // api-11 { @@ -305,6 +315,7 @@ func expectedApiServiceEndpoints(t *testing.T, c *rtest.Client) *pbcatalog.Servi "mesh": {Port: 10000, Protocol: pbcatalog.Protocol_PROTOCOL_MESH}, }, HealthStatus: pbcatalog.Health_HEALTH_CRITICAL, + Identity: "api", }, // api-12 { @@ -319,6 +330,7 @@ func expectedApiServiceEndpoints(t *testing.T, c *rtest.Client) *pbcatalog.Servi "mesh": {Port: 10000, Protocol: pbcatalog.Protocol_PROTOCOL_MESH}, }, HealthStatus: pbcatalog.Health_HEALTH_MAINTENANCE, + Identity: "api", }, // api-13 { @@ -333,6 +345,7 @@ func expectedApiServiceEndpoints(t *testing.T, c *rtest.Client) *pbcatalog.Servi "mesh": {Port: 10000, Protocol: pbcatalog.Protocol_PROTOCOL_MESH}, }, HealthStatus: pbcatalog.Health_HEALTH_MAINTENANCE, + Identity: "api", }, // api-14 { @@ -347,6 +360,7 @@ func expectedApiServiceEndpoints(t *testing.T, c *rtest.Client) *pbcatalog.Servi "mesh": {Port: 10000, Protocol: pbcatalog.Protocol_PROTOCOL_MESH}, }, HealthStatus: pbcatalog.Health_HEALTH_MAINTENANCE, + Identity: "api", }, // api-15 { @@ -361,6 +375,7 @@ 
func expectedApiServiceEndpoints(t *testing.T, c *rtest.Client) *pbcatalog.Servi "mesh": {Port: 10000, Protocol: pbcatalog.Protocol_PROTOCOL_MESH}, }, HealthStatus: pbcatalog.Health_HEALTH_MAINTENANCE, + Identity: "api", }, // api-16 { @@ -375,6 +390,7 @@ func expectedApiServiceEndpoints(t *testing.T, c *rtest.Client) *pbcatalog.Servi "mesh": {Port: 10000, Protocol: pbcatalog.Protocol_PROTOCOL_MESH}, }, HealthStatus: pbcatalog.Health_HEALTH_MAINTENANCE, + Identity: "api", }, // api-17 { @@ -389,6 +405,7 @@ func expectedApiServiceEndpoints(t *testing.T, c *rtest.Client) *pbcatalog.Servi "mesh": {Port: 10000, Protocol: pbcatalog.Protocol_PROTOCOL_MESH}, }, HealthStatus: pbcatalog.Health_HEALTH_PASSING, + Identity: "api", }, // api-18 { @@ -403,6 +420,7 @@ func expectedApiServiceEndpoints(t *testing.T, c *rtest.Client) *pbcatalog.Servi "mesh": {Port: 10000, Protocol: pbcatalog.Protocol_PROTOCOL_MESH}, }, HealthStatus: pbcatalog.Health_HEALTH_WARNING, + Identity: "api", }, // api-19 { @@ -417,6 +435,7 @@ func expectedApiServiceEndpoints(t *testing.T, c *rtest.Client) *pbcatalog.Servi "mesh": {Port: 10000, Protocol: pbcatalog.Protocol_PROTOCOL_MESH}, }, HealthStatus: pbcatalog.Health_HEALTH_CRITICAL, + Identity: "api", }, // api-20 { @@ -431,6 +450,7 @@ func expectedApiServiceEndpoints(t *testing.T, c *rtest.Client) *pbcatalog.Servi "mesh": {Port: 10000, Protocol: pbcatalog.Protocol_PROTOCOL_MESH}, }, HealthStatus: pbcatalog.Health_HEALTH_MAINTENANCE, + Identity: "api", }, }, } @@ -449,6 +469,7 @@ func expectedHTTPApiServiceEndpoints(t *testing.T, c *rtest.Client) *pbcatalog.S "http": {Port: 8080, Protocol: pbcatalog.Protocol_PROTOCOL_HTTP}, }, HealthStatus: pbcatalog.Health_HEALTH_PASSING, + Identity: "api", }, // api-10 { @@ -460,6 +481,7 @@ func expectedHTTPApiServiceEndpoints(t *testing.T, c *rtest.Client) *pbcatalog.S "http": {Port: 8080, Protocol: pbcatalog.Protocol_PROTOCOL_HTTP}, }, HealthStatus: pbcatalog.Health_HEALTH_CRITICAL, + Identity: "api", }, // api-11 
{ @@ -471,6 +493,7 @@ func expectedHTTPApiServiceEndpoints(t *testing.T, c *rtest.Client) *pbcatalog.S "http": {Port: 8080, Protocol: pbcatalog.Protocol_PROTOCOL_HTTP}, }, HealthStatus: pbcatalog.Health_HEALTH_CRITICAL, + Identity: "api", }, // api-12 { @@ -482,6 +505,7 @@ func expectedHTTPApiServiceEndpoints(t *testing.T, c *rtest.Client) *pbcatalog.S "http": {Port: 8080, Protocol: pbcatalog.Protocol_PROTOCOL_HTTP}, }, HealthStatus: pbcatalog.Health_HEALTH_MAINTENANCE, + Identity: "api", }, // api-13 { @@ -493,6 +517,7 @@ func expectedHTTPApiServiceEndpoints(t *testing.T, c *rtest.Client) *pbcatalog.S "http": {Port: 8080, Protocol: pbcatalog.Protocol_PROTOCOL_HTTP}, }, HealthStatus: pbcatalog.Health_HEALTH_MAINTENANCE, + Identity: "api", }, // api-14 { @@ -504,6 +529,7 @@ func expectedHTTPApiServiceEndpoints(t *testing.T, c *rtest.Client) *pbcatalog.S "http": {Port: 8080, Protocol: pbcatalog.Protocol_PROTOCOL_HTTP}, }, HealthStatus: pbcatalog.Health_HEALTH_MAINTENANCE, + Identity: "api", }, // api-15 { @@ -515,6 +541,7 @@ func expectedHTTPApiServiceEndpoints(t *testing.T, c *rtest.Client) *pbcatalog.S "http": {Port: 8080, Protocol: pbcatalog.Protocol_PROTOCOL_HTTP}, }, HealthStatus: pbcatalog.Health_HEALTH_MAINTENANCE, + Identity: "api", }, // api-16 { @@ -526,6 +553,7 @@ func expectedHTTPApiServiceEndpoints(t *testing.T, c *rtest.Client) *pbcatalog.S "http": {Port: 8080, Protocol: pbcatalog.Protocol_PROTOCOL_HTTP}, }, HealthStatus: pbcatalog.Health_HEALTH_MAINTENANCE, + Identity: "api", }, // api-17 { @@ -537,6 +565,7 @@ func expectedHTTPApiServiceEndpoints(t *testing.T, c *rtest.Client) *pbcatalog.S "http": {Port: 8080, Protocol: pbcatalog.Protocol_PROTOCOL_HTTP}, }, HealthStatus: pbcatalog.Health_HEALTH_PASSING, + Identity: "api", }, // api-18 { @@ -548,6 +577,7 @@ func expectedHTTPApiServiceEndpoints(t *testing.T, c *rtest.Client) *pbcatalog.S "http": {Port: 8080, Protocol: pbcatalog.Protocol_PROTOCOL_HTTP}, }, HealthStatus: pbcatalog.Health_HEALTH_WARNING, + 
Identity: "api", }, // api-19 { @@ -559,6 +589,7 @@ func expectedHTTPApiServiceEndpoints(t *testing.T, c *rtest.Client) *pbcatalog.S "http": {Port: 8080, Protocol: pbcatalog.Protocol_PROTOCOL_HTTP}, }, HealthStatus: pbcatalog.Health_HEALTH_CRITICAL, + Identity: "api", }, }, } @@ -579,6 +610,7 @@ func expectedGRPCApiServiceEndpoints(t *testing.T, c *rtest.Client) *pbcatalog.S "mesh": {Port: 10000, Protocol: pbcatalog.Protocol_PROTOCOL_MESH}, }, HealthStatus: pbcatalog.Health_HEALTH_PASSING, + Identity: "api", }, // api-2 { @@ -592,6 +624,7 @@ func expectedGRPCApiServiceEndpoints(t *testing.T, c *rtest.Client) *pbcatalog.S "mesh": {Port: 10000, Protocol: pbcatalog.Protocol_PROTOCOL_MESH}, }, HealthStatus: pbcatalog.Health_HEALTH_WARNING, + Identity: "api", }, // api-3 { @@ -605,6 +638,7 @@ func expectedGRPCApiServiceEndpoints(t *testing.T, c *rtest.Client) *pbcatalog.S "mesh": {Port: 10000, Protocol: pbcatalog.Protocol_PROTOCOL_MESH}, }, HealthStatus: pbcatalog.Health_HEALTH_CRITICAL, + Identity: "api", }, // api-4 { @@ -618,6 +652,7 @@ func expectedGRPCApiServiceEndpoints(t *testing.T, c *rtest.Client) *pbcatalog.S "mesh": {Port: 10000, Protocol: pbcatalog.Protocol_PROTOCOL_MESH}, }, HealthStatus: pbcatalog.Health_HEALTH_MAINTENANCE, + Identity: "api", }, // api-5 { @@ -631,6 +666,7 @@ func expectedGRPCApiServiceEndpoints(t *testing.T, c *rtest.Client) *pbcatalog.S "mesh": {Port: 10000, Protocol: pbcatalog.Protocol_PROTOCOL_MESH}, }, HealthStatus: pbcatalog.Health_HEALTH_WARNING, + Identity: "api", }, // api-6 { @@ -644,6 +680,7 @@ func expectedGRPCApiServiceEndpoints(t *testing.T, c *rtest.Client) *pbcatalog.S "mesh": {Port: 10000, Protocol: pbcatalog.Protocol_PROTOCOL_MESH}, }, HealthStatus: pbcatalog.Health_HEALTH_WARNING, + Identity: "api", }, // api-7 { @@ -657,6 +694,7 @@ func expectedGRPCApiServiceEndpoints(t *testing.T, c *rtest.Client) *pbcatalog.S "mesh": {Port: 10000, Protocol: pbcatalog.Protocol_PROTOCOL_MESH}, }, HealthStatus: 
pbcatalog.Health_HEALTH_CRITICAL, + Identity: "api", }, // api-8 { @@ -670,6 +708,7 @@ func expectedGRPCApiServiceEndpoints(t *testing.T, c *rtest.Client) *pbcatalog.S "mesh": {Port: 10000, Protocol: pbcatalog.Protocol_PROTOCOL_MESH}, }, HealthStatus: pbcatalog.Health_HEALTH_MAINTENANCE, + Identity: "api", }, // api-9 { @@ -683,6 +722,7 @@ func expectedGRPCApiServiceEndpoints(t *testing.T, c *rtest.Client) *pbcatalog.S "mesh": {Port: 10000, Protocol: pbcatalog.Protocol_PROTOCOL_MESH}, }, HealthStatus: pbcatalog.Health_HEALTH_CRITICAL, + Identity: "api", }, // api-20 { @@ -696,6 +736,7 @@ func expectedGRPCApiServiceEndpoints(t *testing.T, c *rtest.Client) *pbcatalog.S "mesh": {Port: 10000, Protocol: pbcatalog.Protocol_PROTOCOL_MESH}, }, HealthStatus: pbcatalog.Health_HEALTH_MAINTENANCE, + Identity: "api", }, }, } diff --git a/internal/catalog/catalogtest/test_lifecycle_v1alpha1.go b/internal/catalog/catalogtest/test_lifecycle_v1alpha1.go index 0c52ba16924..2e9c99c1234 100644 --- a/internal/catalog/catalogtest/test_lifecycle_v1alpha1.go +++ b/internal/catalog/catalogtest/test_lifecycle_v1alpha1.go @@ -482,6 +482,7 @@ func RunCatalogV1Alpha1EndpointsLifecycleIntegrationTest(t *testing.T, client pb "http": {Port: 443, Protocol: pbcatalog.Protocol_PROTOCOL_HTTP}, }, HealthStatus: pbcatalog.Health_HEALTH_PASSING, + Identity: "api", }, }, }). 
@@ -530,6 +531,7 @@ func RunCatalogV1Alpha1EndpointsLifecycleIntegrationTest(t *testing.T, client pb "http": {Port: 8080, Protocol: pbcatalog.Protocol_PROTOCOL_HTTP}, }, HealthStatus: pbcatalog.Health_HEALTH_PASSING, + Identity: "api", }, { TargetRef: api3.Id, @@ -540,6 +542,7 @@ func RunCatalogV1Alpha1EndpointsLifecycleIntegrationTest(t *testing.T, client pb "http": {Port: 8080, Protocol: pbcatalog.Protocol_PROTOCOL_HTTP}, }, HealthStatus: pbcatalog.Health_HEALTH_PASSING, + Identity: "api", }, }, }) @@ -569,6 +572,7 @@ func RunCatalogV1Alpha1EndpointsLifecycleIntegrationTest(t *testing.T, client pb "grpc": {Port: 9090, Protocol: pbcatalog.Protocol_PROTOCOL_GRPC}, }, HealthStatus: pbcatalog.Health_HEALTH_PASSING, + Identity: "api", }, { TargetRef: api2.Id, @@ -580,6 +584,7 @@ func RunCatalogV1Alpha1EndpointsLifecycleIntegrationTest(t *testing.T, client pb "grpc": {Port: 9090, Protocol: pbcatalog.Protocol_PROTOCOL_GRPC}, }, HealthStatus: pbcatalog.Health_HEALTH_PASSING, + Identity: "api", }, }, }) @@ -614,6 +619,7 @@ func RunCatalogV1Alpha1EndpointsLifecycleIntegrationTest(t *testing.T, client pb "grpc": {Port: 9090, Protocol: pbcatalog.Protocol_PROTOCOL_GRPC}, }, HealthStatus: pbcatalog.Health_HEALTH_PASSING, + Identity: "api", }, }, }) @@ -645,6 +651,7 @@ func RunCatalogV1Alpha1EndpointsLifecycleIntegrationTest(t *testing.T, client pb "grpc": {Port: 9090, Protocol: pbcatalog.Protocol_PROTOCOL_GRPC}, }, HealthStatus: pbcatalog.Health_HEALTH_PASSING, + Identity: "api", }, }, }) @@ -664,6 +671,7 @@ func RunCatalogV1Alpha1EndpointsLifecycleIntegrationTest(t *testing.T, client pb "grpc": {Port: 9090, Protocol: pbcatalog.Protocol_PROTOCOL_GRPC}, }, HealthStatus: pbcatalog.Health_HEALTH_CRITICAL, + Identity: "api", }, }, }) diff --git a/internal/catalog/internal/controllers/endpoints/controller.go b/internal/catalog/internal/controllers/endpoints/controller.go index 5a3e65d0a37..aa8e4559f7b 100644 --- a/internal/catalog/internal/controllers/endpoints/controller.go +++ 
b/internal/catalog/internal/controllers/endpoints/controller.go @@ -384,5 +384,6 @@ func workloadToEndpoint(svc *pbcatalog.Service, data *workloadData) *pbcatalog.E HealthStatus: health, Addresses: workloadAddrs, Ports: endpointPorts, + Identity: data.workload.Identity, } } diff --git a/internal/catalog/internal/controllers/endpoints/controller_test.go b/internal/catalog/internal/controllers/endpoints/controller_test.go index 8d9b864ca36..1f2e02daa83 100644 --- a/internal/catalog/internal/controllers/endpoints/controller_test.go +++ b/internal/catalog/internal/controllers/endpoints/controller_test.go @@ -123,6 +123,7 @@ func TestWorkloadToEndpoint(t *testing.T) { // the protocol is wrong here so it will not show up in the endpoints. "grpc": {Port: 9090, Protocol: pbcatalog.Protocol_PROTOCOL_HTTP2}, }, + Identity: "test-identity", } data := &workloadData{ @@ -146,6 +147,7 @@ func TestWorkloadToEndpoint(t *testing.T) { // that we can properly determine the health status and the overall // controller tests will prove that the integration works as expected. HealthStatus: pbcatalog.Health_HEALTH_CRITICAL, + Identity: workload.Identity, } prototest.AssertDeepEqual(t, expected, workloadToEndpoint(service, data)) @@ -630,6 +632,7 @@ func (suite *controllerSuite) TestController() { "http": {Port: 8080, Protocol: pbcatalog.Protocol_PROTOCOL_HTTP}, }, HealthStatus: pbcatalog.Health_HEALTH_CRITICAL, + Identity: "api", }) // Update the health status of the workload @@ -661,6 +664,7 @@ func (suite *controllerSuite) TestController() { "http": {Port: 8080, Protocol: pbcatalog.Protocol_PROTOCOL_HTTP}, }, HealthStatus: pbcatalog.Health_HEALTH_PASSING, + Identity: "api", }) // rewrite the service to add more selection criteria. This should trigger @@ -712,6 +716,7 @@ func (suite *controllerSuite) TestController() { "grpc": {Port: 8081, Protocol: pbcatalog.Protocol_PROTOCOL_GRPC}, }, HealthStatus: pbcatalog.Health_HEALTH_PASSING, + Identity: "api", }) // Delete the endpoints. 
The controller should bring these back momentarily diff --git a/internal/mesh/internal/types/intermediate/types.go b/internal/mesh/internal/types/intermediate/types.go index 533017bd36c..d7c033f9060 100644 --- a/internal/mesh/internal/types/intermediate/types.go +++ b/internal/mesh/internal/types/intermediate/types.go @@ -42,6 +42,11 @@ type ProxyStateTemplate struct { Tmpl *pbmesh.ProxyStateTemplate } +type ProxyConfiguration struct { + Resource *pbresource.Resource + Cfg *pbmesh.ProxyConfiguration +} + type Destination struct { Explicit *pbmesh.Upstream ServiceEndpoints *ServiceEndpoints diff --git a/internal/resource/resourcetest/builder.go b/internal/resource/resourcetest/builder.go index d32e9651583..2bd6275d542 100644 --- a/internal/resource/resourcetest/builder.go +++ b/internal/resource/resourcetest/builder.go @@ -148,7 +148,7 @@ func (b *resourceBuilder) Write(t T, client pbresource.ResourceServiceClient) *p Resource: res, }) - if err == nil || res.Id.Uid != "" || status.Code(err) == codes.FailedPrecondition { + if err == nil || res.Id.Uid != "" || status.Code(err) != codes.FailedPrecondition { if err != nil { t.Logf("write saw error: %v", err) } From d18db661ed1339ab06ec796ce48cdfdf0f311f85 Mon Sep 17 00:00:00 2001 From: Iryna Shustava Date: Thu, 3 Aug 2023 19:22:28 -0600 Subject: [PATCH 05/11] refactor trust domain fetcher --- agent/consul/server.go | 17 +++++------------ agent/consul/server_connect.go | 32 +++++++++++++++++++++----------- 2 files changed, 26 insertions(+), 23 deletions(-) diff --git a/agent/consul/server.go b/agent/consul/server.go index 50bf0f0d1c0..4b0c07e1945 100644 --- a/agent/consul/server.go +++ b/agent/consul/server.go @@ -19,7 +19,6 @@ import ( "sync/atomic" "time" - "github.com/hashicorp/consul/agent/connect" "github.com/hashicorp/consul/internal/mesh" "github.com/hashicorp/consul/internal/resource" @@ -911,20 +910,14 @@ func (s *Server) registerControllers(deps Deps, proxyUpdater ProxyUpdater) { return &bundle, nil }, 
ProxyUpdater: proxyUpdater, + // This function is adapted from server_connect.go:getCARoots. TrustDomainFetcher: func() (string, error) { - if s.config.CAConfig == nil || s.config.CAConfig.ClusterID == "" { - return "", fmt.Errorf("CA has not finished initializing") - } - - // Build TrustDomain based on the ClusterID stored. - signingID := connect.SpiffeIDSigningForCluster(s.config.CAConfig.ClusterID) - if signingID == nil { - // If CA is bootstrapped at all then this should never happen but be - // defensive. - return "", fmt.Errorf("no cluster trust domain setup") + _, caConfig, err := s.fsm.State().CAConfig(nil) + if err != nil { + return "", err } - return signingID.Host(), nil + return s.getTrustDomain(caConfig) }, }) } diff --git a/agent/consul/server_connect.go b/agent/consul/server_connect.go index d76e4fc8c42..2274aff523b 100644 --- a/agent/consul/server_connect.go +++ b/agent/consul/server_connect.go @@ -19,21 +19,15 @@ func (s *Server) getCARoots(ws memdb.WatchSet, state *state.Store) (*structs.Ind if err != nil { return nil, err } - if config == nil || config.ClusterID == "" { - return nil, fmt.Errorf("CA has not finished initializing") + + trustDomain, err := s.getTrustDomain(config) + if err != nil { + return nil, err } indexedRoots := &structs.IndexedCARoots{} - // Build TrustDomain based on the ClusterID stored. - signingID := connect.SpiffeIDSigningForCluster(config.ClusterID) - if signingID == nil { - // If CA is bootstrapped at all then this should never happen but be - // defensive. 
- return nil, fmt.Errorf("no cluster trust domain setup") - } - - indexedRoots.TrustDomain = signingID.Host() + indexedRoots.TrustDomain = trustDomain indexedRoots.Index, indexedRoots.Roots = index, roots if indexedRoots.Roots == nil { @@ -77,3 +71,19 @@ func (s *Server) getCARoots(ws memdb.WatchSet, state *state.Store) (*structs.Ind return indexedRoots, nil } + +func (s *Server) getTrustDomain(config *structs.CAConfiguration) (string, error) { + if config == nil || config.ClusterID == "" { + return "", fmt.Errorf("CA has not finished initializing") + } + + // Build TrustDomain based on the ClusterID stored. + signingID := connect.SpiffeIDSigningForCluster(config.ClusterID) + if signingID == nil { + // If CA is bootstrapped at all then this should never happen but be + // defensive. + return "", fmt.Errorf("no cluster trust domain setup") + } + + return signingID.Host(), nil +} From 93d2f229e30ba3dc0564086d6d9e0587db5c4347 Mon Sep 17 00:00:00 2001 From: Iryna Shustava Date: Fri, 4 Aug 2023 15:58:24 -0600 Subject: [PATCH 06/11] small fixes --- .../controllers/endpoints/controller.go | 4 ---- .../controllers/endpoints/controller_test.go | 14 ----------- .../builder/destination_builder.go | 23 ++++++++++--------- .../sidecar-proxy/builder/local_app.go | 4 +++- .../controllers/sidecar-proxy/cache/cache.go | 4 ++-- .../controllers/sidecar-proxy/controller.go | 6 ++--- .../sidecar-proxy/fetcher/data_fetcher.go | 2 +- .../mesh/internal/types/intermediate/types.go | 12 ++++++---- internal/mesh/internal/types/upstreams.go | 2 +- 9 files changed, 29 insertions(+), 42 deletions(-) diff --git a/internal/catalog/internal/controllers/endpoints/controller.go b/internal/catalog/internal/controllers/endpoints/controller.go index aa8e4559f7b..ea5865788f3 100644 --- a/internal/catalog/internal/controllers/endpoints/controller.go +++ b/internal/catalog/internal/controllers/endpoints/controller.go @@ -5,7 +5,6 @@ package endpoints import ( "context" - "fmt" "sort" 
"github.com/hashicorp/consul/internal/catalog/internal/controllers/workloadhealth" @@ -376,9 +375,6 @@ func workloadToEndpoint(svc *pbcatalog.Service, data *workloadData) *pbcatalog.E return nil } - if data.resource.Id == nil { - fmt.Println("-------------------iryna: workload id is nil") - } return &pbcatalog.Endpoint{ TargetRef: data.resource.Id, HealthStatus: health, diff --git a/internal/catalog/internal/controllers/endpoints/controller_test.go b/internal/catalog/internal/controllers/endpoints/controller_test.go index 1f2e02daa83..c9e6c1d3add 100644 --- a/internal/catalog/internal/controllers/endpoints/controller_test.go +++ b/internal/catalog/internal/controllers/endpoints/controller_test.go @@ -705,20 +705,6 @@ func (suite *controllerSuite) TestController() { endpoints = suite.client.WaitForNewVersion(suite.T(), endpointsID, endpoints.Version) rtest.RequireOwner(suite.T(), endpoints, updatedService.Id, false) - // ensure the endpoint was put into the passing state - suite.requireEndpoints(endpoints, &pbcatalog.Endpoint{ - TargetRef: workload.Id, - Addresses: []*pbcatalog.WorkloadAddress{ - {Host: "127.0.0.1", Ports: []string{"grpc", "http"}}, - }, - Ports: map[string]*pbcatalog.WorkloadPort{ - "http": {Port: 8080, Protocol: pbcatalog.Protocol_PROTOCOL_HTTP}, - "grpc": {Port: 8081, Protocol: pbcatalog.Protocol_PROTOCOL_GRPC}, - }, - HealthStatus: pbcatalog.Health_HEALTH_PASSING, - Identity: "api", - }) - // Delete the endpoints. 
The controller should bring these back momentarily suite.client.Delete(suite.ctx, &pbresource.DeleteRequest{Id: endpointsID}) diff --git a/internal/mesh/internal/controllers/sidecar-proxy/builder/destination_builder.go b/internal/mesh/internal/controllers/sidecar-proxy/builder/destination_builder.go index 062ceb02f1e..f1975291b83 100644 --- a/internal/mesh/internal/controllers/sidecar-proxy/builder/destination_builder.go +++ b/internal/mesh/internal/controllers/sidecar-proxy/builder/destination_builder.go @@ -23,15 +23,17 @@ func (b *Builder) buildExplicitDestination(destination *intermediate.Destination clusterName := DestinationClusterName(destination.Explicit.DestinationRef, destination.Explicit.Datacenter, b.trustDomain) statPrefix := DestinationStatPrefix(destination.Explicit.DestinationRef, destination.Explicit.Datacenter) - // We assume that all endpoints have the same port. Later, we will change service endpoints to - // have the global ports map rather than per address. - destPort := destination.ServiceEndpoints.Endpoints.Endpoints[0].Ports[destination.Explicit.DestinationPort] - - if destPort != nil { - return b.addOutboundDestinationListener(destination.Explicit). - addRouter(clusterName, statPrefix, destPort.Protocol). - addCluster(clusterName, destination.Identities). - addEndpointsRef(clusterName, destination.ServiceEndpoints.Resource.Id, destination.Explicit.DestinationPort) + // All endpoints should have the same protocol as the endpoints controller ensures that is the case, + // so it's sufficient to read just the first endpoint. + if len(destination.ServiceEndpoints.Endpoints.Endpoints) > 0 { + destPort := destination.ServiceEndpoints.Endpoints.Endpoints[0].Ports[destination.Explicit.DestinationPort] + + if destPort != nil { + return b.addOutboundDestinationListener(destination.Explicit). + addRouter(clusterName, statPrefix, destPort.Protocol). + addCluster(clusterName, destination.Identities). 
+ addEndpointsRef(clusterName, destination.ServiceEndpoints.Resource.Id, destination.Explicit.DestinationPort) + } } return b @@ -67,10 +69,10 @@ func (b *Builder) addOutboundDestinationListener(explicit *pbmesh.Upstream) *Bui return b.addListener(listener) } -// for explicit destinations, we have no filter chain match, and filters based on port protocol func (b *Builder) addRouter(clusterName, statPrefix string, protocol pbcatalog.Protocol) *Builder { listener := b.getLastBuiltListener() + // For explicit destinations, we have no filter chain match, and filters are based on port protocol. switch protocol { case pbcatalog.Protocol_PROTOCOL_TCP: router := &pbproxystate.Router{ @@ -123,7 +125,6 @@ func (b *Builder) addCluster(clusterName string, destinationIdentities []*pbreso } func (b *Builder) addEndpointsRef(clusterName string, serviceEndpointsID *pbresource.ID, destinationPort string) *Builder { - // Finally, add endpoints references. b.proxyStateTemplate.RequiredEndpoints[clusterName] = &pbproxystate.EndpointRef{ Id: serviceEndpointsID, Port: destinationPort, diff --git a/internal/mesh/internal/controllers/sidecar-proxy/builder/local_app.go b/internal/mesh/internal/controllers/sidecar-proxy/builder/local_app.go index 5082ce298ee..323863475b4 100644 --- a/internal/mesh/internal/controllers/sidecar-proxy/builder/local_app.go +++ b/internal/mesh/internal/controllers/sidecar-proxy/builder/local_app.go @@ -64,8 +64,10 @@ func (b *Builder) addInboundRouters(workload *pbcatalog.Workload) *Builder { listener := b.getLastBuiltListener() // Go through workload ports and add the first non-mesh port we see. + // Note that the order of ports is non-deterministic here but the xds generation + // code should make sure to send it in the same order to Envoy to avoid unnecessary + // updates. // todo (ishustava): Note we will need to support multiple ports in the future. 
- // todo (ishustava): make sure we always iterate through ports in the same order so we don't need to send more updates to envoy. for portName, port := range workload.Ports { clusterName := fmt.Sprintf("%s:%s", xdscommon.LocalAppClusterName, portName) if port.Protocol == pbcatalog.Protocol_PROTOCOL_TCP { diff --git a/internal/mesh/internal/controllers/sidecar-proxy/cache/cache.go b/internal/mesh/internal/controllers/sidecar-proxy/cache/cache.go index 5e8dd7d8b12..8eee2c10b92 100644 --- a/internal/mesh/internal/controllers/sidecar-proxy/cache/cache.go +++ b/internal/mesh/internal/controllers/sidecar-proxy/cache/cache.go @@ -9,11 +9,11 @@ import ( "github.com/hashicorp/consul/proto-public/pbresource" ) -// Cache stores information needed for the mesh controller to reconcile efficiently. +// Cache stores information needed for the sidecar-proxy controller to reconcile efficiently. // This currently means storing a list of all destinations for easy look up // as well as indices of source proxies where those destinations are referenced. // -// It is the responsibility of controller and its subcomponents (like mapper and data fetcher) +// It is the responsibility of the controller and its subcomponents (like mapper and data fetcher) // to keep this cache up-to-date as we're observing new data. type Cache struct { lock sync.RWMutex diff --git a/internal/mesh/internal/controllers/sidecar-proxy/controller.go b/internal/mesh/internal/controllers/sidecar-proxy/controller.go index 2f1ea8c9437..ddaff28d5ad 100644 --- a/internal/mesh/internal/controllers/sidecar-proxy/controller.go +++ b/internal/mesh/internal/controllers/sidecar-proxy/controller.go @@ -49,7 +49,7 @@ func (r *reconciler) Reconcile(ctx context.Context, rt controller.Runtime, req c // Instantiate a data fetcher to fetch all reconciliation data. dataFetcher := fetcher.Fetcher{Client: rt.Client, Cache: r.cache} - // Check if the apiWorkload exists. + // Check if the workload exists. 
workloadID := resource.ReplaceType(catalog.WorkloadType, req.ID) workload, err := dataFetcher.FetchWorkload(ctx, resource.ReplaceType(catalog.WorkloadType, req.ID)) if err != nil { @@ -57,7 +57,7 @@ func (r *reconciler) Reconcile(ctx context.Context, rt controller.Runtime, req c return err } if workload == nil { - // If apiWorkload has been deleted, then return as ProxyStateTemplate should be cleaned up + // If workload has been deleted, then return as ProxyStateTemplate should be cleaned up // by the garbage collector because of the owner reference. rt.Logger.Trace("workload doesn't exist; skipping reconciliation", "workload", workloadID) return nil @@ -70,7 +70,7 @@ func (r *reconciler) Reconcile(ctx context.Context, rt controller.Runtime, req c } if proxyStateTemplate == nil { - // If proxy state template has been deleted + // If proxy state template has been deleted, we will need to generate a new one. rt.Logger.Trace("proxy state template for this workload doesn't yet exist; generating a new one", "id", req.ID) } diff --git a/internal/mesh/internal/controllers/sidecar-proxy/fetcher/data_fetcher.go b/internal/mesh/internal/controllers/sidecar-proxy/fetcher/data_fetcher.go index ac9e8cc2077..98db4043938 100644 --- a/internal/mesh/internal/controllers/sidecar-proxy/fetcher/data_fetcher.go +++ b/internal/mesh/internal/controllers/sidecar-proxy/fetcher/data_fetcher.go @@ -203,7 +203,7 @@ func (f *Fetcher) FetchDestinationsData( return destinations, statuses, nil } -// IsMeshEnabled returns true if apiWorkload or service endpoints port +// IsMeshEnabled returns true if the workload or service endpoints port // contain a port with the "mesh" protocol. 
func IsMeshEnabled(ports map[string]*pbcatalog.WorkloadPort) bool { for _, port := range ports { diff --git a/internal/mesh/internal/types/intermediate/types.go b/internal/mesh/internal/types/intermediate/types.go index d7c033f9060..5be14102b94 100644 --- a/internal/mesh/internal/types/intermediate/types.go +++ b/internal/mesh/internal/types/intermediate/types.go @@ -6,18 +6,20 @@ import ( "github.com/hashicorp/consul/proto-public/pbresource" ) -// todo should it be destination? -// the problem is that it's compiled from different source objects +// CombinedDestinationRef contains all references we need for a specific +// destination on the mesh. type CombinedDestinationRef struct { - // ServiceRef is the reference to the destination service for this upstream + // ServiceRef is the reference to the destination service. ServiceRef *pbresource.Reference + // Port is the port name for this destination. Port string - // sourceProxies are the IDs of source proxy state template resources. + // SourceProxies are the IDs of source proxy state template resources. + // The keys are a string representation of *pbresource.ID. SourceProxies map[string]*pbresource.ID - // explicitUpstreamID is the id of an explicit upstreams resource. For implicit upstreams, + // ExplicitDestinationsID is the id of the pbmesh.Upstreams resource. For implicit destinations, // this should be nil. 
ExplicitDestinationsID *pbresource.ID } diff --git a/internal/mesh/internal/types/upstreams.go b/internal/mesh/internal/types/upstreams.go index 238e18d2dea..1ef73e181e0 100644 --- a/internal/mesh/internal/types/upstreams.go +++ b/internal/mesh/internal/types/upstreams.go @@ -10,7 +10,7 @@ import ( ) const ( - UpstreamsKind = "Destinations" + UpstreamsKind = "Upstreams" ) var ( From 78b13258677180712975804b9e130b4fdcb35906 Mon Sep 17 00:00:00 2001 From: Iryna Shustava Date: Mon, 7 Aug 2023 18:19:17 -0600 Subject: [PATCH 07/11] review comments --- agent/connect/uri_service.go | 11 +++++----- internal/mesh/exports.go | 21 +++++++------------ .../mesh/internal/controllers/register.go | 13 +++++++----- .../builder/builder.go | 0 .../builder/builder_test.go | 0 .../builder/destination_builder.go | 0 .../builder/destination_builder_test.go | 0 .../builder/local_app.go | 0 .../builder/local_app_test.go | 0 .../builder/naming.go | 0 .../testdata/l4-multi-destination.golden | 0 ...kload-addresses-with-specific-ports.golden | 0 ...le-workload-addresses-without-ports.golden | 0 ...le-destination-ip-port-bind-address.golden | 0 ...estination-unix-socket-bind-address.golden | 0 ...ngle-workload-address-without-ports.golden | 0 .../cache/cache.go | 0 .../cache/cache_test.go | 0 .../controller.go | 10 ++++----- .../controller_test.go | 10 ++++----- .../fetcher/data_fetcher.go | 4 ++-- .../fetcher/data_fetcher_test.go | 4 ++-- .../mapper/destinations_mapper.go | 2 +- .../mapper/destinations_mapper_test.go | 2 +- .../mapper/service_endpoints_mapper.go | 2 +- .../mapper/service_endpoints_mapper_test.go | 2 +- .../status/status.go | 0 27 files changed, 40 insertions(+), 41 deletions(-) rename internal/mesh/internal/controllers/{sidecar-proxy => sidecarproxy}/builder/builder.go (100%) rename internal/mesh/internal/controllers/{sidecar-proxy => sidecarproxy}/builder/builder_test.go (100%) rename internal/mesh/internal/controllers/{sidecar-proxy => 
sidecarproxy}/builder/destination_builder.go (100%) rename internal/mesh/internal/controllers/{sidecar-proxy => sidecarproxy}/builder/destination_builder_test.go (100%) rename internal/mesh/internal/controllers/{sidecar-proxy => sidecarproxy}/builder/local_app.go (100%) rename internal/mesh/internal/controllers/{sidecar-proxy => sidecarproxy}/builder/local_app_test.go (100%) rename internal/mesh/internal/controllers/{sidecar-proxy => sidecarproxy}/builder/naming.go (100%) rename internal/mesh/internal/controllers/{sidecar-proxy => sidecarproxy}/builder/testdata/l4-multi-destination.golden (100%) rename internal/mesh/internal/controllers/{sidecar-proxy => sidecarproxy}/builder/testdata/l4-multiple-workload-addresses-with-specific-ports.golden (100%) rename internal/mesh/internal/controllers/{sidecar-proxy => sidecarproxy}/builder/testdata/l4-multiple-workload-addresses-without-ports.golden (100%) rename internal/mesh/internal/controllers/{sidecar-proxy => sidecarproxy}/builder/testdata/l4-single-destination-ip-port-bind-address.golden (100%) rename internal/mesh/internal/controllers/{sidecar-proxy => sidecarproxy}/builder/testdata/l4-single-destination-unix-socket-bind-address.golden (100%) rename internal/mesh/internal/controllers/{sidecar-proxy => sidecarproxy}/builder/testdata/l4-single-workload-address-without-ports.golden (100%) rename internal/mesh/internal/controllers/{sidecar-proxy => sidecarproxy}/cache/cache.go (100%) rename internal/mesh/internal/controllers/{sidecar-proxy => sidecarproxy}/cache/cache_test.go (100%) rename internal/mesh/internal/controllers/{sidecar-proxy => sidecarproxy}/controller.go (98%) rename internal/mesh/internal/controllers/{sidecar-proxy => sidecarproxy}/controller_test.go (99%) rename internal/mesh/internal/controllers/{sidecar-proxy => sidecarproxy}/fetcher/data_fetcher.go (99%) rename internal/mesh/internal/controllers/{sidecar-proxy => sidecarproxy}/fetcher/data_fetcher_test.go (99%) rename 
internal/mesh/internal/controllers/{sidecar-proxy => sidecarproxy}/mapper/destinations_mapper.go (98%) rename internal/mesh/internal/controllers/{sidecar-proxy => sidecarproxy}/mapper/destinations_mapper_test.go (99%) rename internal/mesh/internal/controllers/{sidecar-proxy => sidecarproxy}/mapper/service_endpoints_mapper.go (99%) rename internal/mesh/internal/controllers/{sidecar-proxy => sidecarproxy}/mapper/service_endpoints_mapper_test.go (99%) rename internal/mesh/internal/controllers/{sidecar-proxy => sidecarproxy}/status/status.go (100%) diff --git a/agent/connect/uri_service.go b/agent/connect/uri_service.go index 833beb8d2d0..6a242a0d9fc 100644 --- a/agent/connect/uri_service.go +++ b/agent/connect/uri_service.go @@ -54,15 +54,15 @@ func (id SpiffeIDService) uriPath() string { return path } -// SpiffeIDIdentity is the structure to represent the SPIFFE ID for an identity. -type SpiffeIDIdentity struct { +// SpiffeIDWorkloadIdentity is the structure to represent the SPIFFE ID for a workload identity. +type SpiffeIDWorkloadIdentity struct { Host string Partition string Namespace string Identity string } -func (id SpiffeIDIdentity) URI() *url.URL { +func (id SpiffeIDWorkloadIdentity) URI() *url.URL { var result url.URL result.Scheme = "spiffe" result.Host = id.Host @@ -74,9 +74,10 @@ func (id SpiffeIDIdentity) URI() *url.URL { return &result } -// SpiffeIDFromIdentityRef creates the SPIFFE ID from an identity. +// SpiffeIDFromIdentityRef creates the SPIFFE ID from a workload identity. +// TODO (ishustava): make sure ref type is workload identity. 
func SpiffeIDFromIdentityRef(trustDomain string, ref *pbresource.Reference) string { - return SpiffeIDIdentity{ + return SpiffeIDWorkloadIdentity{ Host: trustDomain, Partition: ref.Tenancy.Partition, Namespace: ref.Tenancy.Namespace, diff --git a/internal/mesh/exports.go b/internal/mesh/exports.go index e406f58c16c..942c4bd10e2 100644 --- a/internal/mesh/exports.go +++ b/internal/mesh/exports.go @@ -6,8 +6,8 @@ package mesh import ( "github.com/hashicorp/consul/internal/controller" "github.com/hashicorp/consul/internal/mesh/internal/controllers" - "github.com/hashicorp/consul/internal/mesh/internal/controllers/sidecar-proxy" - "github.com/hashicorp/consul/internal/mesh/internal/controllers/sidecar-proxy/status" + "github.com/hashicorp/consul/internal/mesh/internal/controllers/sidecarproxy" + "github.com/hashicorp/consul/internal/mesh/internal/controllers/sidecarproxy/status" "github.com/hashicorp/consul/internal/mesh/internal/types" "github.com/hashicorp/consul/internal/resource" ) @@ -60,16 +60,11 @@ var ( // Controller statuses. // Sidecar-proxy controller. 
- - SidecarProxyStatusKey = sidecar_proxy.ControllerName - - SidecarProxyStatusConditionMeshDestination = status.StatusConditionMeshDestination - - SidecarProxyStatusReasonNonMeshDestination = status.StatusReasonNonMeshDestination - SidecarProxyStatusReasonMeshDestination = status.StatusReasonMeshDestination - - SidecarProxyStatusConditionDestinationExists = status.StatusConditionDestinationExists - + SidecarProxyStatusKey = sidecarproxy.ControllerName + SidecarProxyStatusConditionMeshDestination = status.StatusConditionMeshDestination + SidecarProxyStatusReasonNonMeshDestination = status.StatusReasonNonMeshDestination + SidecarProxyStatusReasonMeshDestination = status.StatusReasonMeshDestination + SidecarProxyStatusConditionDestinationExists = status.StatusConditionDestinationExists SidecarProxyStatusReasonDestinationServiceNotFound = status.StatusReasonDestinationServiceNotFound SidecarProxyStatusReasonDestinationServiceFound = status.StatusReasonDestinationServiceFound ) @@ -86,6 +81,6 @@ func RegisterControllers(mgr *controller.Manager, deps ControllerDependencies) { controllers.Register(mgr, deps) } -type TrustDomainFetcher = sidecar_proxy.TrustDomainFetcher +type TrustDomainFetcher = sidecarproxy.TrustDomainFetcher type ControllerDependencies = controllers.Dependencies diff --git a/internal/mesh/internal/controllers/register.go b/internal/mesh/internal/controllers/register.go index 23a9d507fd7..eb5131bd506 100644 --- a/internal/mesh/internal/controllers/register.go +++ b/internal/mesh/internal/controllers/register.go @@ -4,18 +4,21 @@ package controllers import ( + "github.com/hashicorp/consul/internal/mesh/internal/controllers/sidecar-proxy/cache" + "github.com/hashicorp/consul/internal/mesh/internal/controllers/sidecar-proxy/mapper" + "github.com/hashicorp/consul/internal/catalog" "github.com/hashicorp/consul/internal/controller" + "github.com/hashicorp/consul/internal/mesh/internal/controllers/sidecarproxy" + 
"github.com/hashicorp/consul/internal/mesh/internal/controllers/sidecarproxy/cache" + "github.com/hashicorp/consul/internal/mesh/internal/controllers/sidecarproxy/mapper" "github.com/hashicorp/consul/internal/mesh/internal/controllers/xds" "github.com/hashicorp/consul/internal/mesh/internal/types" "github.com/hashicorp/consul/internal/resource/mappers/bimapper" - "github.com/hashicorp/consul/internal/mesh/internal/controllers/sidecar-proxy" - "github.com/hashicorp/consul/internal/mesh/internal/controllers/sidecar-proxy/cache" - "github.com/hashicorp/consul/internal/mesh/internal/controllers/sidecar-proxy/mapper" ) type Dependencies struct { - TrustDomainFetcher sidecar_proxy.TrustDomainFetcher + TrustDomainFetcher sidecarproxy.TrustDomainFetcher TrustBundleFetcher xds.TrustBundleFetcher ProxyUpdater xds.ProxyUpdater } @@ -25,5 +28,5 @@ func Register(mgr *controller.Manager, deps Dependencies) { m := mapper.New(c) mapper := bimapper.New(types.ProxyStateTemplateType, catalog.ServiceEndpointsType) mgr.Register(xds.Controller(mapper, deps.ProxyUpdater, deps.TrustBundleFetcher)) - mgr.Register(sidecar_proxy.Controller(c, m, deps.TrustDomainFetcher)) + mgr.Register(sidecarproxy.Controller(c, m, deps.TrustDomainFetcher)) } diff --git a/internal/mesh/internal/controllers/sidecar-proxy/builder/builder.go b/internal/mesh/internal/controllers/sidecarproxy/builder/builder.go similarity index 100% rename from internal/mesh/internal/controllers/sidecar-proxy/builder/builder.go rename to internal/mesh/internal/controllers/sidecarproxy/builder/builder.go diff --git a/internal/mesh/internal/controllers/sidecar-proxy/builder/builder_test.go b/internal/mesh/internal/controllers/sidecarproxy/builder/builder_test.go similarity index 100% rename from internal/mesh/internal/controllers/sidecar-proxy/builder/builder_test.go rename to internal/mesh/internal/controllers/sidecarproxy/builder/builder_test.go diff --git 
a/internal/mesh/internal/controllers/sidecar-proxy/builder/destination_builder.go b/internal/mesh/internal/controllers/sidecarproxy/builder/destination_builder.go similarity index 100% rename from internal/mesh/internal/controllers/sidecar-proxy/builder/destination_builder.go rename to internal/mesh/internal/controllers/sidecarproxy/builder/destination_builder.go diff --git a/internal/mesh/internal/controllers/sidecar-proxy/builder/destination_builder_test.go b/internal/mesh/internal/controllers/sidecarproxy/builder/destination_builder_test.go similarity index 100% rename from internal/mesh/internal/controllers/sidecar-proxy/builder/destination_builder_test.go rename to internal/mesh/internal/controllers/sidecarproxy/builder/destination_builder_test.go diff --git a/internal/mesh/internal/controllers/sidecar-proxy/builder/local_app.go b/internal/mesh/internal/controllers/sidecarproxy/builder/local_app.go similarity index 100% rename from internal/mesh/internal/controllers/sidecar-proxy/builder/local_app.go rename to internal/mesh/internal/controllers/sidecarproxy/builder/local_app.go diff --git a/internal/mesh/internal/controllers/sidecar-proxy/builder/local_app_test.go b/internal/mesh/internal/controllers/sidecarproxy/builder/local_app_test.go similarity index 100% rename from internal/mesh/internal/controllers/sidecar-proxy/builder/local_app_test.go rename to internal/mesh/internal/controllers/sidecarproxy/builder/local_app_test.go diff --git a/internal/mesh/internal/controllers/sidecar-proxy/builder/naming.go b/internal/mesh/internal/controllers/sidecarproxy/builder/naming.go similarity index 100% rename from internal/mesh/internal/controllers/sidecar-proxy/builder/naming.go rename to internal/mesh/internal/controllers/sidecarproxy/builder/naming.go diff --git a/internal/mesh/internal/controllers/sidecar-proxy/builder/testdata/l4-multi-destination.golden b/internal/mesh/internal/controllers/sidecarproxy/builder/testdata/l4-multi-destination.golden similarity 
index 100% rename from internal/mesh/internal/controllers/sidecar-proxy/builder/testdata/l4-multi-destination.golden rename to internal/mesh/internal/controllers/sidecarproxy/builder/testdata/l4-multi-destination.golden diff --git a/internal/mesh/internal/controllers/sidecar-proxy/builder/testdata/l4-multiple-workload-addresses-with-specific-ports.golden b/internal/mesh/internal/controllers/sidecarproxy/builder/testdata/l4-multiple-workload-addresses-with-specific-ports.golden similarity index 100% rename from internal/mesh/internal/controllers/sidecar-proxy/builder/testdata/l4-multiple-workload-addresses-with-specific-ports.golden rename to internal/mesh/internal/controllers/sidecarproxy/builder/testdata/l4-multiple-workload-addresses-with-specific-ports.golden diff --git a/internal/mesh/internal/controllers/sidecar-proxy/builder/testdata/l4-multiple-workload-addresses-without-ports.golden b/internal/mesh/internal/controllers/sidecarproxy/builder/testdata/l4-multiple-workload-addresses-without-ports.golden similarity index 100% rename from internal/mesh/internal/controllers/sidecar-proxy/builder/testdata/l4-multiple-workload-addresses-without-ports.golden rename to internal/mesh/internal/controllers/sidecarproxy/builder/testdata/l4-multiple-workload-addresses-without-ports.golden diff --git a/internal/mesh/internal/controllers/sidecar-proxy/builder/testdata/l4-single-destination-ip-port-bind-address.golden b/internal/mesh/internal/controllers/sidecarproxy/builder/testdata/l4-single-destination-ip-port-bind-address.golden similarity index 100% rename from internal/mesh/internal/controllers/sidecar-proxy/builder/testdata/l4-single-destination-ip-port-bind-address.golden rename to internal/mesh/internal/controllers/sidecarproxy/builder/testdata/l4-single-destination-ip-port-bind-address.golden diff --git a/internal/mesh/internal/controllers/sidecar-proxy/builder/testdata/l4-single-destination-unix-socket-bind-address.golden 
b/internal/mesh/internal/controllers/sidecarproxy/builder/testdata/l4-single-destination-unix-socket-bind-address.golden similarity index 100% rename from internal/mesh/internal/controllers/sidecar-proxy/builder/testdata/l4-single-destination-unix-socket-bind-address.golden rename to internal/mesh/internal/controllers/sidecarproxy/builder/testdata/l4-single-destination-unix-socket-bind-address.golden diff --git a/internal/mesh/internal/controllers/sidecar-proxy/builder/testdata/l4-single-workload-address-without-ports.golden b/internal/mesh/internal/controllers/sidecarproxy/builder/testdata/l4-single-workload-address-without-ports.golden similarity index 100% rename from internal/mesh/internal/controllers/sidecar-proxy/builder/testdata/l4-single-workload-address-without-ports.golden rename to internal/mesh/internal/controllers/sidecarproxy/builder/testdata/l4-single-workload-address-without-ports.golden diff --git a/internal/mesh/internal/controllers/sidecar-proxy/cache/cache.go b/internal/mesh/internal/controllers/sidecarproxy/cache/cache.go similarity index 100% rename from internal/mesh/internal/controllers/sidecar-proxy/cache/cache.go rename to internal/mesh/internal/controllers/sidecarproxy/cache/cache.go diff --git a/internal/mesh/internal/controllers/sidecar-proxy/cache/cache_test.go b/internal/mesh/internal/controllers/sidecarproxy/cache/cache_test.go similarity index 100% rename from internal/mesh/internal/controllers/sidecar-proxy/cache/cache_test.go rename to internal/mesh/internal/controllers/sidecarproxy/cache/cache_test.go diff --git a/internal/mesh/internal/controllers/sidecar-proxy/controller.go b/internal/mesh/internal/controllers/sidecarproxy/controller.go similarity index 98% rename from internal/mesh/internal/controllers/sidecar-proxy/controller.go rename to internal/mesh/internal/controllers/sidecarproxy/controller.go index ddaff28d5ad..95c1fa9244a 100644 --- a/internal/mesh/internal/controllers/sidecar-proxy/controller.go +++ 
b/internal/mesh/internal/controllers/sidecarproxy/controller.go @@ -1,17 +1,17 @@ // Copyright (c) HashiCorp, Inc. // SPDX-License-Identifier: MPL-2.0 -package sidecar_proxy +package sidecarproxy import ( "context" "github.com/hashicorp/consul/internal/catalog" "github.com/hashicorp/consul/internal/controller" - "github.com/hashicorp/consul/internal/mesh/internal/controllers/sidecar-proxy/builder" - "github.com/hashicorp/consul/internal/mesh/internal/controllers/sidecar-proxy/cache" - "github.com/hashicorp/consul/internal/mesh/internal/controllers/sidecar-proxy/fetcher" - "github.com/hashicorp/consul/internal/mesh/internal/controllers/sidecar-proxy/mapper" + "github.com/hashicorp/consul/internal/mesh/internal/controllers/sidecarproxy/builder" + "github.com/hashicorp/consul/internal/mesh/internal/controllers/sidecarproxy/cache" + "github.com/hashicorp/consul/internal/mesh/internal/controllers/sidecarproxy/fetcher" + "github.com/hashicorp/consul/internal/mesh/internal/controllers/sidecarproxy/mapper" "github.com/hashicorp/consul/internal/mesh/internal/types" "github.com/hashicorp/consul/internal/mesh/internal/types/intermediate" "github.com/hashicorp/consul/internal/resource" diff --git a/internal/mesh/internal/controllers/sidecar-proxy/controller_test.go b/internal/mesh/internal/controllers/sidecarproxy/controller_test.go similarity index 99% rename from internal/mesh/internal/controllers/sidecar-proxy/controller_test.go rename to internal/mesh/internal/controllers/sidecarproxy/controller_test.go index 74e56de9f38..74a38d4ed6c 100644 --- a/internal/mesh/internal/controllers/sidecar-proxy/controller_test.go +++ b/internal/mesh/internal/controllers/sidecarproxy/controller_test.go @@ -1,7 +1,7 @@ // Copyright (c) HashiCorp, Inc. 
// SPDX-License-Identifier: MPL-2.0 -package sidecar_proxy +package sidecarproxy import ( "context" @@ -10,10 +10,10 @@ import ( svctest "github.com/hashicorp/consul/agent/grpc-external/services/resource/testing" "github.com/hashicorp/consul/internal/catalog" "github.com/hashicorp/consul/internal/controller" - "github.com/hashicorp/consul/internal/mesh/internal/controllers/sidecar-proxy/builder" - "github.com/hashicorp/consul/internal/mesh/internal/controllers/sidecar-proxy/cache" - "github.com/hashicorp/consul/internal/mesh/internal/controllers/sidecar-proxy/mapper" - "github.com/hashicorp/consul/internal/mesh/internal/controllers/sidecar-proxy/status" + "github.com/hashicorp/consul/internal/mesh/internal/controllers/sidecarproxy/builder" + "github.com/hashicorp/consul/internal/mesh/internal/controllers/sidecarproxy/cache" + "github.com/hashicorp/consul/internal/mesh/internal/controllers/sidecarproxy/mapper" + "github.com/hashicorp/consul/internal/mesh/internal/controllers/sidecarproxy/status" "github.com/hashicorp/consul/internal/mesh/internal/types" "github.com/hashicorp/consul/internal/resource" "github.com/hashicorp/consul/internal/resource/resourcetest" diff --git a/internal/mesh/internal/controllers/sidecar-proxy/fetcher/data_fetcher.go b/internal/mesh/internal/controllers/sidecarproxy/fetcher/data_fetcher.go similarity index 99% rename from internal/mesh/internal/controllers/sidecar-proxy/fetcher/data_fetcher.go rename to internal/mesh/internal/controllers/sidecarproxy/fetcher/data_fetcher.go index 98db4043938..d38bebdabe0 100644 --- a/internal/mesh/internal/controllers/sidecar-proxy/fetcher/data_fetcher.go +++ b/internal/mesh/internal/controllers/sidecarproxy/fetcher/data_fetcher.go @@ -4,8 +4,8 @@ import ( "context" "github.com/hashicorp/consul/internal/catalog" - "github.com/hashicorp/consul/internal/mesh/internal/controllers/sidecar-proxy/cache" - ctrlStatus "github.com/hashicorp/consul/internal/mesh/internal/controllers/sidecar-proxy/status" + 
"github.com/hashicorp/consul/internal/mesh/internal/controllers/sidecarproxy/cache" + ctrlStatus "github.com/hashicorp/consul/internal/mesh/internal/controllers/sidecarproxy/status" "github.com/hashicorp/consul/internal/mesh/internal/types" intermediateTypes "github.com/hashicorp/consul/internal/mesh/internal/types/intermediate" "github.com/hashicorp/consul/internal/resource" diff --git a/internal/mesh/internal/controllers/sidecar-proxy/fetcher/data_fetcher_test.go b/internal/mesh/internal/controllers/sidecarproxy/fetcher/data_fetcher_test.go similarity index 99% rename from internal/mesh/internal/controllers/sidecar-proxy/fetcher/data_fetcher_test.go rename to internal/mesh/internal/controllers/sidecarproxy/fetcher/data_fetcher_test.go index 17eaf1ffa83..1bf799f42ee 100644 --- a/internal/mesh/internal/controllers/sidecar-proxy/fetcher/data_fetcher_test.go +++ b/internal/mesh/internal/controllers/sidecarproxy/fetcher/data_fetcher_test.go @@ -7,8 +7,8 @@ import ( svctest "github.com/hashicorp/consul/agent/grpc-external/services/resource/testing" "github.com/hashicorp/consul/internal/catalog" "github.com/hashicorp/consul/internal/controller" - "github.com/hashicorp/consul/internal/mesh/internal/controllers/sidecar-proxy/cache" - meshStatus "github.com/hashicorp/consul/internal/mesh/internal/controllers/sidecar-proxy/status" + "github.com/hashicorp/consul/internal/mesh/internal/controllers/sidecarproxy/cache" + meshStatus "github.com/hashicorp/consul/internal/mesh/internal/controllers/sidecarproxy/status" "github.com/hashicorp/consul/internal/mesh/internal/types" "github.com/hashicorp/consul/internal/mesh/internal/types/intermediate" "github.com/hashicorp/consul/internal/resource" diff --git a/internal/mesh/internal/controllers/sidecar-proxy/mapper/destinations_mapper.go b/internal/mesh/internal/controllers/sidecarproxy/mapper/destinations_mapper.go similarity index 98% rename from internal/mesh/internal/controllers/sidecar-proxy/mapper/destinations_mapper.go rename 
to internal/mesh/internal/controllers/sidecarproxy/mapper/destinations_mapper.go index 420725134a6..6bf98fd0787 100644 --- a/internal/mesh/internal/controllers/sidecar-proxy/mapper/destinations_mapper.go +++ b/internal/mesh/internal/controllers/sidecarproxy/mapper/destinations_mapper.go @@ -5,7 +5,7 @@ import ( "github.com/hashicorp/consul/internal/catalog" "github.com/hashicorp/consul/internal/controller" - "github.com/hashicorp/consul/internal/mesh/internal/controllers/sidecar-proxy/cache" + "github.com/hashicorp/consul/internal/mesh/internal/controllers/sidecarproxy/cache" "github.com/hashicorp/consul/internal/mesh/internal/types" "github.com/hashicorp/consul/internal/mesh/internal/types/intermediate" "github.com/hashicorp/consul/internal/resource" diff --git a/internal/mesh/internal/controllers/sidecar-proxy/mapper/destinations_mapper_test.go b/internal/mesh/internal/controllers/sidecarproxy/mapper/destinations_mapper_test.go similarity index 99% rename from internal/mesh/internal/controllers/sidecar-proxy/mapper/destinations_mapper_test.go rename to internal/mesh/internal/controllers/sidecarproxy/mapper/destinations_mapper_test.go index 69186527bdc..1aacac30029 100644 --- a/internal/mesh/internal/controllers/sidecar-proxy/mapper/destinations_mapper_test.go +++ b/internal/mesh/internal/controllers/sidecarproxy/mapper/destinations_mapper_test.go @@ -7,7 +7,7 @@ import ( svctest "github.com/hashicorp/consul/agent/grpc-external/services/resource/testing" "github.com/hashicorp/consul/internal/catalog" "github.com/hashicorp/consul/internal/controller" - "github.com/hashicorp/consul/internal/mesh/internal/controllers/sidecar-proxy/cache" + "github.com/hashicorp/consul/internal/mesh/internal/controllers/sidecarproxy/cache" "github.com/hashicorp/consul/internal/mesh/internal/types" "github.com/hashicorp/consul/internal/mesh/internal/types/intermediate" "github.com/hashicorp/consul/internal/resource" diff --git 
a/internal/mesh/internal/controllers/sidecar-proxy/mapper/service_endpoints_mapper.go b/internal/mesh/internal/controllers/sidecarproxy/mapper/service_endpoints_mapper.go similarity index 99% rename from internal/mesh/internal/controllers/sidecar-proxy/mapper/service_endpoints_mapper.go rename to internal/mesh/internal/controllers/sidecarproxy/mapper/service_endpoints_mapper.go index 52afab086ff..9c7d768e589 100644 --- a/internal/mesh/internal/controllers/sidecar-proxy/mapper/service_endpoints_mapper.go +++ b/internal/mesh/internal/controllers/sidecarproxy/mapper/service_endpoints_mapper.go @@ -5,7 +5,7 @@ import ( "github.com/hashicorp/consul/internal/catalog" "github.com/hashicorp/consul/internal/controller" - "github.com/hashicorp/consul/internal/mesh/internal/controllers/sidecar-proxy/cache" + "github.com/hashicorp/consul/internal/mesh/internal/controllers/sidecarproxy/cache" "github.com/hashicorp/consul/internal/mesh/internal/types" "github.com/hashicorp/consul/internal/resource" pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v1alpha1" diff --git a/internal/mesh/internal/controllers/sidecar-proxy/mapper/service_endpoints_mapper_test.go b/internal/mesh/internal/controllers/sidecarproxy/mapper/service_endpoints_mapper_test.go similarity index 99% rename from internal/mesh/internal/controllers/sidecar-proxy/mapper/service_endpoints_mapper_test.go rename to internal/mesh/internal/controllers/sidecarproxy/mapper/service_endpoints_mapper_test.go index f5c73331a4b..f463afbc47f 100644 --- a/internal/mesh/internal/controllers/sidecar-proxy/mapper/service_endpoints_mapper_test.go +++ b/internal/mesh/internal/controllers/sidecarproxy/mapper/service_endpoints_mapper_test.go @@ -6,7 +6,7 @@ import ( "github.com/hashicorp/consul/internal/catalog" "github.com/hashicorp/consul/internal/controller" - "github.com/hashicorp/consul/internal/mesh/internal/controllers/sidecar-proxy/cache" + 
"github.com/hashicorp/consul/internal/mesh/internal/controllers/sidecarproxy/cache" "github.com/hashicorp/consul/internal/mesh/internal/types" "github.com/hashicorp/consul/internal/mesh/internal/types/intermediate" "github.com/hashicorp/consul/internal/resource/resourcetest" diff --git a/internal/mesh/internal/controllers/sidecar-proxy/status/status.go b/internal/mesh/internal/controllers/sidecarproxy/status/status.go similarity index 100% rename from internal/mesh/internal/controllers/sidecar-proxy/status/status.go rename to internal/mesh/internal/controllers/sidecarproxy/status/status.go From 74c3f9007171309993e33da3abf3a7fe7566a112 Mon Sep 17 00:00:00 2001 From: Iryna Shustava Date: Mon, 7 Aug 2023 18:44:05 -0600 Subject: [PATCH 08/11] Make sure endpoint refs route to mesh port instead of an app port --- .../sidecarproxy/builder/destination_builder.go | 17 ++++++++++++++++- .../builder/destination_builder_test.go | 1 + .../testdata/l4-multi-destination.golden | 4 ++-- ...ngle-destination-ip-port-bind-address.golden | 2 +- ...-destination-unix-socket-bind-address.golden | 2 +- 5 files changed, 21 insertions(+), 5 deletions(-) diff --git a/internal/mesh/internal/controllers/sidecarproxy/builder/destination_builder.go b/internal/mesh/internal/controllers/sidecarproxy/builder/destination_builder.go index f1975291b83..0b46a48c4e6 100644 --- a/internal/mesh/internal/controllers/sidecarproxy/builder/destination_builder.go +++ b/internal/mesh/internal/controllers/sidecarproxy/builder/destination_builder.go @@ -26,13 +26,19 @@ func (b *Builder) buildExplicitDestination(destination *intermediate.Destination // All endpoints should have the same protocol as the endpoints controller ensures that is the case, // so it's sufficient to read just the first endpoint. if len(destination.ServiceEndpoints.Endpoints.Endpoints) > 0 { + // Get destination port so that we can configure this destination correctly based on its protocol. 
destPort := destination.ServiceEndpoints.Endpoints.Endpoints[0].Ports[destination.Explicit.DestinationPort] + // Find the destination proxy's port. + // Endpoints refs will need to route to mesh port instead of the destination port as that + // is the port of the destination's proxy. + meshPortName := findMeshPort(destination.ServiceEndpoints.Endpoints.Endpoints[0].Ports) + if destPort != nil { return b.addOutboundDestinationListener(destination.Explicit). addRouter(clusterName, statPrefix, destPort.Protocol). addCluster(clusterName, destination.Identities). - addEndpointsRef(clusterName, destination.ServiceEndpoints.Resource.Id, destination.Explicit.DestinationPort) + addEndpointsRef(clusterName, destination.ServiceEndpoints.Resource.Id, meshPortName) } } @@ -131,3 +137,12 @@ func (b *Builder) addEndpointsRef(clusterName string, serviceEndpointsID *pbreso } return b } + +func findMeshPort(ports map[string]*pbcatalog.WorkloadPort) string { + for name, port := range ports { + if port.Protocol == pbcatalog.Protocol_PROTOCOL_MESH { + return name + } + } + return "" +} diff --git a/internal/mesh/internal/controllers/sidecarproxy/builder/destination_builder_test.go b/internal/mesh/internal/controllers/sidecarproxy/builder/destination_builder_test.go index 6d8371662fd..86af902285d 100644 --- a/internal/mesh/internal/controllers/sidecarproxy/builder/destination_builder_test.go +++ b/internal/mesh/internal/controllers/sidecarproxy/builder/destination_builder_test.go @@ -23,6 +23,7 @@ var ( Ports: map[string]*pbcatalog.WorkloadPort{ "tcp": {Port: 8080, Protocol: pbcatalog.Protocol_PROTOCOL_TCP}, "http": {Port: 8080, Protocol: pbcatalog.Protocol_PROTOCOL_HTTP}, + "mesh": {Port: 20000, Protocol: pbcatalog.Protocol_PROTOCOL_MESH}, }, }, }, diff --git a/internal/mesh/internal/controllers/sidecarproxy/builder/testdata/l4-multi-destination.golden b/internal/mesh/internal/controllers/sidecarproxy/builder/testdata/l4-multi-destination.golden index dae364f5c07..7298777d870 100644 
--- a/internal/mesh/internal/controllers/sidecarproxy/builder/testdata/l4-multi-destination.golden +++ b/internal/mesh/internal/controllers/sidecarproxy/builder/testdata/l4-multi-destination.golden @@ -100,7 +100,7 @@ "peerName": "local" } }, - "port": "tcp" + "port": "mesh" }, "api-2.default.dc1.internal.foo.consul": { "id": { @@ -116,7 +116,7 @@ "peerName": "local" } }, - "port": "tcp" + "port": "mesh" } } } \ No newline at end of file diff --git a/internal/mesh/internal/controllers/sidecarproxy/builder/testdata/l4-single-destination-ip-port-bind-address.golden b/internal/mesh/internal/controllers/sidecarproxy/builder/testdata/l4-single-destination-ip-port-bind-address.golden index 44c97ca76f5..b0e113ec93a 100644 --- a/internal/mesh/internal/controllers/sidecarproxy/builder/testdata/l4-single-destination-ip-port-bind-address.golden +++ b/internal/mesh/internal/controllers/sidecarproxy/builder/testdata/l4-single-destination-ip-port-bind-address.golden @@ -64,7 +64,7 @@ "peerName": "local" } }, - "port": "tcp" + "port": "mesh" } } } \ No newline at end of file diff --git a/internal/mesh/internal/controllers/sidecarproxy/builder/testdata/l4-single-destination-unix-socket-bind-address.golden b/internal/mesh/internal/controllers/sidecarproxy/builder/testdata/l4-single-destination-unix-socket-bind-address.golden index 2dbaa61a1ff..aa21472ad57 100644 --- a/internal/mesh/internal/controllers/sidecarproxy/builder/testdata/l4-single-destination-unix-socket-bind-address.golden +++ b/internal/mesh/internal/controllers/sidecarproxy/builder/testdata/l4-single-destination-unix-socket-bind-address.golden @@ -64,7 +64,7 @@ "peerName": "local" } }, - "port": "tcp" + "port": "mesh" } } } \ No newline at end of file From 1ac3a482056e6e336ca707f4d9bf6846cf232407 Mon Sep 17 00:00:00 2001 From: Iryna Shustava Date: Wed, 9 Aug 2023 18:40:23 -0600 Subject: [PATCH 09/11] Address PR comments --- go.mod | 18 +- go.sum | 36 ++-- internal/mesh/exports.go | 55 +++--- 
.../internal/cache/sidecarproxycache/cache.go | 185 ++++++++++++++++++ .../sidecarproxycache}/cache_test.go | 80 +++++--- .../mesh/internal/controllers/register.go | 6 +- .../sidecarproxy/builder/builder.go | 19 +- .../sidecarproxy/builder/builder_test.go | 2 +- .../builder/destination_builder.go | 19 +- .../builder/destination_builder_test.go | 3 +- .../sidecarproxy/builder/local_app.go | 182 +++++++++-------- .../sidecarproxy/builder/local_app_test.go | 2 +- .../testdata/l4-multi-destination.golden | 148 +++++++------- ...kload-addresses-with-specific-ports.golden | 80 ++++---- ...le-workload-addresses-without-ports.golden | 80 ++++---- ...le-destination-ip-port-bind-address.golden | 84 ++++---- ...estination-unix-socket-bind-address.golden | 84 ++++---- ...ngle-workload-address-without-ports.golden | 80 ++++---- .../controllers/sidecarproxy/cache/cache.go | 144 -------------- .../controllers/sidecarproxy/controller.go | 10 +- .../sidecarproxy/controller_test.go | 38 ++-- .../sidecarproxy/fetcher/data_fetcher.go | 46 +++-- .../sidecarproxy/fetcher/data_fetcher_test.go | 177 +++++++++++------ .../controllers/sidecarproxy/status/status.go | 43 ++-- .../destinations_mapper.go | 18 +- .../destinations_mapper_test.go | 22 +-- .../service_endpoints_mapper.go | 42 ++-- .../service_endpoints_mapper_test.go | 39 ++-- .../mesh/internal/types/intermediate/types.go | 6 +- .../resource/mappers/bimapper/bimapper.go | 4 + proto-public/go.mod | 5 +- proto-public/go.sum | 9 +- .../pbcatalog/v1alpha1/workload_addon.go | 40 ++++ .../pbcatalog/v1alpha1/workload_addon_test.go | 104 ++++++++++ 34 files changed, 1135 insertions(+), 775 deletions(-) create mode 100644 internal/mesh/internal/cache/sidecarproxycache/cache.go rename internal/mesh/internal/{controllers/sidecarproxy/cache => cache/sidecarproxycache}/cache_test.go (69%) delete mode 100644 internal/mesh/internal/controllers/sidecarproxy/cache/cache.go rename internal/mesh/internal/{controllers/sidecarproxy/mapper => 
mappers/sidecarproxymapper}/destinations_mapper.go (78%) rename internal/mesh/internal/{controllers/sidecarproxy/mapper => mappers/sidecarproxymapper}/destinations_mapper_test.go (86%) rename internal/mesh/internal/{controllers/sidecarproxy/mapper => mappers/sidecarproxymapper}/service_endpoints_mapper.go (62%) rename internal/mesh/internal/{controllers/sidecarproxy/mapper => mappers/sidecarproxymapper}/service_endpoints_mapper_test.go (66%) create mode 100644 proto-public/pbcatalog/v1alpha1/workload_addon.go create mode 100644 proto-public/pbcatalog/v1alpha1/workload_addon_test.go diff --git a/go.mod b/go.mod index 2747f4b91b5..d8fd8a45aff 100644 --- a/go.mod +++ b/go.mod @@ -105,12 +105,12 @@ require ( go.opentelemetry.io/otel/sdk/metric v0.39.0 go.opentelemetry.io/proto/otlp v0.19.0 go.uber.org/goleak v1.1.10 - golang.org/x/crypto v0.11.0 - golang.org/x/exp v0.0.0-20230321023759-10a507213a29 - golang.org/x/net v0.13.0 + golang.org/x/crypto v0.12.0 + golang.org/x/exp v0.0.0-20230817173708-d852ddb80c63 + golang.org/x/net v0.14.0 golang.org/x/oauth2 v0.6.0 - golang.org/x/sync v0.2.0 - golang.org/x/sys v0.10.0 + golang.org/x/sync v0.3.0 + golang.org/x/sys v0.11.0 golang.org/x/time v0.3.0 google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 google.golang.org/grpc v1.55.0 @@ -251,10 +251,10 @@ require ( go.opentelemetry.io/otel/trace v1.16.0 // indirect go.uber.org/atomic v1.9.0 // indirect golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 // indirect - golang.org/x/mod v0.10.0 // indirect - golang.org/x/term v0.10.0 // indirect - golang.org/x/text v0.11.0 // indirect - golang.org/x/tools v0.9.1 // indirect + golang.org/x/mod v0.12.0 // indirect + golang.org/x/term v0.11.0 // indirect + golang.org/x/text v0.12.0 // indirect + golang.org/x/tools v0.12.1-0.20230815132531-74c255bcf846 // indirect google.golang.org/api v0.114.0 // indirect google.golang.org/appengine v1.6.7 // indirect gopkg.in/inf.v0 v0.9.1 // indirect diff --git a/go.sum b/go.sum index 
92213557b71..833718ff33b 100644 --- a/go.sum +++ b/go.sum @@ -1011,8 +1011,8 @@ golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5y golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.11.0 h1:6Ewdq3tDic1mg5xRO4milcWCfMVQhI4NkqWWvqejpuA= -golang.org/x/crypto v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio= +golang.org/x/crypto v0.12.0 h1:tFM/ta59kqch6LlvYnPa0yx5a83cL2nHflFhYKvv9Yk= +golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -1023,8 +1023,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/exp v0.0.0-20230321023759-10a507213a29 h1:ooxPy7fPvB4kwsA2h+iBNHkAbp/4JxTSwCmvdjEYmug= -golang.org/x/exp v0.0.0-20230321023759-10a507213a29/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc= +golang.org/x/exp v0.0.0-20230817173708-d852ddb80c63 h1:m64FZMko/V45gv0bNmrNYoDEq8U5YUhetc9cBWKS1TQ= +golang.org/x/exp v0.0.0-20230817173708-d852ddb80c63/go.mod h1:0v4NqG35kSWCMzLaMeX+IQrlSnVE/bqGSyC2cz/9Le8= golang.org/x/image 
v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -1052,8 +1052,8 @@ golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/mod v0.10.0 h1:lFO9qtOdlre5W1jxS3r/4szv2/6iXxScdzjoBMXNhYk= -golang.org/x/mod v0.10.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.12.0 h1:rmsUpXtvNzj340zd98LZ4KntptpfRHwpFOHG188oHXc= +golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180530234432-1e491301e022/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180611182652-db08ff08e862/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -1108,8 +1108,8 @@ golang.org/x/net v0.0.0-20210726213435-c6fcb2dbf985/go.mod h1:9nx3DQGgdP8bBQD5qx golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= -golang.org/x/net v0.13.0 h1:Nvo8UFsZ8X3BhAC9699Z1j7XQ3rsZnUUm7jfBEk1ueY= -golang.org/x/net v0.13.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA= +golang.org/x/net v0.14.0 h1:BONx9s002vGdD9umnlX1Po8vOZmrgH34qlHcD1MfK14= +golang.org/x/net v0.14.0/go.mod 
h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -1141,8 +1141,8 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.2.0 h1:PUR+T4wwASmuSTYdKjYHI5TD22Wy5ogLU5qZCOLxBrI= -golang.org/x/sync v0.2.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E= +golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1227,13 +1227,13 @@ golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.10.0 h1:SqMFp9UcQJZa+pmYuAKjd9xq1f0j5rLcDIk0mj4qAsA= -golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 
+golang.org/x/sys v0.11.0 h1:eG7RXZHdqOJ1i+0lgLgCpSXAp6M3LYlAo6osgSi0xOM= +golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.10.0 h1:3R7pNqamzBraeqj/Tj8qt1aQ2HpmlC+Cx/qL/7hn4/c= -golang.org/x/term v0.10.0/go.mod h1:lpqdcUyK/oCiQxvxVrppt5ggO2KCZ5QblwqPnfZ6d5o= +golang.org/x/term v0.11.0 h1:F9tnn/DA/Im8nCwm+fX+1/eBwi4qFjRT++MhtVC4ZX0= +golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU= golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1245,8 +1245,8 @@ golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.11.0 h1:LAntKIrcmeSKERyiOh0XMV39LXS8IE9UL2yP7+f5ij4= -golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.12.0 h1:k+n5B8goJNdU7hSvEtMUz3d1Q6D/XW4COJSJR6fN0mc= +golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod 
h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -1318,8 +1318,8 @@ golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.6-0.20210726203631-07bc1bf47fb2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.9.1 h1:8WMNJAz3zrtPmnYC7ISf5dEn3MT0gY7jBJfw27yrrLo= -golang.org/x/tools v0.9.1/go.mod h1:owI94Op576fPu3cIGQeHs3joujW/2Oc6MtlxbF5dfNc= +golang.org/x/tools v0.12.1-0.20230815132531-74c255bcf846 h1:Vve/L0v7CXXuxUmaMGIEK/dEeq7uiqb5qBgQrZzIE7E= +golang.org/x/tools v0.12.1-0.20230815132531-74c255bcf846/go.mod h1:Sc0INKfu04TlqNoRA1hgpFZbhYXHPr4V5DzpSBTPqQM= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/internal/mesh/exports.go b/internal/mesh/exports.go index 942c4bd10e2..dc3c31649ba 100644 --- a/internal/mesh/exports.go +++ b/internal/mesh/exports.go @@ -21,16 +21,16 @@ var ( // Resource Kind Names. 
- ProxyConfigurationKind = types.ProxyConfigurationKind - UpstreamsKind = types.UpstreamsKind - UpstreamsConfigurationKind = types.UpstreamsConfigurationKind - ProxyStateKind = types.ProxyStateTemplateKind - HTTPRouteKind = types.HTTPRouteKind - GRPCRouteKind = types.GRPCRouteKind - TCPRouteKind = types.TCPRouteKind - DestinationPolicyKind = types.DestinationPolicyKind - ComputedRoutesKind = types.ComputedRoutesKind - ProxyStateTemplateV1Alpha1Type = types.ProxyStateTemplateV1Alpha1Type + ProxyConfigurationKind = types.ProxyConfigurationKind + UpstreamsKind = types.UpstreamsKind + UpstreamsConfigurationKind = types.UpstreamsConfigurationKind + ProxyStateKind = types.ProxyStateTemplateKind + HTTPRouteKind = types.HTTPRouteKind + GRPCRouteKind = types.GRPCRouteKind + TCPRouteKind = types.TCPRouteKind + DestinationPolicyKind = types.DestinationPolicyKind + ComputedRoutesKind = types.ComputedRoutesKind + ProxyStateTemplateKind = types.ProxyStateTemplateKind // Resource Types for the v1alpha1 version. @@ -43,30 +43,31 @@ var ( TCPRouteV1Alpha1Type = types.TCPRouteV1Alpha1Type DestinationPolicyV1Alpha1Type = types.DestinationPolicyV1Alpha1Type ComputedRoutesV1Alpha1Type = types.ComputedRoutesV1Alpha1Type - ProxyStateTemplateType = types.ProxyStateTemplateV1Alpha1Type + ProxyStateTemplateV1AlphaType = types.ProxyStateTemplateV1Alpha1Type // Resource Types for the latest version. 
- ProxyConfigurationType = types.ProxyConfigurationType - UpstreamsType = types.UpstreamsType - UpstreamsConfigurationType = types.UpstreamsConfigurationType - ProxyStateTemplateConfigurationType = types.ProxyStateTemplateType - HTTPRouteType = types.HTTPRouteType - GRPCRouteType = types.GRPCRouteType - TCPRouteType = types.TCPRouteType - DestinationPolicyType = types.DestinationPolicyType - ComputedRoutesType = types.ComputedRoutesType + ProxyConfigurationType = types.ProxyConfigurationType + UpstreamsType = types.UpstreamsType + UpstreamsConfigurationType = types.UpstreamsConfigurationType + ProxyStateTemplateType = types.ProxyStateTemplateType + HTTPRouteType = types.HTTPRouteType + GRPCRouteType = types.GRPCRouteType + TCPRouteType = types.TCPRouteType + DestinationPolicyType = types.DestinationPolicyType + ComputedRoutesType = types.ComputedRoutesType // Controller statuses. // Sidecar-proxy controller. - SidecarProxyStatusKey = sidecarproxy.ControllerName - SidecarProxyStatusConditionMeshDestination = status.StatusConditionMeshDestination - SidecarProxyStatusReasonNonMeshDestination = status.StatusReasonNonMeshDestination - SidecarProxyStatusReasonMeshDestination = status.StatusReasonMeshDestination - SidecarProxyStatusConditionDestinationExists = status.StatusConditionDestinationExists - SidecarProxyStatusReasonDestinationServiceNotFound = status.StatusReasonDestinationServiceNotFound - SidecarProxyStatusReasonDestinationServiceFound = status.StatusReasonDestinationServiceFound + SidecarProxyStatusKey = sidecarproxy.ControllerName + SidecarProxyStatusConditionMeshDestination = status.StatusConditionDestinationAccepted + SidecarProxyStatusReasonNonMeshDestination = status.StatusReasonMeshProtocolNotFound + SidecarProxyStatusReasonMeshDestination = status.StatusReasonMeshProtocolFound + SidecarProxyStatusReasonDestinationServiceNotFound = status.StatusReasonDestinationServiceNotFound + SidecarProxyStatusReasonDestinationServiceFound = 
status.StatusReasonDestinationServiceFound + SidecarProxyStatusReasonMeshProtocolDestinationPort = status.StatusReasonMeshProtocolDestinationPort + SidecarProxyStatusReasonNonMeshProtocolDestinationPort = status.StatusReasonNonMeshProtocolDestinationPort ) // RegisterTypes adds all resource types within the "mesh" API group diff --git a/internal/mesh/internal/cache/sidecarproxycache/cache.go b/internal/mesh/internal/cache/sidecarproxycache/cache.go new file mode 100644 index 00000000000..edebcbf029c --- /dev/null +++ b/internal/mesh/internal/cache/sidecarproxycache/cache.go @@ -0,0 +1,185 @@ +package sidecarproxycache + +import ( + "sync" + + "github.com/hashicorp/consul/internal/catalog" + "github.com/hashicorp/consul/internal/mesh/internal/types" + "github.com/hashicorp/consul/internal/mesh/internal/types/intermediate" + "github.com/hashicorp/consul/internal/resource" + "github.com/hashicorp/consul/proto-public/pbresource" +) + +// Cache stores information needed for the sidecar-proxy controller to reconcile efficiently. +// This currently means storing a list of all destinations for easy look up +// as well as indices of source proxies where those destinations are referenced. +// +// It is the responsibility of the controller and its subcomponents (like mapper and data fetcher) +// to keep this cache up-to-date as we're observing new data. +type Cache struct { + lock sync.RWMutex + + // store is a map from destination service reference and port as a reference key + // to the object representing destination reference. + store map[ReferenceKeyWithPort]intermediate.CombinedDestinationRef + + // sourceProxiesIndex stores a map from a reference key of source proxy IDs + // to the keys in the store map. 
+ sourceProxiesIndex map[resource.ReferenceKey]storeKeys +} + +type storeKeys map[ReferenceKeyWithPort]struct{} + +func New() *Cache { + return &Cache{ + store: make(map[ReferenceKeyWithPort]intermediate.CombinedDestinationRef), + sourceProxiesIndex: make(map[resource.ReferenceKey]storeKeys), + } +} + +type ReferenceKeyWithPort struct { + resource.ReferenceKey + port string +} + +func KeyFromRefAndPort(ref *pbresource.Reference, port string) ReferenceKeyWithPort { + refKey := resource.NewReferenceKey(ref) + return ReferenceKeyWithPort{refKey, port} +} + +// WriteDestination adds destination reference to the cache. +func (c *Cache) WriteDestination(d intermediate.CombinedDestinationRef) { + // Check that reference is a catalog.Service type. + if !resource.EqualType(catalog.ServiceType, d.ServiceRef.Type) { + panic("ref must of type catalog.Service") + } + + // Also, check that explicit destination reference is a mesh.Upstreams type. + if d.ExplicitDestinationsID != nil && + !resource.EqualType(types.UpstreamsType, d.ExplicitDestinationsID.Type) { + panic("ExplicitDestinationsID must be of type mesh.Upstreams") + } + + c.lock.Lock() + defer c.lock.Unlock() + + c.deleteLocked(d.ServiceRef, d.Port) + c.addLocked(d) +} + +// DeleteDestination deletes a given destination reference and port from cache. +func (c *Cache) DeleteDestination(ref *pbresource.Reference, port string) { + // Check that reference is a catalog.Service type. + if !resource.EqualType(catalog.ServiceType, ref.Type) { + panic("ref must of type catalog.Service") + } + + c.lock.Lock() + defer c.lock.Unlock() + + c.deleteLocked(ref, port) +} + +func (c *Cache) addLocked(d intermediate.CombinedDestinationRef) { + key := KeyFromRefAndPort(d.ServiceRef, d.Port) + + c.store[key] = d + + // Update source proxies index. 
+ for proxyRef := range d.SourceProxies {
+ _, ok := c.sourceProxiesIndex[proxyRef]
+ if !ok {
+ c.sourceProxiesIndex[proxyRef] = make(storeKeys)
+ }
+
+ c.sourceProxiesIndex[proxyRef][key] = struct{}{}
+ }
+}
+
+func (c *Cache) deleteLocked(ref *pbresource.Reference, port string) {
+ key := KeyFromRefAndPort(ref, port)
+
+ // First get it from the store.
+ dest, ok := c.store[key]
+ if !ok {
+ // If it's not there, return as there's nothing for us to do.
+ return
+ }
+
+ // Update source proxies indices.
+ for proxyRef := range dest.SourceProxies {
+ // Delete our destination key from this source proxy.
+ delete(c.sourceProxiesIndex[proxyRef], key)
+ }
+
+ // Finally, delete this destination from the store.
+ delete(c.store, key)
+}
+
+// DeleteSourceProxy deletes the source proxy given by id from the cache.
+func (c *Cache) DeleteSourceProxy(id *pbresource.ID) {
+ // Check that id is the ProxyStateTemplate type.
+ if !resource.EqualType(types.ProxyStateTemplateType, id.Type) {
+ panic("id must of type mesh.ProxyStateTemplate")
+ }
+
+ c.lock.Lock()
+ defer c.lock.Unlock()
+
+ proxyIDKey := resource.NewReferenceKey(id)
+
+ // Get all destination keys.
+ destKeys := c.sourceProxiesIndex[proxyIDKey]
+
+ for destKey := range destKeys {
+ // Read destination.
+ dest, ok := c.store[destKey]
+ if !ok {
+ // If there's no destination with that key, skip it as there's nothing for us to do.
+ continue
+ }
+
+ // Delete the source proxy ID.
+ delete(dest.SourceProxies, proxyIDKey)
+ }
+
+ // Finally, delete the index for this proxy.
+ delete(c.sourceProxiesIndex, proxyIDKey)
+}
+
+// ReadDestination returns a destination reference for the given service reference and port.
+func (c *Cache) ReadDestination(ref *pbresource.Reference, port string) (intermediate.CombinedDestinationRef, bool) {
+ // Check that reference is a catalog.Service type.
+ if !resource.EqualType(catalog.ServiceType, ref.Type) { + panic("ref must of type catalog.Service") + } + + c.lock.RLock() + defer c.lock.RUnlock() + + key := KeyFromRefAndPort(ref, port) + + d, found := c.store[key] + return d, found +} + +// DestinationsBySourceProxy returns all destinations that are a referenced by the given source proxy id. +func (c *Cache) DestinationsBySourceProxy(id *pbresource.ID) []intermediate.CombinedDestinationRef { + // Check that id is the ProxyStateTemplate type. + if !resource.EqualType(types.ProxyStateTemplateType, id.Type) { + panic("id must of type mesh.ProxyStateTemplate") + } + + c.lock.RLock() + defer c.lock.RUnlock() + + var destinations []intermediate.CombinedDestinationRef + + proxyIDKey := resource.NewReferenceKey(id) + + for destKey := range c.sourceProxiesIndex[proxyIDKey] { + destinations = append(destinations, c.store[destKey]) + } + + return destinations +} diff --git a/internal/mesh/internal/controllers/sidecarproxy/cache/cache_test.go b/internal/mesh/internal/cache/sidecarproxycache/cache_test.go similarity index 69% rename from internal/mesh/internal/controllers/sidecarproxy/cache/cache_test.go rename to internal/mesh/internal/cache/sidecarproxycache/cache_test.go index 7f0dd4b437d..e2d634c98e2 100644 --- a/internal/mesh/internal/controllers/sidecarproxy/cache/cache_test.go +++ b/internal/mesh/internal/cache/sidecarproxycache/cache_test.go @@ -1,4 +1,4 @@ -package cache +package sidecarproxycache import ( "testing" @@ -6,6 +6,7 @@ import ( "github.com/hashicorp/consul/internal/catalog" "github.com/hashicorp/consul/internal/mesh/internal/types" "github.com/hashicorp/consul/internal/mesh/internal/types/intermediate" + "github.com/hashicorp/consul/internal/resource" "github.com/hashicorp/consul/internal/resource/resourcetest" "github.com/hashicorp/consul/proto-public/pbresource" "github.com/stretchr/testify/require" @@ -16,18 +17,20 @@ func TestWrite_Create(t *testing.T) { proxyID := 
resourcetest.Resource(types.ProxyStateTemplateType, "service-workload-abc").ID() destination := testDestination(proxyID) - cache.Write(destination) + cache.WriteDestination(destination) destKey := KeyFromRefAndPort(destination.ServiceRef, destination.Port) require.Equal(t, destination, cache.store[destKey]) actualSourceProxies := cache.sourceProxiesIndex - expectedSourceProxies := map[string]storeKeys{ - KeyFromID(proxyID): {destKey: struct{}{}}, + expectedSourceProxies := map[resource.ReferenceKey]storeKeys{ + resource.NewReferenceKey(proxyID): {destKey: struct{}{}}, } require.Equal(t, expectedSourceProxies, actualSourceProxies) // Check that we can read back the destination successfully. - require.Equal(t, destination, cache.ReadDestination(destination.ServiceRef, destination.Port)) + actualDestination, found := cache.ReadDestination(destination.ServiceRef, destination.Port) + require.True(t, found) + require.Equal(t, destination, actualDestination) } func TestWrite_Update(t *testing.T) { @@ -35,17 +38,17 @@ func TestWrite_Update(t *testing.T) { proxyID := resourcetest.Resource(types.ProxyStateTemplateType, "service-workload-abc").ID() destination1 := testDestination(proxyID) - cache.Write(destination1) + cache.WriteDestination(destination1) // Add another destination for the same proxy ID. destination2 := testDestination(proxyID) destination2.ServiceRef = resourcetest.Resource(catalog.ServiceType, "test-service-2").ReferenceNoSection() - cache.Write(destination2) + cache.WriteDestination(destination2) // Check that the source proxies are updated. 
actualSourceProxies := cache.sourceProxiesIndex - expectedSourceProxies := map[string]storeKeys{ - KeyFromID(proxyID): { + expectedSourceProxies := map[resource.ReferenceKey]storeKeys{ + resource.NewReferenceKey(proxyID): { KeyFromRefAndPort(destination1.ServiceRef, destination1.Port): struct{}{}, KeyFromRefAndPort(destination2.ServiceRef, destination2.Port): struct{}{}, }, @@ -56,15 +59,31 @@ func TestWrite_Update(t *testing.T) { anotherProxyID := resourcetest.Resource(types.ProxyStateTemplateType, "service-workload-def").ID() destination3 := testDestination(anotherProxyID) destination3.ServiceRef = resourcetest.Resource(catalog.ServiceType, "test-service-3").ReferenceNoSection() - cache.Write(destination3) + cache.WriteDestination(destination3) actualSourceProxies = cache.sourceProxiesIndex - expectedSourceProxies = map[string]storeKeys{ - KeyFromID(proxyID): { + expectedSourceProxies = map[resource.ReferenceKey]storeKeys{ + resource.NewReferenceKey(proxyID): { KeyFromRefAndPort(destination1.ServiceRef, destination1.Port): struct{}{}, KeyFromRefAndPort(destination2.ServiceRef, destination2.Port): struct{}{}, }, - KeyFromID(anotherProxyID): { + resource.NewReferenceKey(anotherProxyID): { + KeyFromRefAndPort(destination3.ServiceRef, destination3.Port): struct{}{}, + }, + } + require.Equal(t, expectedSourceProxies, actualSourceProxies) + + // Overwrite the proxy id completely. 
+ destination1.SourceProxies = map[resource.ReferenceKey]struct{}{resource.NewReferenceKey(anotherProxyID): {}} + cache.WriteDestination(destination1) + + actualSourceProxies = cache.sourceProxiesIndex + expectedSourceProxies = map[resource.ReferenceKey]storeKeys{ + resource.NewReferenceKey(proxyID): { + KeyFromRefAndPort(destination2.ServiceRef, destination2.Port): struct{}{}, + }, + resource.NewReferenceKey(anotherProxyID): { + KeyFromRefAndPort(destination1.ServiceRef, destination1.Port): struct{}{}, KeyFromRefAndPort(destination3.ServiceRef, destination3.Port): struct{}{}, }, } @@ -76,28 +95,28 @@ func TestWrite_Delete(t *testing.T) { proxyID := resourcetest.Resource(types.ProxyStateTemplateType, "service-workload-abc").ID() destination1 := testDestination(proxyID) - cache.Write(destination1) + cache.WriteDestination(destination1) // Add another destination for the same proxy ID. destination2 := testDestination(proxyID) destination2.ServiceRef = resourcetest.Resource(catalog.ServiceType, "test-service-2").ReferenceNoSection() - cache.Write(destination2) + cache.WriteDestination(destination2) - cache.Delete(destination1.ServiceRef, destination1.Port) + cache.DeleteDestination(destination1.ServiceRef, destination1.Port) require.NotContains(t, cache.store, KeyFromRefAndPort(destination1.ServiceRef, destination1.Port)) // Check that the source proxies are updated. actualSourceProxies := cache.sourceProxiesIndex - expectedSourceProxies := map[string]storeKeys{ - KeyFromID(proxyID): { + expectedSourceProxies := map[resource.ReferenceKey]storeKeys{ + resource.NewReferenceKey(proxyID): { KeyFromRefAndPort(destination2.ServiceRef, destination2.Port): struct{}{}, }, } require.Equal(t, expectedSourceProxies, actualSourceProxies) // Try to delete non-existing destination and check that nothing has changed.. 
- cache.Delete( + cache.DeleteDestination( resourcetest.Resource(catalog.ServiceType, "does-not-exist").ReferenceNoSection(), "doesn't-matter") @@ -110,17 +129,17 @@ func TestDeleteSourceProxy(t *testing.T) { proxyID := resourcetest.Resource(types.ProxyStateTemplateType, "service-workload-abc").ID() destination1 := testDestination(proxyID) - cache.Write(destination1) + cache.WriteDestination(destination1) // Add another destination for the same proxy ID. destination2 := testDestination(proxyID) destination2.ServiceRef = resourcetest.Resource(catalog.ServiceType, "test-service-2").ReferenceNoSection() - cache.Write(destination2) + cache.WriteDestination(destination2) cache.DeleteSourceProxy(proxyID) // Check that source proxy index is gone. - proxyKey := KeyFromID(proxyID) + proxyKey := resource.NewReferenceKey(proxyID) require.NotContains(t, cache.sourceProxiesIndex, proxyKey) // Check that the destinations no longer have this proxy as the source. @@ -128,7 +147,8 @@ func TestDeleteSourceProxy(t *testing.T) { require.NotContains(t, destination2.SourceProxies, proxyKey) // Try to add a non-existent key to source proxy index - cache.sourceProxiesIndex[proxyKey] = map[string]struct{}{"doesn't-exist": {}} + cache.sourceProxiesIndex[proxyKey] = map[ReferenceKeyWithPort]struct{}{ + {port: "doesn't-matter"}: {}} cache.DeleteSourceProxy(proxyID) // Check that source proxy index is gone. @@ -144,25 +164,25 @@ func TestDestinationsBySourceProxy(t *testing.T) { proxyID := resourcetest.Resource(types.ProxyStateTemplateType, "service-workload-abc").ID() destination1 := testDestination(proxyID) - cache.Write(destination1) + cache.WriteDestination(destination1) // Add another destination for the same proxy ID. 
destination2 := testDestination(proxyID) destination2.ServiceRef = resourcetest.Resource(catalog.ServiceType, "test-service-2").ReferenceNoSection() - cache.Write(destination2) + cache.WriteDestination(destination2) actualDestinations := cache.DestinationsBySourceProxy(proxyID) - expectedDestinations := []*intermediate.CombinedDestinationRef{destination1, destination2} + expectedDestinations := []intermediate.CombinedDestinationRef{destination1, destination2} require.ElementsMatch(t, expectedDestinations, actualDestinations) } -func testDestination(proxyID *pbresource.ID) *intermediate.CombinedDestinationRef { - return &intermediate.CombinedDestinationRef{ +func testDestination(proxyID *pbresource.ID) intermediate.CombinedDestinationRef { + return intermediate.CombinedDestinationRef{ ServiceRef: resourcetest.Resource(catalog.ServiceType, "test-service").ReferenceNoSection(), Port: "tcp", ExplicitDestinationsID: resourcetest.Resource(types.UpstreamsType, "test-servicedestinations").ID(), - SourceProxies: map[string]*pbresource.ID{ - KeyFromID(proxyID): proxyID, + SourceProxies: map[resource.ReferenceKey]struct{}{ + resource.NewReferenceKey(proxyID): {}, }, } } diff --git a/internal/mesh/internal/controllers/register.go b/internal/mesh/internal/controllers/register.go index eb5131bd506..12fa9c40de9 100644 --- a/internal/mesh/internal/controllers/register.go +++ b/internal/mesh/internal/controllers/register.go @@ -9,12 +9,14 @@ import ( "github.com/hashicorp/consul/internal/catalog" "github.com/hashicorp/consul/internal/controller" + "github.com/hashicorp/consul/internal/mesh/internal/cache/sidecarproxycache" "github.com/hashicorp/consul/internal/mesh/internal/controllers/sidecarproxy" "github.com/hashicorp/consul/internal/mesh/internal/controllers/sidecarproxy/cache" "github.com/hashicorp/consul/internal/mesh/internal/controllers/sidecarproxy/mapper" "github.com/hashicorp/consul/internal/mesh/internal/controllers/xds" 
"github.com/hashicorp/consul/internal/mesh/internal/types" "github.com/hashicorp/consul/internal/resource/mappers/bimapper" + "github.com/hashicorp/consul/internal/mesh/internal/mappers/sidecarproxymapper" ) type Dependencies struct { @@ -24,8 +26,8 @@ type Dependencies struct { } func Register(mgr *controller.Manager, deps Dependencies) { - c := cache.New() - m := mapper.New(c) + c := sidecarproxycache.New() + m := sidecarproxymapper.New(c) mapper := bimapper.New(types.ProxyStateTemplateType, catalog.ServiceEndpointsType) mgr.Register(xds.Controller(mapper, deps.ProxyUpdater, deps.TrustBundleFetcher)) mgr.Register(sidecarproxy.Controller(c, m, deps.TrustDomainFetcher)) diff --git a/internal/mesh/internal/controllers/sidecarproxy/builder/builder.go b/internal/mesh/internal/controllers/sidecarproxy/builder/builder.go index d675dbec7d6..257b94d5203 100644 --- a/internal/mesh/internal/controllers/sidecarproxy/builder/builder.go +++ b/internal/mesh/internal/controllers/sidecarproxy/builder/builder.go @@ -34,9 +34,20 @@ func (b *Builder) Build() *pbmesh.ProxyStateTemplate { return b.proxyStateTemplate } -func (b *Builder) addListener(l *pbproxystate.Listener) *Builder { - // Add listener to proxy state template - b.proxyStateTemplate.ProxyState.Listeners = append(b.proxyStateTemplate.ProxyState.Listeners, l) +type ListenerBuilder struct { + listener *pbproxystate.Listener + builder *Builder +} + +func (b *Builder) NewListenerBuilder(l *pbproxystate.Listener) *ListenerBuilder { + return &ListenerBuilder{ + listener: l, + builder: b, + } +} + +func (l *ListenerBuilder) buildListener() *Builder { + l.builder.proxyStateTemplate.ProxyState.Listeners = append(l.builder.proxyStateTemplate.ProxyState.Listeners, l.listener) - return b + return l.builder } diff --git a/internal/mesh/internal/controllers/sidecarproxy/builder/builder_test.go b/internal/mesh/internal/controllers/sidecarproxy/builder/builder_test.go index 3d36c446ba1..5c806424b92 100644 --- 
a/internal/mesh/internal/controllers/sidecarproxy/builder/builder_test.go +++ b/internal/mesh/internal/controllers/sidecarproxy/builder/builder_test.go @@ -23,7 +23,7 @@ func TestMain(m *testing.M) { func protoToJSON(t *testing.T, pb proto.Message) string { t.Helper() m := protojson.MarshalOptions{ - Multiline: true, + Indent: " ", } gotJSON, err := m.Marshal(pb) require.NoError(t, err) diff --git a/internal/mesh/internal/controllers/sidecarproxy/builder/destination_builder.go b/internal/mesh/internal/controllers/sidecarproxy/builder/destination_builder.go index 0b46a48c4e6..8776fae8fb4 100644 --- a/internal/mesh/internal/controllers/sidecarproxy/builder/destination_builder.go +++ b/internal/mesh/internal/controllers/sidecarproxy/builder/destination_builder.go @@ -19,7 +19,7 @@ func (b *Builder) BuildDestinations(destinations []*intermediate.Destination) *B return b } -func (b *Builder) buildExplicitDestination(destination *intermediate.Destination) *Builder { +func (b *Builder) buildExplicitDestination(destination *intermediate.Destination) { clusterName := DestinationClusterName(destination.Explicit.DestinationRef, destination.Explicit.Datacenter, b.trustDomain) statPrefix := DestinationStatPrefix(destination.Explicit.DestinationRef, destination.Explicit.Datacenter) @@ -35,17 +35,16 @@ func (b *Builder) buildExplicitDestination(destination *intermediate.Destination meshPortName := findMeshPort(destination.ServiceEndpoints.Endpoints.Endpoints[0].Ports) if destPort != nil { - return b.addOutboundDestinationListener(destination.Explicit). + b.addOutboundDestinationListener(destination.Explicit). addRouter(clusterName, statPrefix, destPort.Protocol). + buildListener(). addCluster(clusterName, destination.Identities). 
addEndpointsRef(clusterName, destination.ServiceEndpoints.Resource.Id, meshPortName) } } - - return b } -func (b *Builder) addOutboundDestinationListener(explicit *pbmesh.Upstream) *Builder { +func (b *Builder) addOutboundDestinationListener(explicit *pbmesh.Upstream) *ListenerBuilder { listener := &pbproxystate.Listener{ Direction: pbproxystate.Direction_DIRECTION_OUTBOUND, } @@ -72,12 +71,10 @@ func (b *Builder) addOutboundDestinationListener(explicit *pbmesh.Upstream) *Bui listener.Name = DestinationListenerName(explicit.DestinationRef.Name, explicit.DestinationPort, destinationAddr.Unix.Path, 0) } - return b.addListener(listener) + return b.NewListenerBuilder(listener) } -func (b *Builder) addRouter(clusterName, statPrefix string, protocol pbcatalog.Protocol) *Builder { - listener := b.getLastBuiltListener() - +func (l *ListenerBuilder) addRouter(clusterName, statPrefix string, protocol pbcatalog.Protocol) *ListenerBuilder { // For explicit destinations, we have no filter chain match, and filters are based on port protocol. 
switch protocol { case pbcatalog.Protocol_PROTOCOL_TCP: @@ -89,9 +86,9 @@ func (b *Builder) addRouter(clusterName, statPrefix string, protocol pbcatalog.P }, }, } - listener.Routers = append(listener.Routers, router) + l.listener.Routers = append(l.listener.Routers, router) } - return b + return l } func (b *Builder) addCluster(clusterName string, destinationIdentities []*pbresource.Reference) *Builder { diff --git a/internal/mesh/internal/controllers/sidecarproxy/builder/destination_builder_test.go b/internal/mesh/internal/controllers/sidecarproxy/builder/destination_builder_test.go index 86af902285d..cbe1a1a4379 100644 --- a/internal/mesh/internal/controllers/sidecarproxy/builder/destination_builder_test.go +++ b/internal/mesh/internal/controllers/sidecarproxy/builder/destination_builder_test.go @@ -101,7 +101,6 @@ func TestBuildExplicitDestinations(t *testing.T) { actual := protoToJSON(t, proxyTmpl) expected := goldenValue(t, name, actual, *update) - require.Equal(t, expected, actual) + require.JSONEq(t, expected, actual) } - } diff --git a/internal/mesh/internal/controllers/sidecarproxy/builder/local_app.go b/internal/mesh/internal/controllers/sidecarproxy/builder/local_app.go index 323863475b4..72bc54596df 100644 --- a/internal/mesh/internal/controllers/sidecarproxy/builder/local_app.go +++ b/internal/mesh/internal/controllers/sidecarproxy/builder/local_app.go @@ -9,45 +9,58 @@ import ( ) func (b *Builder) BuildLocalApp(workload *pbcatalog.Workload) *Builder { - return b.addInboundListener(xdscommon.PublicListenerName, workload). - addInboundRouters(workload). - addInboundTLS() -} + // Go through workload ports and add the first non-mesh port we see. + // Note that the order of ports is non-deterministic here but the xds generation + // code should make sure to send it in the same order to Envoy to avoid unnecessary + // updates. + // todo (ishustava): Note we will need to support multiple ports in the future. 
+ for portName, port := range workload.Ports { + clusterName := fmt.Sprintf("%s:%s", xdscommon.LocalAppClusterName, portName) + + if port.Protocol != pbcatalog.Protocol_PROTOCOL_MESH { + b.addInboundListener(xdscommon.PublicListenerName, workload). + addInboundRouter(clusterName, port). + addInboundTLS(). + buildListener(). + addLocalAppCluster(clusterName). + addLocalAppStaticEndpoints(clusterName, port) + break + } + } -func (b *Builder) getLastBuiltListener() *pbproxystate.Listener { - lastBuiltIndex := len(b.proxyStateTemplate.ProxyState.Listeners) - 1 - return b.proxyStateTemplate.ProxyState.Listeners[lastBuiltIndex] + return b } -func (b *Builder) addInboundListener(name string, workload *pbcatalog.Workload) *Builder { +func (b *Builder) addInboundListener(name string, workload *pbcatalog.Workload) *ListenerBuilder { listener := &pbproxystate.Listener{ Name: name, Direction: pbproxystate.Direction_DIRECTION_INBOUND, } - // We will take listener bind port from the workload for now. + // We will take listener bind port from the workload. // Find mesh port. - var meshPort string - for portName, port := range workload.Ports { - if port.Protocol == pbcatalog.Protocol_PROTOCOL_MESH { - meshPort = portName - break + meshPort, ok := workload.GetMeshPortName() + if !ok { + // At this point, we should only get workloads that have mesh ports. + return &ListenerBuilder{ + builder: b, } } // Check if the workload has a specific address for the mesh port. - var meshAddress string - for _, address := range workload.Addresses { - for _, port := range address.Ports { - if port == meshPort { - meshAddress = address.Host - } + meshAddresses := workload.GetNonExternalAddressesForPort(meshPort) + + // If there are no mesh addresses, return. This should be impossible. + if len(meshAddresses) == 0 { + return &ListenerBuilder{ + builder: b, } } - // Otherwise, assume the first address in the addresses list. 
- if meshAddress == "" { - // It is safe to assume that there's at least one address because we validate it when creating the workload. - meshAddress = workload.Addresses[0].Host + + // If there are more than one mesh address, use the first one in the list. + var meshAddress string + if len(meshAddresses) > 0 { + meshAddress = meshAddresses[0].Host } listener.BindAddress = &pbproxystate.Listener_HostPort{ @@ -57,85 +70,90 @@ func (b *Builder) addInboundListener(name string, workload *pbcatalog.Workload) }, } - return b.addListener(listener) + return b.NewListenerBuilder(listener) } -func (b *Builder) addInboundRouters(workload *pbcatalog.Workload) *Builder { - listener := b.getLastBuiltListener() +func (l *ListenerBuilder) addInboundRouter(clusterName string, port *pbcatalog.WorkloadPort) *ListenerBuilder { + if l.listener == nil { + return l + } - // Go through workload ports and add the first non-mesh port we see. - // Note that the order of ports is non-deterministic here but the xds generation - // code should make sure to send it in the same order to Envoy to avoid unnecessary - // updates. - // todo (ishustava): Note we will need to support multiple ports in the future. - for portName, port := range workload.Ports { - clusterName := fmt.Sprintf("%s:%s", xdscommon.LocalAppClusterName, portName) - if port.Protocol == pbcatalog.Protocol_PROTOCOL_TCP { - r := &pbproxystate.Router{ - Destination: &pbproxystate.Router_L4{ - L4: &pbproxystate.L4Destination{ - Name: clusterName, - StatPrefix: listener.Name, - }, - }, - } - listener.Routers = append(listener.Routers, r) - - // Make cluster for this router destination. 
- b.proxyStateTemplate.ProxyState.Clusters[clusterName] = &pbproxystate.Cluster{ - Group: &pbproxystate.Cluster_EndpointGroup{ - EndpointGroup: &pbproxystate.EndpointGroup{ - Group: &pbproxystate.EndpointGroup_Static{ - Static: &pbproxystate.StaticEndpointGroup{}, - }, - }, + if port.Protocol == pbcatalog.Protocol_PROTOCOL_TCP { + r := &pbproxystate.Router{ + Destination: &pbproxystate.Router_L4{ + L4: &pbproxystate.L4Destination{ + Name: clusterName, + StatPrefix: l.listener.Name, }, - } - - // Finally, add static endpoints. We're adding it statically as opposed to creating an endpoint ref - // because this endpoint is less likely to change as we're not tracking the health. - endpoint := &pbproxystate.Endpoint{ - Address: &pbproxystate.Endpoint_HostPort{ - HostPort: &pbproxystate.HostPortAddress{ - Host: "127.0.0.1", - Port: port.Port, - }, - }, - } - b.proxyStateTemplate.ProxyState.Endpoints[clusterName] = &pbproxystate.Endpoints{ - Endpoints: []*pbproxystate.Endpoint{endpoint}, - } - break + }, } + l.listener.Routers = append(l.listener.Routers, r) + } + return l +} + +func (b *Builder) addLocalAppCluster(clusterName string) *Builder { + // Make cluster for this router destination. + b.proxyStateTemplate.ProxyState.Clusters[clusterName] = &pbproxystate.Cluster{ + Group: &pbproxystate.Cluster_EndpointGroup{ + EndpointGroup: &pbproxystate.EndpointGroup{ + Group: &pbproxystate.EndpointGroup_Static{ + Static: &pbproxystate.StaticEndpointGroup{}, + }, + }, + }, } return b } -func (b *Builder) addInboundTLS() *Builder { - listener := b.getLastBuiltListener() +func (b *Builder) addLocalAppStaticEndpoints(clusterName string, port *pbcatalog.WorkloadPort) *Builder { + // We're adding endpoints statically as opposed to creating an endpoint ref + // because this endpoint is less likely to change as we're not tracking the health. 
+ endpoint := &pbproxystate.Endpoint{ + Address: &pbproxystate.Endpoint_HostPort{ + HostPort: &pbproxystate.HostPortAddress{ + Host: "127.0.0.1", + Port: port.Port, + }, + }, + } + b.proxyStateTemplate.ProxyState.Endpoints[clusterName] = &pbproxystate.Endpoints{ + Endpoints: []*pbproxystate.Endpoint{endpoint}, + } + + return b +} + +func (l *ListenerBuilder) addInboundTLS() *ListenerBuilder { + if l.listener == nil { + return nil + } + // For inbound TLS, we want to use this proxy's identity. - workloadIdentity := b.proxyStateTemplate.ProxyState.Identity.Name + workloadIdentity := l.builder.proxyStateTemplate.ProxyState.Identity.Name inboundTLS := &pbproxystate.TransportSocket{ ConnectionTls: &pbproxystate.TransportSocket_InboundMesh{ InboundMesh: &pbproxystate.InboundMeshMTLS{ - IdentityKey: workloadIdentity, - ValidationContext: &pbproxystate.MeshInboundValidationContext{TrustBundlePeerNameKeys: []string{b.id.Tenancy.PeerName}}, + IdentityKey: workloadIdentity, + ValidationContext: &pbproxystate.MeshInboundValidationContext{ + TrustBundlePeerNameKeys: []string{l.builder.id.Tenancy.PeerName}, + }, }, }, } - b.proxyStateTemplate.RequiredLeafCertificates[workloadIdentity] = &pbproxystate.LeafCertificateRef{ + l.builder.proxyStateTemplate.RequiredLeafCertificates[workloadIdentity] = &pbproxystate.LeafCertificateRef{ Name: workloadIdentity, - Namespace: b.id.Tenancy.Namespace, - Partition: b.id.Tenancy.Partition, + Namespace: l.builder.id.Tenancy.Namespace, + Partition: l.builder.id.Tenancy.Partition, } - b.proxyStateTemplate.RequiredTrustBundles[b.id.Tenancy.PeerName] = &pbproxystate.TrustBundleRef{ - Peer: b.id.Tenancy.PeerName, + l.builder.proxyStateTemplate.RequiredTrustBundles[l.builder.id.Tenancy.PeerName] = &pbproxystate.TrustBundleRef{ + Peer: l.builder.id.Tenancy.PeerName, } - for i := range listener.Routers { - listener.Routers[i].InboundTls = inboundTLS + for i := range l.listener.Routers { + l.listener.Routers[i].InboundTls = inboundTLS } - return b + 
return l } diff --git a/internal/mesh/internal/controllers/sidecarproxy/builder/local_app_test.go b/internal/mesh/internal/controllers/sidecarproxy/builder/local_app_test.go index 5a71e282a66..cbd76954ffa 100644 --- a/internal/mesh/internal/controllers/sidecarproxy/builder/local_app_test.go +++ b/internal/mesh/internal/controllers/sidecarproxy/builder/local_app_test.go @@ -70,7 +70,7 @@ func TestBuildLocalApp(t *testing.T) { actual := protoToJSON(t, proxyTmpl) expected := goldenValue(t, name, actual, *update) - require.Equal(t, expected, actual) + require.JSONEq(t, expected, actual) }) } } diff --git a/internal/mesh/internal/controllers/sidecarproxy/builder/testdata/l4-multi-destination.golden b/internal/mesh/internal/controllers/sidecarproxy/builder/testdata/l4-multi-destination.golden index 7298777d870..36310c806f0 100644 --- a/internal/mesh/internal/controllers/sidecarproxy/builder/testdata/l4-multi-destination.golden +++ b/internal/mesh/internal/controllers/sidecarproxy/builder/testdata/l4-multi-destination.golden @@ -1,83 +1,83 @@ { - "proxyState": { - "identity": { - "tenancy": { - "partition": "default", - "namespace": "default", - "peerName": "local" + "proxyState": { + "identity": { + "tenancy": { + "partition": "default", + "namespace": "default", + "peerName": "local" }, - "name": "test-identity" + "name": "test-identity" }, - "listeners": [ + "listeners": [ { - "name": "api-1:tcp:1.1.1.1:1234", - "direction": "DIRECTION_OUTBOUND", - "hostPort": { - "host": "1.1.1.1", - "port": 1234 + "name": "api-1:tcp:1.1.1.1:1234", + "direction": "DIRECTION_OUTBOUND", + "hostPort": { + "host": "1.1.1.1", + "port": 1234 }, - "routers": [ + "routers": [ { - "l4": { - "name": "api-1.default.dc1.internal.foo.consul", - "statPrefix": "upstream.api-1.default.default.dc1" + "l4": { + "name": "api-1.default.dc1.internal.foo.consul", + "statPrefix": "upstream.api-1.default.default.dc1" } } ] }, { - "name": "api-2:tcp:/path/to/socket", - "direction": "DIRECTION_OUTBOUND", - 
"unixSocket": { - "path": "/path/to/socket", - "mode": "0666" + "name": "api-2:tcp:/path/to/socket", + "direction": "DIRECTION_OUTBOUND", + "unixSocket": { + "path": "/path/to/socket", + "mode": "0666" }, - "routers": [ + "routers": [ { - "l4": { - "name": "api-2.default.dc1.internal.foo.consul", - "statPrefix": "upstream.api-2.default.default.dc1" + "l4": { + "name": "api-2.default.dc1.internal.foo.consul", + "statPrefix": "upstream.api-2.default.default.dc1" } } ] } ], - "clusters": { - "api-1.default.dc1.internal.foo.consul": { - "endpointGroup": { - "dynamic": { - "config": { - "disablePanicThreshold": true + "clusters": { + "api-1.default.dc1.internal.foo.consul": { + "endpointGroup": { + "dynamic": { + "config": { + "disablePanicThreshold": true }, - "outboundTls": { - "outboundMesh": { - "identityKey": "test-identity", - "validationContext": { - "spiffeIds": [ + "outboundTls": { + "outboundMesh": { + "identityKey": "test-identity", + "validationContext": { + "spiffeIds": [ "spiffe://foo.consul/ap/default/ns/default/identity/api1-identity" ] }, - "sni": "api-1.default.dc1.internal.foo.consul" + "sni": "api-1.default.dc1.internal.foo.consul" } } } } }, - "api-2.default.dc1.internal.foo.consul": { - "endpointGroup": { - "dynamic": { - "config": { - "disablePanicThreshold": true + "api-2.default.dc1.internal.foo.consul": { + "endpointGroup": { + "dynamic": { + "config": { + "disablePanicThreshold": true }, - "outboundTls": { - "outboundMesh": { - "identityKey": "test-identity", - "validationContext": { - "spiffeIds": [ + "outboundTls": { + "outboundMesh": { + "identityKey": "test-identity", + "validationContext": { + "spiffeIds": [ "spiffe://foo.consul/ap/default/ns/default/identity/api2-identity" ] }, - "sni": "api-2.default.dc1.internal.foo.consul" + "sni": "api-2.default.dc1.internal.foo.consul" } } } @@ -85,38 +85,38 @@ } } }, - "requiredEndpoints": { - "api-1.default.dc1.internal.foo.consul": { - "id": { - "name": "api-1", - "type": { - "group": "catalog", 
- "groupVersion": "v1alpha1", - "kind": "ServiceEndpoints" + "requiredEndpoints": { + "api-1.default.dc1.internal.foo.consul": { + "id": { + "name": "api-1", + "type": { + "group": "catalog", + "groupVersion": "v1alpha1", + "kind": "ServiceEndpoints" }, - "tenancy": { - "partition": "default", - "namespace": "default", - "peerName": "local" + "tenancy": { + "partition": "default", + "namespace": "default", + "peerName": "local" } }, - "port": "mesh" + "port": "mesh" }, - "api-2.default.dc1.internal.foo.consul": { - "id": { - "name": "api-2", - "type": { - "group": "catalog", - "groupVersion": "v1alpha1", - "kind": "ServiceEndpoints" + "api-2.default.dc1.internal.foo.consul": { + "id": { + "name": "api-2", + "type": { + "group": "catalog", + "groupVersion": "v1alpha1", + "kind": "ServiceEndpoints" }, - "tenancy": { - "partition": "default", - "namespace": "default", - "peerName": "local" + "tenancy": { + "partition": "default", + "namespace": "default", + "peerName": "local" } }, - "port": "mesh" + "port": "mesh" } } } \ No newline at end of file diff --git a/internal/mesh/internal/controllers/sidecarproxy/builder/testdata/l4-multiple-workload-addresses-with-specific-ports.golden b/internal/mesh/internal/controllers/sidecarproxy/builder/testdata/l4-multiple-workload-addresses-with-specific-ports.golden index ff3d8ef0c07..834672743cc 100644 --- a/internal/mesh/internal/controllers/sidecarproxy/builder/testdata/l4-multiple-workload-addresses-with-specific-ports.golden +++ b/internal/mesh/internal/controllers/sidecarproxy/builder/testdata/l4-multiple-workload-addresses-with-specific-ports.golden @@ -1,32 +1,32 @@ { - "proxyState": { - "identity": { - "tenancy": { - "partition": "default", - "namespace": "default", - "peerName": "local" + "proxyState": { + "identity": { + "tenancy": { + "partition": "default", + "namespace": "default", + "peerName": "local" }, - "name": "test-identity" + "name": "test-identity" }, - "listeners": [ + "listeners": [ { - "name": 
"public_listener", - "direction": "DIRECTION_INBOUND", - "hostPort": { - "host": "10.0.0.2", - "port": 20000 + "name": "public_listener", + "direction": "DIRECTION_INBOUND", + "hostPort": { + "host": "10.0.0.2", + "port": 20000 }, - "routers": [ + "routers": [ { - "l4": { - "name": "local_app:port1", - "statPrefix": "public_listener" + "l4": { + "name": "local_app:port1", + "statPrefix": "public_listener" }, - "inboundTls": { - "inboundMesh": { - "identityKey": "test-identity", - "validationContext": { - "trustBundlePeerNameKeys": [ + "inboundTls": { + "inboundMesh": { + "identityKey": "test-identity", + "validationContext": { + "trustBundlePeerNameKeys": [ "local" ] } @@ -36,36 +36,36 @@ ] } ], - "clusters": { - "local_app:port1": { - "endpointGroup": { - "static": {} + "clusters": { + "local_app:port1": { + "endpointGroup": { + "static": {} } } }, - "endpoints": { - "local_app:port1": { - "endpoints": [ + "endpoints": { + "local_app:port1": { + "endpoints": [ { - "hostPort": { - "host": "127.0.0.1", - "port": 8080 + "hostPort": { + "host": "127.0.0.1", + "port": 8080 } } ] } } }, - "requiredLeafCertificates": { - "test-identity": { - "name": "test-identity", - "namespace": "default", - "partition": "default" + "requiredLeafCertificates": { + "test-identity": { + "name": "test-identity", + "namespace": "default", + "partition": "default" } }, - "requiredTrustBundles": { - "local": { - "peer": "local" + "requiredTrustBundles": { + "local": { + "peer": "local" } } } \ No newline at end of file diff --git a/internal/mesh/internal/controllers/sidecarproxy/builder/testdata/l4-multiple-workload-addresses-without-ports.golden b/internal/mesh/internal/controllers/sidecarproxy/builder/testdata/l4-multiple-workload-addresses-without-ports.golden index 9c22e94d597..39c532d7376 100644 --- a/internal/mesh/internal/controllers/sidecarproxy/builder/testdata/l4-multiple-workload-addresses-without-ports.golden +++ 
b/internal/mesh/internal/controllers/sidecarproxy/builder/testdata/l4-multiple-workload-addresses-without-ports.golden @@ -1,32 +1,32 @@ { - "proxyState": { - "identity": { - "tenancy": { - "partition": "default", - "namespace": "default", - "peerName": "local" + "proxyState": { + "identity": { + "tenancy": { + "partition": "default", + "namespace": "default", + "peerName": "local" }, - "name": "test-identity" + "name": "test-identity" }, - "listeners": [ + "listeners": [ { - "name": "public_listener", - "direction": "DIRECTION_INBOUND", - "hostPort": { - "host": "10.0.0.1", - "port": 20000 + "name": "public_listener", + "direction": "DIRECTION_INBOUND", + "hostPort": { + "host": "10.0.0.1", + "port": 20000 }, - "routers": [ + "routers": [ { - "l4": { - "name": "local_app:port1", - "statPrefix": "public_listener" + "l4": { + "name": "local_app:port1", + "statPrefix": "public_listener" }, - "inboundTls": { - "inboundMesh": { - "identityKey": "test-identity", - "validationContext": { - "trustBundlePeerNameKeys": [ + "inboundTls": { + "inboundMesh": { + "identityKey": "test-identity", + "validationContext": { + "trustBundlePeerNameKeys": [ "local" ] } @@ -36,36 +36,36 @@ ] } ], - "clusters": { - "local_app:port1": { - "endpointGroup": { - "static": {} + "clusters": { + "local_app:port1": { + "endpointGroup": { + "static": {} } } }, - "endpoints": { - "local_app:port1": { - "endpoints": [ + "endpoints": { + "local_app:port1": { + "endpoints": [ { - "hostPort": { - "host": "127.0.0.1", - "port": 8080 + "hostPort": { + "host": "127.0.0.1", + "port": 8080 } } ] } } }, - "requiredLeafCertificates": { - "test-identity": { - "name": "test-identity", - "namespace": "default", - "partition": "default" + "requiredLeafCertificates": { + "test-identity": { + "name": "test-identity", + "namespace": "default", + "partition": "default" } }, - "requiredTrustBundles": { - "local": { - "peer": "local" + "requiredTrustBundles": { + "local": { + "peer": "local" } } } \ No newline at end 
of file diff --git a/internal/mesh/internal/controllers/sidecarproxy/builder/testdata/l4-single-destination-ip-port-bind-address.golden b/internal/mesh/internal/controllers/sidecarproxy/builder/testdata/l4-single-destination-ip-port-bind-address.golden index b0e113ec93a..e1865f7fe2c 100644 --- a/internal/mesh/internal/controllers/sidecarproxy/builder/testdata/l4-single-destination-ip-port-bind-address.golden +++ b/internal/mesh/internal/controllers/sidecarproxy/builder/testdata/l4-single-destination-ip-port-bind-address.golden @@ -1,47 +1,47 @@ { - "proxyState": { - "identity": { - "tenancy": { - "partition": "default", - "namespace": "default", - "peerName": "local" + "proxyState": { + "identity": { + "tenancy": { + "partition": "default", + "namespace": "default", + "peerName": "local" }, - "name": "test-identity" + "name": "test-identity" }, - "listeners": [ + "listeners": [ { - "name": "api-1:tcp:1.1.1.1:1234", - "direction": "DIRECTION_OUTBOUND", - "hostPort": { - "host": "1.1.1.1", - "port": 1234 + "name": "api-1:tcp:1.1.1.1:1234", + "direction": "DIRECTION_OUTBOUND", + "hostPort": { + "host": "1.1.1.1", + "port": 1234 }, - "routers": [ + "routers": [ { - "l4": { - "name": "api-1.default.dc1.internal.foo.consul", - "statPrefix": "upstream.api-1.default.default.dc1" + "l4": { + "name": "api-1.default.dc1.internal.foo.consul", + "statPrefix": "upstream.api-1.default.default.dc1" } } ] } ], - "clusters": { - "api-1.default.dc1.internal.foo.consul": { - "endpointGroup": { - "dynamic": { - "config": { - "disablePanicThreshold": true + "clusters": { + "api-1.default.dc1.internal.foo.consul": { + "endpointGroup": { + "dynamic": { + "config": { + "disablePanicThreshold": true }, - "outboundTls": { - "outboundMesh": { - "identityKey": "test-identity", - "validationContext": { - "spiffeIds": [ + "outboundTls": { + "outboundMesh": { + "identityKey": "test-identity", + "validationContext": { + "spiffeIds": [ 
"spiffe://foo.consul/ap/default/ns/default/identity/api1-identity" ] }, - "sni": "api-1.default.dc1.internal.foo.consul" + "sni": "api-1.default.dc1.internal.foo.consul" } } } @@ -49,22 +49,22 @@ } } }, - "requiredEndpoints": { - "api-1.default.dc1.internal.foo.consul": { - "id": { - "name": "api-1", - "type": { - "group": "catalog", - "groupVersion": "v1alpha1", - "kind": "ServiceEndpoints" + "requiredEndpoints": { + "api-1.default.dc1.internal.foo.consul": { + "id": { + "name": "api-1", + "type": { + "group": "catalog", + "groupVersion": "v1alpha1", + "kind": "ServiceEndpoints" }, - "tenancy": { - "partition": "default", - "namespace": "default", - "peerName": "local" + "tenancy": { + "partition": "default", + "namespace": "default", + "peerName": "local" } }, - "port": "mesh" + "port": "mesh" } } } \ No newline at end of file diff --git a/internal/mesh/internal/controllers/sidecarproxy/builder/testdata/l4-single-destination-unix-socket-bind-address.golden b/internal/mesh/internal/controllers/sidecarproxy/builder/testdata/l4-single-destination-unix-socket-bind-address.golden index aa21472ad57..a67ba1c22cc 100644 --- a/internal/mesh/internal/controllers/sidecarproxy/builder/testdata/l4-single-destination-unix-socket-bind-address.golden +++ b/internal/mesh/internal/controllers/sidecarproxy/builder/testdata/l4-single-destination-unix-socket-bind-address.golden @@ -1,47 +1,47 @@ { - "proxyState": { - "identity": { - "tenancy": { - "partition": "default", - "namespace": "default", - "peerName": "local" + "proxyState": { + "identity": { + "tenancy": { + "partition": "default", + "namespace": "default", + "peerName": "local" }, - "name": "test-identity" + "name": "test-identity" }, - "listeners": [ + "listeners": [ { - "name": "api-2:tcp:/path/to/socket", - "direction": "DIRECTION_OUTBOUND", - "unixSocket": { - "path": "/path/to/socket", - "mode": "0666" + "name": "api-2:tcp:/path/to/socket", + "direction": "DIRECTION_OUTBOUND", + "unixSocket": { + "path": 
"/path/to/socket", + "mode": "0666" }, - "routers": [ + "routers": [ { - "l4": { - "name": "api-2.default.dc1.internal.foo.consul", - "statPrefix": "upstream.api-2.default.default.dc1" + "l4": { + "name": "api-2.default.dc1.internal.foo.consul", + "statPrefix": "upstream.api-2.default.default.dc1" } } ] } ], - "clusters": { - "api-2.default.dc1.internal.foo.consul": { - "endpointGroup": { - "dynamic": { - "config": { - "disablePanicThreshold": true + "clusters": { + "api-2.default.dc1.internal.foo.consul": { + "endpointGroup": { + "dynamic": { + "config": { + "disablePanicThreshold": true }, - "outboundTls": { - "outboundMesh": { - "identityKey": "test-identity", - "validationContext": { - "spiffeIds": [ + "outboundTls": { + "outboundMesh": { + "identityKey": "test-identity", + "validationContext": { + "spiffeIds": [ "spiffe://foo.consul/ap/default/ns/default/identity/api2-identity" ] }, - "sni": "api-2.default.dc1.internal.foo.consul" + "sni": "api-2.default.dc1.internal.foo.consul" } } } @@ -49,22 +49,22 @@ } } }, - "requiredEndpoints": { - "api-2.default.dc1.internal.foo.consul": { - "id": { - "name": "api-2", - "type": { - "group": "catalog", - "groupVersion": "v1alpha1", - "kind": "ServiceEndpoints" + "requiredEndpoints": { + "api-2.default.dc1.internal.foo.consul": { + "id": { + "name": "api-2", + "type": { + "group": "catalog", + "groupVersion": "v1alpha1", + "kind": "ServiceEndpoints" }, - "tenancy": { - "partition": "default", - "namespace": "default", - "peerName": "local" + "tenancy": { + "partition": "default", + "namespace": "default", + "peerName": "local" } }, - "port": "mesh" + "port": "mesh" } } } \ No newline at end of file diff --git a/internal/mesh/internal/controllers/sidecarproxy/builder/testdata/l4-single-workload-address-without-ports.golden b/internal/mesh/internal/controllers/sidecarproxy/builder/testdata/l4-single-workload-address-without-ports.golden index 9c22e94d597..39c532d7376 100644 --- 
a/internal/mesh/internal/controllers/sidecarproxy/builder/testdata/l4-single-workload-address-without-ports.golden +++ b/internal/mesh/internal/controllers/sidecarproxy/builder/testdata/l4-single-workload-address-without-ports.golden @@ -1,32 +1,32 @@ { - "proxyState": { - "identity": { - "tenancy": { - "partition": "default", - "namespace": "default", - "peerName": "local" + "proxyState": { + "identity": { + "tenancy": { + "partition": "default", + "namespace": "default", + "peerName": "local" }, - "name": "test-identity" + "name": "test-identity" }, - "listeners": [ + "listeners": [ { - "name": "public_listener", - "direction": "DIRECTION_INBOUND", - "hostPort": { - "host": "10.0.0.1", - "port": 20000 + "name": "public_listener", + "direction": "DIRECTION_INBOUND", + "hostPort": { + "host": "10.0.0.1", + "port": 20000 }, - "routers": [ + "routers": [ { - "l4": { - "name": "local_app:port1", - "statPrefix": "public_listener" + "l4": { + "name": "local_app:port1", + "statPrefix": "public_listener" }, - "inboundTls": { - "inboundMesh": { - "identityKey": "test-identity", - "validationContext": { - "trustBundlePeerNameKeys": [ + "inboundTls": { + "inboundMesh": { + "identityKey": "test-identity", + "validationContext": { + "trustBundlePeerNameKeys": [ "local" ] } @@ -36,36 +36,36 @@ ] } ], - "clusters": { - "local_app:port1": { - "endpointGroup": { - "static": {} + "clusters": { + "local_app:port1": { + "endpointGroup": { + "static": {} } } }, - "endpoints": { - "local_app:port1": { - "endpoints": [ + "endpoints": { + "local_app:port1": { + "endpoints": [ { - "hostPort": { - "host": "127.0.0.1", - "port": 8080 + "hostPort": { + "host": "127.0.0.1", + "port": 8080 } } ] } } }, - "requiredLeafCertificates": { - "test-identity": { - "name": "test-identity", - "namespace": "default", - "partition": "default" + "requiredLeafCertificates": { + "test-identity": { + "name": "test-identity", + "namespace": "default", + "partition": "default" } }, - "requiredTrustBundles": { - 
"local": { - "peer": "local" + "requiredTrustBundles": { + "local": { + "peer": "local" } } } \ No newline at end of file diff --git a/internal/mesh/internal/controllers/sidecarproxy/cache/cache.go b/internal/mesh/internal/controllers/sidecarproxy/cache/cache.go deleted file mode 100644 index 8eee2c10b92..00000000000 --- a/internal/mesh/internal/controllers/sidecarproxy/cache/cache.go +++ /dev/null @@ -1,144 +0,0 @@ -package cache - -import ( - "fmt" - "sync" - - "github.com/hashicorp/consul/internal/mesh/internal/types/intermediate" - "github.com/hashicorp/consul/internal/resource" - "github.com/hashicorp/consul/proto-public/pbresource" -) - -// Cache stores information needed for the sidecar-proxy controller to reconcile efficiently. -// This currently means storing a list of all destinations for easy look up -// as well as indices of source proxies where those destinations are referenced. -// -// It is the responsibility of the controller and its subcomponents (like mapper and data fetcher) -// to keep this cache up-to-date as we're observing new data. -type Cache struct { - lock sync.RWMutex - - // store is a map from destination service reference and port as a string ID - // to the object representing destination reference. - store map[string]*intermediate.CombinedDestinationRef - - // sourceProxiesIndex stores a map from a string representation of source proxy ID - // to the keys in the store map. 
- sourceProxiesIndex map[string]storeKeys -} - -type storeKeys map[string]struct{} - -func New() *Cache { - return &Cache{ - store: make(map[string]*intermediate.CombinedDestinationRef), - sourceProxiesIndex: make(map[string]storeKeys), - } -} - -func KeyFromID(id *pbresource.ID) string { - return fmt.Sprintf("%s/%s/%s", - resource.ToGVK(id.Type), - resource.TenancyToString(id.Tenancy), - id.Name) -} - -func KeyFromRefAndPort(ref *pbresource.Reference, port string) string { - return fmt.Sprintf("%s:%s", - resource.ReferenceToString(ref), - port) -} - -func (c *Cache) Write(d *intermediate.CombinedDestinationRef) { - c.lock.Lock() - defer c.lock.Unlock() - - key := KeyFromRefAndPort(d.ServiceRef, d.Port) - - c.store[key] = d - - // Update source proxies index. - for _, proxyID := range d.SourceProxies { - proxyIDKey := KeyFromID(proxyID) - - _, ok := c.sourceProxiesIndex[proxyIDKey] - if !ok { - c.sourceProxiesIndex[proxyIDKey] = make(storeKeys) - } - - c.sourceProxiesIndex[proxyIDKey][key] = struct{}{} - } -} - -func (c *Cache) Delete(ref *pbresource.Reference, port string) { - c.lock.Lock() - defer c.lock.Unlock() - - key := KeyFromRefAndPort(ref, port) - - // First get it from the store. - dest, ok := c.store[key] - if !ok { - // If it's not there, return as there's nothing for us to. - return - } - - // Update source proxies indices. - for _, proxyID := range dest.SourceProxies { - proxyIDKey := KeyFromID(proxyID) - - // Delete our destination key from this source proxy. - delete(c.sourceProxiesIndex[proxyIDKey], key) - } - - // Finally, delete this destination from the store. - delete(c.store, key) -} - -func (c *Cache) DeleteSourceProxy(id *pbresource.ID) { - c.lock.Lock() - defer c.lock.Unlock() - - proxyIDKey := KeyFromID(id) - - // Get all destination keys. - destKeys := c.sourceProxiesIndex[proxyIDKey] - - for destKey := range destKeys { - // Read destination. 
- dest, ok := c.store[destKey] - if !ok { - // If there's no destination with that key, skip it as there's nothing for us to do. - continue - } - - // Delete the source proxy ID. - delete(dest.SourceProxies, proxyIDKey) - } - - // Finally, delete the index for this proxy. - delete(c.sourceProxiesIndex, proxyIDKey) -} - -func (c *Cache) ReadDestination(ref *pbresource.Reference, port string) *intermediate.CombinedDestinationRef { - c.lock.RLock() - defer c.lock.RUnlock() - - key := KeyFromRefAndPort(ref, port) - return c.store[key] -} - -func (c *Cache) DestinationsBySourceProxy(id *pbresource.ID) []*intermediate.CombinedDestinationRef { - c.lock.RLock() - defer c.lock.RUnlock() - - var destinations []*intermediate.CombinedDestinationRef - - proxyIDKey := KeyFromID(id) - - for destKey := range c.sourceProxiesIndex[proxyIDKey] { - destinations = append(destinations, c.store[destKey]) - } - - return destinations -} diff --git a/internal/mesh/internal/controllers/sidecarproxy/controller.go b/internal/mesh/internal/controllers/sidecarproxy/controller.go index 95c1fa9244a..ef68ee93350 100644 --- a/internal/mesh/internal/controllers/sidecarproxy/controller.go +++ b/internal/mesh/internal/controllers/sidecarproxy/controller.go @@ -8,10 +8,10 @@ import ( "github.com/hashicorp/consul/internal/catalog" "github.com/hashicorp/consul/internal/controller" + "github.com/hashicorp/consul/internal/mesh/internal/cache/sidecarproxycache" "github.com/hashicorp/consul/internal/mesh/internal/controllers/sidecarproxy/builder" - "github.com/hashicorp/consul/internal/mesh/internal/controllers/sidecarproxy/cache" "github.com/hashicorp/consul/internal/mesh/internal/controllers/sidecarproxy/fetcher" - "github.com/hashicorp/consul/internal/mesh/internal/controllers/sidecarproxy/mapper" + "github.com/hashicorp/consul/internal/mesh/internal/mappers/sidecarproxymapper" "github.com/hashicorp/consul/internal/mesh/internal/types" "github.com/hashicorp/consul/internal/mesh/internal/types/intermediate" 
"github.com/hashicorp/consul/internal/resource" @@ -25,7 +25,7 @@ const ControllerName = "consul.io/sidecar-proxy-controller" type TrustDomainFetcher func() (string, error) -func Controller(cache *cache.Cache, mapper *mapper.Mapper, trustDomainFetcher TrustDomainFetcher) controller.Controller { +func Controller(cache *sidecarproxycache.Cache, mapper *sidecarproxymapper.Mapper, trustDomainFetcher TrustDomainFetcher) controller.Controller { if cache == nil || mapper == nil || trustDomainFetcher == nil { panic("cache, mapper and trust domain fetcher are required") } @@ -37,7 +37,7 @@ func Controller(cache *cache.Cache, mapper *mapper.Mapper, trustDomainFetcher Tr } type reconciler struct { - cache *cache.Cache + cache *sidecarproxycache.Cache getTrustDomain TrustDomainFetcher } @@ -74,7 +74,7 @@ func (r *reconciler) Reconcile(ctx context.Context, rt controller.Runtime, req c rt.Logger.Trace("proxy state template for this workload doesn't yet exist; generating a new one", "id", req.ID) } - if !fetcher.IsMeshEnabled(workload.Workload.Ports) { + if !workload.Workload.IsMeshEnabled() { // Skip non-mesh workloads. // If there's existing proxy state template, delete it. 
diff --git a/internal/mesh/internal/controllers/sidecarproxy/controller_test.go b/internal/mesh/internal/controllers/sidecarproxy/controller_test.go index 74a38d4ed6c..fbebd244c87 100644 --- a/internal/mesh/internal/controllers/sidecarproxy/controller_test.go +++ b/internal/mesh/internal/controllers/sidecarproxy/controller_test.go @@ -10,10 +10,10 @@ import ( svctest "github.com/hashicorp/consul/agent/grpc-external/services/resource/testing" "github.com/hashicorp/consul/internal/catalog" "github.com/hashicorp/consul/internal/controller" + "github.com/hashicorp/consul/internal/mesh/internal/cache/sidecarproxycache" "github.com/hashicorp/consul/internal/mesh/internal/controllers/sidecarproxy/builder" - "github.com/hashicorp/consul/internal/mesh/internal/controllers/sidecarproxy/cache" - "github.com/hashicorp/consul/internal/mesh/internal/controllers/sidecarproxy/mapper" "github.com/hashicorp/consul/internal/mesh/internal/controllers/sidecarproxy/status" + "github.com/hashicorp/consul/internal/mesh/internal/mappers/sidecarproxymapper" "github.com/hashicorp/consul/internal/mesh/internal/types" "github.com/hashicorp/consul/internal/resource" "github.com/hashicorp/consul/internal/resource/resourcetest" @@ -53,7 +53,7 @@ func (suite *meshControllerTestSuite) SetupTest() { suite.ctx = testutil.TestContext(suite.T()) suite.ctl = &reconciler{ - cache: cache.New(), + cache: sidecarproxycache.New(), getTrustDomain: func() (string, error) { return "test.consul", nil }, @@ -260,8 +260,8 @@ func (suite *meshControllerTestSuite) TestController() { // Run the controller manager mgr := controller.NewManager(suite.client, suite.runtime.Logger) - c := cache.New() - m := mapper.New(c) + c := sidecarproxycache.New() + m := sidecarproxymapper.New(c) mgr.Register(Controller(c, m, func() (string, error) { return "test.consul", nil @@ -293,7 +293,7 @@ func (suite *meshControllerTestSuite) TestController() { }).Write(suite.T(), suite.client) webProxyStateTemplate = 
suite.client.WaitForNewVersion(suite.T(), webProxyStateTemplateID, webProxyStateTemplate.Version) - // Update destination's service apiEndpoints and workload to be non-mesh + // Update destination's service endpoints and workload to be non-mesh // and check that: // * api's proxy state template is deleted // * we get a new web proxy resource re-generated @@ -301,6 +301,19 @@ func (suite *meshControllerTestSuite) TestController() { nonMeshPorts := map[string]*pbcatalog.WorkloadPort{ "tcp": {Port: 8080, Protocol: pbcatalog.Protocol_PROTOCOL_TCP}, } + + // Note: the order matters here because in reality service endpoints will only + // be reconciled after the workload has been updated, and so we need to write the + // workload before we write service endpoints. + suite.runtime.Logger.Trace("test: updating api-abc workload to be non-mesh") + resourcetest.Resource(catalog.WorkloadType, "api-abc"). + WithData(suite.T(), &pbcatalog.Workload{ + Identity: "api-identity", + Addresses: suite.apiWorkload.Addresses, + Ports: nonMeshPorts}). + Write(suite.T(), suite.client) + + suite.runtime.Logger.Trace("test: updating api-service to be non-mesh") resourcetest.Resource(catalog.ServiceEndpointsType, "api-service"). WithData(suite.T(), &pbcatalog.ServiceEndpoints{ Endpoints: []*pbcatalog.Endpoint{ @@ -314,22 +327,15 @@ func (suite *meshControllerTestSuite) TestController() { }). Write(suite.T(), suite.client.ResourceServiceClient) - resourcetest.Resource(catalog.WorkloadType, "api-abc"). - WithData(suite.T(), &pbcatalog.Workload{ - Identity: "api-identity", - Addresses: suite.apiWorkload.Addresses, - Ports: nonMeshPorts}). - Write(suite.T(), suite.client) - // Check that api proxy template is gone. retry.Run(suite.T(), func(r *retry.R) { suite.client.RequireResourceNotFound(r, apiProxyStateTemplateID) }) // Check status on the pbmesh.Upstreams resource. 
- serviceRef := cache.KeyFromRefAndPort(resource.Reference(suite.apiService.Id, ""), "tcp") + serviceRef := resource.ReferenceToString(resource.Reference(suite.apiService.Id, "")) suite.client.WaitForStatusCondition(suite.T(), webDestinations.Id, ControllerName, - status.ConditionNonMeshDestination(serviceRef)) + status.ConditionMeshProtocolNotFound(serviceRef)) // We should get a new web proxy template resource because this destination should be removed. webProxyStateTemplate = suite.client.WaitForNewVersion(suite.T(), webProxyStateTemplateID, webProxyStateTemplate.Version) @@ -341,7 +347,7 @@ func (suite *meshControllerTestSuite) TestController() { Write(suite.T(), suite.client.ResourceServiceClient) suite.client.WaitForStatusCondition(suite.T(), webDestinations.Id, ControllerName, - status.ConditionMeshDestination(serviceRef)) + status.ConditionMeshProtocolFound(serviceRef)) // We should also get a new web proxy template resource as this destination should be added again. webProxyStateTemplate = suite.client.WaitForNewVersion(suite.T(), webProxyStateTemplateID, webProxyStateTemplate.Version) diff --git a/internal/mesh/internal/controllers/sidecarproxy/fetcher/data_fetcher.go b/internal/mesh/internal/controllers/sidecarproxy/fetcher/data_fetcher.go index d38bebdabe0..9267ec93e21 100644 --- a/internal/mesh/internal/controllers/sidecarproxy/fetcher/data_fetcher.go +++ b/internal/mesh/internal/controllers/sidecarproxy/fetcher/data_fetcher.go @@ -4,7 +4,7 @@ import ( "context" "github.com/hashicorp/consul/internal/catalog" - "github.com/hashicorp/consul/internal/mesh/internal/controllers/sidecarproxy/cache" + "github.com/hashicorp/consul/internal/mesh/internal/cache/sidecarproxycache" ctrlStatus "github.com/hashicorp/consul/internal/mesh/internal/controllers/sidecarproxy/status" "github.com/hashicorp/consul/internal/mesh/internal/types" intermediateTypes "github.com/hashicorp/consul/internal/mesh/internal/types/intermediate" @@ -18,7 +18,7 @@ import ( type Fetcher 
struct { Client pbresource.ResourceServiceClient - Cache *cache.Cache + Cache *sidecarproxycache.Cache } func (f *Fetcher) FetchWorkload(ctx context.Context, id *pbresource.ID) (*intermediateTypes.Workload, error) { @@ -122,11 +122,14 @@ func (f *Fetcher) FetchDestinations(ctx context.Context, id *pbresource.ID) (*in func (f *Fetcher) FetchDestinationsData( ctx context.Context, - destinationRefs []*intermediateTypes.CombinedDestinationRef, + destinationRefs []intermediateTypes.CombinedDestinationRef, ) ([]*intermediateTypes.Destination, map[string]*intermediateTypes.Status, error) { - var destinations []*intermediateTypes.Destination - statuses := make(map[string]*intermediateTypes.Status) + var ( + destinations []*intermediateTypes.Destination + statuses = make(map[string]*intermediateTypes.Status) + ) + for _, dest := range destinationRefs { // Fetch Destinations resource if there is one. us, err := f.FetchDestinations(ctx, dest.ExplicitDestinationsID) @@ -138,14 +141,14 @@ func (f *Fetcher) FetchDestinationsData( if us == nil { // If the Destinations resource is not found, then we should delete it from cache and continue. - f.Cache.Delete(dest.ServiceRef, dest.Port) + f.Cache.DeleteDestination(dest.ServiceRef, dest.Port) continue } - u := &intermediateTypes.Destination{} + d := &intermediateTypes.Destination{} // As Destinations resource contains a list of destinations, // we need to find the one that references our service and port. - u.Explicit = findDestination(dest.ServiceRef, dest.Port, us.Destinations) + d.Explicit = findDestination(dest.ServiceRef, dest.Port, us.Destinations) // Fetch ServiceEndpoints. 
serviceID := resource.IDFromReference(dest.ServiceRef) @@ -154,11 +157,11 @@ func (f *Fetcher) FetchDestinationsData( return nil, statuses, err } - serviceRef := cache.KeyFromRefAndPort(dest.ServiceRef, dest.Port) - upstreamsRef := cache.KeyFromID(us.Resource.Id) + serviceRef := resource.ReferenceToString(dest.ServiceRef) + upstreamsRef := resource.IDToString(us.Resource.Id) if se == nil { // If the Service Endpoints resource is not found, then we update the status of the Upstreams resource - // but not remove it from cache in case it comes back. + // but don't remove it from cache in case it comes back. updateStatusCondition(statuses, upstreamsRef, dest.ExplicitDestinationsID, us.Resource.Status, us.Resource.Generation, ctrlStatus.ConditionDestinationServiceNotFound(serviceRef)) continue @@ -167,14 +170,14 @@ func (f *Fetcher) FetchDestinationsData( us.Resource.Status, us.Resource.Generation, ctrlStatus.ConditionDestinationServiceFound(serviceRef)) } - u.ServiceEndpoints = se + d.ServiceEndpoints = se // Check if this endpoints is mesh-enabled. If not, remove it from cache and return an error. if !IsMeshEnabled(se.Endpoints.Endpoints[0].Ports) { // Add invalid status but don't remove from cache. If this state changes, // we want to be able to detect this change. updateStatusCondition(statuses, upstreamsRef, dest.ExplicitDestinationsID, - us.Resource.Status, us.Resource.Generation, ctrlStatus.ConditionNonMeshDestination(serviceRef)) + us.Resource.Status, us.Resource.Generation, ctrlStatus.ConditionMeshProtocolNotFound(serviceRef)) // This error should not cause the execution to stop, as we want to make sure that this non-mesh destination // gets removed from the proxy state. @@ -182,7 +185,18 @@ func (f *Fetcher) FetchDestinationsData( } else { // If everything was successful, add an empty condition so that we can remove any existing statuses. 
updateStatusCondition(statuses, upstreamsRef, dest.ExplicitDestinationsID, - us.Resource.Status, us.Resource.Generation, ctrlStatus.ConditionMeshDestination(serviceRef)) + us.Resource.Status, us.Resource.Generation, ctrlStatus.ConditionMeshProtocolFound(serviceRef)) + } + + // No destination port should point to a port with "mesh" protocol, + // so check if destination port has the mesh protocol and update the status. + if se.Endpoints.Endpoints[0].Ports[dest.Port].Protocol == pbcatalog.Protocol_PROTOCOL_MESH { + updateStatusCondition(statuses, upstreamsRef, dest.ExplicitDestinationsID, + us.Resource.Status, us.Resource.Generation, ctrlStatus.ConditionMeshProtocolDestinationPort(serviceRef, dest.Port)) + continue + } else { + updateStatusCondition(statuses, upstreamsRef, dest.ExplicitDestinationsID, + us.Resource.Status, us.Resource.Generation, ctrlStatus.ConditionNonMeshProtocolDestinationPort(serviceRef, dest.Port)) } // Gather all identities. @@ -194,10 +208,10 @@ func (f *Fetcher) FetchDestinationsData( Tenancy: se.Resource.Id.Tenancy, }) } - u.Identities = identities + d.Identities = identities } - destinations = append(destinations, u) + destinations = append(destinations, d) } return destinations, statuses, nil diff --git a/internal/mesh/internal/controllers/sidecarproxy/fetcher/data_fetcher_test.go b/internal/mesh/internal/controllers/sidecarproxy/fetcher/data_fetcher_test.go index 1bf799f42ee..d2f7728891b 100644 --- a/internal/mesh/internal/controllers/sidecarproxy/fetcher/data_fetcher_test.go +++ b/internal/mesh/internal/controllers/sidecarproxy/fetcher/data_fetcher_test.go @@ -7,7 +7,7 @@ import ( svctest "github.com/hashicorp/consul/agent/grpc-external/services/resource/testing" "github.com/hashicorp/consul/internal/catalog" "github.com/hashicorp/consul/internal/controller" - "github.com/hashicorp/consul/internal/mesh/internal/controllers/sidecarproxy/cache" + "github.com/hashicorp/consul/internal/mesh/internal/cache/sidecarproxycache" meshStatus 
"github.com/hashicorp/consul/internal/mesh/internal/controllers/sidecarproxy/status" "github.com/hashicorp/consul/internal/mesh/internal/types" "github.com/hashicorp/consul/internal/mesh/internal/types/intermediate" @@ -175,25 +175,25 @@ func (suite *dataFetcherSuite) TestFetcher_FetchWorkload_WorkloadNotFound() { proxyID := resourcetest.Resource(types.ProxyStateTemplateType, "service-workload-abc").ID() // Create cache and pre-populate it. - c := cache.New() - dest1 := &intermediate.CombinedDestinationRef{ + c := sidecarproxycache.New() + dest1 := intermediate.CombinedDestinationRef{ ServiceRef: resourcetest.Resource(catalog.ServiceType, "test-service-1").ReferenceNoSection(), Port: "tcp", ExplicitDestinationsID: resourcetest.Resource(types.UpstreamsType, "test-servicedestinations-1").ID(), - SourceProxies: map[string]*pbresource.ID{ - cache.KeyFromID(proxyID): proxyID, + SourceProxies: map[resource.ReferenceKey]struct{}{ + resource.NewReferenceKey(proxyID): {}, }, } - dest2 := &intermediate.CombinedDestinationRef{ + dest2 := intermediate.CombinedDestinationRef{ ServiceRef: resourcetest.Resource(catalog.ServiceType, "test-service-2").ReferenceNoSection(), Port: "tcp", ExplicitDestinationsID: resourcetest.Resource(types.UpstreamsType, "test-servicedestinations-2").ID(), - SourceProxies: map[string]*pbresource.ID{ - cache.KeyFromID(proxyID): proxyID, + SourceProxies: map[resource.ReferenceKey]struct{}{ + resource.NewReferenceKey(proxyID): {}, }, } - c.Write(dest1) - c.Write(dest2) + c.WriteDestination(dest1) + c.WriteDestination(dest2) f := Fetcher{Cache: c, Client: suite.client} _, err := f.FetchWorkload(context.Background(), proxyID) @@ -313,35 +313,35 @@ func (suite *dataFetcherSuite) TestFetcher_FetchErrors() { } func (suite *dataFetcherSuite) TestFetcher_FetchDestinationsData() { - destination1 := &intermediate.CombinedDestinationRef{ + destination1 := intermediate.CombinedDestinationRef{ ServiceRef: resource.Reference(suite.api1Service.Id, ""), Port: "tcp", 
ExplicitDestinationsID: suite.webDestinations.Id, - SourceProxies: map[string]*pbresource.ID{ - cache.KeyFromID(suite.webProxy.Id): suite.webProxy.Id, + SourceProxies: map[resource.ReferenceKey]struct{}{ + resource.NewReferenceKey(suite.webProxy.Id): {}, }, } - destination2 := &intermediate.CombinedDestinationRef{ + destination2 := intermediate.CombinedDestinationRef{ ServiceRef: resource.Reference(suite.api2Service.Id, ""), Port: "tcp1", ExplicitDestinationsID: suite.webDestinations.Id, - SourceProxies: map[string]*pbresource.ID{ - cache.KeyFromID(suite.webProxy.Id): suite.webProxy.Id, + SourceProxies: map[resource.ReferenceKey]struct{}{ + resource.NewReferenceKey(suite.webProxy.Id): {}, }, } - destination3 := &intermediate.CombinedDestinationRef{ + destination3 := intermediate.CombinedDestinationRef{ ServiceRef: resource.Reference(suite.api2Service.Id, ""), Port: "tcp2", ExplicitDestinationsID: suite.webDestinations.Id, - SourceProxies: map[string]*pbresource.ID{ - cache.KeyFromID(suite.webProxy.Id): suite.webProxy.Id, + SourceProxies: map[resource.ReferenceKey]struct{}{ + resource.NewReferenceKey(suite.webProxy.Id): {}, }, } - c := cache.New() - c.Write(destination1) - c.Write(destination2) - c.Write(destination3) + c := sidecarproxycache.New() + c.WriteDestination(destination1) + c.WriteDestination(destination2) + c.WriteDestination(destination3) f := Fetcher{ Cache: c, @@ -349,48 +349,50 @@ func (suite *dataFetcherSuite) TestFetcher_FetchDestinationsData() { } suite.T().Run("destinations not found", func(t *testing.T) { - destinationRefNoDestinations := &intermediate.CombinedDestinationRef{ + destinationRefNoDestinations := intermediate.CombinedDestinationRef{ ServiceRef: resource.Reference(suite.api1Service.Id, ""), Port: "tcp", ExplicitDestinationsID: resourcetest.Resource(types.UpstreamsType, "not-found").ID(), - SourceProxies: map[string]*pbresource.ID{ - cache.KeyFromID(suite.webProxy.Id): suite.webProxy.Id, + SourceProxies: 
map[resource.ReferenceKey]struct{}{ + resource.NewReferenceKey(suite.webProxy.Id): {}, }, } - c.Write(destinationRefNoDestinations) + c.WriteDestination(destinationRefNoDestinations) - destinationRefs := []*intermediate.CombinedDestinationRef{destinationRefNoDestinations} + destinationRefs := []intermediate.CombinedDestinationRef{destinationRefNoDestinations} destinations, _, err := f.FetchDestinationsData(suite.ctx, destinationRefs) require.NoError(t, err) require.Nil(t, destinations) - require.Nil(t, c.ReadDestination(destinationRefNoDestinations.ServiceRef, destinationRefNoDestinations.Port)) + _, foundDest := c.ReadDestination(destinationRefNoDestinations.ServiceRef, destinationRefNoDestinations.Port) + require.False(t, foundDest) }) suite.T().Run("service endpoints not found", func(t *testing.T) { notFoundServiceRef := resourcetest.Resource(catalog.ServiceType, "not-found").ReferenceNoSection() - destinationNoServiceEndpoints := &intermediate.CombinedDestinationRef{ + destinationNoServiceEndpoints := intermediate.CombinedDestinationRef{ ServiceRef: notFoundServiceRef, Port: "tcp", ExplicitDestinationsID: suite.webDestinations.Id, - SourceProxies: map[string]*pbresource.ID{ - cache.KeyFromID(suite.webProxy.Id): suite.webProxy.Id, + SourceProxies: map[resource.ReferenceKey]struct{}{ + resource.NewReferenceKey(suite.webProxy.Id): {}, }, } - c.Write(destinationNoServiceEndpoints) + c.WriteDestination(destinationNoServiceEndpoints) - destinationRefs := []*intermediate.CombinedDestinationRef{destinationNoServiceEndpoints} + destinationRefs := []intermediate.CombinedDestinationRef{destinationNoServiceEndpoints} destinations, statuses, err := f.FetchDestinationsData(suite.ctx, destinationRefs) require.NoError(t, err) require.Nil(t, destinations) - destinationRef := cache.KeyFromID(destinationNoServiceEndpoints.ExplicitDestinationsID) - serviceRef := cache.KeyFromRefAndPort(destinationNoServiceEndpoints.ServiceRef, destinationNoServiceEndpoints.Port) + destinationRef 
:= resource.IDToString(destinationNoServiceEndpoints.ExplicitDestinationsID) + serviceRef := resource.ReferenceToString(destinationNoServiceEndpoints.ServiceRef) require.Len(t, statuses[destinationRef].Conditions, 1) require.Equal(t, statuses[destinationRef].Conditions[0], meshStatus.ConditionDestinationServiceNotFound(serviceRef)) - require.NotNil(t, c.ReadDestination(destinationNoServiceEndpoints.ServiceRef, destinationNoServiceEndpoints.Port)) + _, foundDest := c.ReadDestination(destinationNoServiceEndpoints.ServiceRef, destinationNoServiceEndpoints.Port) + require.True(t, foundDest) }) suite.T().Run("service endpoints not on mesh", func(t *testing.T) { @@ -407,35 +409,36 @@ func (suite *dataFetcherSuite) TestFetcher_FetchDestinationsData() { } apiNonMeshServiceEndpoints := resourcetest.Resource(catalog.ServiceEndpointsType, "api-1"). WithData(suite.T(), apiNonMeshServiceEndpointsData).Write(suite.T(), suite.client) - destinationNonMeshServiceEndpoints := &intermediate.CombinedDestinationRef{ + destinationNonMeshServiceEndpoints := intermediate.CombinedDestinationRef{ ServiceRef: resource.Reference(apiNonMeshServiceEndpoints.Owner, ""), Port: "tcp", ExplicitDestinationsID: suite.webDestinations.Id, - SourceProxies: map[string]*pbresource.ID{ - cache.KeyFromID(suite.webProxy.Id): suite.webProxy.Id, + SourceProxies: map[resource.ReferenceKey]struct{}{ + resource.NewReferenceKey(suite.webProxy.Id): {}, }, } - c.Write(destinationNonMeshServiceEndpoints) + c.WriteDestination(destinationNonMeshServiceEndpoints) - destinationRefs := []*intermediate.CombinedDestinationRef{destinationNonMeshServiceEndpoints} + destinationRefs := []intermediate.CombinedDestinationRef{destinationNonMeshServiceEndpoints} destinations, statuses, err := f.FetchDestinationsData(suite.ctx, destinationRefs) require.NoError(t, err) require.Nil(t, destinations) - destinationRef := cache.KeyFromID(destinationNonMeshServiceEndpoints.ExplicitDestinationsID) - serviceRef := 
cache.KeyFromRefAndPort(destinationNonMeshServiceEndpoints.ServiceRef, destinationNonMeshServiceEndpoints.Port) + destinationRef := resource.IDToString(destinationNonMeshServiceEndpoints.ExplicitDestinationsID) + serviceRef := resource.ReferenceToString(destinationNonMeshServiceEndpoints.ServiceRef) require.Len(t, statuses[destinationRef].Conditions, 2) prototest.AssertElementsMatch(t, statuses[destinationRef].Conditions, []*pbresource.Condition{ meshStatus.ConditionDestinationServiceFound(serviceRef), - meshStatus.ConditionNonMeshDestination(serviceRef), + meshStatus.ConditionMeshProtocolNotFound(serviceRef), }) - require.NotNil(t, c.ReadDestination(destinationNonMeshServiceEndpoints.ServiceRef, destinationNonMeshServiceEndpoints.Port)) + _, foundDest := c.ReadDestination(destinationNonMeshServiceEndpoints.ServiceRef, destinationNonMeshServiceEndpoints.Port) + require.True(t, foundDest) }) - suite.T().Run("invalid destinations", func(t *testing.T) { + suite.T().Run("invalid destinations: destination is not on the mesh", func(t *testing.T) { // Update api1 to no longer be on the mesh. suite.api1ServiceEndpoints = resourcetest.Resource(catalog.ServiceEndpointsType, "api-1"). 
WithData(suite.T(), &pbcatalog.ServiceEndpoints{ @@ -450,17 +453,17 @@ func (suite *dataFetcherSuite) TestFetcher_FetchDestinationsData() { }, }).Write(suite.T(), suite.client) - destinationRefs := []*intermediate.CombinedDestinationRef{destination1} + destinationRefs := []intermediate.CombinedDestinationRef{destination1} destinations, statuses, err := f.FetchDestinationsData(suite.ctx, destinationRefs) - serviceRef := cache.KeyFromRefAndPort(destination1.ServiceRef, destination1.Port) - destinationRef := cache.KeyFromID(destination1.ExplicitDestinationsID) + serviceRef := resource.ReferenceToString(destination1.ServiceRef) + destinationRef := resource.IDToString(destination1.ExplicitDestinationsID) expectedStatus := &intermediate.Status{ ID: suite.webDestinations.Id, Generation: suite.webDestinations.Generation, Conditions: []*pbresource.Condition{ meshStatus.ConditionDestinationServiceFound(serviceRef), - meshStatus.ConditionNonMeshDestination(serviceRef), + meshStatus.ConditionMeshProtocolNotFound(serviceRef), }, } @@ -474,7 +477,8 @@ func (suite *dataFetcherSuite) TestFetcher_FetchDestinationsData() { // Check that destination service is still in cache because it's still referenced from the pbmesh.Upstreams // resource. - require.NotNil(t, c.ReadDestination(destination1.ServiceRef, destination1.Port)) + _, foundDest := c.ReadDestination(destination1.ServiceRef, destination1.Port) + require.True(t, foundDest) // Update the endpoints to be mesh enabled again and check that the status is now valid. suite.api1ServiceEndpoints = resourcetest.Resource(catalog.ServiceEndpointsType, "api-1"). 
@@ -484,7 +488,65 @@ func (suite *dataFetcherSuite) TestFetcher_FetchDestinationsData() { Generation: suite.webDestinations.Generation, Conditions: []*pbresource.Condition{ meshStatus.ConditionDestinationServiceFound(serviceRef), - meshStatus.ConditionMeshDestination(serviceRef), + meshStatus.ConditionMeshProtocolFound(serviceRef), + meshStatus.ConditionNonMeshProtocolDestinationPort(serviceRef, destination1.Port), + }, + } + + _, statuses, err = f.FetchDestinationsData(suite.ctx, destinationRefs) + require.NoError(t, err) + prototest.AssertDeepEqual(t, expectedStatus, statuses[destinationRef]) + }) + + suite.T().Run("invalid destinations: destination is pointing to a mesh port", func(t *testing.T) { + // Create a destination pointing to the mesh port. + destinationMeshDestinationPort := intermediate.CombinedDestinationRef{ + ServiceRef: resource.Reference(suite.api1Service.Id, ""), + Port: "mesh", + ExplicitDestinationsID: suite.webDestinations.Id, + SourceProxies: map[resource.ReferenceKey]struct{}{ + resource.NewReferenceKey(suite.webProxy.Id): {}, + }, + } + c.WriteDestination(destinationMeshDestinationPort) + destinationRefs := []intermediate.CombinedDestinationRef{destinationMeshDestinationPort} + + destinations, statuses, err := f.FetchDestinationsData(suite.ctx, destinationRefs) + serviceRef := resource.ReferenceToString(destination1.ServiceRef) + destinationRef := resource.IDToString(destination1.ExplicitDestinationsID) + expectedStatus := &intermediate.Status{ + ID: suite.webDestinations.Id, + Generation: suite.webDestinations.Generation, + Conditions: []*pbresource.Condition{ + meshStatus.ConditionDestinationServiceFound(serviceRef), + meshStatus.ConditionMeshProtocolFound(serviceRef), + meshStatus.ConditionMeshProtocolDestinationPort(serviceRef, destinationMeshDestinationPort.Port), + }, + } + + require.NoError(t, err) + + // Check that the status is generated correctly. 
+ prototest.AssertDeepEqual(t, expectedStatus, statuses[destinationRef]) + + // Check that we didn't return any destinations. + require.Nil(t, destinations) + + // Check that destination service is still in cache because it's still referenced from the pbmesh.Upstreams + // resource. + _, foundDest := c.ReadDestination(destinationMeshDestinationPort.ServiceRef, destinationMeshDestinationPort.Port) + require.True(t, foundDest) + + // Update the destination to point to a non-mesh port and check that the status is now updated. + destinationRefs[0].Port = "tcp" + c.WriteDestination(destinationMeshDestinationPort) + expectedStatus = &intermediate.Status{ + ID: suite.webDestinations.Id, + Generation: suite.webDestinations.Generation, + Conditions: []*pbresource.Condition{ + meshStatus.ConditionDestinationServiceFound(serviceRef), + meshStatus.ConditionMeshProtocolFound(serviceRef), + meshStatus.ConditionNonMeshProtocolDestinationPort(serviceRef, destinationRefs[0].Port), }, } @@ -494,7 +556,7 @@ func (suite *dataFetcherSuite) TestFetcher_FetchDestinationsData() { }) suite.T().Run("happy path", func(t *testing.T) { - destinationRefs := []*intermediate.CombinedDestinationRef{destination1, destination2, destination3} + destinationRefs := []intermediate.CombinedDestinationRef{destination1, destination2, destination3} expectedDestinations := []*intermediate.Destination{ { Explicit: suite.webDestinationsData.Upstreams[0], @@ -538,17 +600,18 @@ func (suite *dataFetcherSuite) TestFetcher_FetchDestinationsData() { } var expectedConditions []*pbresource.Condition for _, d := range destinationRefs { - ref := cache.KeyFromRefAndPort(d.ServiceRef, d.Port) + ref := resource.ReferenceToString(d.ServiceRef) expectedConditions = append(expectedConditions, meshStatus.ConditionDestinationServiceFound(ref), - meshStatus.ConditionMeshDestination(ref)) + meshStatus.ConditionMeshProtocolFound(ref), + meshStatus.ConditionNonMeshProtocolDestinationPort(ref, d.Port)) } actualDestinations, 
statuses, err := f.FetchDestinationsData(suite.ctx, destinationRefs) require.NoError(t, err) // Check that all statuses have "happy" conditions. - dref := cache.KeyFromID(destination1.ExplicitDestinationsID) + dref := resource.IDToString(destination1.ExplicitDestinationsID) prototest.AssertElementsMatch(t, expectedConditions, statuses[dref].Conditions) // Check that we've computed expanded destinations correctly. diff --git a/internal/mesh/internal/controllers/sidecarproxy/status/status.go b/internal/mesh/internal/controllers/sidecarproxy/status/status.go index 0ba125fae63..036c8281b1e 100644 --- a/internal/mesh/internal/controllers/sidecarproxy/status/status.go +++ b/internal/mesh/internal/controllers/sidecarproxy/status/status.go @@ -7,38 +7,39 @@ import ( ) const ( - StatusConditionMeshDestination = "MeshDestination" + StatusConditionDestinationAccepted = "DestinationAccepted" - StatusReasonNonMeshDestination = "MeshPortProtocolNotFound" - StatusReasonMeshDestination = "MeshPortProtocolFound" + StatusReasonMeshProtocolNotFound = "MeshPortProtocolNotFound" + StatusReasonMeshProtocolFound = "MeshPortProtocolFound" - StatusConditionDestinationExists = "DestinationExists" + StatusReasonMeshProtocolDestinationPort = "DestinationWithMeshPortProtocol" + StatusReasonNonMeshProtocolDestinationPort = "DestinationWithNonMeshPortProtocol" StatusReasonDestinationServiceNotFound = "ServiceNotFound" StatusReasonDestinationServiceFound = "ServiceFound" ) -func ConditionNonMeshDestination(serviceRef string) *pbresource.Condition { +func ConditionMeshProtocolNotFound(serviceRef string) *pbresource.Condition { return &pbresource.Condition{ - Type: StatusConditionMeshDestination, + Type: StatusConditionDestinationAccepted, State: pbresource.Condition_STATE_FALSE, - Reason: StatusReasonNonMeshDestination, + Reason: StatusReasonMeshProtocolNotFound, Message: fmt.Sprintf("service %q cannot be referenced as a Destination because it's not mesh-enabled.", serviceRef), } } -func 
ConditionMeshDestination(serviceRef string) *pbresource.Condition { +func ConditionMeshProtocolFound(serviceRef string) *pbresource.Condition { return &pbresource.Condition{ - Type: StatusConditionMeshDestination, + Type: StatusConditionDestinationAccepted, State: pbresource.Condition_STATE_TRUE, - Reason: StatusReasonMeshDestination, + Reason: StatusReasonMeshProtocolFound, Message: fmt.Sprintf("service %q is on the mesh.", serviceRef), } } func ConditionDestinationServiceNotFound(serviceRef string) *pbresource.Condition { return &pbresource.Condition{ - Type: StatusConditionDestinationExists, + Type: StatusConditionDestinationAccepted, State: pbresource.Condition_STATE_FALSE, Reason: StatusReasonDestinationServiceNotFound, Message: fmt.Sprintf("service %q does not exist.", serviceRef), @@ -47,9 +48,27 @@ func ConditionDestinationServiceNotFound(serviceRef string) *pbresource.Conditio func ConditionDestinationServiceFound(serviceRef string) *pbresource.Condition { return &pbresource.Condition{ - Type: StatusConditionDestinationExists, + Type: StatusConditionDestinationAccepted, State: pbresource.Condition_STATE_TRUE, Reason: StatusReasonDestinationServiceFound, Message: fmt.Sprintf("service %q exists.", serviceRef), } } + +func ConditionMeshProtocolDestinationPort(serviceRef, port string) *pbresource.Condition { + return &pbresource.Condition{ + Type: StatusConditionDestinationAccepted, + State: pbresource.Condition_STATE_FALSE, + Reason: StatusReasonMeshProtocolDestinationPort, + Message: fmt.Sprintf("destination port %q for service %q has PROTOCOL_MESH which is unsupported for destination services", port, serviceRef), + } +} + +func ConditionNonMeshProtocolDestinationPort(serviceRef, port string) *pbresource.Condition { + return &pbresource.Condition{ + Type: StatusConditionDestinationAccepted, + State: pbresource.Condition_STATE_TRUE, + Reason: StatusReasonNonMeshProtocolDestinationPort, + Message: fmt.Sprintf("destination port %q for service %q has a non-mesh 
protocol", port, serviceRef), + } +} diff --git a/internal/mesh/internal/controllers/sidecarproxy/mapper/destinations_mapper.go b/internal/mesh/internal/mappers/sidecarproxymapper/destinations_mapper.go similarity index 78% rename from internal/mesh/internal/controllers/sidecarproxy/mapper/destinations_mapper.go rename to internal/mesh/internal/mappers/sidecarproxymapper/destinations_mapper.go index 6bf98fd0787..a0a36a6f48f 100644 --- a/internal/mesh/internal/controllers/sidecarproxy/mapper/destinations_mapper.go +++ b/internal/mesh/internal/mappers/sidecarproxymapper/destinations_mapper.go @@ -1,11 +1,10 @@ -package mapper +package sidecarproxymapper import ( "context" "github.com/hashicorp/consul/internal/catalog" "github.com/hashicorp/consul/internal/controller" - "github.com/hashicorp/consul/internal/mesh/internal/controllers/sidecarproxy/cache" "github.com/hashicorp/consul/internal/mesh/internal/types" "github.com/hashicorp/consul/internal/mesh/internal/types/intermediate" "github.com/hashicorp/consul/internal/resource" @@ -21,7 +20,7 @@ func (m *Mapper) MapDestinationsToProxyStateTemplate(ctx context.Context, rt con } // Look up workloads for this destinations. 
- sourceProxyIDs := make(map[string]*pbresource.ID) + sourceProxyIDs := make(map[resource.ReferenceKey]struct{}) var result []controller.Request for _, prefix := range destinations.Workloads.Prefixes { resp, err := rt.Client.List(ctx, &pbresource.ListRequest{ @@ -34,7 +33,7 @@ func (m *Mapper) MapDestinationsToProxyStateTemplate(ctx context.Context, rt con } for _, r := range resp.Resources { proxyID := resource.ReplaceType(types.ProxyStateTemplateType, r.Id) - sourceProxyIDs[cache.KeyFromID(proxyID)] = proxyID + sourceProxyIDs[resource.NewReferenceKey(proxyID)] = struct{}{} result = append(result, controller.Request{ ID: proxyID, }) @@ -42,13 +41,12 @@ func (m *Mapper) MapDestinationsToProxyStateTemplate(ctx context.Context, rt con } for _, name := range destinations.Workloads.Names { - id := &pbresource.ID{ + proxyID := &pbresource.ID{ Name: name, Tenancy: res.Id.Tenancy, - Type: catalog.WorkloadType, + Type: types.ProxyStateTemplateType, } - proxyID := resource.ReplaceType(types.ProxyStateTemplateType, id) - sourceProxyIDs[cache.KeyFromID(proxyID)] = proxyID + sourceProxyIDs[resource.NewReferenceKey(proxyID)] = struct{}{} result = append(result, controller.Request{ ID: proxyID, }) @@ -56,13 +54,13 @@ func (m *Mapper) MapDestinationsToProxyStateTemplate(ctx context.Context, rt con // Add this destination to cache. 
for _, destination := range destinations.Upstreams { - destinationRef := &intermediate.CombinedDestinationRef{ + destinationRef := intermediate.CombinedDestinationRef{ ServiceRef: destination.DestinationRef, Port: destination.DestinationPort, ExplicitDestinationsID: res.Id, SourceProxies: sourceProxyIDs, } - m.cache.Write(destinationRef) + m.cache.WriteDestination(destinationRef) } return result, nil diff --git a/internal/mesh/internal/controllers/sidecarproxy/mapper/destinations_mapper_test.go b/internal/mesh/internal/mappers/sidecarproxymapper/destinations_mapper_test.go similarity index 86% rename from internal/mesh/internal/controllers/sidecarproxy/mapper/destinations_mapper_test.go rename to internal/mesh/internal/mappers/sidecarproxymapper/destinations_mapper_test.go index 1aacac30029..07a7939404a 100644 --- a/internal/mesh/internal/controllers/sidecarproxy/mapper/destinations_mapper_test.go +++ b/internal/mesh/internal/mappers/sidecarproxymapper/destinations_mapper_test.go @@ -1,4 +1,4 @@ -package mapper +package sidecarproxymapper import ( "context" @@ -7,14 +7,13 @@ import ( svctest "github.com/hashicorp/consul/agent/grpc-external/services/resource/testing" "github.com/hashicorp/consul/internal/catalog" "github.com/hashicorp/consul/internal/controller" - "github.com/hashicorp/consul/internal/mesh/internal/controllers/sidecarproxy/cache" + "github.com/hashicorp/consul/internal/mesh/internal/cache/sidecarproxycache" "github.com/hashicorp/consul/internal/mesh/internal/types" "github.com/hashicorp/consul/internal/mesh/internal/types/intermediate" "github.com/hashicorp/consul/internal/resource" "github.com/hashicorp/consul/internal/resource/resourcetest" pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v1alpha1" pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v1alpha1" - "github.com/hashicorp/consul/proto-public/pbresource" "github.com/hashicorp/consul/proto/private/prototest" "github.com/stretchr/testify/require" ) @@ -65,7 +64,7 @@ func 
TestMapDestinationsToProxyStateTemplate(t *testing.T) { WithData(t, webDestinationsData). Write(t, client) - c := cache.New() + c := sidecarproxycache.New() mapper := &Mapper{cache: c} expRequests := []controller.Request{ @@ -83,17 +82,18 @@ func TestMapDestinationsToProxyStateTemplate(t *testing.T) { proxy2ID := resourcetest.Resource(types.ProxyStateTemplateType, webWorkload2.Id.Name).ID() proxy3ID := resourcetest.Resource(types.ProxyStateTemplateType, webWorkload3.Id.Name).ID() for _, u := range webDestinationsData.Upstreams { - expDestination := &intermediate.CombinedDestinationRef{ + expDestination := intermediate.CombinedDestinationRef{ ServiceRef: u.DestinationRef, Port: u.DestinationPort, ExplicitDestinationsID: webDestinations.Id, - SourceProxies: map[string]*pbresource.ID{ - cache.KeyFromID(proxy1ID): proxy1ID, - cache.KeyFromID(proxy2ID): proxy2ID, - cache.KeyFromID(proxy3ID): proxy3ID, + SourceProxies: map[resource.ReferenceKey]struct{}{ + resource.NewReferenceKey(proxy1ID): {}, + resource.NewReferenceKey(proxy2ID): {}, + resource.NewReferenceKey(proxy3ID): {}, }, } - prototest.AssertDeepEqual(t, expDestination, c.ReadDestination(u.DestinationRef, u.DestinationPort)) + actualDestination, found := c.ReadDestination(u.DestinationRef, u.DestinationPort) + require.True(t, found) + prototest.AssertDeepEqual(t, expDestination, actualDestination) } - } diff --git a/internal/mesh/internal/controllers/sidecarproxy/mapper/service_endpoints_mapper.go b/internal/mesh/internal/mappers/sidecarproxymapper/service_endpoints_mapper.go similarity index 62% rename from internal/mesh/internal/controllers/sidecarproxy/mapper/service_endpoints_mapper.go rename to internal/mesh/internal/mappers/sidecarproxymapper/service_endpoints_mapper.go index 9c7d768e589..3ba78dbd386 100644 --- a/internal/mesh/internal/controllers/sidecarproxy/mapper/service_endpoints_mapper.go +++ b/internal/mesh/internal/mappers/sidecarproxymapper/service_endpoints_mapper.go @@ -1,11 +1,11 @@ -package 
mapper +package sidecarproxymapper import ( "context" "github.com/hashicorp/consul/internal/catalog" "github.com/hashicorp/consul/internal/controller" - "github.com/hashicorp/consul/internal/mesh/internal/controllers/sidecarproxy/cache" + "github.com/hashicorp/consul/internal/mesh/internal/cache/sidecarproxycache" "github.com/hashicorp/consul/internal/mesh/internal/types" "github.com/hashicorp/consul/internal/resource" pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v1alpha1" @@ -13,10 +13,10 @@ import ( ) type Mapper struct { - cache *cache.Cache + cache *sidecarproxycache.Cache } -func New(c *cache.Cache) *Mapper { +func New(c *sidecarproxycache.Cache) *Mapper { return &Mapper{ cache: c, } @@ -24,10 +24,6 @@ func New(c *cache.Cache) *Mapper { // MapServiceEndpointsToProxyStateTemplate maps catalog.ServiceEndpoints objects to the IDs of // ProxyStateTemplate. -// For a destination proxy, we only need to generate requests from workloads this "endpoints" points to -// so that we can re-generate proxy state for the sidecar proxy. -// If this service endpoints is a source for some proxies, we need to generate requests for those proxies as well. -// so we need to have a map from service endpoints to source proxy Ids. func (m *Mapper) MapServiceEndpointsToProxyStateTemplate(_ context.Context, _ controller.Runtime, res *pbresource.Resource) ([]controller.Request, error) { // This mapper needs to look up workload IDs from service endpoints and replace them with ProxyStateTemplate type. var serviceEndpoints pbcatalog.ServiceEndpoints @@ -38,6 +34,8 @@ func (m *Mapper) MapServiceEndpointsToProxyStateTemplate(_ context.Context, _ co var result []controller.Request + // First, we need to generate requests from workloads this "endpoints" points to + // so that we can re-generate proxy state for the sidecar proxy. for _, endpoint := range serviceEndpoints.Endpoints { // Convert the reference to a workload to a ProxyStateTemplate ID. 
// Because these resources are name and tenancy aligned, we only need to change the type. @@ -46,12 +44,13 @@ func (m *Mapper) MapServiceEndpointsToProxyStateTemplate(_ context.Context, _ co // services external to Consul, and we don't need to reconcile those as they don't have // associated workloads. if endpoint.TargetRef != nil { + id := &pbresource.ID{ + Name: endpoint.TargetRef.Name, + Tenancy: endpoint.TargetRef.Tenancy, + Type: types.ProxyStateTemplateType, + } result = append(result, controller.Request{ - ID: &pbresource.ID{ - Name: endpoint.TargetRef.Name, - Tenancy: endpoint.TargetRef.Tenancy, - Type: types.ProxyStateTemplateType, - }, + ID: id, }) } } @@ -59,14 +58,19 @@ func (m *Mapper) MapServiceEndpointsToProxyStateTemplate(_ context.Context, _ co // Look up any source proxies for this service and generate updates. serviceID := resource.ReplaceType(catalog.ServiceType, res.Id) + // Second, we need to generate requests for any proxies where this service is a destination. if len(serviceEndpoints.Endpoints) > 0 { - // All port names in the endpoints object should be the same as filter out to ports that are selected + // All port names in the endpoints object should be the same, as we filter down to ports that are selected + // by the service, and so it's sufficient to check just the first endpoint. - for portName := range serviceEndpoints.Endpoints[0].Ports { - destination := m.cache.ReadDestination(resource.Reference(serviceID, ""), portName) - if destination != nil { - for _, id := range destination.SourceProxies { - result = append(result, controller.Request{ID: id}) + for portName, port := range serviceEndpoints.Endpoints[0].Ports { + if port.Protocol == pbcatalog.Protocol_PROTOCOL_MESH { + // Skip mesh ports. These should never be used as destination ports. 
+ continue + } + serviceRef := resource.Reference(serviceID, "") + if destination, ok := m.cache.ReadDestination(serviceRef, portName); ok { + for refKey := range destination.SourceProxies { + result = append(result, controller.Request{ID: refKey.ToID()}) } } } diff --git a/internal/mesh/internal/controllers/sidecarproxy/mapper/service_endpoints_mapper_test.go b/internal/mesh/internal/mappers/sidecarproxymapper/service_endpoints_mapper_test.go similarity index 66% rename from internal/mesh/internal/controllers/sidecarproxy/mapper/service_endpoints_mapper_test.go rename to internal/mesh/internal/mappers/sidecarproxymapper/service_endpoints_mapper_test.go index f463afbc47f..b49df973722 100644 --- a/internal/mesh/internal/controllers/sidecarproxy/mapper/service_endpoints_mapper_test.go +++ b/internal/mesh/internal/mappers/sidecarproxymapper/service_endpoints_mapper_test.go @@ -1,4 +1,4 @@ -package mapper +package sidecarproxymapper import ( "context" @@ -6,12 +6,12 @@ import ( "github.com/hashicorp/consul/internal/catalog" "github.com/hashicorp/consul/internal/controller" - "github.com/hashicorp/consul/internal/mesh/internal/controllers/sidecarproxy/cache" + "github.com/hashicorp/consul/internal/mesh/internal/cache/sidecarproxycache" "github.com/hashicorp/consul/internal/mesh/internal/types" "github.com/hashicorp/consul/internal/mesh/internal/types/intermediate" + "github.com/hashicorp/consul/internal/resource" "github.com/hashicorp/consul/internal/resource/resourcetest" pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v1alpha1" - "github.com/hashicorp/consul/proto-public/pbresource" "github.com/hashicorp/consul/proto/private/prototest" "github.com/stretchr/testify/require" ) @@ -27,6 +27,7 @@ func TestMapServiceEndpointsToProxyStateTemplate(t *testing.T) { Ports: map[string]*pbcatalog.WorkloadPort{ "tcp1": {Port: 8080}, "tcp2": {Port: 8081}, + "mesh": {Port: 20000, Protocol: pbcatalog.Protocol_PROTOCOL_MESH}, }, }, { @@ -34,6 +35,7 @@ func 
TestMapServiceEndpointsToProxyStateTemplate(t *testing.T) { Ports: map[string]*pbcatalog.WorkloadPort{ "tcp1": {Port: 8080}, "tcp2": {Port: 8081}, + "mesh": {Port: 20000, Protocol: pbcatalog.Protocol_PROTOCOL_MESH}, }, }, }, @@ -41,29 +43,38 @@ func TestMapServiceEndpointsToProxyStateTemplate(t *testing.T) { proxyTmpl1ID := resourcetest.Resource(types.ProxyStateTemplateType, "workload-1").ID() proxyTmpl2ID := resourcetest.Resource(types.ProxyStateTemplateType, "workload-2").ID() - c := cache.New() + c := sidecarproxycache.New() mapper := &Mapper{cache: c} sourceProxy1 := resourcetest.Resource(types.ProxyStateTemplateType, "workload-3").ID() sourceProxy2 := resourcetest.Resource(types.ProxyStateTemplateType, "workload-4").ID() sourceProxy3 := resourcetest.Resource(types.ProxyStateTemplateType, "workload-5").ID() - destination1 := &intermediate.CombinedDestinationRef{ + destination1 := intermediate.CombinedDestinationRef{ ServiceRef: resourcetest.Resource(catalog.ServiceType, "service").ReferenceNoSection(), Port: "tcp1", - SourceProxies: map[string]*pbresource.ID{ - cache.KeyFromID(sourceProxy1): sourceProxy1, - cache.KeyFromID(sourceProxy2): sourceProxy2, + SourceProxies: map[resource.ReferenceKey]struct{}{ + resource.NewReferenceKey(sourceProxy1): {}, + resource.NewReferenceKey(sourceProxy2): {}, }, } - destination2 := &intermediate.CombinedDestinationRef{ + destination2 := intermediate.CombinedDestinationRef{ ServiceRef: resourcetest.Resource(catalog.ServiceType, "service").ReferenceNoSection(), Port: "tcp2", - SourceProxies: map[string]*pbresource.ID{ - cache.KeyFromID(sourceProxy1): sourceProxy1, - cache.KeyFromID(sourceProxy3): sourceProxy3, + SourceProxies: map[resource.ReferenceKey]struct{}{ + resource.NewReferenceKey(sourceProxy1): {}, + resource.NewReferenceKey(sourceProxy3): {}, }, } - c.Write(destination1) - c.Write(destination2) + destination3 := intermediate.CombinedDestinationRef{ + ServiceRef: resourcetest.Resource(catalog.ServiceType, 
"service").ReferenceNoSection(), + Port: "mesh", + SourceProxies: map[resource.ReferenceKey]struct{}{ + resource.NewReferenceKey(sourceProxy1): {}, + resource.NewReferenceKey(sourceProxy3): {}, + }, + } + c.WriteDestination(destination1) + c.WriteDestination(destination2) + c.WriteDestination(destination3) expRequests := []controller.Request{ {ID: proxyTmpl1ID}, diff --git a/internal/mesh/internal/types/intermediate/types.go b/internal/mesh/internal/types/intermediate/types.go index 5be14102b94..93ae9df4e92 100644 --- a/internal/mesh/internal/types/intermediate/types.go +++ b/internal/mesh/internal/types/intermediate/types.go @@ -1,6 +1,7 @@ package intermediate import ( + "github.com/hashicorp/consul/internal/resource" pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v1alpha1" pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v1alpha1" "github.com/hashicorp/consul/proto-public/pbresource" @@ -15,9 +16,8 @@ type CombinedDestinationRef struct { // Port is the port name for this destination. Port string - // SourceProxies are the IDs of source proxy state template resources. - // The keys are a string representation of *pbresource.ID. - SourceProxies map[string]*pbresource.ID + // SourceProxies are the reference keys of source proxy state template resources. + SourceProxies map[resource.ReferenceKey]struct{} // ExplicitDestinationsID is the id of the pbmesh.Upstreams resource. For implicit destinations, // this should be nil. diff --git a/internal/resource/mappers/bimapper/bimapper.go b/internal/resource/mappers/bimapper/bimapper.go index fd47f0beedc..34302f8ee8b 100644 --- a/internal/resource/mappers/bimapper/bimapper.go +++ b/internal/resource/mappers/bimapper/bimapper.go @@ -296,6 +296,8 @@ func (m *Mapper) MapLink(_ context.Context, _ controller.Runtime, res *pbresourc } func (m *Mapper) itemIDsByLink(link resource.ReferenceKey) []*pbresource.ID { + // a lock must be held both to read items from the map and to read the + // returned items. 
m.lock.Lock() defer m.lock.Unlock() @@ -312,6 +314,8 @@ func (m *Mapper) itemIDsByLink(link resource.ReferenceKey) []*pbresource.ID { } func (m *Mapper) itemRefsByLink(link resource.ReferenceKey) []*pbresource.Reference { + // a lock must be held both to read items from the map and to read the + // returned items. m.lock.Lock() defer m.lock.Unlock() diff --git a/proto-public/go.mod b/proto-public/go.mod index 20feb72ecdc..63f22c4d455 100644 --- a/proto-public/go.mod +++ b/proto-public/go.mod @@ -3,7 +3,9 @@ module github.com/hashicorp/consul/proto-public go 1.19 require ( + github.com/hashicorp/consul v1.16.1 github.com/stretchr/testify v1.8.3 + golang.org/x/exp v0.0.0-20230817173708-d852ddb80c63 google.golang.org/grpc v1.55.0 google.golang.org/protobuf v1.30.0 ) @@ -11,12 +13,13 @@ require ( require ( github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/golang/protobuf v1.5.3 // indirect + github.com/google/go-cmp v0.5.9 // indirect github.com/kr/pretty v0.3.0 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/rogpeppe/go-internal v1.10.0 // indirect github.com/stretchr/objx v0.5.0 // indirect golang.org/x/net v0.13.0 // indirect - golang.org/x/sys v0.10.0 // indirect + golang.org/x/sys v0.11.0 // indirect golang.org/x/text v0.11.0 // indirect google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 // indirect gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect diff --git a/proto-public/go.sum b/proto-public/go.sum index f030ff875f7..aa783b0eed7 100644 --- a/proto-public/go.sum +++ b/proto-public/go.sum @@ -8,6 +8,9 @@ github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= 
+github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/hashicorp/consul v1.16.1 h1:3CeNybQgjxJ3wu2IUSi3OySn4bQ70sv1jENtLJCrklQ= +github.com/hashicorp/consul v1.16.1/go.mod h1:GH3Ybk4rNKf0wVLfwG3btwPilh+sMwGtvymgFqFRqp0= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= @@ -30,10 +33,12 @@ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.3 h1:RP3t2pwF7cMEbC1dqtB6poj3niw/9gnV4Cjg5oW5gtY= github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +golang.org/x/exp v0.0.0-20230817173708-d852ddb80c63 h1:m64FZMko/V45gv0bNmrNYoDEq8U5YUhetc9cBWKS1TQ= +golang.org/x/exp v0.0.0-20230817173708-d852ddb80c63/go.mod h1:0v4NqG35kSWCMzLaMeX+IQrlSnVE/bqGSyC2cz/9Le8= golang.org/x/net v0.13.0 h1:Nvo8UFsZ8X3BhAC9699Z1j7XQ3rsZnUUm7jfBEk1ueY= golang.org/x/net v0.13.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA= -golang.org/x/sys v0.10.0 h1:SqMFp9UcQJZa+pmYuAKjd9xq1f0j5rLcDIk0mj4qAsA= -golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.11.0 h1:eG7RXZHdqOJ1i+0lgLgCpSXAp6M3LYlAo6osgSi0xOM= +golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/text v0.11.0 h1:LAntKIrcmeSKERyiOh0XMV39LXS8IE9UL2yP7+f5ij4= golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/proto-public/pbcatalog/v1alpha1/workload_addon.go b/proto-public/pbcatalog/v1alpha1/workload_addon.go new file mode 100644 index 00000000000..e26bca3af72 --- /dev/null +++ 
b/proto-public/pbcatalog/v1alpha1/workload_addon.go @@ -0,0 +1,40 @@ +package catalogv1alpha1 + +import "golang.org/x/exp/slices" + +func (w *Workload) GetMeshPortName() (string, bool) { + var meshPort string + + for portName, port := range w.Ports { + if port.Protocol == Protocol_PROTOCOL_MESH { + meshPort = portName + return meshPort, true + } + } + + return "", false +} + +func (w *Workload) IsMeshEnabled() bool { + _, ok := w.GetMeshPortName() + return ok +} + +func (w *Workload) GetNonExternalAddressesForPort(portName string) []*WorkloadAddress { + var addresses []*WorkloadAddress + + for _, address := range w.Addresses { + if address.External { + // Skip external addresses. + continue + } + + // If there are no ports, that means this port is selected. + // Otherwise, check if the port is explicitly selected by this address + if len(address.Ports) == 0 || slices.Contains(address.Ports, portName) { + addresses = append(addresses, address) + } + } + + return addresses +} diff --git a/proto-public/pbcatalog/v1alpha1/workload_addon_test.go b/proto-public/pbcatalog/v1alpha1/workload_addon_test.go new file mode 100644 index 00000000000..00b0b066e44 --- /dev/null +++ b/proto-public/pbcatalog/v1alpha1/workload_addon_test.go @@ -0,0 +1,104 @@ +package catalogv1alpha1 + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestGetMeshPort(t *testing.T) { + cases := map[string]struct { + ports map[string]*WorkloadPort + exp string + }{ + "nil ports": { + ports: nil, + exp: "", + }, + "empty ports": { + ports: make(map[string]*WorkloadPort), + exp: "", + }, + "no mesh ports": { + ports: map[string]*WorkloadPort{ + "p1": {Port: 1000, Protocol: Protocol_PROTOCOL_HTTP}, + "p2": {Port: 2000, Protocol: Protocol_PROTOCOL_TCP}, + }, + exp: "", + }, + "one mesh port": { + ports: map[string]*WorkloadPort{ + "p1": {Port: 1000, Protocol: Protocol_PROTOCOL_HTTP}, + "p2": {Port: 2000, Protocol: Protocol_PROTOCOL_TCP}, + "p3": {Port: 3000, Protocol: 
Protocol_PROTOCOL_MESH}, + }, + exp: "p3", + }, + } + + for name, c := range cases { + t.Run(name, func(t *testing.T) { + workload := Workload{ + Ports: c.ports, + } + meshPort, ok := workload.GetMeshPortName() + if c.exp != "" { + require.True(t, ok) + require.Equal(t, c.exp, meshPort) + } + }) + } +} + +func TestGetAddressesForPort(t *testing.T) { + cases := map[string]struct { + addresses []*WorkloadAddress + portName string + expAddresses []*WorkloadAddress + }{ + "empty addresses": { + addresses: nil, + portName: "doesn't matter", + expAddresses: nil, + }, + "addresses without selected port": { + addresses: []*WorkloadAddress{{Host: "1.1.1.1"}}, + portName: "not-found", + expAddresses: nil, + }, + "single selected addresses": { + addresses: []*WorkloadAddress{ + {Host: "1.1.1.1", Ports: []string{"p1", "p2"}}, + {Host: "2.2.2.2", Ports: []string{"p3", "p4"}}, + }, + portName: "p1", + expAddresses: []*WorkloadAddress{ + {Host: "1.1.1.1", Ports: []string{"p1", "p2"}}, + }, + }, + "multiple selected addresses": { + addresses: []*WorkloadAddress{ + {Host: "1.1.1.1", Ports: []string{"p1", "p2"}}, + {Host: "2.2.2.2", Ports: []string{"p3", "p4"}}, + {Host: "3.3.3.3"}, + {Host: "3.3.3.3", Ports: []string{"p1"}, External: true}, + }, + portName: "p1", + expAddresses: []*WorkloadAddress{ + {Host: "1.1.1.1", Ports: []string{"p1", "p2"}}, + {Host: "3.3.3.3"}, + }, + }, + } + + for name, c := range cases { + t.Run(name, func(t *testing.T) { + workload := Workload{ + Addresses: c.addresses, + } + + actualAddresses := workload.GetNonExternalAddressesForPort(c.portName) + require.Equal(t, actualAddresses, c.expAddresses) + }) + } +} From 59db705a228848d6caefd907ae65ac608b889c9d Mon Sep 17 00:00:00 2001 From: Iryna Shustava Date: Wed, 6 Sep 2023 17:32:55 -0600 Subject: [PATCH 10/11] more PR feedback --- agent/consul/server.go | 2 -- .../mesh/internal/controllers/register.go | 7 +---- .../sidecarproxy/builder/builder_test.go | 21 -------------- .../builder/destination_builder.go 
| 3 ++ .../builder/destination_builder_test.go | 10 ++++--- .../sidecarproxy/builder/local_app.go | 3 ++ .../sidecarproxy/builder/local_app_test.go | 11 ++++++-- .../controllers/sidecarproxy/controller.go | 21 +++++++------- .../sidecarproxy/fetcher/data_fetcher_test.go | 13 +++++---- .../service_endpoints_mapper_test.go | 28 +++++++++++++------ .../resource/mappers/bimapper/bimapper.go | 24 ---------------- 11 files changed, 59 insertions(+), 84 deletions(-) diff --git a/agent/consul/server.go b/agent/consul/server.go index 4b0c07e1945..f138f82deb6 100644 --- a/agent/consul/server.go +++ b/agent/consul/server.go @@ -75,9 +75,7 @@ import ( "github.com/hashicorp/consul/agent/token" "github.com/hashicorp/consul/internal/catalog" "github.com/hashicorp/consul/internal/controller" - "github.com/hashicorp/consul/internal/mesh" proxysnapshot "github.com/hashicorp/consul/internal/mesh/proxy-snapshot" - "github.com/hashicorp/consul/internal/resource" "github.com/hashicorp/consul/internal/resource/demo" "github.com/hashicorp/consul/internal/resource/reaper" raftstorage "github.com/hashicorp/consul/internal/storage/raft" diff --git a/internal/mesh/internal/controllers/register.go b/internal/mesh/internal/controllers/register.go index 12fa9c40de9..2a31d0725a7 100644 --- a/internal/mesh/internal/controllers/register.go +++ b/internal/mesh/internal/controllers/register.go @@ -4,19 +4,14 @@ package controllers import ( - "github.com/hashicorp/consul/internal/mesh/internal/controllers/sidecar-proxy/cache" - "github.com/hashicorp/consul/internal/mesh/internal/controllers/sidecar-proxy/mapper" - "github.com/hashicorp/consul/internal/catalog" "github.com/hashicorp/consul/internal/controller" "github.com/hashicorp/consul/internal/mesh/internal/cache/sidecarproxycache" "github.com/hashicorp/consul/internal/mesh/internal/controllers/sidecarproxy" - "github.com/hashicorp/consul/internal/mesh/internal/controllers/sidecarproxy/cache" - 
"github.com/hashicorp/consul/internal/mesh/internal/controllers/sidecarproxy/mapper" "github.com/hashicorp/consul/internal/mesh/internal/controllers/xds" + "github.com/hashicorp/consul/internal/mesh/internal/mappers/sidecarproxymapper" "github.com/hashicorp/consul/internal/mesh/internal/types" "github.com/hashicorp/consul/internal/resource/mappers/bimapper" - "github.com/hashicorp/consul/internal/mesh/internal/mappers/sidecarproxymapper" ) type Dependencies struct { diff --git a/internal/mesh/internal/controllers/sidecarproxy/builder/builder_test.go b/internal/mesh/internal/controllers/sidecarproxy/builder/builder_test.go index 5c806424b92..5768251a67f 100644 --- a/internal/mesh/internal/controllers/sidecarproxy/builder/builder_test.go +++ b/internal/mesh/internal/controllers/sidecarproxy/builder/builder_test.go @@ -3,7 +3,6 @@ package builder import ( "flag" "os" - "path/filepath" "testing" "github.com/stretchr/testify/require" @@ -11,10 +10,6 @@ import ( "google.golang.org/protobuf/proto" ) -var ( - update = flag.Bool("update", false, "update the golden files of this test") -) - func TestMain(m *testing.M) { flag.Parse() os.Exit(m.Run()) @@ -29,19 +24,3 @@ func protoToJSON(t *testing.T, pb proto.Message) string { require.NoError(t, err) return string(gotJSON) } - -func goldenValue(t *testing.T, goldenFile string, actual string, update bool) string { - t.Helper() - goldenPath := filepath.Join("testdata", goldenFile) + ".golden" - - if update { - err := os.WriteFile(goldenPath, []byte(actual), 0644) - require.NoError(t, err) - - return actual - } - - content, err := os.ReadFile(goldenPath) - require.NoError(t, err) - return string(content) -} diff --git a/internal/mesh/internal/controllers/sidecarproxy/builder/destination_builder.go b/internal/mesh/internal/controllers/sidecarproxy/builder/destination_builder.go index 8776fae8fb4..823343b189e 100644 --- a/internal/mesh/internal/controllers/sidecarproxy/builder/destination_builder.go +++ 
b/internal/mesh/internal/controllers/sidecarproxy/builder/destination_builder.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package builder import ( diff --git a/internal/mesh/internal/controllers/sidecarproxy/builder/destination_builder_test.go b/internal/mesh/internal/controllers/sidecarproxy/builder/destination_builder_test.go index cbe1a1a4379..ee4be71f6f5 100644 --- a/internal/mesh/internal/controllers/sidecarproxy/builder/destination_builder_test.go +++ b/internal/mesh/internal/controllers/sidecarproxy/builder/destination_builder_test.go @@ -3,14 +3,16 @@ package builder import ( "testing" + "github.com/stretchr/testify/require" + "github.com/hashicorp/consul/internal/catalog" "github.com/hashicorp/consul/internal/mesh/internal/types/intermediate" "github.com/hashicorp/consul/internal/resource" "github.com/hashicorp/consul/internal/resource/resourcetest" + "github.com/hashicorp/consul/internal/testing/golden" pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v1alpha1" pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v1alpha1" "github.com/hashicorp/consul/proto-public/pbresource" - "github.com/stretchr/testify/require" ) var ( @@ -32,10 +34,10 @@ var ( func TestBuildExplicitDestinations(t *testing.T) { api1Endpoints := resourcetest.Resource(catalog.ServiceEndpointsType, "api-1"). - WithData(t, endpointsData).Build() + WithData(t, endpointsData).WithTenancy(resource.DefaultNamespacedTenancy()).Build() api2Endpoints := resourcetest.Resource(catalog.ServiceEndpointsType, "api-2"). 
- WithData(t, endpointsData).Build() + WithData(t, endpointsData).WithTenancy(resource.DefaultNamespacedTenancy()).Build() api1Identity := &pbresource.Reference{ Name: "api1-identity", @@ -99,7 +101,7 @@ func TestBuildExplicitDestinations(t *testing.T) { Build() actual := protoToJSON(t, proxyTmpl) - expected := goldenValue(t, name, actual, *update) + expected := golden.Get(t, actual, name+".golden") require.JSONEq(t, expected, actual) } diff --git a/internal/mesh/internal/controllers/sidecarproxy/builder/local_app.go b/internal/mesh/internal/controllers/sidecarproxy/builder/local_app.go index 72bc54596df..5a465f503f4 100644 --- a/internal/mesh/internal/controllers/sidecarproxy/builder/local_app.go +++ b/internal/mesh/internal/controllers/sidecarproxy/builder/local_app.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package builder import ( diff --git a/internal/mesh/internal/controllers/sidecarproxy/builder/local_app_test.go b/internal/mesh/internal/controllers/sidecarproxy/builder/local_app_test.go index cbd76954ffa..d846a21d137 100644 --- a/internal/mesh/internal/controllers/sidecarproxy/builder/local_app_test.go +++ b/internal/mesh/internal/controllers/sidecarproxy/builder/local_app_test.go @@ -3,11 +3,14 @@ package builder import ( "testing" + "github.com/stretchr/testify/require" + "github.com/hashicorp/consul/internal/mesh/internal/types" + "github.com/hashicorp/consul/internal/resource" "github.com/hashicorp/consul/internal/resource/resourcetest" + "github.com/hashicorp/consul/internal/testing/golden" pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v1alpha1" "github.com/hashicorp/consul/proto-public/pbresource" - "github.com/stretchr/testify/require" ) func TestBuildLocalApp(t *testing.T) { @@ -68,7 +71,7 @@ func TestBuildLocalApp(t *testing.T) { proxyTmpl := New(testProxyStateTemplateID(), testIdentityRef(), "foo.consul").BuildLocalApp(c.workload). 
Build() actual := protoToJSON(t, proxyTmpl) - expected := goldenValue(t, name, actual, *update) + expected := golden.Get(t, actual, name+".golden") require.JSONEq(t, expected, actual) }) @@ -76,7 +79,9 @@ func TestBuildLocalApp(t *testing.T) { } func testProxyStateTemplateID() *pbresource.ID { - return resourcetest.Resource(types.ProxyStateTemplateType, "test").ID() + return resourcetest.Resource(types.ProxyStateTemplateType, "test"). + WithTenancy(resource.DefaultNamespacedTenancy()). + ID() } func testIdentityRef() *pbresource.Reference { diff --git a/internal/mesh/internal/controllers/sidecarproxy/controller.go b/internal/mesh/internal/controllers/sidecarproxy/controller.go index ef68ee93350..a99cfcce11c 100644 --- a/internal/mesh/internal/controllers/sidecarproxy/controller.go +++ b/internal/mesh/internal/controllers/sidecarproxy/controller.go @@ -1,11 +1,14 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 +// SPDX-License-Identifier: BUSL-1.1 package sidecarproxy import ( "context" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/types/known/anypb" + "github.com/hashicorp/consul/internal/catalog" "github.com/hashicorp/consul/internal/controller" "github.com/hashicorp/consul/internal/mesh/internal/cache/sidecarproxycache" @@ -16,8 +19,6 @@ import ( "github.com/hashicorp/consul/internal/mesh/internal/types/intermediate" "github.com/hashicorp/consul/internal/resource" "github.com/hashicorp/consul/proto-public/pbresource" - "google.golang.org/protobuf/proto" - "google.golang.org/protobuf/types/known/anypb" ) // ControllerName is the name for this controller. It's used for logging or status keys. 
@@ -44,14 +45,14 @@ type reconciler struct { func (r *reconciler) Reconcile(ctx context.Context, rt controller.Runtime, req controller.Request) error { rt.Logger = rt.Logger.With("resource-id", req.ID, "controller", ControllerName) - rt.Logger.Trace("reconciling proxy state template", "id", req.ID) + rt.Logger.Trace("reconciling proxy state template") // Instantiate a data fetcher to fetch all reconciliation data. dataFetcher := fetcher.Fetcher{Client: rt.Client, Cache: r.cache} // Check if the workload exists. workloadID := resource.ReplaceType(catalog.WorkloadType, req.ID) - workload, err := dataFetcher.FetchWorkload(ctx, resource.ReplaceType(catalog.WorkloadType, req.ID)) + workload, err := dataFetcher.FetchWorkload(ctx, workloadID) if err != nil { rt.Logger.Error("error reading the associated workload", "error", err) return err @@ -71,7 +72,7 @@ func (r *reconciler) Reconcile(ctx context.Context, rt controller.Runtime, req c if proxyStateTemplate == nil { // If proxy state template has been deleted, we will need to generate a new one. - rt.Logger.Trace("proxy state template for this workload doesn't yet exist; generating a new one", "id", req.ID) + rt.Logger.Trace("proxy state template for this workload doesn't yet exist; generating a new one") } if !workload.Workload.IsMeshEnabled() { @@ -79,7 +80,7 @@ func (r *reconciler) Reconcile(ctx context.Context, rt controller.Runtime, req c // If there's existing proxy state template, delete it. 
if proxyStateTemplate != nil { - rt.Logger.Trace("deleting existing proxy state template because workload is no longer on the mesh", "id", req.ID) + rt.Logger.Trace("deleting existing proxy state template because workload is no longer on the mesh") _, err = rt.Client.Delete(ctx, &pbresource.DeleteRequest{Id: req.ID}) if err != nil { rt.Logger.Error("error deleting existing proxy state template", "error", err) @@ -107,7 +108,7 @@ func (r *reconciler) Reconcile(ctx context.Context, rt controller.Runtime, req c destinationsRefs := r.cache.DestinationsBySourceProxy(req.ID) destinationsData, statuses, err := dataFetcher.FetchDestinationsData(ctx, destinationsRefs) if err != nil { - rt.Logger.Error("error fetching destinations for this proxy", "id", req.ID, "error", err) + rt.Logger.Error("error fetching destinations for this proxy", "error", err) return err } @@ -121,7 +122,7 @@ func (r *reconciler) Reconcile(ctx context.Context, rt controller.Runtime, req c rt.Logger.Error("error creating proxy state template data", "error", err) return err } - rt.Logger.Trace("updating proxy state template", "id", req.ID) + rt.Logger.Trace("updating proxy state template") _, err = rt.Client.Write(ctx, &pbresource.WriteRequest{ Resource: &pbresource.Resource{ Id: req.ID, @@ -134,7 +135,7 @@ func (r *reconciler) Reconcile(ctx context.Context, rt controller.Runtime, req c return err } } else { - rt.Logger.Trace("proxy state template data has not changed, skipping update", "id", req.ID) + rt.Logger.Trace("proxy state template data has not changed, skipping update") } // Update any statuses. 
diff --git a/internal/mesh/internal/controllers/sidecarproxy/fetcher/data_fetcher_test.go b/internal/mesh/internal/controllers/sidecarproxy/fetcher/data_fetcher_test.go index d2f7728891b..e73bc7f8ea8 100644 --- a/internal/mesh/internal/controllers/sidecarproxy/fetcher/data_fetcher_test.go +++ b/internal/mesh/internal/controllers/sidecarproxy/fetcher/data_fetcher_test.go @@ -4,6 +4,11 @@ import ( "context" "testing" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + svctest "github.com/hashicorp/consul/agent/grpc-external/services/resource/testing" "github.com/hashicorp/consul/internal/catalog" "github.com/hashicorp/consul/internal/controller" @@ -18,10 +23,6 @@ import ( "github.com/hashicorp/consul/proto-public/pbresource" "github.com/hashicorp/consul/proto/private/prototest" "github.com/hashicorp/consul/sdk/testutil" - "github.com/stretchr/testify/require" - "github.com/stretchr/testify/suite" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" ) func TestIsMeshEnabled(t *testing.T) { @@ -368,7 +369,9 @@ func (suite *dataFetcherSuite) TestFetcher_FetchDestinationsData() { }) suite.T().Run("service endpoints not found", func(t *testing.T) { - notFoundServiceRef := resourcetest.Resource(catalog.ServiceType, "not-found").ReferenceNoSection() + notFoundServiceRef := resourcetest.Resource(catalog.ServiceType, "not-found"). + WithTenancy(resource.DefaultNamespacedTenancy()). 
+ ReferenceNoSection() destinationNoServiceEndpoints := intermediate.CombinedDestinationRef{ ServiceRef: notFoundServiceRef, Port: "tcp", diff --git a/internal/mesh/internal/mappers/sidecarproxymapper/service_endpoints_mapper_test.go b/internal/mesh/internal/mappers/sidecarproxymapper/service_endpoints_mapper_test.go index b49df973722..6c17d6b43ae 100644 --- a/internal/mesh/internal/mappers/sidecarproxymapper/service_endpoints_mapper_test.go +++ b/internal/mesh/internal/mappers/sidecarproxymapper/service_endpoints_mapper_test.go @@ -4,6 +4,8 @@ import ( "context" "testing" + "github.com/stretchr/testify/require" + "github.com/hashicorp/consul/internal/catalog" "github.com/hashicorp/consul/internal/controller" "github.com/hashicorp/consul/internal/mesh/internal/cache/sidecarproxycache" @@ -13,12 +15,13 @@ import ( "github.com/hashicorp/consul/internal/resource/resourcetest" pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v1alpha1" "github.com/hashicorp/consul/proto/private/prototest" - "github.com/stretchr/testify/require" ) func TestMapServiceEndpointsToProxyStateTemplate(t *testing.T) { - workload1 := resourcetest.Resource(catalog.WorkloadType, "workload-1").Build() - workload2 := resourcetest.Resource(catalog.WorkloadType, "workload-2").Build() + workload1 := resourcetest.Resource(catalog.WorkloadType, "workload-1"). + WithTenancy(resource.DefaultNamespacedTenancy()).Build() + workload2 := resourcetest.Resource(catalog.WorkloadType, "workload-2"). + WithTenancy(resource.DefaultNamespacedTenancy()).Build() serviceEndpoints := resourcetest.Resource(catalog.ServiceEndpointsType, "service"). WithData(t, &pbcatalog.ServiceEndpoints{ Endpoints: []*pbcatalog.Endpoint{ @@ -39,15 +42,22 @@ func TestMapServiceEndpointsToProxyStateTemplate(t *testing.T) { }, }, }, - }).Build() - proxyTmpl1ID := resourcetest.Resource(types.ProxyStateTemplateType, "workload-1").ID() - proxyTmpl2ID := resourcetest.Resource(types.ProxyStateTemplateType, "workload-2").ID() + }). 
+ WithTenancy(resource.DefaultNamespacedTenancy()). + Build() + proxyTmpl1ID := resourcetest.Resource(types.ProxyStateTemplateType, "workload-1"). + WithTenancy(resource.DefaultNamespacedTenancy()).ID() + proxyTmpl2ID := resourcetest.Resource(types.ProxyStateTemplateType, "workload-2"). + WithTenancy(resource.DefaultNamespacedTenancy()).ID() c := sidecarproxycache.New() mapper := &Mapper{cache: c} - sourceProxy1 := resourcetest.Resource(types.ProxyStateTemplateType, "workload-3").ID() - sourceProxy2 := resourcetest.Resource(types.ProxyStateTemplateType, "workload-4").ID() - sourceProxy3 := resourcetest.Resource(types.ProxyStateTemplateType, "workload-5").ID() + sourceProxy1 := resourcetest.Resource(types.ProxyStateTemplateType, "workload-3"). + WithTenancy(resource.DefaultNamespacedTenancy()).ID() + sourceProxy2 := resourcetest.Resource(types.ProxyStateTemplateType, "workload-4"). + WithTenancy(resource.DefaultNamespacedTenancy()).ID() + sourceProxy3 := resourcetest.Resource(types.ProxyStateTemplateType, "workload-5"). + WithTenancy(resource.DefaultNamespacedTenancy()).ID() destination1 := intermediate.CombinedDestinationRef{ ServiceRef: resourcetest.Resource(catalog.ServiceType, "service").ReferenceNoSection(), Port: "tcp1", diff --git a/internal/resource/mappers/bimapper/bimapper.go b/internal/resource/mappers/bimapper/bimapper.go index 34302f8ee8b..2279c8807d4 100644 --- a/internal/resource/mappers/bimapper/bimapper.go +++ b/internal/resource/mappers/bimapper/bimapper.go @@ -215,30 +215,6 @@ func (m *Mapper) LinkIDsForItem(item *pbresource.ID) []*pbresource.ID { return out } -// LinkIDsForItem returns IDs to links related to the requested item. 
-func (m *Mapper) LinkIDsForItem(item *pbresource.ID) []*pbresource.ID { - if !resource.EqualType(item.Type, m.itemType) { - panic(fmt.Sprintf("expected item type %q got %q", - resource.TypeToString(m.itemType), - resource.TypeToString(item.Type), - )) - } - - m.lock.Lock() - defer m.lock.Unlock() - - links, ok := m.itemToLink[resource.NewReferenceKey(item)] - if !ok { - return nil - } - - out := make([]*pbresource.ID, 0, len(links)) - for l := range links { - out = append(out, l.ToID()) - } - return out -} - // ItemsForLink returns item ids for items related to the provided link. // Deprecated: use ItemIDsForLink func (m *Mapper) ItemsForLink(link *pbresource.ID) []*pbresource.ID { From 657709582f7b1f3ad39e03a2b8a69e7c2b6c5962 Mon Sep 17 00:00:00 2001 From: Iryna Shustava Date: Wed, 6 Sep 2023 19:45:12 -0600 Subject: [PATCH 11/11] fix test/integration/container go.mod --- test/integration/consul-container/go.mod | 6 +++--- test/integration/consul-container/go.sum | 8 ++++---- .../consul-container/test/catalog/catalog_test.go | 2 +- 3 files changed, 8 insertions(+), 8 deletions(-) diff --git a/test/integration/consul-container/go.mod b/test/integration/consul-container/go.mod index 8fde4fd94d8..7dfdb1771ea 100644 --- a/test/integration/consul-container/go.mod +++ b/test/integration/consul-container/go.mod @@ -9,7 +9,7 @@ require ( github.com/docker/go-connections v0.4.0 github.com/evanphx/json-patch v4.12.0+incompatible github.com/go-jose/go-jose/v3 v3.0.0 - github.com/hashicorp/consul v0.0.0-00010101000000-000000000000 + github.com/hashicorp/consul v1.16.1 github.com/hashicorp/consul/api v1.24.0 github.com/hashicorp/consul/envoyextensions v0.4.1 github.com/hashicorp/consul/proto-public v0.4.1 @@ -197,7 +197,7 @@ require ( go.opentelemetry.io/proto/otlp v0.19.0 // indirect go.uber.org/atomic v1.9.0 // indirect golang.org/x/crypto v0.12.0 // indirect - golang.org/x/exp v0.0.0-20230801115018-d63ba01acd4b // indirect + golang.org/x/exp 
v0.0.0-20230817173708-d852ddb80c63 // indirect golang.org/x/net v0.14.0 // indirect golang.org/x/oauth2 v0.8.0 // indirect golang.org/x/sync v0.3.0 // indirect @@ -205,7 +205,7 @@ require ( golang.org/x/term v0.11.0 // indirect golang.org/x/text v0.12.0 // indirect golang.org/x/time v0.3.0 // indirect - golang.org/x/tools v0.11.1 // indirect + golang.org/x/tools v0.12.1-0.20230815132531-74c255bcf846 // indirect google.golang.org/api v0.126.0 // indirect google.golang.org/appengine v1.6.7 // indirect google.golang.org/genproto v0.0.0-20230726155614-23370e0ffb3e // indirect diff --git a/test/integration/consul-container/go.sum b/test/integration/consul-container/go.sum index 026ae8db72f..b05dbbc8171 100644 --- a/test/integration/consul-container/go.sum +++ b/test/integration/consul-container/go.sum @@ -835,8 +835,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/exp v0.0.0-20230801115018-d63ba01acd4b h1:r+vk0EmXNmekl0S0BascoeeoHk/L7wmaW2QF90K+kYI= -golang.org/x/exp v0.0.0-20230801115018-d63ba01acd4b/go.mod h1:FXUEEKJgO7OQYeo8N01OfiKP8RXMtf6e8aTskBGqWdc= +golang.org/x/exp v0.0.0-20230817173708-d852ddb80c63 h1:m64FZMko/V45gv0bNmrNYoDEq8U5YUhetc9cBWKS1TQ= +golang.org/x/exp v0.0.0-20230817173708-d852ddb80c63/go.mod h1:0v4NqG35kSWCMzLaMeX+IQrlSnVE/bqGSyC2cz/9Le8= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -1107,8 +1107,8 @@ 
golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.6-0.20210726203631-07bc1bf47fb2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.11.1 h1:ojD5zOW8+7dOGzdnNgersm8aPfcDjhMp12UfG93NIMc= -golang.org/x/tools v0.11.1/go.mod h1:anzJrxPjNtfgiYQYirP2CPGzGLxrH2u2QBhn6Bf3qY8= +golang.org/x/tools v0.12.1-0.20230815132531-74c255bcf846 h1:Vve/L0v7CXXuxUmaMGIEK/dEeq7uiqb5qBgQrZzIE7E= +golang.org/x/tools v0.12.1-0.20230815132531-74c255bcf846/go.mod h1:Sc0INKfu04TlqNoRA1hgpFZbhYXHPr4V5DzpSBTPqQM= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/test/integration/consul-container/test/catalog/catalog_test.go b/test/integration/consul-container/test/catalog/catalog_test.go index a2f4216c1c4..01f798dbaad 100644 --- a/test/integration/consul-container/test/catalog/catalog_test.go +++ b/test/integration/consul-container/test/catalog/catalog_test.go @@ -12,7 +12,7 @@ import ( libtopology "github.com/hashicorp/consul/test/integration/consul-container/libs/topology" "github.com/hashicorp/consul/internal/catalog/catalogtest" - pbresource "github.com/hashicorp/consul/proto-public/pbresource" + "github.com/hashicorp/consul/proto-public/pbresource" ) func TestCatalog(t *testing.T) {