diff --git a/docs/cli-arguments.md b/docs/cli-arguments.md index 888ef5186c..2679b01901 100644 --- a/docs/cli-arguments.md +++ b/docs/cli-arguments.md @@ -9,6 +9,11 @@ Usage of ./nginx-ingress: If not set, certificate and key in the file "/etc/nginx/secrets/default" are used. If a secret is set, but the Ingress controller is not able to fetch it from Kubernetes API or a secret is not set and the file "/etc/nginx/secrets/default" does not exist, the Ingress controller will fail to start + -enable-leader-election + Enable Leader election to avoid multiple replicas of the controller reporting the status of Ingress resources -- only one replica will report status. See -report-ingress-status flag. + -external-service string + Specifies the name of the service with the type LoadBalancer through which the Ingress controller pods are exposed externally. + The external address of the service is used when reporting the status of Ingress resources. Requires -report-ingress-status. -health-status Add a location "/nginx-health" to the default server. The location responds with the 200 status code for any request. Useful for external health-checking of the Ingress controller @@ -35,8 +40,10 @@ Usage of ./nginx-ingress: -nginx-plus Enable support for NGINX Plus -proxy string - Use a proxy server to connect to Kubernetes API started by "kubectl proxy" command. For testing purposes only. - The Ingress controller does not start NGINX and does not write any generated NGINX configuration files to disk + Use a proxy server to connect to Kubernetes API started by "kubectl proxy" command. For testing purposes only. + The Ingress controller does not start NGINX and does not write any generated NGINX configuration files to disk + -report-ingress-status + Update the address field in the status of Ingresses resources. Requires the -external-service flag, or the 'external-status-address' key in the ConfigMap. 
-stderrthreshold value logs at or above this threshold go to stderr -use-ingress-class-only @@ -49,4 +56,4 @@ Usage of ./nginx-ingress: comma-separated list of pattern=N settings for file-filtered logging -watch-namespace string Namespace to watch for Ingress resources. By default the Ingress controller watches all namespaces -``` \ No newline at end of file +``` diff --git a/docs/installation.md b/docs/installation.md index 0b40e6c3df..cdce4710b8 100644 --- a/docs/installation.md +++ b/docs/installation.md @@ -137,6 +137,10 @@ Use the public IP of the load balancer to access the Ingress controller. To get ``` $ nslookup ``` +The public IP can be reported in the status of an ingress resource. To enable: +1. Run the Ingress controller with the `-report-ingress-status` [command-line argument](cli-arguments.md). +1. Configure the Ingress controller to use the `nginx-ingress` service name as the source of the IP with the arg `-external-service=nginx-ingress`. +1. See the [Report Ingress Status doc](report-ingress-status.md) for more details. Read more about the type LoadBalancer [here](https://kubernetes.io/docs/concepts/services-networking/service/#type-loadbalancer). 
diff --git a/docs/nginx-ingress-controllers.md b/docs/nginx-ingress-controllers.md index 8212ff69db..5e453becda 100644 --- a/docs/nginx-ingress-controllers.md +++ b/docs/nginx-ingress-controllers.md @@ -31,7 +31,7 @@ The table below summarizes the key difference between nginxinc/kubernetes-ingres | TLS certificate and key for the default server | Required as a command-line argument/ auto-generated | Required as a command-line argument | Required as a command-line argument | | Helm chart | Supported | Supported | Supported | | **Operational** | -| Reporting the IP address(es) of the Ingress controller into Ingress resources | Supported | Coming soon | Coming soon | +| Reporting the IP address(es) of the Ingress controller into Ingress resources | Supported | Supported | Supported | | Extended Status | Supported via a third-party module | Not supported | Supported | | Prometheus Integration | Supported | Not supported | Supported | | Dynamic reconfiguration of endpoints (no configuration reloading) | Supported with a third-party Lua module | Not supported | Supported | diff --git a/docs/report-ingress-status.md b/docs/report-ingress-status.md new file mode 100644 index 0000000000..b166c3115d --- /dev/null +++ b/docs/report-ingress-status.md @@ -0,0 +1,21 @@ +# Reporting Status of Ingress Resources + +An Ingress resource can have a status that includes the address (an IP address or a DNS name), through which the hosts of that Ingress resource are publicly accessible. +You can see the address in the output of the `kubectl get ingress` command, in the ADDRESS column, as shown below: + +``` +$ kubectl get ingresses +NAME HOSTS ADDRESS PORTS AGE +cafe-ingress cafe.example.com 12.13.23.123 80, 443 2m +``` + +The Ingress controller must be configured to report an Ingress status: + +1. Use the command-line flag `-report-ingress-status`. +2. Define a source for an external address. This can be either of: + 1. 
A user defined address, specified in the `external-status-address` [ConfigMap key](../examples/customization). + 2. A Service of the type LoadBalancer configured with an external IP or address and specified by the `-external-service` command-line flag. +3. If you're running multiple replicas of the Ingress controller, enable leader election with the `-enable-leader-election` flag +to ensure that only one replica updates an Ingress status. + +Notes: The Ingress controller does not clear the status of Ingress resources when it is being shut down. diff --git a/examples/customization/README.md b/examples/customization/README.md index 18cd467f8b..852b8d63e4 100644 --- a/examples/customization/README.md +++ b/examples/customization/README.md @@ -66,6 +66,7 @@ The table below summarizes all of the options. For some of them, there are examp | `nginx.com/health-checks-mandatory` | N/A | Configures active health checks as mandatory. | `False` | [Support for Active Health Checks](../health-checks). | | `nginx.com/health-checks-mandatory-queue` | N/A | When active health checks are mandatory, configures a queue for temporary storing incoming requests during the time when NGINX Plus is checking the health of the endpoints after a configuration reload. | `0` | [Support for Active Health Checks](../health-checks). | | `nginx.com/slow-start` | N/A | Sets the upstream server [slow-start period](https://docs.nginx.com/nginx/admin-guide/load-balancer/http-load-balancer/#server-slow-start). By default, slow-start is activated after a server becomes [available](https://docs.nginx.com/nginx/admin-guide/load-balancer/http-health-check/#passive-health-checks) or [healthy](https://docs.nginx.com/nginx/admin-guide/load-balancer/http-health-check/#active-health-checks). To enable slow-start for newly added servers, configure [mandatory active health checks](../health-checks). | `"0s"` | | +| N/A | `external-status-address` | Sets the address to be reported in the status of Ingress resources. 
Requires the `-report-ingress-status` command-line argument. Overrides the `-external-service` argument. | N/A | [Report Ingress Status](../../docs/report-ingress-status.md). | ## Using ConfigMaps diff --git a/examples/openshift/nginx-ingress-rc.yaml b/examples/openshift/nginx-ingress-rc.yaml index 6dd7ba595d..5530dcb48e 100644 --- a/examples/openshift/nginx-ingress-rc.yaml +++ b/examples/openshift/nginx-ingress-rc.yaml @@ -28,6 +28,10 @@ spec: valueFrom: fieldRef: fieldPath: metadata.namespace + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name # Uncomment the lines below to enable extensive logging and/or customization of # NGINX configuration with configmaps args: diff --git a/examples/openshift/nginx-plus-ingress-rc.yaml b/examples/openshift/nginx-plus-ingress-rc.yaml index 90156f9994..444130b090 100644 --- a/examples/openshift/nginx-plus-ingress-rc.yaml +++ b/examples/openshift/nginx-plus-ingress-rc.yaml @@ -30,6 +30,10 @@ spec: valueFrom: fieldRef: fieldPath: metadata.namespace + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name # Uncomment the lines below to enable extensive logging and/or customization of # NGINX configuration with configmaps args: diff --git a/helm-chart/templates/controller-daemonset.yaml b/helm-chart/templates/controller-daemonset.yaml index f40c33dd1e..ed54366a5c 100644 --- a/helm-chart/templates/controller-daemonset.yaml +++ b/helm-chart/templates/controller-daemonset.yaml @@ -57,6 +57,10 @@ spec: valueFrom: fieldRef: fieldPath: metadata.namespace + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name args: {{- if .Values.controller.nginxplus }} - -nginx-plus diff --git a/helm-chart/templates/controller-deployment.yaml b/helm-chart/templates/controller-deployment.yaml index e080a4c61e..1c47ce53d1 100644 --- a/helm-chart/templates/controller-deployment.yaml +++ b/helm-chart/templates/controller-deployment.yaml @@ -43,6 +43,10 @@ spec: valueFrom: fieldRef: fieldPath: metadata.namespace + - 
name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name args: {{- if .Values.controller.nginxplus }} - -nginx-plus diff --git a/helm-chart/templates/rbac.yaml b/helm-chart/templates/rbac.yaml index d81eaa6e1f..79ea8acb44 100644 --- a/helm-chart/templates/rbac.yaml +++ b/helm-chart/templates/rbac.yaml @@ -15,17 +15,27 @@ rules: - services - endpoints verbs: + - get - list - watch - apiGroups: - "" resources: - - configmaps - secrets verbs: - get - list - watch +- apiGroups: + - "" + resources: + - configmaps + verbs: + - get + - list + - watch + - update + - create - apiGroups: - "" resources: @@ -46,6 +56,12 @@ rules: verbs: - list - watch +- apiGroups: + - "extensions" + resources: + - ingresses/status + verbs: + - update --- kind: ClusterRoleBinding apiVersion: rbac.authorization.k8s.io/v1beta1 diff --git a/install/daemon-set/nginx-ingress.yaml b/install/daemon-set/nginx-ingress.yaml index 8adea2f933..9b1e69e449 100644 --- a/install/daemon-set/nginx-ingress.yaml +++ b/install/daemon-set/nginx-ingress.yaml @@ -22,13 +22,20 @@ spec: hostPort: 80 - name: https containerPort: 443 - hostPort: 443 + hostPort: 443 env: - name: POD_NAMESPACE valueFrom: fieldRef: fieldPath: metadata.namespace + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name args: - -nginx-configmaps=$(POD_NAMESPACE)/nginx-config - -default-server-tls-secret=$(POD_NAMESPACE)/default-server-secret #- -v=3 # Enables extensive logging. Useful for trooublshooting. 
+ #- -report-ingress-status + #- -external-service=nginx-ingress + #- -enable-leader-election diff --git a/install/daemon-set/nginx-plus-ingress-with-prometheus.yaml b/install/daemon-set/nginx-plus-ingress-with-prometheus.yaml index 4152e3d440..4fa40d1d02 100644 --- a/install/daemon-set/nginx-plus-ingress-with-prometheus.yaml +++ b/install/daemon-set/nginx-plus-ingress-with-prometheus.yaml @@ -31,11 +31,18 @@ spec: valueFrom: fieldRef: fieldPath: metadata.namespace + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name args: - -nginx-plus - -nginx-configmaps=$(POD_NAMESPACE)/nginx-config - -default-server-tls-secret=$(POD_NAMESPACE)/default-server-secret #- -v=3 # Enables extensive logging. Useful for trooublshooting. + #- -report-ingress-status + #- -external-service=nginx-ingress + #- -enable-leader-election - image: nginx/nginx-prometheus-exporter:0.1.0 name: nginx-prometheus-exporter ports: diff --git a/install/daemon-set/nginx-plus-ingress.yaml b/install/daemon-set/nginx-plus-ingress.yaml index 31b9ec09c7..3595f8dd16 100644 --- a/install/daemon-set/nginx-plus-ingress.yaml +++ b/install/daemon-set/nginx-plus-ingress.yaml @@ -28,8 +28,15 @@ spec: valueFrom: fieldRef: fieldPath: metadata.namespace + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name args: - -nginx-plus - -nginx-configmaps=$(POD_NAMESPACE)/nginx-config - -default-server-tls-secret=$(POD_NAMESPACE)/default-server-secret #- -v=3 # Enables extensive logging. Useful for trooublshooting. 
+ #- -report-ingress-status + #- -external-service=nginx-ingress + #- -enable-leader-election diff --git a/install/deployment/nginx-ingress.yaml b/install/deployment/nginx-ingress.yaml index 30dc245dd5..b1d5dc3f42 100644 --- a/install/deployment/nginx-ingress.yaml +++ b/install/deployment/nginx-ingress.yaml @@ -27,7 +27,14 @@ spec: valueFrom: fieldRef: fieldPath: metadata.namespace + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name args: - -nginx-configmaps=$(POD_NAMESPACE)/nginx-config - -default-server-tls-secret=$(POD_NAMESPACE)/default-server-secret #- -v=3 # Enables extensive logging. Useful for trooublshooting. + #- -report-ingress-status + #- -external-service=nginx-ingress + #- -enable-leader-election diff --git a/install/deployment/nginx-plus-ingress-with-prometheus.yaml b/install/deployment/nginx-plus-ingress-with-prometheus.yaml index 11be868c5e..134c2e11d1 100644 --- a/install/deployment/nginx-plus-ingress-with-prometheus.yaml +++ b/install/deployment/nginx-plus-ingress-with-prometheus.yaml @@ -30,11 +30,18 @@ spec: valueFrom: fieldRef: fieldPath: metadata.namespace + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name args: - -nginx-plus - -nginx-configmaps=$(POD_NAMESPACE)/nginx-config - -default-server-tls-secret=$(POD_NAMESPACE)/default-server-secret #- -v=3 # Enables extensive logging. Useful for trooublshooting. 
+ #- -report-ingress-status + #- -external-service=nginx-ingress + #- -enable-leader-election - image: nginx/nginx-prometheus-exporter:0.1.0 name: nginx-prometheus-exporter ports: diff --git a/install/deployment/nginx-plus-ingress.yaml b/install/deployment/nginx-plus-ingress.yaml index f217a4abd9..2e5ee36a42 100644 --- a/install/deployment/nginx-plus-ingress.yaml +++ b/install/deployment/nginx-plus-ingress.yaml @@ -27,8 +27,15 @@ spec: valueFrom: fieldRef: fieldPath: metadata.namespace + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name args: - -nginx-plus - -nginx-configmaps=$(POD_NAMESPACE)/nginx-config - -default-server-tls-secret=$(POD_NAMESPACE)/default-server-secret #- -v=3 # Enables extensive logging. Useful for trooublshooting. + #- -report-ingress-status + #- -external-service=nginx-ingress + #- -enable-leader-election diff --git a/install/rbac/rbac.yaml b/install/rbac/rbac.yaml index 446bf51ef2..58e1a3614b 100644 --- a/install/rbac/rbac.yaml +++ b/install/rbac/rbac.yaml @@ -8,18 +8,28 @@ rules: resources: - services - endpoints - verbs: + verbs: + - get - list - watch -- apiGroups: +- apiGroups: - "" resources: - - configmaps - secrets verbs: - get - list - watch +- apiGroups: + - "" + resources: + - configmaps + verbs: + - get + - list + - watch + - update + - create - apiGroups: - "" resources: @@ -40,6 +50,12 @@ rules: verbs: - list - watch +- apiGroups: + - "extensions" + resources: + - ingresses/status + verbs: + - update --- kind: ClusterRoleBinding apiVersion: rbac.authorization.k8s.io/v1beta1 @@ -51,5 +67,5 @@ subjects: namespace: nginx-ingress roleRef: kind: ClusterRole - name: nginx-ingress - apiGroup: rbac.authorization.k8s.io \ No newline at end of file + name: nginx-ingress + apiGroup: rbac.authorization.k8s.io diff --git a/nginx-controller/controller/controller.go b/nginx-controller/controller/controller.go index 738f3e01bf..4617edbba8 100644 --- a/nginx-controller/controller/controller.go +++ 
b/nginx-controller/controller/controller.go @@ -32,6 +32,7 @@ import ( scheme "k8s.io/client-go/kubernetes/scheme" core_v1 "k8s.io/client-go/kubernetes/typed/core/v1" "k8s.io/client-go/tools/cache" + "k8s.io/client-go/tools/leaderelection" "k8s.io/client-go/tools/record" "sort" @@ -48,53 +49,101 @@ const ( // LoadBalancerController watches Kubernetes API and // reconfigures NGINX via NginxController when needed type LoadBalancerController struct { - client kubernetes.Interface - ingController cache.Controller - svcController cache.Controller - endpController cache.Controller - cfgmController cache.Controller - secrController cache.Controller - ingLister StoreToIngressLister - svcLister cache.Store - endpLister StoreToEndpointLister - cfgmLister StoreToConfigMapLister - secrLister StoreToSecretLister - syncQueue *taskQueue - stopCh chan struct{} - cnf *nginx.Configurator - watchNginxConfigMaps bool - nginxPlus bool - recorder record.EventRecorder - defaultServerSecret string - ingressClass string - useIngressClassOnly bool + client kubernetes.Interface + ingController cache.Controller + svcController cache.Controller + endpController cache.Controller + cfgmController cache.Controller + secrController cache.Controller + ingLister StoreToIngressLister + svcLister cache.Store + endpLister StoreToEndpointLister + cfgmLister StoreToConfigMapLister + secrLister StoreToSecretLister + syncQueue *taskQueue + stopCh chan struct{} + cnf *nginx.Configurator + watchNginxConfigMaps bool + nginxPlus bool + recorder record.EventRecorder + defaultServerSecret string + ingressClass string + useIngressClassOnly bool + statusUpdater *StatusUpdater + leaderElector *leaderelection.LeaderElector + reportIngressStatus bool + leaderElectionEnabled bool } var keyFunc = cache.DeletionHandlingMetaNamespaceKeyFunc +// NewLoadBalancerControllerInput holds the input needed to call NewLoadBalancerController. 
+type NewLoadBalancerControllerInput struct { + KubeClient kubernetes.Interface + ResyncPeriod time.Duration + Namespace string + CNF *nginx.Configurator + NginxConfigMaps string + DefaultServerSecret string + NginxPlus bool + IngressClass string + UseIngressClassOnly bool + ExternalServiceName string + ControllerNamespace string + ReportIngressStatus bool + LeaderElectionEnabled bool +} + // NewLoadBalancerController creates a controller -func NewLoadBalancerController(kubeClient kubernetes.Interface, resyncPeriod time.Duration, namespace string, cnf *nginx.Configurator, nginxConfigMaps string, defaultServerSecret string, nginxPlus bool, ingressClass string, useIngressClassOnly bool) *LoadBalancerController { +func NewLoadBalancerController(input NewLoadBalancerControllerInput) *LoadBalancerController { lbc := LoadBalancerController{ - client: kubeClient, - stopCh: make(chan struct{}), - cnf: cnf, - defaultServerSecret: defaultServerSecret, - nginxPlus: nginxPlus, - ingressClass: ingressClass, - useIngressClassOnly: useIngressClassOnly, + client: input.KubeClient, + stopCh: make(chan struct{}), + cnf: input.CNF, + defaultServerSecret: input.DefaultServerSecret, + nginxPlus: input.NginxPlus, + ingressClass: input.IngressClass, + useIngressClassOnly: input.UseIngressClassOnly, + reportIngressStatus: input.ReportIngressStatus, + leaderElectionEnabled: input.LeaderElectionEnabled, } eventBroadcaster := record.NewBroadcaster() eventBroadcaster.StartLogging(glog.Infof) eventBroadcaster.StartRecordingToSink(&core_v1.EventSinkImpl{ - Interface: core_v1.New(kubeClient.Core().RESTClient()).Events(""), + Interface: core_v1.New(input.KubeClient.Core().RESTClient()).Events(""), }) lbc.recorder = eventBroadcaster.NewRecorder(scheme.Scheme, api_v1.EventSource{Component: "nginx-ingress-controller"}) lbc.syncQueue = NewTaskQueue(lbc.sync) - glog.V(3).Infof("Nginx Ingress Controller has class: %v", ingressClass) + glog.V(3).Infof("Nginx Ingress Controller has class: %v", 
input.IngressClass) + + lbc.statusUpdater = &StatusUpdater{ + client: input.KubeClient, + namespace: input.ControllerNamespace, + externalServiceName: input.ExternalServiceName, + } + + if input.ReportIngressStatus && input.LeaderElectionEnabled { + leaderCallbacks := leaderelection.LeaderCallbacks{ + OnStartedLeading: func(stop <-chan struct{}) { + glog.V(3).Info("started leading, updating ingress status") + ingresses, mergeableIngresses := lbc.getManagedIngresses() + err := lbc.statusUpdater.UpdateManagedAndMergeableIngresses(ingresses, mergeableIngresses) + if err != nil { + glog.V(3).Infof("error updating status when starting leading: %v", err) + } + }, + } + + var err error + lbc.leaderElector, err = NewLeaderElector(input.KubeClient, leaderCallbacks, input.ControllerNamespace) + if err != nil { + glog.V(3).Infof("Error starting LeaderElection: %v", err) + } + } ingHandlers := cache.ResourceEventHandlerFuncs{ AddFunc: func(obj interface{}) { @@ -148,10 +197,11 @@ func NewLoadBalancerController(kubeClient kubernetes.Interface, resyncPeriod tim }, UpdateFunc: func(old, cur interface{}) { curIng := cur.(*extensions.Ingress) + oldIng := old.(*extensions.Ingress) if !lbc.isNginxIngress(curIng) { return } - if !reflect.DeepEqual(old, cur) { + if hasChanges(oldIng, curIng) { if isMinion(curIng) { master, err := lbc.findMasterForMinion(curIng) if err != nil { @@ -168,12 +218,16 @@ func NewLoadBalancerController(kubeClient kubernetes.Interface, resyncPeriod tim }, } lbc.ingLister.Store, lbc.ingController = cache.NewInformer( - cache.NewListWatchFromClient(lbc.client.Extensions().RESTClient(), "ingresses", namespace, fields.Everything()), - &extensions.Ingress{}, resyncPeriod, ingHandlers) + cache.NewListWatchFromClient(lbc.client.Extensions().RESTClient(), "ingresses", input.Namespace, fields.Everything()), + &extensions.Ingress{}, input.ResyncPeriod, ingHandlers) svcHandlers := cache.ResourceEventHandlerFuncs{ AddFunc: func(obj interface{}) { addSvc := 
obj.(*api_v1.Service) + if lbc.isExternalServiceForStatus(addSvc) { + lbc.syncQueue.enqueue(addSvc) + return + } glog.V(3).Infof("Adding service: %v", addSvc.Name) lbc.enqueueIngressForService(addSvc) }, @@ -191,20 +245,30 @@ func NewLoadBalancerController(kubeClient kubernetes.Interface, resyncPeriod tim return } } + if lbc.isExternalServiceForStatus(remSvc) { + lbc.syncQueue.enqueue(remSvc) + return + } + glog.V(3).Infof("Removing service: %v", remSvc.Name) lbc.enqueueIngressForService(remSvc) + }, UpdateFunc: func(old, cur interface{}) { if !reflect.DeepEqual(old, cur) { - glog.V(3).Infof("Service %v changed, syncing", - cur.(*api_v1.Service).Name) - lbc.enqueueIngressForService(cur.(*api_v1.Service)) + curSvc := cur.(*api_v1.Service) + if lbc.isExternalServiceForStatus(curSvc) { + lbc.syncQueue.enqueue(curSvc) + return + } + glog.V(3).Infof("Service %v changed, syncing", curSvc.Name) + lbc.enqueueIngressForService(curSvc) } }, } lbc.svcLister, lbc.svcController = cache.NewInformer( - cache.NewListWatchFromClient(lbc.client.Core().RESTClient(), "services", namespace, fields.Everything()), - &api_v1.Service{}, resyncPeriod, svcHandlers) + cache.NewListWatchFromClient(lbc.client.Core().RESTClient(), "services", input.Namespace, fields.Everything()), + &api_v1.Service{}, input.ResyncPeriod, svcHandlers) endpHandlers := cache.ResourceEventHandlerFuncs{ AddFunc: func(obj interface{}) { @@ -238,8 +302,8 @@ func NewLoadBalancerController(kubeClient kubernetes.Interface, resyncPeriod tim }, } lbc.endpLister.Store, lbc.endpController = cache.NewInformer( - cache.NewListWatchFromClient(lbc.client.Core().RESTClient(), "endpoints", namespace, fields.Everything()), - &api_v1.Endpoints{}, resyncPeriod, endpHandlers) + cache.NewListWatchFromClient(lbc.client.Core().RESTClient(), "endpoints", input.Namespace, fields.Everything()), + &api_v1.Endpoints{}, input.ResyncPeriod, endpHandlers) secrHandlers := cache.ResourceEventHandlerFuncs{ AddFunc: func(obj interface{}) { @@ -287,11 
+351,11 @@ func NewLoadBalancerController(kubeClient kubernetes.Interface, resyncPeriod tim } lbc.secrLister.Store, lbc.secrController = cache.NewInformer( - cache.NewListWatchFromClient(lbc.client.Core().RESTClient(), "secrets", namespace, fields.Everything()), - &api_v1.Secret{}, resyncPeriod, secrHandlers) + cache.NewListWatchFromClient(lbc.client.Core().RESTClient(), "secrets", input.Namespace, fields.Everything()), + &api_v1.Secret{}, input.ResyncPeriod, secrHandlers) - if nginxConfigMaps != "" { - nginxConfigMapsNS, nginxConfigMapsName, err := ParseNamespaceName(nginxConfigMaps) + if input.NginxConfigMaps != "" { + nginxConfigMapsNS, nginxConfigMapsName, err := ParseNamespaceName(input.NginxConfigMaps) if err != nil { glog.Warning(err) } else { @@ -337,15 +401,25 @@ func NewLoadBalancerController(kubeClient kubernetes.Interface, resyncPeriod tim } lbc.cfgmLister.Store, lbc.cfgmController = cache.NewInformer( cache.NewListWatchFromClient(lbc.client.Core().RESTClient(), "configmaps", nginxConfigMapsNS, fields.Everything()), - &api_v1.ConfigMap{}, resyncPeriod, cfgmHandlers) + &api_v1.ConfigMap{}, input.ResyncPeriod, cfgmHandlers) } } return &lbc } +// hasChanges ignores Status or ResourceVersion changes +func hasChanges(oldIng *extensions.Ingress, curIng *extensions.Ingress) bool { + oldIng.Status.LoadBalancer.Ingress = curIng.Status.LoadBalancer.Ingress + oldIng.ResourceVersion = curIng.ResourceVersion + return !reflect.DeepEqual(oldIng, curIng) +} + // Run starts the loadbalancer controller func (lbc *LoadBalancerController) Run() { + if lbc.leaderElector != nil { + go lbc.leaderElector.Run() + } go lbc.svcController.Run(lbc.stopCh) go lbc.endpController.Run(lbc.stopCh) go lbc.secrController.Run(lbc.stopCh) @@ -434,43 +508,18 @@ func (lbc *LoadBalancerController) syncCfgm(task Task) { if cfgmExists { cfgm := obj.(*api_v1.ConfigMap) cfg = nginx.ParseConfigMap(cfgm, lbc.nginxPlus) + + 
lbc.statusUpdater.SaveStatusFromExternalStatus(cfgm.Data["external-status-address"]) } - mergeableIngresses := make(map[string]*nginx.MergeableIngresses) - var ingExes []*nginx.IngressEx - ings, _ := lbc.ingLister.List() - for i := range ings.Items { - if !lbc.isNginxIngress(&ings.Items[i]) { - continue - } - if isMinion(&ings.Items[i]) { - master, err := lbc.findMasterForMinion(&ings.Items[i]) - if err != nil { - glog.Errorf("Ignoring Ingress %v(Minion): %v", ings.Items[i], err) - continue - } - if !lbc.cnf.HasIngress(master) { - continue - } - if _, exists := mergeableIngresses[master.Name]; !exists { - mergeableIngress, err := lbc.createMergableIngresses(master) - if err != nil { - glog.Errorf("Ignoring Ingress %v(Master): %v", master, err) - continue - } - mergeableIngresses[master.Name] = mergeableIngress - } - continue - } - if !lbc.cnf.HasIngress(&ings.Items[i]) { - continue - } - ingEx, err := lbc.createIngress(&ings.Items[i]) + ingresses, mergeableIngresses := lbc.getManagedIngresses() + ingExes := lbc.ingressesToIngressExes(ingresses) + + if lbc.reportStatusEnabled() { + err = lbc.statusUpdater.UpdateManagedAndMergeableIngresses(ingresses, mergeableIngresses) if err != nil { - continue + glog.V(3).Infof("error updating status on ConfigMap change: %v", err) } - - ingExes = append(ingExes, ingEx) } if err := lbc.cnf.UpdateConfig(cfg, ingExes, mergeableIngresses); err != nil { @@ -509,6 +558,55 @@ func (lbc *LoadBalancerController) syncCfgm(task Task) { } } +// getManagedIngresses gets Ingress resources that the IC is currently responsible for +func (lbc *LoadBalancerController) getManagedIngresses() ([]extensions.Ingress, map[string]*nginx.MergeableIngresses) { + mergeableIngresses := make(map[string]*nginx.MergeableIngresses) + var managedIngresses []extensions.Ingress + ings, _ := lbc.ingLister.List() + for i := range ings.Items { + ing := ings.Items[i] + if !lbc.isNginxIngress(&ing) { + continue + } + if isMinion(&ing) { + master, err := 
lbc.findMasterForMinion(&ing) + if err != nil { + glog.Errorf("Ignoring Ingress %v(Minion): %v", ing, err) + continue + } + if !lbc.cnf.HasIngress(master) { + continue + } + if _, exists := mergeableIngresses[master.Name]; !exists { + mergeableIngress, err := lbc.createMergableIngresses(master) + if err != nil { + glog.Errorf("Ignoring Ingress %v(Master): %v", master, err) + continue + } + mergeableIngresses[master.Name] = mergeableIngress + } + continue + } + if !lbc.cnf.HasIngress(&ing) { + continue + } + managedIngresses = append(managedIngresses, ing) + } + return managedIngresses, mergeableIngresses +} + +func (lbc *LoadBalancerController) ingressesToIngressExes(ings []extensions.Ingress) []*nginx.IngressEx { + var ingExes []*nginx.IngressEx + for _, ing := range ings { + ingEx, err := lbc.createIngress(&ing) + if err != nil { + continue + } + ingExes = append(ingExes, ingEx) + } + return ingExes +} + func (lbc *LoadBalancerController) sync(task Task) { glog.V(3).Infof("Syncing %v", task.Key) @@ -523,6 +621,9 @@ func (lbc *LoadBalancerController) sync(task Task) { return case Secret: lbc.syncSecret(task) + return + case Service: + lbc.syncExternalService(task) } } @@ -551,6 +652,12 @@ func (lbc *LoadBalancerController) syncIng(task Task) { if err != nil { lbc.syncQueue.requeueAfter(task, err, 5*time.Second) lbc.recorder.Eventf(ing, api_v1.EventTypeWarning, "Rejected", "%v was rejected: %v", key, err) + if lbc.reportStatusEnabled() { + err = lbc.statusUpdater.ClearIngressStatus(*ing) + if err != nil { + glog.V(3).Infof("error clearing ing status: %v", err) + } + } return } err = lbc.cnf.AddOrUpdateMergableIngress(mergeableIngExs) @@ -565,13 +672,24 @@ func (lbc *LoadBalancerController) syncIng(task Task) { lbc.recorder.Eventf(ing, api_v1.EventTypeNormal, "AddedOrUpdated", "Configuration for %v/%v(Minion) was added or updated", minion.Ingress.Namespace, minion.Ingress.Name) } } + if lbc.reportStatusEnabled() { + err = 
lbc.statusUpdater.UpdateMergableIngresses(mergeableIngExs) + if err != nil { + glog.V(3).Infof("error updating ing status: %v", err) + } + } return } - ingEx, err := lbc.createIngress(ing) if err != nil { lbc.syncQueue.requeueAfter(task, err, 5*time.Second) lbc.recorder.Eventf(ing, api_v1.EventTypeWarning, "Rejected", "%v was rejected: %v", key, err) + if lbc.reportStatusEnabled() { + err = lbc.statusUpdater.ClearIngressStatus(*ing) + if err != nil { + glog.V(3).Infof("error clearing ing status: %v", err) + } + } return } @@ -581,9 +699,56 @@ func (lbc *LoadBalancerController) syncIng(task Task) { } else { lbc.recorder.Eventf(ing, api_v1.EventTypeNormal, "AddedOrUpdated", "Configuration for %v was added or updated", key) } + if lbc.reportStatusEnabled() { + err = lbc.statusUpdater.UpdateIngressStatus(*ing) + if err != nil { + glog.V(3).Infof("error updating ing status: %v", err) + } + } + } +} + +// syncExternalService does not sync all services. +// We only watch the Service specified by the external-service flag. 
+func (lbc *LoadBalancerController) syncExternalService(task Task) { + key := task.Key + obj, exists, err := lbc.svcLister.GetByKey(key) + if err != nil { + lbc.syncQueue.requeue(task, err) + return + } + statusIngs, mergableIngs := lbc.getManagedIngresses() + if !exists { + // service got removed + lbc.statusUpdater.ClearStatusFromExternalService() + } else { + // service added or updated + lbc.statusUpdater.SaveStatusFromExternalService(obj.(*api_v1.Service)) + } + if lbc.reportStatusEnabled() { + err = lbc.statusUpdater.UpdateManagedAndMergeableIngresses(statusIngs, mergableIngs) + if err != nil { + glog.Errorf("error updating ingress status in syncExternalService: %v", err) + } } } +// isExternalServiceForStatus matches the service specified by the external-service arg +func (lbc *LoadBalancerController) isExternalServiceForStatus(svc *api_v1.Service) bool { + return lbc.statusUpdater.namespace == svc.Namespace && lbc.statusUpdater.externalServiceName == svc.Name +} + +// reportStatusEnabled determines if we should attempt to report status +func (lbc *LoadBalancerController) reportStatusEnabled() bool { + if lbc.reportIngressStatus { + if lbc.leaderElectionEnabled { + return lbc.leaderElector != nil && lbc.leaderElector.IsLeader() + } + return true + } + return false +} + func (lbc *LoadBalancerController) syncSecret(task Task) { key := task.Key obj, secrExists, err := lbc.secrLister.Store.GetByKey(key) diff --git a/nginx-controller/controller/leader.go b/nginx-controller/controller/leader.go new file mode 100644 index 0000000000..64b1a7cd0b --- /dev/null +++ b/nginx-controller/controller/leader.go @@ -0,0 +1,43 @@ +package controller + +import ( + "os" + "time" + + "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/tools/leaderelection" + "k8s.io/client-go/tools/leaderelection/resourcelock" + "k8s.io/client-go/tools/record" +) + +// NewLeaderElector 
creates a new LeaderElection and returns the Elector. +func NewLeaderElector(client kubernetes.Interface, callbacks leaderelection.LeaderCallbacks, namespace string) (*leaderelection.LeaderElector, error) { + podName := os.Getenv("POD_NAME") + + broadcaster := record.NewBroadcaster() + hostname, _ := os.Hostname() + + source := v1.EventSource{Component: "nginx-ingress-leader-elector", Host: hostname} + recorder := broadcaster.NewRecorder(scheme.Scheme, source) + + lock := resourcelock.ConfigMapLock{ + ConfigMapMeta: metav1.ObjectMeta{Namespace: namespace, Name: "leader-election"}, + Client: client.CoreV1(), + LockConfig: resourcelock.ResourceLockConfig{ + Identity: podName, + EventRecorder: recorder, + }, + } + + ttl := 30 * time.Second + return leaderelection.NewLeaderElector(leaderelection.LeaderElectionConfig{ + Lock: &lock, + LeaseDuration: ttl, + RenewDeadline: ttl / 2, + RetryPeriod: ttl / 4, + Callbacks: callbacks, + }) +} diff --git a/nginx-controller/controller/status.go b/nginx-controller/controller/status.go new file mode 100644 index 0000000000..8609326a98 --- /dev/null +++ b/nginx-controller/controller/status.go @@ -0,0 +1,202 @@ +package controller + +import ( + "fmt" + "net" + "reflect" + + "github.com/golang/glog" + "github.com/nginxinc/kubernetes-ingress/nginx-controller/nginx" + api_v1 "k8s.io/api/core/v1" + "k8s.io/api/extensions/v1beta1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" + extensionsv1beta1 "k8s.io/client-go/kubernetes/typed/extensions/v1beta1" +) + +// StatusUpdater reports Ingress status information via the kubernetes +// API, primarily the IP or host of the LoadBalancer Service exposing the +// Ingress Controller, or an external IP specified in the ConfigMap. 
+type StatusUpdater struct { + client kubernetes.Interface + namespace string + externalServiceName string + externalStatusAddress string + externalServiceAddresses []string + status []api_v1.LoadBalancerIngress +} + +// UpdateManagedAndMergeableIngresses handles the full return format of LoadBalancerController.getManagedIngresses +func (su *StatusUpdater) UpdateManagedAndMergeableIngresses(managedIngresses []v1beta1.Ingress, mExes map[string]*nginx.MergeableIngresses) error { + ings := []v1beta1.Ingress{} + ings = append(ings, managedIngresses...) + for _, mEx := range mExes { + for _, minion := range mEx.Minions { + ings = append(ings, *minion.Ingress) + } + } + return su.BulkUpdateIngressStatus(ings) +} + +// UpdateMergableIngresses is a convenience pass-through to update Ingresses with our nginx.MergeableIngresses type +func (su *StatusUpdater) UpdateMergableIngresses(mergableIngresses *nginx.MergeableIngresses) error { + ings := []v1beta1.Ingress{} + ingExes := []*nginx.IngressEx{} + + ingExes = append(ingExes, mergableIngresses.Master) + ingExes = append(ingExes, mergableIngresses.Minions...) + + for _, ingEx := range ingExes { + ings = append(ings, *ingEx.Ingress) + } + return su.BulkUpdateIngressStatus(ings) +} + +// ClearIngressStatus clears the Ingress status. +func (su *StatusUpdater) ClearIngressStatus(ing v1beta1.Ingress) error { + return su.updateIngressWithStatus(ing, []api_v1.LoadBalancerIngress{}) +} + +// UpdateIngressStatus updates the status on the selected Ingress. +func (su *StatusUpdater) UpdateIngressStatus(ing v1beta1.Ingress) error { + return su.updateIngressWithStatus(ing, su.status) +} + +// updateIngressWithStatus sets the provided status on the selected Ingress.
+func (su *StatusUpdater) updateIngressWithStatus(ing v1beta1.Ingress, status []api_v1.LoadBalancerIngress) error { + if reflect.DeepEqual(ing.Status.LoadBalancer.Ingress, status) { + return nil + } + // Objects from the LoadBalancerController.ingLister.Store are retrieved by reference + // and are not safe to modify. + ingCopy := ing.DeepCopy() + ingCopy.Status.LoadBalancer.Ingress = status + clientIngress := su.client.ExtensionsV1beta1().Ingresses(ingCopy.Namespace) + _, err := clientIngress.UpdateStatus(ingCopy) + if err != nil { + glog.V(3).Infof("error setting ingress status: %v", err) + err = su.retryStatusUpdate(clientIngress, ingCopy) + if err != nil { + glog.V(3).Infof("error retrying status update: %v", err) + return err + } + } + glog.V(3).Infof("updated status for ing: %v %v", ing.Namespace, ing.Name) + return nil +} + +// BulkUpdateIngressStatus sets the status field on the selected Ingresses, specifically +// the External IP field. +func (su *StatusUpdater) BulkUpdateIngressStatus(ings []v1beta1.Ingress) error { + if len(ings) < 1 { + glog.V(3).Info("no ingresses to update") + return nil + } + if len(su.status) < 1 { + glog.V(3).Info("no status to set") + return nil + } + failed := false + for _, ing := range ings { + err := su.updateIngressWithStatus(ing, su.status) + if err != nil { + failed = true + } + } + if failed { + return fmt.Errorf("not all Ingresses updated") + } + return nil +} + +// retryStatusUpdate fetches a fresh copy of the Ingress from the k8s API, checks if it still needs to be +// updated, and then attempts to update. We often need to fetch fresh copies due to the +// k8s API using ResourceVersion to stop updates on stale items. 
+func (su *StatusUpdater) retryStatusUpdate(clientIngress extensionsv1beta1.IngressInterface, ingCopy *v1beta1.Ingress) error { + apiIng, err := clientIngress.Get(ingCopy.Name, metav1.GetOptions{}) + if err != nil { + glog.V(3).Infof("error getting ingress resource: %v", err) + return err + } + if !reflect.DeepEqual(ingCopy.Status.LoadBalancer, apiIng.Status.LoadBalancer) { + glog.V(3).Infof("retrying update status for ingress: %v, %v", ingCopy.Namespace, ingCopy.Name) + apiIng.Status.LoadBalancer = ingCopy.Status.LoadBalancer + _, err := clientIngress.UpdateStatus(apiIng) + if err != nil { + glog.V(3).Infof("update retry failed: %v", err) + } + return err + } + return nil +} + +// saveStatus saves the string array of IPs or addresses that we will set as status +// on all the Ingresses that we manage. +func (su *StatusUpdater) saveStatus(ips []string) { + statusIngs := []api_v1.LoadBalancerIngress{} + for _, ip := range ips { + if net.ParseIP(ip) == nil { + statusIngs = append(statusIngs, api_v1.LoadBalancerIngress{Hostname: ip}) + } else { + statusIngs = append(statusIngs, api_v1.LoadBalancerIngress{IP: ip}) + } + } + su.status = statusIngs +} + +func getExternalServiceAddress(svc *api_v1.Service) []string { + addresses := []string{} + if svc == nil { + return addresses + } + + if svc.Spec.Type == api_v1.ServiceTypeExternalName { + addresses = append(addresses, svc.Spec.ExternalName) + return addresses + } + + for _, ip := range svc.Status.LoadBalancer.Ingress { + if ip.IP == "" { + addresses = append(addresses, ip.Hostname) + } else { + addresses = append(addresses, ip.IP) + } + } + addresses = append(addresses, svc.Spec.ExternalIPs...) + return addresses +} + +// SaveStatusFromExternalStatus saves the status from a string. +// For use with the external-status-address ConfigMap setting. +// This method does not update ingress status - StatusUpdater.UpdateIngressStatus must be called separately. 
+func (su *StatusUpdater) SaveStatusFromExternalStatus(externalStatusAddress string) { + su.externalStatusAddress = externalStatusAddress + if externalStatusAddress == "" { + // if external-status-address was removed from configMap, fall back on + // external service if it exists + if len(su.externalServiceAddresses) > 0 { + su.saveStatus(su.externalServiceAddresses) + return + } + } + ips := []string{} + ips = append(ips, su.externalStatusAddress) + su.saveStatus(ips) +} + +// ClearStatusFromExternalService clears the saved status from the External Service +func (su *StatusUpdater) ClearStatusFromExternalService() { + su.SaveStatusFromExternalService(nil) +} + +// SaveStatusFromExternalService saves the external IP or address from the service. +// This method does not update ingress status - UpdateIngressStatus must be called separately. +func (su *StatusUpdater) SaveStatusFromExternalService(svc *api_v1.Service) { + ips := getExternalServiceAddress(svc) + su.externalServiceAddresses = ips + if su.externalStatusAddress != "" { + glog.V(3).Info("skipping external service address - external-status-address is set and takes precedence") + return + } + su.saveStatus(ips) +} diff --git a/nginx-controller/controller/utils.go b/nginx-controller/controller/utils.go index e3535cc01c..6e7bbcb3e4 100644 --- a/nginx-controller/controller/utils.go +++ b/nginx-controller/controller/utils.go @@ -120,6 +120,8 @@ const ( ConfigMap // Secret resource Secret + // Service resource + Service ) // Task is an element of a taskQueue @@ -140,6 +142,8 @@ func NewTask(key string, obj interface{}) (Task, error) { k = ConfigMap case *api_v1.Secret: k = Secret + case *api_v1.Service: + k = Service default: return Task{}, fmt.Errorf("Unknow type: %v", t) } diff --git a/nginx-controller/main.go b/nginx-controller/main.go index b55d56085b..25b7cfa356 100644 --- a/nginx-controller/main.go +++ b/nginx-controller/main.go @@ -68,6 +68,16 @@ var ( ingressTemplatePath = 
flag.String("ingress-template-path", "", `Path to the ingress NGINX configuration template for an ingress resource. (default for NGINX "nginx.ingress.tmpl"; default for NGINX Plus "nginx-plus.ingress.tmpl")`) + + externalService = flag.String("external-service", "", + `Specifies the name of the service with the type LoadBalancer through which the Ingress controller pods are exposed externally. +The external address of the service is used when reporting the status of Ingress resources. Requires -report-ingress-status.`) + + reportIngressStatus = flag.Bool("report-ingress-status", false, + "Update the address field in the status of Ingresses resources. Requires the -external-service flag, or the 'external-status-address' key in the ConfigMap.") + + leaderElectionEnabled = flag.Bool("enable-leader-election", false, + "Enable Leader election to avoid multiple replicas of the controller reporting the status of Ingress resources -- only one replica will report status. See -report-ingress-status flag.") ) func main() { @@ -203,7 +213,24 @@ func main() { } cnf := nginx.NewConfigurator(ngxc, cfg, nginxAPI, templateExecutor) - lbc := controller.NewLoadBalancerController(kubeClient, 30*time.Second, *watchNamespace, cnf, *nginxConfigMaps, *defaultServerSecret, *nginxPlus, *ingressClass, *useIngressClassOnly) + controllerNamespace := os.Getenv("POD_NAMESPACE") + + lbcInput := controller.NewLoadBalancerControllerInput{ + KubeClient: kubeClient, + ResyncPeriod: 30 * time.Second, + Namespace: *watchNamespace, + CNF: cnf, + NginxConfigMaps: *nginxConfigMaps, + DefaultServerSecret: *defaultServerSecret, + NginxPlus: *nginxPlus, + IngressClass: *ingressClass, + UseIngressClassOnly: *useIngressClassOnly, + ExternalServiceName: *externalService, + ControllerNamespace: controllerNamespace, + ReportIngressStatus: *reportIngressStatus, + LeaderElectionEnabled: *leaderElectionEnabled, + } + lbc := controller.NewLoadBalancerController(lbcInput) go handleTermination(lbc, ngxc, nginxDone) 
lbc.Run()