
feat(helm): update chart cilium to 1.16.0 #21

Merged: 1 commit merged into main from renovate/cilium-1.x on Aug 4, 2024

Conversation

renovate[bot] (Contributor) commented on Jul 27, 2024

Mend Renovate

This PR contains the following updates:

Package | Update | Change
cilium (source) | minor | 1.15.7 -> 1.16.0

Release Notes

cilium/cilium (cilium)

v1.16.0 (Compare Source)


Configuration

📅 Schedule: Branch creation - "every weekend" (UTC), Automerge - At any time (no schedule defined).

🚦 Automerge: Disabled by config. Please merge this manually once you are satisfied.

Rebasing: Whenever PR becomes conflicted, or you tick the rebase/retry checkbox.

🔕 Ignore: Close this PR and you won't be reminded about these updates again.


  • If you want to rebase/retry this PR, check this box

This PR was generated by Mend Renovate. View the repository job log.

--- kubernetes/apps/kube-system/cilium/app Kustomization: flux-system/cilium HelmRelease: kube-system/cilium

+++ kubernetes/apps/kube-system/cilium/app Kustomization: flux-system/cilium HelmRelease: kube-system/cilium

@@ -13,13 +13,13 @@

     spec:
       chart: cilium
       sourceRef:
         kind: HelmRepository
         name: cilium
         namespace: flux-system
-      version: 1.15.7
+      version: 1.16.0
   install:
     remediation:
       retries: 3
   interval: 30m
   upgrade:
     cleanupOnFail: true
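
The bump itself is the one-line change above: the HelmRelease pins an exact chart version, which is what lets Renovate propose the update, and Flux then runs the Helm upgrade with the remediation already configured (three install retries, cleanupOnFail on upgrade). For comparison, a minimal sketch of the same spec with an explicit upgrade remediation added; the apiVersion and the upgrade.remediation block are illustrative assumptions, not part of this repository:

```yaml
# Sketch only: the pinned HelmRelease with rollback-on-failure for upgrades.
# Field names follow the Flux HelmRelease API; the apiVersion and the
# upgrade.remediation block are illustrative additions, not repo content.
apiVersion: helm.toolkit.fluxcd.io/v2
kind: HelmRelease
metadata:
  name: cilium
  namespace: kube-system
spec:
  interval: 30m
  chart:
    spec:
      chart: cilium
      version: 1.16.0            # exact pin; this is the field Renovate bumps
      sourceRef:
        kind: HelmRepository
        name: cilium
        namespace: flux-system
  install:
    remediation:
      retries: 3
  upgrade:
    cleanupOnFail: true
    remediation:
      retries: 3                 # assumption: also remediate failed upgrades
      strategy: rollback
```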

--- HelmRelease: kube-system/cilium ConfigMap: kube-system/cilium-dashboard

+++ HelmRelease: kube-system/cilium ConfigMap: kube-system/cilium-dashboard

@@ -4703,27 +4703,27 @@

           ],
           "spaceLength": 10,
           "stack": false,
           "steppedLine": false,
           "targets": [
             {
-              "expr": "sum(rate(cilium_policy_l7_denied_total{k8s_app=\"cilium\", pod=~\"$pod\"}[1m]))",
+              "expr": "sum(rate(cilium_policy_l7_total{k8s_app=\"cilium\", pod=~\"$pod\", rule=\"denied\"}[1m]))",
               "format": "time_series",
               "intervalFactor": 1,
               "legendFormat": "denied",
               "refId": "A"
             },
             {
-              "expr": "sum(rate(cilium_policy_l7_forwarded_total{k8s_app=\"cilium\", pod=~\"$pod\"}[1m]))",
+              "expr": "sum(rate(cilium_policy_l7_total{k8s_app=\"cilium\", pod=~\"$pod\", rule=\"forwarded\"}[1m]))",
               "format": "time_series",
               "intervalFactor": 1,
               "legendFormat": "forwarded",
               "refId": "B"
             },
             {
-              "expr": "sum(rate(cilium_policy_l7_received_total{k8s_app=\"cilium\", pod=~\"$pod\"}[1m]))",
+              "expr": "sum(rate(cilium_policy_l7_total{k8s_app=\"cilium\", pod=~\"$pod\", rule=\"received\"}[1m]))",
               "format": "time_series",
               "intervalFactor": 1,
               "legendFormat": "received",
               "refId": "C"
             }
           ],
@@ -4869,13 +4869,13 @@

           }
         },
         {
           "aliasColors": {
             "Max per node processingTime": "#e24d42",
             "Max per node upstreamTime": "#58140c",
-            "avg(cilium_policy_l7_parse_errors_total{pod=~\"cilium.*\"})": "#bf1b00",
+            "avg(cilium_policy_l7_total{pod=~\"cilium.*\", rule=\"parse_errors\"})": "#bf1b00",
             "parse errors": "#bf1b00"
           },
           "bars": true,
           "dashLength": 10,
           "dashes": false,
           "datasource": {
@@ -4928,13 +4928,13 @@

             },
             {
               "alias": "Max per node upstreamTime",
               "yaxis": 2
             },
             {
-              "alias": "avg(cilium_policy_l7_parse_errors_total{pod=~\"cilium.*\"})",
+              "alias": "avg(cilium_policy_l7_total{pod=~\"cilium.*\", rule=\"parse_errors\"})",
               "yaxis": 2
             },
             {
               "alias": "parse errors",
               "yaxis": 2
             }
@@ -4949,13 +4949,13 @@

               "interval": "",
               "intervalFactor": 1,
               "legendFormat": "{{scope}}",
               "refId": "A"
             },
             {
-              "expr": "avg(cilium_policy_l7_parse_errors_total{k8s_app=\"cilium\", pod=~\"$pod\"}) by (pod)",
+              "expr": "avg(cilium_policy_l7_total{k8s_app=\"cilium\", pod=~\"$pod\", rule=\"parse_errors\"}) by (pod)",
               "format": "time_series",
               "intervalFactor": 1,
               "legendFormat": "parse errors",
               "refId": "B"
             }
           ],
@@ -5307,13 +5307,13 @@

               "format": "time_series",
               "intervalFactor": 1,
               "legendFormat": "Max {{scope}}",
               "refId": "B"
             },
             {
-              "expr": "max(rate(cilium_policy_l7_parse_errors_total{k8s_app=\"cilium\", pod=~\"$pod\"}[1m])) by (pod)",
+              "expr": "max(rate(cilium_policy_l7_total{k8s_app=\"cilium\", pod=~\"$pod\", rule=\"parse_errors\"}[1m])) by (pod)",
               "format": "time_series",
               "intervalFactor": 1,
               "legendFormat": "parse errors",
               "refId": "A"
             }
           ],
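
The dashboard edits above all follow one pattern: queries against the per-outcome L7 policy metrics (cilium_policy_l7_denied_total, cilium_policy_l7_forwarded_total, cilium_policy_l7_received_total, cilium_policy_l7_parse_errors_total) become queries against the single cilium_policy_l7_total counter filtered by its rule label. Any alerting or recording rules maintained outside this dashboard need the same rewrite. A minimal Prometheus rule sketch in the new form; the group name, alert name, threshold, and time windows are illustrative, not taken from this repository:

```yaml
# Sketch of Prometheus rules querying the consolidated L7 policy metric.
# Rule names, threshold, and windows are illustrative.
groups:
  - name: cilium-l7-policy
    rules:
      - record: cilium:policy_l7_denied:rate5m
        expr: sum(rate(cilium_policy_l7_total{k8s_app="cilium", rule="denied"}[5m]))
      - alert: CiliumL7ParseErrors
        expr: sum(rate(cilium_policy_l7_total{k8s_app="cilium", rule="parse_errors"}[5m])) > 0
        for: 10m
        labels:
          severity: warning
        annotations:
          summary: Cilium L7 proxy is reporting policy parse errors
```
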
--- HelmRelease: kube-system/cilium ConfigMap: kube-system/cilium-config

+++ HelmRelease: kube-system/cilium ConfigMap: kube-system/cilium-config

@@ -7,20 +7,18 @@

 data:
   identity-allocation-mode: crd
   identity-heartbeat-timeout: 30m0s
   identity-gc-interval: 15m0s
   cilium-endpoint-gc-interval: 5m0s
   nodes-gc-interval: 5m0s
-  skip-cnp-status-startup-clean: 'false'
   debug: 'false'
   debug-verbose: ''
   enable-policy: default
   policy-cidr-match-mode: ''
   prometheus-serve-addr: :9962
   controller-group-metrics: write-cni-file sync-host-ips sync-lb-maps-with-k8s-services
-  proxy-prometheus-port: '9964'
   operator-prometheus-serve-addr: :9963
   enable-metrics: 'true'
   enable-ipv4: 'true'
   enable-ipv6: 'false'
   custom-cni-conf: 'false'
   enable-bpf-clock-probe: 'false'
@@ -28,76 +26,86 @@

   monitor-aggregation-interval: 5s
   monitor-aggregation-flags: all
   bpf-map-dynamic-size-ratio: '0.0025'
   bpf-policy-map-max: '16384'
   bpf-lb-map-max: '65536'
   bpf-lb-external-clusterip: 'false'
+  bpf-events-drop-enabled: 'true'
+  bpf-events-policy-verdict-enabled: 'true'
+  bpf-events-trace-enabled: 'true'
   preallocate-bpf-maps: 'false'
-  sidecar-istio-proxy-image: cilium/istio_proxy
   cluster-name: pythonesque-cluster
   cluster-id: '1'
   routing-mode: native
   service-no-backend-response: reject
   enable-l7-proxy: 'true'
   enable-ipv4-masquerade: 'true'
   enable-ipv4-big-tcp: 'false'
   enable-ipv6-big-tcp: 'false'
   enable-ipv6-masquerade: 'true'
+  enable-tcx: 'true'
+  datapath-mode: veth
   enable-bpf-masquerade: 'false'
   enable-masquerade-to-route-source: 'false'
   enable-xt-socket-fallback: 'true'
   install-no-conntrack-iptables-rules: 'false'
   auto-direct-node-routes: 'true'
+  direct-routing-skip-unreachable: 'false'
   enable-local-redirect-policy: 'true'
   ipv4-native-routing-cidr: 10.69.0.0/16
+  enable-runtime-device-detection: 'true'
   kube-proxy-replacement: 'true'
   kube-proxy-replacement-healthz-bind-address: 0.0.0.0:10256
   bpf-lb-sock: 'false'
+  bpf-lb-sock-terminate-pod-connections: 'false'
+  nodeport-addresses: ''
   enable-health-check-nodeport: 'true'
   enable-health-check-loadbalancer-ip: 'false'
   node-port-bind-protection: 'true'
   enable-auto-protect-node-port-range: 'true'
   bpf-lb-mode: snat
   bpf-lb-algorithm: maglev
   bpf-lb-acceleration: disabled
   enable-svc-source-range-check: 'true'
   enable-l2-neigh-discovery: 'true'
   arping-refresh-period: 30s
+  k8s-require-ipv4-pod-cidr: 'false'
+  k8s-require-ipv6-pod-cidr: 'false'
   enable-endpoint-routes: 'true'
   enable-k8s-networkpolicy: 'true'
   write-cni-conf-when-ready: /host/etc/cni/net.d/05-cilium.conflist
   cni-exclusive: 'false'
   cni-log-file: /var/run/cilium/cilium-cni.log
   enable-endpoint-health-checking: 'true'
   enable-health-checking: 'true'
   enable-well-known-identities: 'false'
-  enable-remote-node-identity: 'true'
+  enable-node-selector-labels: 'false'
   synchronize-k8s-nodes: 'true'
   operator-api-serve-addr: 127.0.0.1:9234
   ipam: kubernetes
   ipam-cilium-node-update-rate: 15s
   egress-gateway-reconciliation-trigger-interval: 1s
   enable-vtep: 'false'
   vtep-endpoint: ''
   vtep-cidr: ''
   vtep-mask: ''
   vtep-mac: ''
   enable-l2-announcements: 'true'
-  enable-bgp-control-plane: 'false'
   procfs: /host/proc
   bpf-root: /sys/fs/bpf
   cgroup-root: /sys/fs/cgroup
   enable-k8s-terminating-endpoint: 'true'
   enable-sctp: 'false'
   k8s-client-qps: '10'
   k8s-client-burst: '20'
   remove-cilium-node-taints: 'true'
   set-cilium-node-taints: 'true'
   set-cilium-is-up-condition: 'true'
   unmanaged-pod-watcher-interval: '15'
   dnsproxy-enable-transparent-mode: 'true'
+  dnsproxy-socket-linger-timeout: '10'
   tofqdns-dns-reject-response-code: refused
   tofqdns-enable-dns-compression: 'true'
   tofqdns-endpoint-max-ip-per-hostname: '50'
   tofqdns-idle-connection-grace-period: 0s
   tofqdns-max-deferred-connection-deletes: '10000'
   tofqdns-proxy-response-max-delay: 100ms
@@ -109,9 +117,13 @@

   proxy-xff-num-trusted-hops-ingress: '0'
   proxy-xff-num-trusted-hops-egress: '0'
   proxy-connect-timeout: '2'
   proxy-max-requests-per-connection: '0'
   proxy-max-connection-duration-seconds: '0'
   proxy-idle-timeout-seconds: '60'
-  external-envoy-proxy: 'false'
+  external-envoy-proxy: 'true'
+  envoy-base-id: '0'
+  envoy-keep-cap-netbindservice: 'false'
   max-connected-clusters: '255'
+  clustermesh-enable-endpoint-sync: 'false'
+  clustermesh-enable-mcs-api: 'false'
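
The most visible change in the agent ConfigMap is external-envoy-proxy flipping from 'false' to 'true': the L7 proxy now runs as the separate cilium-envoy DaemonSet rendered further down rather than inside the cilium-agent container, which is also why the 9964 envoy-metrics port disappears from the agent Service and DaemonSet below. The other additions (enable-tcx, the bpf-events-* switches, dnsproxy-socket-linger-timeout, the clustermesh-enable-* flags) are new keys rendered by the 1.16 chart defaults. If the embedded proxy were preferred, the upstream chart exposes a toggle for the dedicated DaemonSet; a hedged values sketch, assuming that toggle is envoy.enabled (confirm against the chart's values.yaml before relying on it):

```yaml
# Sketch only: HelmRelease values override that keeps Envoy embedded in the agent.
# envoy.enabled is assumed to be the upstream cilium chart's switch for the
# dedicated Envoy DaemonSet; it is not a value set anywhere in this repository.
spec:
  values:
    envoy:
      enabled: false   # false = run Envoy inside the cilium-agent pod
```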
 
--- HelmRelease: kube-system/cilium ClusterRole: kube-system/cilium

+++ HelmRelease: kube-system/cilium ClusterRole: kube-system/cilium

@@ -106,14 +106,12 @@

   verbs:
   - get
   - update
 - apiGroups:
   - cilium.io
   resources:
-  - ciliumnetworkpolicies/status
-  - ciliumclusterwidenetworkpolicies/status
   - ciliumendpoints/status
   - ciliumendpoints
   - ciliuml2announcementpolicies/status
   - ciliumbgpnodeconfigs/status
   verbs:
   - patch
--- HelmRelease: kube-system/cilium ClusterRole: kube-system/cilium-operator

+++ HelmRelease: kube-system/cilium ClusterRole: kube-system/cilium-operator

@@ -170,12 +170,13 @@

   - ciliumpodippools.cilium.io
 - apiGroups:
   - cilium.io
   resources:
   - ciliumloadbalancerippools
   - ciliumpodippools
+  - ciliumbgppeeringpolicies
   - ciliumbgpclusterconfigs
   - ciliumbgpnodeconfigoverrides
   verbs:
   - get
   - list
   - watch
--- HelmRelease: kube-system/cilium Service: kube-system/cilium-agent

+++ HelmRelease: kube-system/cilium Service: kube-system/cilium-agent

@@ -15,11 +15,7 @@

     k8s-app: cilium
   ports:
   - name: metrics
     port: 9962
     protocol: TCP
     targetPort: prometheus
-  - name: envoy-metrics
-    port: 9964
-    protocol: TCP
-    targetPort: envoy-metrics
 
--- HelmRelease: kube-system/cilium DaemonSet: kube-system/cilium

+++ HelmRelease: kube-system/cilium DaemonSet: kube-system/cilium

@@ -16,24 +16,24 @@

     rollingUpdate:
       maxUnavailable: 2
     type: RollingUpdate
   template:
     metadata:
       annotations:
-        cilium.io/cilium-configmap-checksum: 7c7b38cdc22f6485dffac5e01ef612310bb3ad5f0c93d2f1ed59fe7aa56e87a1
+        cilium.io/cilium-configmap-checksum: 0d75f80daf232f92f386875cbbb5b1441a8ef9cd65f2b580a540aa863b6ee20c
       labels:
         k8s-app: cilium
         app.kubernetes.io/name: cilium-agent
         app.kubernetes.io/part-of: cilium
     spec:
       securityContext:
         appArmorProfile:
           type: Unconfined
       containers:
       - name: cilium-agent
-        image: quay.io/cilium/cilium:v1.15.7@sha256:2e432bf6879feb8b891c497d6fd784b13e53456017d2b8e4ea734145f0282ef0
+        image: quay.io/cilium/cilium:v1.16.0@sha256:46ffa4ef3cf6d8885dcc4af5963b0683f7d59daa90d49ed9fb68d3b1627fe058
         imagePullPolicy: IfNotPresent
         command:
         - cilium-agent
         args:
         - --config-dir=/tmp/cilium/config-map
         startupProbe:
@@ -133,16 +133,12 @@

           hostPort: 4244
           protocol: TCP
         - name: prometheus
           containerPort: 9962
           hostPort: 9962
           protocol: TCP
-        - name: envoy-metrics
-          containerPort: 9964
-          hostPort: 9964
-          protocol: TCP
         securityContext:
           seLinuxOptions:
             level: s0
             type: spc_t
           capabilities:
             add:
@@ -158,12 +154,15 @@

             - SETGID
             - SETUID
             drop:
             - ALL
         terminationMessagePolicy: FallbackToLogsOnError
         volumeMounts:
+        - name: envoy-sockets
+          mountPath: /var/run/cilium/envoy/sockets
+          readOnly: false
         - mountPath: /host/proc/sys/net
           name: host-proc-sys-net
         - mountPath: /host/proc/sys/kernel
           name: host-proc-sys-kernel
         - name: bpf-maps
           mountPath: /sys/fs/bpf
@@ -183,13 +182,13 @@

         - name: xtables-lock
           mountPath: /run/xtables.lock
         - name: tmp
           mountPath: /tmp
       initContainers:
       - name: config
-        image: quay.io/cilium/cilium:v1.15.7@sha256:2e432bf6879feb8b891c497d6fd784b13e53456017d2b8e4ea734145f0282ef0
+        image: quay.io/cilium/cilium:v1.16.0@sha256:46ffa4ef3cf6d8885dcc4af5963b0683f7d59daa90d49ed9fb68d3b1627fe058
         imagePullPolicy: IfNotPresent
         command:
         - cilium-dbg
         - build-config
         env:
         - name: K8S_NODE_NAME
@@ -208,13 +207,13 @@

           value: '7445'
         volumeMounts:
         - name: tmp
           mountPath: /tmp
         terminationMessagePolicy: FallbackToLogsOnError
       - name: mount-cgroup
-        image: quay.io/cilium/cilium:v1.15.7@sha256:2e432bf6879feb8b891c497d6fd784b13e53456017d2b8e4ea734145f0282ef0
+        image: quay.io/cilium/cilium:v1.16.0@sha256:46ffa4ef3cf6d8885dcc4af5963b0683f7d59daa90d49ed9fb68d3b1627fe058
         imagePullPolicy: IfNotPresent
         env:
         - name: CGROUP_ROOT
           value: /sys/fs/cgroup
         - name: BIN_PATH
           value: /opt/cni/bin
@@ -240,13 +239,13 @@

             - SYS_ADMIN
             - SYS_CHROOT
             - SYS_PTRACE
             drop:
             - ALL
       - name: apply-sysctl-overwrites
-        image: quay.io/cilium/cilium:v1.15.7@sha256:2e432bf6879feb8b891c497d6fd784b13e53456017d2b8e4ea734145f0282ef0
+        image: quay.io/cilium/cilium:v1.16.0@sha256:46ffa4ef3cf6d8885dcc4af5963b0683f7d59daa90d49ed9fb68d3b1627fe058
         imagePullPolicy: IfNotPresent
         env:
         - name: BIN_PATH
           value: /opt/cni/bin
         command:
         - sh
@@ -270,13 +269,13 @@

             - SYS_ADMIN
             - SYS_CHROOT
             - SYS_PTRACE
             drop:
             - ALL
       - name: mount-bpf-fs
-        image: quay.io/cilium/cilium:v1.15.7@sha256:2e432bf6879feb8b891c497d6fd784b13e53456017d2b8e4ea734145f0282ef0
+        image: quay.io/cilium/cilium:v1.16.0@sha256:46ffa4ef3cf6d8885dcc4af5963b0683f7d59daa90d49ed9fb68d3b1627fe058
         imagePullPolicy: IfNotPresent
         args:
         - mount | grep "/sys/fs/bpf type bpf" || mount -t bpf bpf /sys/fs/bpf
         command:
         - /bin/bash
         - -c
@@ -286,13 +285,13 @@

           privileged: true
         volumeMounts:
         - name: bpf-maps
           mountPath: /sys/fs/bpf
           mountPropagation: Bidirectional
       - name: clean-cilium-state
-        image: quay.io/cilium/cilium:v1.15.7@sha256:2e432bf6879feb8b891c497d6fd784b13e53456017d2b8e4ea734145f0282ef0
+        image: quay.io/cilium/cilium:v1.16.0@sha256:46ffa4ef3cf6d8885dcc4af5963b0683f7d59daa90d49ed9fb68d3b1627fe058
         imagePullPolicy: IfNotPresent
         command:
         - /init-container.sh
         env:
         - name: CILIUM_ALL_STATE
           valueFrom:
@@ -334,13 +333,13 @@

         - name: cilium-cgroup
           mountPath: /sys/fs/cgroup
           mountPropagation: HostToContainer
         - name: cilium-run
           mountPath: /var/run/cilium
       - name: install-cni-binaries
-        image: quay.io/cilium/cilium:v1.15.7@sha256:2e432bf6879feb8b891c497d6fd784b13e53456017d2b8e4ea734145f0282ef0
+        image: quay.io/cilium/cilium:v1.16.0@sha256:46ffa4ef3cf6d8885dcc4af5963b0683f7d59daa90d49ed9fb68d3b1627fe058
         imagePullPolicy: IfNotPresent
         command:
         - /install-plugin.sh
         resources:
           requests:
             cpu: 100m
@@ -355,13 +354,12 @@

         terminationMessagePolicy: FallbackToLogsOnError
         volumeMounts:
         - name: cni-path
           mountPath: /host/opt/cni/bin
       restartPolicy: Always
       priorityClassName: system-node-critical
-      serviceAccount: cilium
       serviceAccountName: cilium
       automountServiceAccountToken: true
       terminationGracePeriodSeconds: 1
       hostNetwork: true
       affinity:
         podAntiAffinity:
@@ -405,12 +403,16 @@

         hostPath:
           path: /lib/modules
       - name: xtables-lock
         hostPath:
           path: /run/xtables.lock
           type: FileOrCreate
+      - name: envoy-sockets
+        hostPath:
+          path: /var/run/cilium/envoy/sockets
+          type: DirectoryOrCreate
       - name: clustermesh-secrets
         projected:
           defaultMode: 256
           sources:
           - secret:
               name: cilium-clustermesh
@@ -422,12 +424,22 @@

               - key: tls.key
                 path: common-etcd-client.key
               - key: tls.crt
                 path: common-etcd-client.crt
               - key: ca.crt
                 path: common-etcd-client-ca.crt
+          - secret:
+              name: clustermesh-apiserver-local-cert
+              optional: true
+              items:
+              - key: tls.key
+                path: local-etcd-client.key
+              - key: tls.crt
+                path: local-etcd-client.crt
+              - key: ca.crt
+                path: local-etcd-client-ca.crt
       - name: host-proc-sys-net
         hostPath:
           path: /proc/sys/net
           type: Directory
       - name: host-proc-sys-kernel
         hostPath:
--- HelmRelease: kube-system/cilium Deployment: kube-system/cilium-operator

+++ HelmRelease: kube-system/cilium Deployment: kube-system/cilium-operator

@@ -20,24 +20,24 @@

       maxSurge: 25%
       maxUnavailable: 100%
     type: RollingUpdate
   template:
     metadata:
       annotations:
-        cilium.io/cilium-configmap-checksum: 7c7b38cdc22f6485dffac5e01ef612310bb3ad5f0c93d2f1ed59fe7aa56e87a1
+        cilium.io/cilium-configmap-checksum: 0d75f80daf232f92f386875cbbb5b1441a8ef9cd65f2b580a540aa863b6ee20c
         prometheus.io/port: '9963'
         prometheus.io/scrape: 'true'
       labels:
         io.cilium/app: operator
         name: cilium-operator
         app.kubernetes.io/part-of: cilium
         app.kubernetes.io/name: cilium-operator
     spec:
       containers:
       - name: cilium-operator
-        image: quay.io/cilium/operator-generic:v1.15.7@sha256:6840a6dde703b3e73dd31e03390327a9184fcb888efbad9d9d098d65b9035b54
+        image: quay.io/cilium/operator-generic:v1.16.0@sha256:d6621c11c4e4943bf2998af7febe05be5ed6fdcf812b27ad4388f47022190316
         imagePullPolicy: IfNotPresent
         command:
         - cilium-operator-generic
         args:
         - --config-dir=/tmp/cilium/config-map
         - --debug=$(CILIUM_DEBUG)
@@ -91,13 +91,12 @@

           mountPath: /tmp/cilium/config-map
           readOnly: true
         terminationMessagePolicy: FallbackToLogsOnError
       hostNetwork: true
       restartPolicy: Always
       priorityClassName: system-cluster-critical
-      serviceAccount: cilium-operator
       serviceAccountName: cilium-operator
       automountServiceAccountToken: true
       affinity:
         podAntiAffinity:
           requiredDuringSchedulingIgnoredDuringExecution:
           - labelSelector:
--- HelmRelease: kube-system/cilium ServiceAccount: kube-system/cilium-envoy

+++ HelmRelease: kube-system/cilium ServiceAccount: kube-system/cilium-envoy

@@ -0,0 +1,7 @@

+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: cilium-envoy
+  namespace: kube-system
+
--- HelmRelease: kube-system/cilium ConfigMap: kube-system/cilium-envoy-config

+++ HelmRelease: kube-system/cilium ConfigMap: kube-system/cilium-envoy-config

@@ -0,0 +1,326 @@

+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: cilium-envoy-config
+  namespace: kube-system
+data:
+  bootstrap-config.json: |
+    {
+      "node": {
+        "id": "host~127.0.0.1~no-id~localdomain",
+        "cluster": "ingress-cluster"
+      },
+      "staticResources": {
+        "listeners": [
+          {
+            "name": "envoy-prometheus-metrics-listener",
+            "address": {
+              "socket_address": {
+                "address": "0.0.0.0",
+                "port_value": 9964
+              }
+            },
+            "filter_chains": [
+              {
+                "filters": [
+                  {
+                    "name": "envoy.filters.network.http_connection_manager",
+                    "typed_config": {
+                      "@type": "type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager",
+                      "stat_prefix": "envoy-prometheus-metrics-listener",
+                      "route_config": {
+                        "virtual_hosts": [
+                          {
+                            "name": "prometheus_metrics_route",
+                            "domains": [
+                              "*"
+                            ],
+                            "routes": [
+                              {
+                                "name": "prometheus_metrics_route",
+                                "match": {
+                                  "prefix": "/metrics"
+                                },
+                                "route": {
+                                  "cluster": "/envoy-admin",
+                                  "prefix_rewrite": "/stats/prometheus"
+                                }
+                              }
+                            ]
+                          }
+                        ]
+                      },
+                      "http_filters": [
+                        {
+                          "name": "envoy.filters.http.router",
+                          "typed_config": {
+                            "@type": "type.googleapis.com/envoy.extensions.filters.http.router.v3.Router"
+                          }
+                        }
+                      ],
+                      "stream_idle_timeout": "0s"
+                    }
+                  }
+                ]
+              }
+            ]
+          },
+          {
+            "name": "envoy-health-listener",
+            "address": {
+              "socket_address": {
+                "address": "127.0.0.1",
+                "port_value": 9878
+              }
+            },
+            "filter_chains": [
+              {
+                "filters": [
+                  {
+                    "name": "envoy.filters.network.http_connection_manager",
+                    "typed_config": {
+                      "@type": "type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager",
+                      "stat_prefix": "envoy-health-listener",
+                      "route_config": {
+                        "virtual_hosts": [
+                          {
+                            "name": "health",
+                            "domains": [
+                              "*"
+                            ],
+                            "routes": [
+                              {
+                                "name": "health",
+                                "match": {
+                                  "prefix": "/healthz"
+                                },
+                                "route": {
+                                  "cluster": "/envoy-admin",
+                                  "prefix_rewrite": "/ready"
+                                }
+                              }
+                            ]
+                          }
+                        ]
+                      },
+                      "http_filters": [
+                        {
+                          "name": "envoy.filters.http.router",
+                          "typed_config": {
+                            "@type": "type.googleapis.com/envoy.extensions.filters.http.router.v3.Router"
+                          }
+                        }
+                      ],
+                      "stream_idle_timeout": "0s"
+                    }
+                  }
+                ]
+              }
+            ]
+          }
+        ],
+        "clusters": [
+          {
+            "name": "ingress-cluster",
+            "type": "ORIGINAL_DST",
+            "connectTimeout": "2s",
+            "lbPolicy": "CLUSTER_PROVIDED",
+            "typedExtensionProtocolOptions": {
+              "envoy.extensions.upstreams.http.v3.HttpProtocolOptions": {
+                "@type": "type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions",
+                "commonHttpProtocolOptions": {
+                  "idleTimeout": "60s",
+                  "maxConnectionDuration": "0s",
+                  "maxRequestsPerConnection": 0
+                },
+                "useDownstreamProtocolConfig": {}
+              }
+            },
+            "cleanupInterval": "2.500s"
+          },
+          {
+            "name": "egress-cluster-tls",
+            "type": "ORIGINAL_DST",
+            "connectTimeout": "2s",
+            "lbPolicy": "CLUSTER_PROVIDED",
+            "typedExtensionProtocolOptions": {
+              "envoy.extensions.upstreams.http.v3.HttpProtocolOptions": {
+                "@type": "type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions",
+                "commonHttpProtocolOptions": {
+                  "idleTimeout": "60s",
+                  "maxConnectionDuration": "0s",
+                  "maxRequestsPerConnection": 0
+                },
+                "upstreamHttpProtocolOptions": {},
+                "useDownstreamProtocolConfig": {}
+              }
+            },
+            "cleanupInterval": "2.500s",
+            "transportSocket": {
+              "name": "cilium.tls_wrapper",
+              "typedConfig": {
+                "@type": "type.googleapis.com/cilium.UpstreamTlsWrapperContext"
+              }
+            }
+          },
+          {
+            "name": "egress-cluster",
+            "type": "ORIGINAL_DST",
+            "connectTimeout": "2s",
+            "lbPolicy": "CLUSTER_PROVIDED",
+            "typedExtensionProtocolOptions": {
+              "envoy.extensions.upstreams.http.v3.HttpProtocolOptions": {
+                "@type": "type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions",
+                "commonHttpProtocolOptions": {
+                  "idleTimeout": "60s",
+                  "maxConnectionDuration": "0s",
+                  "maxRequestsPerConnection": 0
+                },
+                "useDownstreamProtocolConfig": {}
+              }
+            },
+            "cleanupInterval": "2.500s"
+          },
+          {
+            "name": "ingress-cluster-tls",
+            "type": "ORIGINAL_DST",
+            "connectTimeout": "2s",
+            "lbPolicy": "CLUSTER_PROVIDED",
+            "typedExtensionProtocolOptions": {
+              "envoy.extensions.upstreams.http.v3.HttpProtocolOptions": {
+                "@type": "type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions",
+                "commonHttpProtocolOptions": {
+                  "idleTimeout": "60s",
+                  "maxConnectionDuration": "0s",
+                  "maxRequestsPerConnection": 0
+                },
+                "upstreamHttpProtocolOptions": {},
+                "useDownstreamProtocolConfig": {}
+              }
+            },
+            "cleanupInterval": "2.500s",
+            "transportSocket": {
+              "name": "cilium.tls_wrapper",
+              "typedConfig": {
+                "@type": "type.googleapis.com/cilium.UpstreamTlsWrapperContext"
+              }
+            }
+          },
+          {
+            "name": "xds-grpc-cilium",
+            "type": "STATIC",
+            "connectTimeout": "2s",
+            "loadAssignment": {
+              "clusterName": "xds-grpc-cilium",
+              "endpoints": [
+                {
+                  "lbEndpoints": [
+                    {
+                      "endpoint": {
+                        "address": {
+                          "pipe": {
+                            "path": "/var/run/cilium/envoy/sockets/xds.sock"
+                          }
+                        }
+                      }
+                    }
+                  ]
+                }
+              ]
+            },
+            "typedExtensionProtocolOptions": {
+              "envoy.extensions.upstreams.http.v3.HttpProtocolOptions": {
+                "@type": "type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions",
+                "explicitHttpConfig": {
+                  "http2ProtocolOptions": {}
+                }
+              }
+            }
+          },
+          {
+            "name": "/envoy-admin",
+            "type": "STATIC",
+            "connectTimeout": "2s",
+            "loadAssignment": {
+              "clusterName": "/envoy-admin",
+              "endpoints": [
+                {
+                  "lbEndpoints": [
+                    {
+                      "endpoint": {
+                        "address": {
+                          "pipe": {
+                            "path": "/var/run/cilium/envoy/sockets/admin.sock"
+                          }
+                        }
+                      }
+                    }
+                  ]
+                }
+              ]
+            }
+          }
+        ]
+      },
+      "dynamicResources": {
+        "ldsConfig": {
+          "apiConfigSource": {
+            "apiType": "GRPC",
+            "transportApiVersion": "V3",
+            "grpcServices": [
+              {
+                "envoyGrpc": {
[Diff truncated by flux-local]
--- HelmRelease: kube-system/cilium DaemonSet: kube-system/cilium-envoy

+++ HelmRelease: kube-system/cilium DaemonSet: kube-system/cilium-envoy

@@ -0,0 +1,171 @@

+---
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+  name: cilium-envoy
+  namespace: kube-system
+  labels:
+    k8s-app: cilium-envoy
+    app.kubernetes.io/part-of: cilium
+    app.kubernetes.io/name: cilium-envoy
+    name: cilium-envoy
+spec:
+  selector:
+    matchLabels:
+      k8s-app: cilium-envoy
+  updateStrategy:
+    rollingUpdate:
+      maxUnavailable: 2
+    type: RollingUpdate
+  template:
+    metadata:
+      annotations:
+        prometheus.io/port: '9964'
+        prometheus.io/scrape: 'true'
+      labels:
+        k8s-app: cilium-envoy
+        name: cilium-envoy
+        app.kubernetes.io/name: cilium-envoy
+        app.kubernetes.io/part-of: cilium
+    spec:
+      securityContext:
+        appArmorProfile:
+          type: Unconfined
+      containers:
+      - name: cilium-envoy
+        image: quay.io/cilium/cilium-envoy:v1.29.7-39a2a56bbd5b3a591f69dbca51d3e30ef97e0e51@sha256:bd5ff8c66716080028f414ec1cb4f7dc66f40d2fb5a009fff187f4a9b90b566b
+        imagePullPolicy: IfNotPresent
+        command:
+        - /usr/bin/cilium-envoy-starter
+        args:
+        - --
+        - -c /var/run/cilium/envoy/bootstrap-config.json
+        - --base-id 0
+        - --log-level info
+        - --log-format [%Y-%m-%d %T.%e][%t][%l][%n] [%g:%#] %v
+        startupProbe:
+          httpGet:
+            host: 127.0.0.1
+            path: /healthz
+            port: 9878
+            scheme: HTTP
+          failureThreshold: 105
+          periodSeconds: 2
+          successThreshold: 1
+          initialDelaySeconds: 5
+        livenessProbe:
+          httpGet:
+            host: 127.0.0.1
+            path: /healthz
+            port: 9878
+            scheme: HTTP
+          periodSeconds: 30
+          successThreshold: 1
+          failureThreshold: 10
+          timeoutSeconds: 5
+        readinessProbe:
+          httpGet:
+            host: 127.0.0.1
+            path: /healthz
+            port: 9878
+            scheme: HTTP
+          periodSeconds: 30
+          successThreshold: 1
+          failureThreshold: 3
+          timeoutSeconds: 5
+        env:
+        - name: K8S_NODE_NAME
+          valueFrom:
+            fieldRef:
+              apiVersion: v1
+              fieldPath: spec.nodeName
+        - name: CILIUM_K8S_NAMESPACE
+          valueFrom:
+            fieldRef:
+              apiVersion: v1
+              fieldPath: metadata.namespace
+        - name: KUBERNETES_SERVICE_HOST
+          value: 127.0.0.1
+        - name: KUBERNETES_SERVICE_PORT
+          value: '7445'
+        ports:
+        - name: envoy-metrics
+          containerPort: 9964
+          hostPort: 9964
+          protocol: TCP
+        securityContext:
+          seLinuxOptions:
+            level: s0
+            type: spc_t
+          capabilities:
+            add:
+            - NET_ADMIN
+            - SYS_ADMIN
+            drop:
+            - ALL
+        terminationMessagePolicy: FallbackToLogsOnError
+        volumeMounts:
+        - name: envoy-sockets
+          mountPath: /var/run/cilium/envoy/sockets
+          readOnly: false
+        - name: envoy-artifacts
+          mountPath: /var/run/cilium/envoy/artifacts
+          readOnly: true
+        - name: envoy-config
+          mountPath: /var/run/cilium/envoy/
+          readOnly: true
+        - name: bpf-maps
+          mountPath: /sys/fs/bpf
+          mountPropagation: HostToContainer
+      restartPolicy: Always
+      priorityClassName: system-node-critical
+      serviceAccountName: cilium-envoy
+      automountServiceAccountToken: true
+      terminationGracePeriodSeconds: 1
+      hostNetwork: true
+      affinity:
+        nodeAffinity:
+          requiredDuringSchedulingIgnoredDuringExecution:
+            nodeSelectorTerms:
+            - matchExpressions:
+              - key: cilium.io/no-schedule
+                operator: NotIn
+                values:
+                - 'true'
+        podAffinity:
+          requiredDuringSchedulingIgnoredDuringExecution:
+          - labelSelector:
+              matchLabels:
+                k8s-app: cilium
+            topologyKey: kubernetes.io/hostname
+        podAntiAffinity:
+          requiredDuringSchedulingIgnoredDuringExecution:
+          - labelSelector:
+              matchLabels:
+                k8s-app: cilium-envoy
+            topologyKey: kubernetes.io/hostname
+      nodeSelector:
+        kubernetes.io/os: linux
+      tolerations:
+      - operator: Exists
+      volumes:
+      - name: envoy-sockets
+        hostPath:
+          path: /var/run/cilium/envoy/sockets
+          type: DirectoryOrCreate
+      - name: envoy-artifacts
+        hostPath:
+          path: /var/run/cilium/envoy/artifacts
+          type: DirectoryOrCreate
+      - name: envoy-config
+        configMap:
+          name: cilium-envoy-config
+          defaultMode: 256
+          items:
+          - key: bootstrap-config.json
+            path: bootstrap-config.json
+      - name: bpf-maps
+        hostPath:
+          path: /sys/fs/bpf
+          type: DirectoryOrCreate
+
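
The new cilium-envoy DaemonSet lands only on nodes that already run a cilium agent (podAffinity on k8s-app: cilium), shares the /var/run/cilium/envoy/sockets hostPath with the agent so the two talk over the xds.sock and admin.sock pipes referenced in the bootstrap config, and publishes Envoy metrics on hostPort 9964 via prometheus.io annotations, replacing the envoy-metrics port removed from the cilium-agent Service above. For a plain Prometheus setup, an annotation-driven scrape job along these lines would pick the new pods up; the job name and relabeling are illustrative, and Prometheus Operator users would use a PodMonitor instead:

```yaml
# Sketch of a Prometheus scrape job for the cilium-envoy pods, keyed on
# their prometheus.io/* annotations. Job name and relabeling are illustrative.
scrape_configs:
  - job_name: cilium-envoy
    kubernetes_sd_configs:
      - role: pod
        namespaces:
          names: [kube-system]
    relabel_configs:
      # keep only cilium-envoy pods that opt in via the scrape annotation
      - source_labels: [__meta_kubernetes_pod_label_k8s_app]
        regex: cilium-envoy
        action: keep
      - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scrape]
        regex: "true"
        action: keep
      # point the scrape target at the annotated port (9964)
      - source_labels: [__address__, __meta_kubernetes_pod_annotation_prometheus_io_port]
        regex: ([^:]+)(?::\d+)?;(\d+)
        replacement: $1:$2
        target_label: __address__
```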

KeyboardDabbler merged commit 4207629 into main on Aug 4, 2024
5 checks passed
KeyboardDabbler deleted the renovate/cilium-1.x branch on Aug 4, 2024 at 02:11