From 51cd7e6a92eca55017659689e18bb452dc705dbd Mon Sep 17 00:00:00 2001
From: Fabian von Feilitzsch
Date: Thu, 5 Nov 2020 16:42:40 -0500
Subject: [PATCH] Revert "Dedupe on-prem templates"

---
 pkg/controller/template/render.go             | 101 +----------
 .../files/NetworkManager-mdns-hostname.yaml   |   0
 .../NetworkManager-resolv-prepender.yaml      |   4 +-
 .../baremetal-NetworkManager-kni-conf.yaml}   |   2 +-
 .../files/baremetal-coredns-corefile.yaml}    |   6 +-
 .../files/baremetal-coredns.yaml}             |  12 +-
 .../files/baremetal-keepalived.yaml}          |  14 +-
 .../files/baremetal-mdns-publisher.yaml}      |   8 +-
 .../baremetal-system-connections-mount.yaml   |   0
 .../units/nodeip-configuration.service.yaml   |   2 +-
 .../files/NetworkManager-mdns-hostname.yaml   |  21 +++
 .../NetworkManager-resolv-prepender.yaml      |  52 ++++++
 .../files/openstack-NetworkManager-conf.yaml  |   9 +
 ...e.yaml => openstack-coredns-corefile.yaml} |   0
 .../openstack/files/openstack-coredns-db.yaml |   6 +-
 .../openstack/files/openstack-coredns.yaml    |  88 +++++++++
 .../openstack/files/openstack-keepalived.yaml | 159 ++++++++++++++++
 .../files/openstack-mdns-publisher.yaml       | 104 +++++++++++
 .../units/afterburn-hostname.service.yaml     |   0
 .../units/nodeip-configuration.service.yaml   |  47 +++++
 .../NetworkManager-resolv-prepender.yaml      |  53 ++++++
 .../files/ovirt-NetworkManager-kni-conf.yaml  |   9 +
 ...efile.yaml => ovirt-coredns-corefile.yaml} |   0
 .../common/ovirt/files/ovirt-coredns-db.yaml  |   6 +-
 .../common/ovirt/files/ovirt-coredns.yaml     |  88 +++++++++
 .../common/ovirt/files/ovirt-keepalived.yaml  | 115 ++++++++++++
 .../ovirt/files/ovirt-mdns-publisher.yaml     |  72 ++++++++
 .../files/NetworkManager-mdns-hostname.yaml   |  31 ++++
 .../NetworkManager-resolv-prepender.yaml      |  64 +++++++
 .../vsphere/files/coredns-corefile.yaml       |  24 ---
 .../vsphere-NetworkManager-kni-conf.yaml      |  19 ++
 .../files/vsphere-coredns-corefile.yaml       |  32 ++++
 .../common/vsphere/files/vsphere-coredns.yaml | 128 +++++++++++++
 .../vsphere/files/vsphere-keepalived.yaml     | 169 ++++++++++++++++++
 .../vsphere/files/vsphere-mdns-publisher.yaml | 113 ++++++++++++
 .../units/nodeip-configuration.service.yaml   |  57 ++++++
 .../files/baremetal-haproxy-haproxy.yaml}     |   0
 .../files/baremetal-haproxy.yaml}             |   6 +-
 .../baremetal-keepalived-keepalived.yaml}     |   0
 .../baremetal-keepalived-script-both.yaml}    |   0
 .../files/baremetal-keepalived-script.yaml}   |   0
 .../files/baremetal-mdns-config.yaml}         |   0
 .../files/openstack-haproxy-haproxy.yaml      |  39 ++++
 .../openstack/files/openstack-haproxy.yaml    | 144 +++++++++++++++
 .../openstack-keepalived-keepalived.yaml      |  80 +++++++++
 .../openstack-keepalived-script-both.yaml     |   6 +
 .../files/openstack-keepalived-script.yaml    |   6 +
 .../files/openstack-mdns-config.yaml          |  14 ++
 .../ovirt/files/ovirt-haproxy-haproxy.yaml    |  39 ++++
 .../00-master/ovirt/files/ovirt-haproxy.yaml  | 124 +++++++++++++
 .../files/ovirt-keepalived-keepalived.yaml    |  58 ++++++
 .../ovirt/files/ovirt-keepalived-script.yaml  |   6 +
 .../ovirt/files/ovirt-mdns-config.yaml}       |   0
 .../files/vsphere-haproxy-haproxy.yaml        |  49 +++++
 .../vsphere/files/vsphere-haproxy.yaml        | 156 ++++++++++++++++
 .../files/vsphere-keepalived-keepalived.yaml  | 109 +++++++++++
 .../files/vsphere-keepalived-script-both.yaml |   6 +
 .../files/vsphere-keepalived-script.yaml      |   6 +
 .../vsphere/files/vsphere-mdns-config.yaml    |  25 +++
 .../units/kubelet.service.yaml                |   0
 .../openstack/units/kubelet.service.yaml      |  41 +++++
 .../vsphere/units/kubelet.service.yaml        |  51 ++++++
 .../baremetal-keepalived-keepalived.yaml}     |   0
 .../files/baremetal-mdns-config.yaml          |  15 ++
 .../openstack-keepalived-keepalived.yaml      |  28 +++
 .../files/openstack-mdns-config.yaml          |  14 ++
 .../files/ovirt-keepalived-keepalived.yaml    |  29 +++
 .../ovirt/files/ovirt-mdns-config.yaml        |  15 ++
 .../files/vsphere-keepalived-keepalived.yaml  |  39 ++++
 .../vsphere/files/vsphere-mdns-config.yaml    |  25 +++
 .../units/kubelet.service.yaml                |   0
 .../openstack/units/kubelet.service.yaml      |  40 +++++
 .../vsphere/units/kubelet.service.yaml        |  50 ++++++
 73 files changed, 2682 insertions(+), 153 deletions(-)
 rename templates/common/{on-prem => baremetal}/files/NetworkManager-mdns-hostname.yaml (100%)
 rename templates/common/{on-prem => baremetal}/files/NetworkManager-resolv-prepender.yaml (94%)
 rename templates/common/{on-prem/files/NetworkManager-onprem.conf.yaml => baremetal/files/baremetal-NetworkManager-kni-conf.yaml} (72%)
 rename templates/common/{on-prem/files/coredns-corefile.yaml => baremetal/files/baremetal-coredns-corefile.yaml} (87%)
 rename templates/common/{on-prem/files/coredns.yaml => baremetal/files/baremetal-coredns.yaml} (89%)
 rename templates/common/{on-prem/files/keepalived.yaml => baremetal/files/baremetal-keepalived.yaml} (92%)
 rename templates/common/{on-prem/files/mdns-publisher.yaml => baremetal/files/baremetal-mdns-publisher.yaml} (92%)
 rename templates/common/{on-prem => baremetal}/units/baremetal-system-connections-mount.yaml (100%)
 rename templates/common/{on-prem => baremetal}/units/nodeip-configuration.service.yaml (95%)
 create mode 100644 templates/common/openstack/files/NetworkManager-mdns-hostname.yaml
 create mode 100644 templates/common/openstack/files/NetworkManager-resolv-prepender.yaml
 create mode 100644 templates/common/openstack/files/openstack-NetworkManager-conf.yaml
 rename templates/common/openstack/files/{coredns-corefile.yaml => openstack-coredns-corefile.yaml} (100%)
 create mode 100644 templates/common/openstack/files/openstack-coredns.yaml
 create mode 100644 templates/common/openstack/files/openstack-keepalived.yaml
 create mode 100644 templates/common/openstack/files/openstack-mdns-publisher.yaml
 rename templates/common/{on-prem => openstack}/units/afterburn-hostname.service.yaml (100%)
 create mode 100644 templates/common/openstack/units/nodeip-configuration.service.yaml
 create mode 100644 templates/common/ovirt/files/NetworkManager-resolv-prepender.yaml
 create mode 100644 templates/common/ovirt/files/ovirt-NetworkManager-kni-conf.yaml
 rename templates/common/ovirt/files/{coredns-corefile.yaml => ovirt-coredns-corefile.yaml} (100%)
 create mode 100644 templates/common/ovirt/files/ovirt-coredns.yaml
 create mode 100644 templates/common/ovirt/files/ovirt-keepalived.yaml
 create mode 100644 templates/common/ovirt/files/ovirt-mdns-publisher.yaml
 create mode 100644 templates/common/vsphere/files/NetworkManager-mdns-hostname.yaml
 create mode 100644 templates/common/vsphere/files/NetworkManager-resolv-prepender.yaml
 delete mode 100644 templates/common/vsphere/files/coredns-corefile.yaml
 create mode 100644 templates/common/vsphere/files/vsphere-NetworkManager-kni-conf.yaml
 create mode 100644 templates/common/vsphere/files/vsphere-coredns-corefile.yaml
 create mode 100644 templates/common/vsphere/files/vsphere-coredns.yaml
 create mode 100644 templates/common/vsphere/files/vsphere-keepalived.yaml
 create mode 100644 templates/common/vsphere/files/vsphere-mdns-publisher.yaml
 create mode 100644 templates/common/vsphere/units/nodeip-configuration.service.yaml
 rename templates/master/00-master/{on-prem/files/haproxy-haproxy.yaml => baremetal/files/baremetal-haproxy-haproxy.yaml} (100%)
 rename templates/master/00-master/{on-prem/files/haproxy.yaml => baremetal/files/baremetal-haproxy.yaml} (95%)
 rename templates/master/00-master/{on-prem/files/keepalived-keepalived.yaml => baremetal/files/baremetal-keepalived-keepalived.yaml} (100%)
 rename templates/master/00-master/{on-prem/files/keepalived-script-both.yaml => baremetal/files/baremetal-keepalived-script-both.yaml} (100%)
 rename templates/master/00-master/{on-prem/files/keepalived-script.yaml => baremetal/files/baremetal-keepalived-script.yaml} (100%)
 rename templates/master/00-master/{on-prem/files/mdns-config.yaml => baremetal/files/baremetal-mdns-config.yaml} (100%)
 create mode 100644 templates/master/00-master/openstack/files/openstack-haproxy-haproxy.yaml
 create mode 100644 templates/master/00-master/openstack/files/openstack-haproxy.yaml
 create mode 100644 templates/master/00-master/openstack/files/openstack-keepalived-keepalived.yaml
 create mode 100644 templates/master/00-master/openstack/files/openstack-keepalived-script-both.yaml
 create mode 100644 templates/master/00-master/openstack/files/openstack-keepalived-script.yaml
 create mode 100644 templates/master/00-master/openstack/files/openstack-mdns-config.yaml
 create mode 100644 templates/master/00-master/ovirt/files/ovirt-haproxy-haproxy.yaml
 create mode 100644 templates/master/00-master/ovirt/files/ovirt-haproxy.yaml
 create mode 100644 templates/master/00-master/ovirt/files/ovirt-keepalived-keepalived.yaml
 create mode 100644 templates/master/00-master/ovirt/files/ovirt-keepalived-script.yaml
 rename templates/{worker/00-worker/on-prem/files/mdns-config.yaml => master/00-master/ovirt/files/ovirt-mdns-config.yaml} (100%)
 create mode 100644 templates/master/00-master/vsphere/files/vsphere-haproxy-haproxy.yaml
 create mode 100644 templates/master/00-master/vsphere/files/vsphere-haproxy.yaml
 create mode 100644 templates/master/00-master/vsphere/files/vsphere-keepalived-keepalived.yaml
 create mode 100644 templates/master/00-master/vsphere/files/vsphere-keepalived-script-both.yaml
 create mode 100644 templates/master/00-master/vsphere/files/vsphere-keepalived-script.yaml
 create mode 100644 templates/master/00-master/vsphere/files/vsphere-mdns-config.yaml
 rename templates/master/01-master-kubelet/{on-prem => baremetal}/units/kubelet.service.yaml (100%)
 create mode 100644 templates/master/01-master-kubelet/openstack/units/kubelet.service.yaml
 create mode 100644 templates/master/01-master-kubelet/vsphere/units/kubelet.service.yaml
 rename templates/worker/00-worker/{on-prem/files/keepalived-keepalived.yaml => baremetal/files/baremetal-keepalived-keepalived.yaml} (100%)
 create mode 100644 templates/worker/00-worker/baremetal/files/baremetal-mdns-config.yaml
 create mode 100644 templates/worker/00-worker/openstack/files/openstack-keepalived-keepalived.yaml
 create mode 100644 templates/worker/00-worker/openstack/files/openstack-mdns-config.yaml
 create mode 100644 templates/worker/00-worker/ovirt/files/ovirt-keepalived-keepalived.yaml
 create mode 100644 templates/worker/00-worker/ovirt/files/ovirt-mdns-config.yaml
 create mode 100644 templates/worker/00-worker/vsphere/files/vsphere-keepalived-keepalived.yaml
 create mode 100644 templates/worker/00-worker/vsphere/files/vsphere-mdns-config.yaml
 rename templates/worker/01-worker-kubelet/{on-prem => baremetal}/units/kubelet.service.yaml (100%)
 create mode 100644 templates/worker/01-worker-kubelet/openstack/units/kubelet.service.yaml
 create mode 100644 templates/worker/01-worker-kubelet/vsphere/units/kubelet.service.yaml

diff --git a/pkg/controller/template/render.go b/pkg/controller/template/render.go
index da9630f575..231a519373 100644
--- a/pkg/controller/template/render.go
+++ b/pkg/controller/template/render.go
@@ -27,10 +27,9 @@ type RenderConfig struct {
 }
 
 const (
-	filesDir       = "files"
-	unitsDir       = "units"
-	platformBase   = "_base"
-	platformOnPrem = "on-prem"
+	filesDir     = "files"
+	unitsDir     = "units"
+	platformBase = "_base"
 )
 
 // generateTemplateMachineConfigs returns MachineConfig objects from the templateDir and a config object
@@ -193,10 +192,7 @@ func generateMachineConfigForName(config *RenderConfig, role, name, templateDir,
 	platformDirs := []string{}
 	if !*commonAdded {
 		// Loop over templates/common which applies everywhere
-		for _, dir := range []string{platformBase, platformOnPrem, platformString} {
-			if dir == platformOnPrem && !onPremPlatform(config.Infra.Status.PlatformStatus.Type) {
-				continue
-			}
+		for _, dir := range []string{platformBase, platformString} {
 			basePath := filepath.Join(templateDir, "common", dir)
 			exists, err := existsDir(basePath)
 			if err != nil {
@@ -209,12 +205,8 @@ func generateMachineConfigForName(config *RenderConfig, role, name, templateDir,
 		}
 		*commonAdded = true
 	}
 
-	// And now over the target e.g. templates/master/00-master,01-master-container-runtime,01-master-kubelet
-	for _, dir := range []string{platformBase, platformOnPrem, platformString} {
-		if dir == platformOnPrem && !onPremPlatform(config.Infra.Status.PlatformStatus.Type) {
-			continue
-		}
+	for _, dir := range []string{platformBase, platformString} {
 		platformPath := filepath.Join(path, dir)
 		exists, err := existsDir(platformPath)
 		if err != nil {
@@ -291,10 +283,6 @@ func renderTemplate(config RenderConfig, path string, b []byte) ([]byte, error)
 	funcs["skip"] = skipMissing
 	funcs["cloudProvider"] = cloudProvider
 	funcs["cloudConfigFlag"] = cloudConfigFlag
-	funcs["onPremPlatformAPIServerInternalIP"] = onPremPlatformAPIServerInternalIP
-	funcs["onPremPlatformIngressIP"] = onPremPlatformIngressIP
-	funcs["onPremPlatformShortName"] = onPremPlatformShortName
-	funcs["onPremPlatformKeepalivedEnableUnicast"] = onPremPlatformKeepalivedEnableUnicast
 	tmpl, err := template.New(path).Funcs(funcs).Parse(string(b))
 	if err != nil {
 		return nil, fmt.Errorf("failed to parse template %s: %v", path, err)
@@ -368,76 +356,6 @@ func cloudConfigFlag(cfg RenderConfig) interface{} {
 	}
 }
 
-func onPremPlatformShortName(cfg RenderConfig) interface{} {
-	if cfg.Infra.Status.PlatformStatus != nil {
-		switch cfg.Infra.Status.PlatformStatus.Type {
-		case configv1.BareMetalPlatformType:
-			return "kni"
-		case configv1.OvirtPlatformType:
-			return "ovirt"
-		case configv1.OpenStackPlatformType:
-			return "openstack"
-		case configv1.VSpherePlatformType:
-			return "vsphere"
-		default:
-			return ""
-		}
-	} else {
-		return ""
-	}
-}
-
-func onPremPlatformKeepalivedEnableUnicast(cfg RenderConfig) (interface{}, error) {
-	if cfg.Infra.Status.PlatformStatus != nil {
-		switch cfg.Infra.Status.PlatformStatus.Type {
-		case configv1.BareMetalPlatformType:
-			return "yes", nil
-		default:
-			return "no", nil
-		}
-	} else {
-		return "no", nil
-	}
-}
-
-func onPremPlatformIngressIP(cfg RenderConfig) (interface{}, error) {
-	if cfg.Infra.Status.PlatformStatus != nil {
-		switch cfg.Infra.Status.PlatformStatus.Type {
-		case configv1.BareMetalPlatformType:
-			return cfg.Infra.Status.PlatformStatus.BareMetal.IngressIP, nil
-		case configv1.OvirtPlatformType:
-			return cfg.Infra.Status.PlatformStatus.Ovirt.IngressIP, nil
-		case configv1.OpenStackPlatformType:
-			return cfg.Infra.Status.PlatformStatus.OpenStack.IngressIP, nil
-		case configv1.VSpherePlatformType:
-			return cfg.Infra.Status.PlatformStatus.VSphere.IngressIP, nil
-		default:
-			return nil, fmt.Errorf("invalid platform for Ingress IP")
-		}
-	} else {
-		return nil, fmt.Errorf("")
-	}
-}
-
-func onPremPlatformAPIServerInternalIP(cfg RenderConfig) (interface{}, error) {
-	if cfg.Infra.Status.PlatformStatus != nil {
-		switch cfg.Infra.Status.PlatformStatus.Type {
-		case configv1.BareMetalPlatformType:
-			return cfg.Infra.Status.PlatformStatus.BareMetal.APIServerInternalIP, nil
-		case configv1.OvirtPlatformType:
-			return cfg.Infra.Status.PlatformStatus.Ovirt.APIServerInternalIP, nil
-		case configv1.OpenStackPlatformType:
-			return cfg.Infra.Status.PlatformStatus.OpenStack.APIServerInternalIP, nil
-		case configv1.VSpherePlatformType:
-			return cfg.Infra.Status.PlatformStatus.VSphere.APIServerInternalIP, nil
-		default:
-			return nil, fmt.Errorf("invalid platform for API Server Internal IP")
-		}
-	} else {
-		return nil, fmt.Errorf("")
-	}
-}
-
 // existsDir returns true if path exists and is a directory, false if the path
 // does not exist, and error if there is a runtime error or the path is not a directory
 func existsDir(path string) (bool, error) {
@@ -453,12 +371,3 @@ func existsDir(path string) (bool, error) {
 	}
 	return true, nil
 }
-
-func onPremPlatform(platformString configv1.PlatformType) bool {
-	switch platformString {
-	case configv1.BareMetalPlatformType, configv1.OvirtPlatformType, configv1.OpenStackPlatformType, configv1.VSpherePlatformType:
-		return true
-	default:
-		return false
-	}
-}
diff --git a/templates/common/on-prem/files/NetworkManager-mdns-hostname.yaml b/templates/common/baremetal/files/NetworkManager-mdns-hostname.yaml
similarity index 100%
rename from templates/common/on-prem/files/NetworkManager-mdns-hostname.yaml
rename to templates/common/baremetal/files/NetworkManager-mdns-hostname.yaml
diff --git a/templates/common/on-prem/files/NetworkManager-resolv-prepender.yaml b/templates/common/baremetal/files/NetworkManager-resolv-prepender.yaml
similarity index 94%
rename from templates/common/on-prem/files/NetworkManager-resolv-prepender.yaml
rename to templates/common/baremetal/files/NetworkManager-resolv-prepender.yaml
index 974406a5c8..4021886175 100644
--- a/templates/common/on-prem/files/NetworkManager-resolv-prepender.yaml
+++ b/templates/common/baremetal/files/NetworkManager-resolv-prepender.yaml
@@ -47,8 +47,8 @@ contents:
           {{ .Images.baremetalRuntimeCfgImage }} \
           node-ip \
           show \
-          "{{ onPremPlatformAPIServerInternalIP . }}" \
-          "{{ onPremPlatformIngressIP . }}")
+          "{{.Infra.Status.PlatformStatus.BareMetal.APIServerInternalIP}}" \
+          "{{.Infra.Status.PlatformStatus.BareMetal.IngressIP}}")
         DOMAIN="{{.DNS.Spec.BaseDomain}}"
         if [[ -n "$NAMESERVER_IP" ]]; then
           >&2 echo "NM resolv-prepender: Prepending 'nameserver $NAMESERVER_IP' to /etc/resolv.conf (other nameservers from /var/run/NetworkManager/resolv.conf)"
diff --git a/templates/common/on-prem/files/NetworkManager-onprem.conf.yaml b/templates/common/baremetal/files/baremetal-NetworkManager-kni-conf.yaml
similarity index 72%
rename from templates/common/on-prem/files/NetworkManager-onprem.conf.yaml
rename to templates/common/baremetal/files/baremetal-NetworkManager-kni-conf.yaml
index 2e2eae359d..62007be856 100644
--- a/templates/common/on-prem/files/NetworkManager-onprem.conf.yaml
+++ b/templates/common/baremetal/files/baremetal-NetworkManager-kni-conf.yaml
@@ -1,5 +1,5 @@
 mode: 0644
-path: "/etc/NetworkManager/conf.d/99-{{ onPremPlatformShortName . }}.conf"
+path: "/etc/NetworkManager/conf.d/99-kni.conf"
 contents:
   inline: |
     [main]
diff --git a/templates/common/on-prem/files/coredns-corefile.yaml b/templates/common/baremetal/files/baremetal-coredns-corefile.yaml
similarity index 87%
rename from templates/common/on-prem/files/coredns-corefile.yaml
rename to templates/common/baremetal/files/baremetal-coredns-corefile.yaml
index 58fb6a6b4b..03657fe653 100644
--- a/templates/common/on-prem/files/coredns-corefile.yaml
+++ b/templates/common/baremetal/files/baremetal-coredns-corefile.yaml
@@ -11,7 +11,7 @@ contents:
         reload
         template IN {{`{{ .Cluster.IngressVIPRecordType }}`}} {{ .DNS.Spec.BaseDomain }} {
             match .*.apps.{{ .DNS.Spec.BaseDomain }}
-            answer "{{`{{"{{ .Name }}"}}`}} 60 in {{`{{"{{ .Type }}"}}`}} {{ onPremPlatformIngressIP . }}"
+            answer "{{`{{"{{ .Name }}"}}`}} 60 in {{`{{"{{ .Type }}"}}`}} {{ .Infra.Status.PlatformStatus.BareMetal.IngressIP }}"
             fallthrough
         }
         template IN {{`{{ .Cluster.IngressVIPEmptyType }}`}} {{ .DNS.Spec.BaseDomain }} {
@@ -20,7 +20,7 @@ contents:
         }
         template IN {{`{{ .Cluster.APIVIPRecordType }}`}} {{ .DNS.Spec.BaseDomain }} {
             match api.{{ .DNS.Spec.BaseDomain }}
-            answer "{{`{{"{{ .Name }}"}}`}} 60 in {{`{{"{{ .Type }}"}}`}} {{ onPremPlatformAPIServerInternalIP . }}"
+            answer "{{`{{"{{ .Name }}"}}`}} 60 in {{`{{"{{ .Type }}"}}`}} {{ .Infra.Status.PlatformStatus.BareMetal.APIServerInternalIP }}"
             fallthrough
         }
         template IN {{`{{ .Cluster.APIVIPEmptyType }}`}} {{ .DNS.Spec.BaseDomain }} {
@@ -29,7 +29,7 @@ contents:
         }
         template IN {{`{{ .Cluster.APIVIPRecordType }}`}} {{ .DNS.Spec.BaseDomain }} {
             match api-int.{{ .DNS.Spec.BaseDomain }}
-            answer "{{`{{"{{ .Name }}"}}`}} 60 in {{`{{"{{ .Type }}"}}`}} {{ onPremPlatformAPIServerInternalIP . }}"
+            answer "{{`{{"{{ .Name }}"}}`}} 60 in {{`{{"{{ .Type }}"}}`}} {{ .Infra.Status.PlatformStatus.BareMetal.APIServerInternalIP }}"
             fallthrough
         }
         template IN {{`{{ .Cluster.APIVIPEmptyType }}`}} {{ .DNS.Spec.BaseDomain }} {
diff --git a/templates/common/on-prem/files/coredns.yaml b/templates/common/baremetal/files/baremetal-coredns.yaml
similarity index 89%
rename from templates/common/on-prem/files/coredns.yaml
rename to templates/common/baremetal/files/baremetal-coredns.yaml
index 4e4ddf010b..72abdecfcf 100644
--- a/templates/common/on-prem/files/coredns.yaml
+++ b/templates/common/baremetal/files/baremetal-coredns.yaml
@@ -6,11 +6,11 @@ contents:
     apiVersion: v1
     metadata:
       name: coredns
-      namespace: openshift-{{ onPremPlatformShortName . }}-infra
+      namespace: openshift-kni-infra
       creationTimestamp:
       deletionGracePeriodSeconds: 65
       labels:
-        app: {{ onPremPlatformShortName . }}-infra-mdns
+        app: kni-infra-mdns
     spec:
       volumes:
       - name: resource-dir
@@ -33,9 +33,9 @@ contents:
         - render
         - "/etc/kubernetes/kubeconfig"
         - "--api-vip"
-        - "{{ onPremPlatformAPIServerInternalIP . }}"
+        - "{{ .Infra.Status.PlatformStatus.BareMetal.APIServerInternalIP }}"
         - "--ingress-vip"
-        - "{{ onPremPlatformIngressIP . }}"
+        - "{{ .Infra.Status.PlatformStatus.BareMetal.IngressIP }}"
         - "/config"
         - "--out-dir"
         - "/etc/coredns"
@@ -94,9 +94,9 @@ contents:
         - "/config/Corefile.tmpl"
         - "/etc/coredns/Corefile"
         - "--api-vip"
-        - "{{ onPremPlatformAPIServerInternalIP . }}"
+        - "{{ .Infra.Status.PlatformStatus.BareMetal.APIServerInternalIP }}"
         - "--ingress-vip"
-        - "{{ onPremPlatformIngressIP . }}"
+        - "{{ .Infra.Status.PlatformStatus.BareMetal.IngressIP }}"
         resources:
           requests:
             cpu: 100m
diff --git a/templates/common/on-prem/files/keepalived.yaml b/templates/common/baremetal/files/baremetal-keepalived.yaml
similarity index 92%
rename from templates/common/on-prem/files/keepalived.yaml
rename to templates/common/baremetal/files/baremetal-keepalived.yaml
index 753cf4c3b2..d0ae8113f2 100644
--- a/templates/common/on-prem/files/keepalived.yaml
+++ b/templates/common/baremetal/files/baremetal-keepalived.yaml
@@ -6,11 +6,11 @@ contents:
     apiVersion: v1
     metadata:
       name: keepalived
-      namespace: openshift-{{ onPremPlatformShortName . }}-infra
+      namespace: openshift-kni-infra
       creationTimestamp:
       deletionGracePeriodSeconds: 65
       labels:
-        app: {{ onPremPlatformShortName . }}-infra-vrrp
+        app: kni-infra-vrrp
     spec:
       volumes:
       - name: resource-dir
@@ -41,9 +41,9 @@ contents:
        - render
         - "/etc/kubernetes/kubeconfig"
         - "--api-vip"
-        - "{{ onPremPlatformAPIServerInternalIP . }}"
+        - "{{ .Infra.Status.PlatformStatus.BareMetal.APIServerInternalIP }}"
         - "--ingress-vip"
-        - "{{ onPremPlatformIngressIP . }}"
+        - "{{ .Infra.Status.PlatformStatus.BareMetal.IngressIP }}"
         - "/config"
         - "--out-dir"
         - "/etc/keepalived"
@@ -124,7 +124,7 @@ contents:
         image: {{ .Images.baremetalRuntimeCfgImage }}
         env:
         - name: ENABLE_UNICAST
-          value: "{{ onPremPlatformKeepalivedEnableUnicast . }}"
+          value: "yes"
         - name: IS_BOOTSTRAP
           value: "no"
         command:
@@ -133,9 +133,9 @@ contents:
         - "/config/keepalived.conf.tmpl"
         - "/etc/keepalived/keepalived.conf"
         - "--api-vip"
-        - "{{ onPremPlatformAPIServerInternalIP . }}"
+        - "{{ .Infra.Status.PlatformStatus.BareMetal.APIServerInternalIP }}"
         - "--ingress-vip"
-        - "{{ onPremPlatformIngressIP . }}"
+        - "{{ .Infra.Status.PlatformStatus.BareMetal.IngressIP }}"
         resources:
           requests:
             cpu: 100m
diff --git a/templates/common/on-prem/files/mdns-publisher.yaml b/templates/common/baremetal/files/baremetal-mdns-publisher.yaml
similarity index 92%
rename from templates/common/on-prem/files/mdns-publisher.yaml
rename to templates/common/baremetal/files/baremetal-mdns-publisher.yaml
index 03ac02508f..dd888c7b41 100644
--- a/templates/common/on-prem/files/mdns-publisher.yaml
+++ b/templates/common/baremetal/files/baremetal-mdns-publisher.yaml
@@ -6,11 +6,11 @@ contents:
     apiVersion: v1
     metadata:
       name: mdns-publisher
-      namespace: openshift-{{ onPremPlatformShortName . }}-infra
+      namespace: openshift-kni-infra
       creationTimestamp:
       deletionGracePeriodSeconds: 65
       labels:
-        app: {{ onPremPlatformShortName . }}-infra-mdns
+        app: kni-infra-mdns
     spec:
       volumes:
       - name: resource-dir
@@ -60,9 +60,9 @@ contents:
         - render
         - "/etc/kubernetes/kubeconfig"
         - "--api-vip"
-        - "{{ onPremPlatformAPIServerInternalIP . }}"
+        - "{{ .Infra.Status.PlatformStatus.BareMetal.APIServerInternalIP }}"
         - "--ingress-vip"
-        - "{{ onPremPlatformIngressIP . }}"
+        - "{{ .Infra.Status.PlatformStatus.BareMetal.IngressIP }}"
         - "/config"
         - "--out-dir"
         - "/etc/mdns"
diff --git a/templates/common/on-prem/units/baremetal-system-connections-mount.yaml b/templates/common/baremetal/units/baremetal-system-connections-mount.yaml
similarity index 100%
rename from templates/common/on-prem/units/baremetal-system-connections-mount.yaml
rename to templates/common/baremetal/units/baremetal-system-connections-mount.yaml
diff --git a/templates/common/on-prem/units/nodeip-configuration.service.yaml b/templates/common/baremetal/units/nodeip-configuration.service.yaml
similarity index 95%
rename from templates/common/on-prem/units/nodeip-configuration.service.yaml
rename to templates/common/baremetal/units/nodeip-configuration.service.yaml
index 6c693166f3..fa2104bf66 100644
--- a/templates/common/on-prem/units/nodeip-configuration.service.yaml
+++ b/templates/common/baremetal/units/nodeip-configuration.service.yaml
@@ -25,7 +25,7 @@ contents: |
     {{ .Images.baremetalRuntimeCfgImage }} \
     node-ip \
     set --retry-on-failure \
-    {{ onPremPlatformAPIServerInternalIP . }}; \
+    {{.Infra.Status.PlatformStatus.BareMetal.APIServerInternalIP }}; \
     do \
     sleep 5; \
     done"
diff --git a/templates/common/openstack/files/NetworkManager-mdns-hostname.yaml b/templates/common/openstack/files/NetworkManager-mdns-hostname.yaml
new file mode 100644
index 0000000000..42c010552f
--- /dev/null
+++ b/templates/common/openstack/files/NetworkManager-mdns-hostname.yaml
@@ -0,0 +1,21 @@
+mode: 0755
+path: "/etc/NetworkManager/dispatcher.d/40-mdns-hostname"
+contents:
+  inline: |
+    #!/bin/bash
+    STATUS=$2
+    case "$STATUS" in
+      up|down|dhcp4-change|dhcp6-change|hostname)
+        logger -s "NM mdns-hostname triggered by ${2}."
+        set +e
+        t_hostname=$(hostname)
+        if [ -z "${t_hostname}" ]; then
+          t_hostname="localhost"
+        fi
+        mkdir -p /etc/mdns
+        echo "${t_hostname}">/etc/mdns/hostname
+        logger -s "Hostname changed: ${t_hostname}"
+        ;;
+      *)
+        ;;
+    esac
diff --git a/templates/common/openstack/files/NetworkManager-resolv-prepender.yaml b/templates/common/openstack/files/NetworkManager-resolv-prepender.yaml
new file mode 100644
index 0000000000..daab1c7f2d
--- /dev/null
+++ b/templates/common/openstack/files/NetworkManager-resolv-prepender.yaml
@@ -0,0 +1,52 @@
+mode: 0755
+path: "/etc/NetworkManager/dispatcher.d/30-resolv-prepender"
+contents:
+  inline: |
+    #!/bin/bash
+    set -eo pipefail
+    IFACE=$1
+    STATUS=$2
+
+    {{if .Proxy -}}
+    {{if .Proxy.HTTPProxy -}}
+    HTTP_PROXY={{.Proxy.HTTPProxy}}
+    {{end -}}
+    {{if .Proxy.HTTPSProxy -}}
+    HTTPS_PROXY={{.Proxy.HTTPSProxy}}
+    {{end -}}
+    {{if .Proxy.NoProxy -}}
+    NO_PROXY={{.Proxy.NoProxy}}
+    {{end -}}
+    {{end -}}
+
+    case "$STATUS" in
+      up|down|dhcp4-change|dhcp6-change)
+        logger -s "NM resolv-prepender triggered by ${1} ${2}."
+
+        # Ensure resolv.conf exists before we try to run podman
+        if [[ ! -e /etc/resolv.conf ]] || ! grep -q nameserver /etc/resolv.conf; then
+          cp /var/run/NetworkManager/resolv.conf /etc/resolv.conf
+        fi
+
+        NAMESERVER_IP=$(/usr/bin/podman run --rm \
+          --authfile /var/lib/kubelet/config.json \
+          --net=host \
+          {{ .Images.baremetalRuntimeCfgImage }} \
+          node-ip \
+          show \
+          "{{.Infra.Status.PlatformStatus.OpenStack.APIServerInternalIP}}" \
+          "{{.Infra.Status.PlatformStatus.OpenStack.IngressIP}}")
+        DOMAIN="{{.DNS.Spec.BaseDomain}}"
+        if [[ -n "$NAMESERVER_IP" ]]; then
+          logger -s "NM resolv-prepender: Prepending 'nameserver $NAMESERVER_IP' to /etc/resolv.conf (other nameservers from /var/run/NetworkManager/resolv.conf)"
+          sed -e "/^search/d" \
+              -e "/Generated by/c# Generated by OpenStack resolv prepender NM dispatcher script\nsearch $DOMAIN\nnameserver $NAMESERVER_IP" \
+              /var/run/NetworkManager/resolv.conf > /etc/resolv.tmp
+        fi
+        # Only leave the first 3 nameservers in /etc/resolv.conf
+        sed -i ':a $!{N; ba}; s/\(^\|\n\)nameserver/\n# nameserver/4g' /etc/resolv.tmp
+        mv -f /etc/resolv.tmp /etc/resolv.conf
+        ;;
+      *)
+        ;;
+    esac
diff --git a/templates/common/openstack/files/openstack-NetworkManager-conf.yaml b/templates/common/openstack/files/openstack-NetworkManager-conf.yaml
new file mode 100644
index 0000000000..064def6a12
--- /dev/null
+++ b/templates/common/openstack/files/openstack-NetworkManager-conf.yaml
@@ -0,0 +1,9 @@
+mode: 0644
+path: "/etc/NetworkManager/conf.d/99-openstack.conf"
+contents:
+  inline: |
+    [main]
+    rc-manager=unmanaged
+    [connection]
+    ipv6.dhcp-duid=ll
+    ipv6.dhcp-iaid=mac
diff --git a/templates/common/openstack/files/coredns-corefile.yaml b/templates/common/openstack/files/openstack-coredns-corefile.yaml
similarity index 100%
rename from templates/common/openstack/files/coredns-corefile.yaml
rename to templates/common/openstack/files/openstack-coredns-corefile.yaml
diff --git a/templates/common/openstack/files/openstack-coredns-db.yaml b/templates/common/openstack/files/openstack-coredns-db.yaml
index c06e20bbad..aae8f92d87 100644
--- a/templates/common/openstack/files/openstack-coredns-db.yaml
+++ b/templates/common/openstack/files/openstack-coredns-db.yaml
@@ -10,7 +10,7 @@ contents:
                                     1209600    ; expire (2 weeks)
                                     3600       ; minimum (1 hour)
                                     )
-    api-int  IN  A  {{ onPremPlatformAPIServerInternalIP . }}
-    api      IN  A  {{ onPremPlatformAPIServerInternalIP . }}
+    api-int  IN  A  {{ .Infra.Status.PlatformStatus.OpenStack.APIServerInternalIP }}
+    api      IN  A  {{ .Infra.Status.PlatformStatus.OpenStack.APIServerInternalIP }}
 
-    *.apps  IN  A  {{ onPremPlatformIngressIP . }}
+    *.apps  IN  A  {{ .Infra.Status.PlatformStatus.OpenStack.IngressIP }}
diff --git a/templates/common/openstack/files/openstack-coredns.yaml b/templates/common/openstack/files/openstack-coredns.yaml
new file mode 100644
index 0000000000..d97341464a
--- /dev/null
+++ b/templates/common/openstack/files/openstack-coredns.yaml
@@ -0,0 +1,88 @@
+mode: 0644
+path: "/etc/kubernetes/manifests/coredns.yaml"
+contents:
+  inline: |
+    kind: Pod
+    apiVersion: v1
+    metadata:
+      name: coredns
+      namespace: openshift-openstack-infra
+      creationTimestamp:
+      deletionGracePeriodSeconds: 65
+      labels:
+        app: openstack-infra-mdns
+    spec:
+      volumes:
+      - name: resource-dir
+        hostPath:
+          path: "/etc/kubernetes/static-pod-resources/coredns"
+      - name: kubeconfig
+        hostPath:
+          path: "/etc/kubernetes/kubeconfig"
+      - name: conf-dir
+        hostPath:
+          path: "/etc/coredns"
+      initContainers:
+      - name: render-config-coredns
+        image: {{ .Images.baremetalRuntimeCfgImage }}
+        command:
+        - runtimecfg
+        - render
+        - "/etc/kubernetes/kubeconfig"
+        - "--api-vip"
+        - "{{ .Infra.Status.PlatformStatus.OpenStack.APIServerInternalIP }}"
+        - "--ingress-vip"
+        - "{{ .Infra.Status.PlatformStatus.OpenStack.IngressIP }}"
+        - "/config"
+        - "--out-dir"
+        - "/etc/coredns"
+        resources: {}
+        volumeMounts:
+        - name: kubeconfig
+          mountPath: "/etc/kubernetes/kubeconfig"
+        - name: resource-dir
+          mountPath: "/config"
+        - name: conf-dir
+          mountPath: "/etc/coredns"
+        imagePullPolicy: IfNotPresent
+      containers:
+      - name: coredns
+        securityContext:
+          privileged: true
+        image: {{.Images.corednsImage}}
+        args:
+        - "--conf"
+        - "/etc/coredns/Corefile"
+        resources:
+          requests:
+            cpu: 100m
+            memory: 200Mi
+        volumeMounts:
+        - name: conf-dir
+          mountPath: "/etc/coredns"
+        readinessProbe:
+          httpGet:
+            path: /health
+            port: 18080
+            scheme: HTTP
+          initialDelaySeconds: 10
+          periodSeconds: 10
+          successThreshold: 1
+          failureThreshold: 3
+          timeoutSeconds: 10
+        livenessProbe:
+          httpGet:
+            path: /health
+            port: 18080
+            scheme: HTTP
+          initialDelaySeconds: 60
+          timeoutSeconds: 5
+          successThreshold: 1
+          failureThreshold: 5
+        terminationMessagePolicy: FallbackToLogsOnError
+        imagePullPolicy: IfNotPresent
+      hostNetwork: true
+      tolerations:
+      - operator: Exists
+      priorityClassName: system-node-critical
+    status: {}
diff --git a/templates/common/openstack/files/openstack-keepalived.yaml b/templates/common/openstack/files/openstack-keepalived.yaml
new file mode 100644
index 0000000000..82712ddab7
--- /dev/null
+++ b/templates/common/openstack/files/openstack-keepalived.yaml
@@ -0,0 +1,159 @@
+mode: 0644
+path: "/etc/kubernetes/manifests/keepalived.yaml"
+contents:
+  inline: |
+    kind: Pod
+    apiVersion: v1
+    metadata:
+      name: keepalived
+      namespace: openshift-openstack-infra
+      creationTimestamp:
+      deletionGracePeriodSeconds: 65
+      labels:
+        app: openstack-infra-vrrp
+    spec:
+      volumes:
+      - name: resource-dir
+        hostPath:
+          path: "/etc/kubernetes/static-pod-resources/keepalived"
+      - name: script-dir
+        hostPath:
+          path: "/etc/kubernetes/static-pod-resources/keepalived/scripts"
+      - name: kubeconfig
+        hostPath:
+          path: "/etc/kubernetes"
+      - name: kubeconfigvarlib
+        hostPath:
+          path: "/var/lib/kubelet"
+      - name: conf-dir
+        hostPath:
+          path: "/etc/keepalived"
+      - name: run-dir
+        empty-dir: {}
+      - name: chroot-host
+        hostPath:
+          path: "/"
+      initContainers:
+      - name: render-config-keepalived
+        image: {{ .Images.baremetalRuntimeCfgImage }}
+        command:
+        - runtimecfg
+        - render
+        - "/etc/kubernetes/kubeconfig"
+        - "--api-vip"
+        - "{{ .Infra.Status.PlatformStatus.OpenStack.APIServerInternalIP }}"
+        - "--ingress-vip"
+        - "{{ .Infra.Status.PlatformStatus.OpenStack.IngressIP }}"
+        - "/config"
+        - "--out-dir"
+        - "/etc/keepalived"
+        resources: {}
+        volumeMounts:
+        - name: kubeconfig
+          mountPath: "/etc/kubernetes"
+        - name: script-dir
+          mountPath: "/config"
+        - name: conf-dir
+          mountPath: "/etc/keepalived"
+        imagePullPolicy: IfNotPresent
+      containers:
+      - name: keepalived
+        securityContext:
+          privileged: true
+        image: {{.Images.keepalivedImage}}
+        env:
+        - name: NSS_SDB_USE_CACHE
+          value: "no"
+        command:
+        - /bin/bash
+        - -c
+        - |
+          #/bin/bash
+          reload_keepalived()
+          {
+            if pid=$(pgrep -o keepalived); then
+              kill -s SIGHUP "$pid"
+            else
+              /usr/sbin/keepalived -f /etc/keepalived/keepalived.conf --dont-fork --vrrp --log-detail --log-console &
+            fi
+          }
+
+          msg_handler()
+          {
+            while read -r line; do
+              echo "The client sent: $line" >&2
+              # currently only 'reload' msg is supported
+              if [ "$line" = reload ]; then
+                reload_keepalived
+              fi
+            done
+          }
+
+          set -ex
+          declare -r keepalived_sock="/var/run/keepalived/keepalived.sock"
+          export -f msg_handler
+          export -f reload_keepalived
+          if [ -s "/etc/keepalived/keepalived.conf" ]; then
+            /usr/sbin/keepalived -f /etc/keepalived/keepalived.conf --dont-fork --vrrp --log-detail --log-console &
+          fi
+
+          rm -f "$keepalived_sock"
+          socat UNIX-LISTEN:${keepalived_sock},fork system:'bash -c msg_handler'
+        resources:
+          requests:
+            cpu: 100m
+            memory: 200Mi
+        volumeMounts:
+        - name: conf-dir
+          mountPath: "/etc/keepalived"
+        - name: run-dir
+          mountPath: "/var/run/keepalived"
+        livenessProbe:
+          exec:
+            command:
+            - /bin/bash
+            - -c
+            - |
+              kill -s SIGUSR1 "$(pgrep -o keepalived)" && ! grep -q "State = FAULT" /tmp/keepalived.data
+          initialDelaySeconds: 20
+        terminationMessagePolicy: FallbackToLogsOnError
+        imagePullPolicy: IfNotPresent
+      - name: keepalived-monitor
+        securityContext:
+          privileged: true
+        image: {{ .Images.baremetalRuntimeCfgImage }}
+        env:
+        - name: ENABLE_UNICAST
+          value: "no"
+        - name: IS_BOOTSTRAP
+          value: "no"
+        command:
+        - dynkeepalived
+        - "/var/lib/kubelet/kubeconfig"
+        - "/config/keepalived.conf.tmpl"
+        - "/etc/keepalived/keepalived.conf"
+        - "--api-vip"
+        - "{{ .Infra.Status.PlatformStatus.OpenStack.APIServerInternalIP }}"
+        - "--ingress-vip"
+        - "{{ .Infra.Status.PlatformStatus.OpenStack.IngressIP }}"
+        resources:
+          requests:
+            cpu: 100m
+            memory: 200Mi
+        volumeMounts:
+        - name: resource-dir
+          mountPath: "/config"
+        - name: kubeconfigvarlib
+          mountPath: "/var/lib/kubelet"
+        - name: conf-dir
+          mountPath: "/etc/keepalived"
+        - name: run-dir
+          mountPath: "/var/run/keepalived"
+        - name: chroot-host
+          mountPath: "/host"
+        imagePullPolicy: IfNotPresent
+      hostNetwork: true
+      tolerations:
+      - operator: Exists
+      priorityClassName: system-node-critical
+    status: {}
diff --git a/templates/common/openstack/files/openstack-mdns-publisher.yaml b/templates/common/openstack/files/openstack-mdns-publisher.yaml
new file mode 100644
index 0000000000..19d8bca2f3
--- /dev/null
+++ b/templates/common/openstack/files/openstack-mdns-publisher.yaml
@@ -0,0 +1,104 @@
+mode: 0644
+path: "/etc/kubernetes/manifests/mdns-publisher.yaml"
+contents:
+  inline: |
+    kind: Pod
+    apiVersion: v1
+    metadata:
+      name: mdns-publisher
+      namespace: openshift-openstack-infra
+      creationTimestamp:
+      deletionGracePeriodSeconds: 65
+      labels:
+        app: openstack-infra-mdns
+    spec:
+      volumes:
+      - name: resource-dir
+        hostPath:
+          path: "/etc/kubernetes/static-pod-resources/mdns"
+      - name: kubeconfig
+        hostPath:
+          path: "/etc/kubernetes/kubeconfig"
+      - name: conf-dir
+        hostPath:
+          path: "/etc/mdns"
+      initContainers:
+      - name: verify-hostname
+        image: {{ .Images.baremetalRuntimeCfgImage }}
+        env:
+        - name: DEFAULT_LOCAL_HOSTNAME
+          value: "localhost"
+        - name: RUNTIMECFG_HOSTNAME_PATH
+          value: "/etc/mdns/hostname"
+        command:
+        - "/bin/bash"
+        - "-c"
+        - |
+          #/bin/bash
+          function get_hostname()
+          {
+            if [[ -s $RUNTIMECFG_HOSTNAME_PATH ]]; then
+              cat $RUNTIMECFG_HOSTNAME_PATH
+            else
+              # if hostname wasn't updated by NM script, read hostname
+              hostname
+            fi
+          }
+          while [ "$(get_hostname)" == "$DEFAULT_LOCAL_HOSTNAME" ]
+          do
+            echo "hostname is still ${DEFAULT_LOCAL_HOSTNAME}"
+            sleep 1
+          done
+        volumeMounts:
+        - name: conf-dir
+          mountPath: "/etc/mdns"
+      - name: render-config-mdns-publisher
+        image: {{ .Images.baremetalRuntimeCfgImage }}
+        env:
+        - name: RUNTIMECFG_HOSTNAME_PATH
+          value: "/etc/mdns/hostname"
+        command:
+        - runtimecfg
+        - render
+        - "/etc/kubernetes/kubeconfig"
+        - "--api-vip"
+        - "{{ .Infra.Status.PlatformStatus.OpenStack.APIServerInternalIP }}"
+        - "--ingress-vip"
+        - "{{ .Infra.Status.PlatformStatus.OpenStack.IngressIP }}"
+        - "/config"
+        - "--out-dir"
+        - "/etc/mdns"
+        resources: {}
+        volumeMounts:
+        - name: kubeconfig
+          mountPath: "/etc/kubernetes/kubeconfig"
+        - name: resource-dir
+          mountPath: "/config"
+        - name: conf-dir
+          mountPath: "/etc/mdns"
+        imagePullPolicy: IfNotPresent
+      containers:
+      - name: mdns-publisher
+        image: {{.Images.mdnsPublisherImage}}
+        args:
+        - "--debug"
+        resources:
+          requests:
+            cpu: 100m
+            memory: 200Mi
+        volumeMounts:
+        - name: conf-dir
+          mountPath: "/etc/mdns"
+        livenessProbe:
+          exec:
+            command:
+            - pgrep
+            - mdns-publisher
+          initialDelaySeconds: 10
+        terminationMessagePolicy: FallbackToLogsOnError
+        imagePullPolicy: IfNotPresent
+      hostNetwork: true
+      tolerations:
+      - operator: Exists
+      priorityClassName: system-node-critical
+    status: {}
diff --git a/templates/common/on-prem/units/afterburn-hostname.service.yaml b/templates/common/openstack/units/afterburn-hostname.service.yaml
similarity index 100%
rename from templates/common/on-prem/units/afterburn-hostname.service.yaml
rename to templates/common/openstack/units/afterburn-hostname.service.yaml
diff --git a/templates/common/openstack/units/nodeip-configuration.service.yaml b/templates/common/openstack/units/nodeip-configuration.service.yaml
new file mode 100644
index 0000000000..be2b92b0f4
--- /dev/null
+++ b/templates/common/openstack/units/nodeip-configuration.service.yaml
@@ -0,0 +1,47 @@
+name: nodeip-configuration.service
+enabled: true
+contents: |
+  [Unit]
+  Description=Writes IP address configuration so that kubelet and crio services select a valid node IP
+  # This only applies to VIP managing environments where the kubelet and crio IP
+  # address picking logic is flawed and may end up selecting an address from a
+  # different subnet or a deprecated address
+  Wants=network-online.target
+  After=network-online.target ignition-firstboot-complete.service
+  Before=kubelet.service crio.service
+
+  [Service]
+  # Need oneshot to delay kubelet
+  Type=oneshot
+  # Would prefer to do Restart=on-failure instead of this bash retry loop, but
+  # the version of systemd we have right now doesn't support it. It should be
+  # available in systemd v244 and higher.
+  ExecStart=/bin/bash -c " \
+    until \
+    /usr/bin/podman run --rm \
+    --authfile /var/lib/kubelet/config.json \
+    --volume /etc/systemd/system:/etc/systemd/system:z \
+    --net=host \
+    {{ .Images.baremetalRuntimeCfgImage }} \
+    node-ip \
+    set --retry-on-failure \
+    {{.Infra.Status.PlatformStatus.OpenStack.APIServerInternalIP }}; \
+    do \
+    sleep 5; \
+    done"
+
+  {{if .Proxy -}}
+  {{if .Proxy.HTTPProxy -}}
+  Environment=HTTP_PROXY={{.Proxy.HTTPProxy}}
+  {{end -}}
+  {{if .Proxy.HTTPSProxy -}}
+  Environment=HTTPS_PROXY={{.Proxy.HTTPSProxy}}
+  {{end -}}
+  {{if .Proxy.NoProxy -}}
+  Environment=NO_PROXY={{.Proxy.NoProxy}}
+  {{end -}}
+  {{end -}}
+
+  [Install]
+  WantedBy=multi-user.target
+
diff --git a/templates/common/ovirt/files/NetworkManager-resolv-prepender.yaml b/templates/common/ovirt/files/NetworkManager-resolv-prepender.yaml
new file mode 100644
index 0000000000..1e56c333c8
--- /dev/null
+++ b/templates/common/ovirt/files/NetworkManager-resolv-prepender.yaml
@@ -0,0 +1,53 @@
+filesystem: "root"
+mode: 0755
+path: "/etc/NetworkManager/dispatcher.d/30-resolv-prepender"
+contents:
+  inline: |
+    #!/bin/bash
+    set -eo pipefail
+    IFACE=$1
+    STATUS=$2
+
+    {{if .Proxy -}}
+    {{if .Proxy.HTTPProxy -}}
+    HTTP_PROXY={{.Proxy.HTTPProxy}}
+    {{end -}}
+    {{if .Proxy.HTTPSProxy -}}
+    HTTPS_PROXY={{.Proxy.HTTPSProxy}}
+    {{end -}}
+    {{if .Proxy.NoProxy -}}
+    NO_PROXY={{.Proxy.NoProxy}}
+    {{end -}}
+    {{end -}}
+
+    case "$STATUS" in
+      up|down|dhcp4-change|dhcp6-change)
+        logger -s "NM resolv-prepender triggered by ${1} ${2}."
+
+        # Ensure resolv.conf exists before we try to run podman
+        if [[ ! -e /etc/resolv.conf ]] || ! grep -q nameserver /etc/resolv.conf; then
+          cp /var/run/NetworkManager/resolv.conf /etc/resolv.conf
+        fi
+
+        NAMESERVER_IP=$(/usr/bin/podman run --rm \
+          --authfile /var/lib/kubelet/config.json \
+          --net=host \
+          {{ .Images.baremetalRuntimeCfgImage }} \
+          node-ip \
+          show \
+          "{{.Infra.Status.PlatformStatus.Ovirt.APIServerInternalIP}}" \
+          "{{.Infra.Status.PlatformStatus.Ovirt.IngressIP}}")
+        DOMAIN="{{.DNS.Spec.BaseDomain}}"
+        if [[ -n "$NAMESERVER_IP" ]]; then
+          logger -s "NM resolv-prepender: Prepending 'nameserver $NAMESERVER_IP' to /etc/resolv.conf (other nameservers from /var/run/NetworkManager/resolv.conf)"
+          sed -e "/^search/d" \
+              -e "/Generated by/c# Generated by Ovirt resolv prepender NM dispatcher script\nsearch $DOMAIN\nnameserver $NAMESERVER_IP" \
+              /var/run/NetworkManager/resolv.conf > /etc/resolv.tmp
+        fi
+        # Only leave the first 3 nameservers in /etc/resolv.conf
+        sed -i ':a $!{N; ba}; s/\(^\|\n\)nameserver/\n# nameserver/4g' /etc/resolv.tmp
+        mv -f /etc/resolv.tmp /etc/resolv.conf
+        ;;
+      *)
+        ;;
+    esac
diff --git a/templates/common/ovirt/files/ovirt-NetworkManager-kni-conf.yaml b/templates/common/ovirt/files/ovirt-NetworkManager-kni-conf.yaml
new file mode 100644
index 0000000000..5a40b885a8
--- /dev/null
+++ b/templates/common/ovirt/files/ovirt-NetworkManager-kni-conf.yaml
@@ -0,0 +1,9 @@
+mode: 0644
+path: "/etc/NetworkManager/conf.d/99-kni.conf"
+contents:
+  inline: |
+    [main]
+    rc-manager=unmanaged
+    [connection]
+    ipv6.dhcp-duid=ll
+    ipv6.dhcp-iaid=mac
diff --git a/templates/common/ovirt/files/coredns-corefile.yaml b/templates/common/ovirt/files/ovirt-coredns-corefile.yaml
similarity index 100%
rename from templates/common/ovirt/files/coredns-corefile.yaml
rename to templates/common/ovirt/files/ovirt-coredns-corefile.yaml
diff --git a/templates/common/ovirt/files/ovirt-coredns-db.yaml b/templates/common/ovirt/files/ovirt-coredns-db.yaml
index c06e20bbad..e5dff59df8 100644
--- a/templates/common/ovirt/files/ovirt-coredns-db.yaml
+++ b/templates/common/ovirt/files/ovirt-coredns-db.yaml
@@ -10,7 +10,7 @@ contents:
                                     1209600    ; expire (2 weeks)
                                     3600       ; minimum (1 hour)
                                     )
-    api-int  IN  A  {{ onPremPlatformAPIServerInternalIP . }}
-    api      IN  A  {{ onPremPlatformAPIServerInternalIP . }}
+    api-int  IN  A  {{ .Infra.Status.PlatformStatus.Ovirt.APIServerInternalIP }}
+    api      IN  A  {{ .Infra.Status.PlatformStatus.Ovirt.APIServerInternalIP }}
 
-    *.apps  IN  A  {{ onPremPlatformIngressIP . }}
+    *.apps  IN  A  {{ .Infra.Status.PlatformStatus.Ovirt.IngressIP }}
diff --git a/templates/common/ovirt/files/ovirt-coredns.yaml b/templates/common/ovirt/files/ovirt-coredns.yaml
new file mode 100644
index 0000000000..79812743ea
--- /dev/null
+++ b/templates/common/ovirt/files/ovirt-coredns.yaml
@@ -0,0 +1,88 @@
+mode: 0644
+path: "/etc/kubernetes/manifests/coredns.yaml"
+contents:
+  inline: |
+    kind: Pod
+    apiVersion: v1
+    metadata:
+      name: coredns
+      namespace: openshift-ovirt-infra
+      creationTimestamp:
+      deletionGracePeriodSeconds: 65
+      labels:
+        app: ovirt-infra-mdns
+    spec:
+      volumes:
+      - name: resource-dir
+        hostPath:
+          path: "/etc/kubernetes/static-pod-resources/coredns"
+      - name: kubeconfig
+        hostPath:
+          path: "/etc/kubernetes/kubeconfig"
+      - name: conf-dir
+        hostPath:
+          path: "/etc/coredns"
+      initContainers:
+      - name: render-config-coredns
+        image: {{ .Images.baremetalRuntimeCfgImage }}
+        command:
+        - runtimecfg
+        - render
+        - "/etc/kubernetes/kubeconfig"
+        - "--api-vip"
+        - "{{ .Infra.Status.PlatformStatus.Ovirt.APIServerInternalIP }}"
+        - "--ingress-vip"
+        - "{{ .Infra.Status.PlatformStatus.Ovirt.IngressIP }}"
+        - "/config"
+        - "--out-dir"
+        - "/etc/coredns"
+        resources: {}
+        volumeMounts:
+        - name: kubeconfig
+          mountPath: "/etc/kubernetes/kubeconfig"
+        - name: resource-dir
+          mountPath: "/config"
+        - name: conf-dir
+          mountPath: "/etc/coredns"
+        imagePullPolicy: IfNotPresent
+      containers:
+      - name: coredns
+        securityContext:
+          privileged: true
+        image: {{.Images.corednsImage}}
+        args:
+        - "--conf"
+        - "/etc/coredns/Corefile"
+        resources:
+          requests:
+            cpu: 100m
+            memory: 200Mi
+        volumeMounts:
+        - name: conf-dir
+          mountPath: "/etc/coredns"
+        readinessProbe:
+          httpGet:
+            path: /health
+            port: 18080
+            scheme: HTTP
+          initialDelaySeconds: 10
+          periodSeconds: 10
+          successThreshold: 1
+          failureThreshold: 3
+          timeoutSeconds: 10
+        livenessProbe:
+          httpGet:
+            path: /health
+            port: 18080
+            scheme: HTTP
+          initialDelaySeconds: 60
+          timeoutSeconds: 5
+          successThreshold: 1
+          failureThreshold: 5
+        terminationMessagePolicy: FallbackToLogsOnError
+        imagePullPolicy: IfNotPresent
+      hostNetwork: true
+      tolerations:
+      - operator: Exists
+      priorityClassName: system-node-critical
+    status: {}
diff --git a/templates/common/ovirt/files/ovirt-keepalived.yaml b/templates/common/ovirt/files/ovirt-keepalived.yaml
new file mode 100644
index 0000000000..5464488473
--- /dev/null
+++ b/templates/common/ovirt/files/ovirt-keepalived.yaml
@@ -0,0 +1,115 @@
+mode: 0644
+path: "/etc/kubernetes/manifests/keepalived.yaml"
+contents:
+  inline: |
+    kind: Pod
+    apiVersion: v1
+    metadata:
+      name: keepalived
+      namespace: openshift-ovirt-infra
+      creationTimestamp:
+      deletionGracePeriodSeconds: 65
+      labels:
+        app: ovirt-infra-vrrp
+    spec:
+      volumes:
+      - name: resource-dir
+        hostPath:
+          path: "/etc/kubernetes/static-pod-resources/keepalived"
+      - name: kubeconfig
+        hostPath:
+          path: "/etc/kubernetes/kubeconfig"
+      - name: conf-dir
+        hostPath:
+          path: "/etc/keepalived"
+      - name: run-dir
+        empty-dir: {}
+      containers:
+      - name: keepalived
+        securityContext:
+          privileged: true
+        image: {{.Images.keepalivedImage}}
+        env:
+        - name: NSS_SDB_USE_CACHE
+          value: "no"
+        command:
+        - /bin/bash
+        - -c
+        - |
+          #/bin/bash
+          reload_keepalived()
+          {
+            if pid=$(pgrep -o keepalived); then
+              kill -s SIGHUP "$pid"
+            else
+              /usr/sbin/keepalived -f /etc/keepalived/keepalived.conf --dont-fork --vrrp --log-detail --log-console &
+            fi
+          }
+
+          msg_handler()
+          {
+            while read -r line; do
+              echo "The client sent: $line" >&2
+              # currently only 'reload' msg is supported
+              if [ "$line" = reload ]; then
+                reload_keepalived
+              fi
+            done
+          }
+
+          set -ex
+          declare -r keepalived_sock="/var/run/keepalived/keepalived.sock"
+          export -f msg_handler
+          export -f reload_keepalived
+          if [ -s "/etc/keepalived/keepalived.conf" ]; then
+            /usr/sbin/keepalived -f /etc/keepalived/keepalived.conf --dont-fork --vrrp --log-detail --log-console &
+          fi
+
+          rm -f "$keepalived_sock"
+          socat UNIX-LISTEN:${keepalived_sock},fork system:'bash -c msg_handler'
+        resources:
+          requests:
+            cpu: 100m
+            memory: 200Mi
+        volumeMounts:
+        - name: conf-dir
+          mountPath: "/etc/keepalived"
+        - name: run-dir
+          mountPath: "/var/run/keepalived"
+        livenessProbe:
+          exec:
+            command:
+            - /bin/sh
+            - -c
+            - |
+              [[ -s /etc/keepalived/keepalived.conf ]] || \
+              kill -s SIGUSR1 "$(pgrep -o keepalived)" && ! grep -q "State = FAULT" /tmp/keepalived.data
+          initialDelaySeconds: 10
+        terminationMessagePolicy: FallbackToLogsOnError
+        imagePullPolicy: IfNotPresent
+      - name: keepalived-monitor
+        image: {{ .Images.baremetalRuntimeCfgImage }}
+        command:
+        - dynkeepalived
+        - "/etc/kubernetes/kubeconfig"
+        - "/config/keepalived.conf.tmpl"
+        - "/etc/keepalived/keepalived.conf"
+        - "--api-vip"
+        - "{{ .Infra.Status.PlatformStatus.Ovirt.APIServerInternalIP }}"
+        - "--ingress-vip"
+        - "{{ .Infra.Status.PlatformStatus.Ovirt.IngressIP }}"
+        volumeMounts:
+        - name: resource-dir
+          mountPath: "/config"
+        - name: kubeconfig
+          mountPath: "/etc/kubernetes/kubeconfig"
+        - name: conf-dir
+          mountPath: "/etc/keepalived"
+        - name: run-dir
+          mountPath: "/var/run/keepalived"
+        imagePullPolicy: IfNotPresent
+      hostNetwork: true
+      tolerations:
+      - operator: Exists
+      priorityClassName: system-node-critical
+    status: {}
diff --git a/templates/common/ovirt/files/ovirt-mdns-publisher.yaml b/templates/common/ovirt/files/ovirt-mdns-publisher.yaml
new file mode 100644
index 0000000000..8a60d00ec7
--- /dev/null
+++ b/templates/common/ovirt/files/ovirt-mdns-publisher.yaml
@@ -0,0 +1,72 @@
+mode: 0644
+path: "/etc/kubernetes/manifests/mdns-publisher.yaml"
+contents:
+  inline: |
+    kind: Pod
+    apiVersion: v1
+    metadata:
+      name: mdns-publisher
+      namespace: openshift-ovirt-infra
+      creationTimestamp:
+      deletionGracePeriodSeconds: 65
+      labels:
+        app: ovirt-infra-mdns
+    spec:
+      volumes:
+      - name: resource-dir
+        hostPath:
+          path: "/etc/kubernetes/static-pod-resources/mdns"
+      - name: kubeconfig
+        hostPath:
+          path: "/etc/kubernetes/kubeconfig"
+      - name: conf-dir
+        hostPath:
+          path: "/etc/mdns"
+      initContainers:
+      - name: render-config-mdns-publisher
+        image: {{ .Images.baremetalRuntimeCfgImage }}
+        command:
+        - runtimecfg
+        - render
+        - "/etc/kubernetes/kubeconfig"
+        - "--api-vip"
+        - "{{ .Infra.Status.PlatformStatus.Ovirt.APIServerInternalIP }}"
+        - "--ingress-vip"
+        - "{{ .Infra.Status.PlatformStatus.Ovirt.IngressIP }}"
+        - "/config"
+        - "--out-dir"
+        - "/etc/mdns"
+        resources: {}
+        volumeMounts:
+        - name: kubeconfig
+          mountPath: "/etc/kubernetes/kubeconfig"
+        - name: resource-dir
+          mountPath: "/config"
+        - name: conf-dir
+          mountPath: "/etc/mdns"
+        imagePullPolicy: IfNotPresent
+      containers:
+      - name: mdns-publisher
+        image: {{.Images.mdnsPublisherImage}}
+        args:
+        - "--debug"
+        resources:
+          requests:
+            cpu: 100m
+            memory: 200Mi
+        volumeMounts:
+        - name: conf-dir
+          mountPath: "/etc/mdns"
+        livenessProbe:
+          exec:
+            command:
+            - pgrep
+            - mdns-publisher
+          initialDelaySeconds: 10
+        terminationMessagePolicy: FallbackToLogsOnError
+        imagePullPolicy: IfNotPresent
+      hostNetwork: true
+      tolerations:
+      - operator: Exists
+      priorityClassName: system-node-critical
+    status: {}
diff --git a/templates/common/vsphere/files/NetworkManager-mdns-hostname.yaml b/templates/common/vsphere/files/NetworkManager-mdns-hostname.yaml
new file mode 100644
index 0000000000..3fd33389e9
--- /dev/null
+++ b/templates/common/vsphere/files/NetworkManager-mdns-hostname.yaml
@@ -0,0 +1,31 @@
+mode: 0755
+path: "/etc/NetworkManager/dispatcher.d/40-mdns-hostname"
+contents:
+  inline: |
+    {{ if .Infra -}}
+    {{ if .Infra.Status -}}
+    {{ if .Infra.Status.PlatformStatus -}}
+    {{ if .Infra.Status.PlatformStatus.VSphere -}}
+    {{ if .Infra.Status.PlatformStatus.VSphere.APIServerInternalIP -}}
+    #!/bin/bash
+    STATUS=$2
+    case "$STATUS" in
+      up|down|dhcp4-change|dhcp6-change|hostname)
+        logger -s "NM mdns-hostname triggered by ${2}."
+        set +e
+        t_hostname=$(hostname)
+        if [ -z "${t_hostname}" ]; then
+          t_hostname="localhost"
+        fi
+        mkdir -p /etc/mdns
+        echo "${t_hostname}">/etc/mdns/hostname
+        logger -s "Hostname changed: ${t_hostname}"
+        ;;
+      *)
+        ;;
+    esac
+    {{ end -}}
+    {{ end -}}
+    {{ end -}}
+    {{ end -}}
+    {{ end -}}
diff --git a/templates/common/vsphere/files/NetworkManager-resolv-prepender.yaml b/templates/common/vsphere/files/NetworkManager-resolv-prepender.yaml
new file mode 100644
index 0000000000..b14e8c6a0a
--- /dev/null
+++ b/templates/common/vsphere/files/NetworkManager-resolv-prepender.yaml
@@ -0,0 +1,64 @@
+mode: 0755
+path: "/etc/NetworkManager/dispatcher.d/30-resolv-prepender"
+contents:
+  inline: |
+    {{ if .Infra -}}
+    {{ if .Infra.Status -}}
+    {{ if .Infra.Status.PlatformStatus -}}
+    {{ if .Infra.Status.PlatformStatus.VSphere -}}
+    {{ if .Infra.Status.PlatformStatus.VSphere.APIServerInternalIP -}}
+    #!/bin/bash
+    set -eo pipefail
+    IFACE=$1
+    STATUS=$2
+
+    {{if .Proxy -}}
+    {{if .Proxy.HTTPProxy -}}
+    HTTP_PROXY={{.Proxy.HTTPProxy}}
+    {{end -}}
+    {{if .Proxy.HTTPSProxy -}}
+    HTTPS_PROXY={{.Proxy.HTTPSProxy}}
+    {{end -}}
+    {{if .Proxy.NoProxy -}}
+    NO_PROXY={{.Proxy.NoProxy}}
+    {{end -}}
+    {{end -}}
+
+    # If $DHCP6_FQDN_FQDN is not empty and is not localhost.localdomain
+    [[ -n "$DHCP6_FQDN_FQDN" && "$DHCP6_FQDN_FQDN" != "localhost.localdomain" && "$DHCP6_FQDN_FQDN" =~ "." ]] && hostnamectl set-hostname --static --transient $DHCP6_FQDN_FQDN
+    case "$STATUS" in
+      up|down|dhcp4-change|dhcp6-change)
+        logger -s "NM resolv-prepender triggered by ${1} ${2}."
+
+        # Ensure resolv.conf exists before we try to run podman
+        if [[ ! -e /etc/resolv.conf ]] || ! grep -q nameserver /etc/resolv.conf; then
+          cp /var/run/NetworkManager/resolv.conf /etc/resolv.conf
+        fi
+
+        NAMESERVER_IP=$(/usr/bin/podman run --rm \
+          --authfile /var/lib/kubelet/config.json \
+          --net=host \
+          {{ .Images.baremetalRuntimeCfgImage }} \
+          node-ip \
+          show \
+          "{{.Infra.Status.PlatformStatus.VSphere.APIServerInternalIP}}" \
+          "{{.Infra.Status.PlatformStatus.VSphere.IngressIP}}")
+        DOMAIN="{{.DNS.Spec.BaseDomain}}"
+        if [[ -n "$NAMESERVER_IP" ]]; then
+          logger -s "NM resolv-prepender: Prepending 'nameserver $NAMESERVER_IP' to /etc/resolv.conf (other nameservers from /var/run/NetworkManager/resolv.conf)"
+          sed -e "/^search/d" \
+              -e "/Generated by/c# Generated by KNI resolv prepender NM dispatcher script\nsearch $DOMAIN\nnameserver $NAMESERVER_IP" \
+              /var/run/NetworkManager/resolv.conf > /etc/resolv.tmp
+        fi
+        # Only leave the first 3 nameservers in /etc/resolv.conf
+        sed -i ':a $!{N; ba}; s/\(^\|\n\)nameserver/\n# nameserver/4g' /etc/resolv.tmp
+        mv -f /etc/resolv.tmp /etc/resolv.conf
+        ;;
+      *)
+        ;;
+    esac
+    {{ end -}}
+    {{ end -}}
+    {{ end -}}
+    {{ end -}}
+    {{ end -}}
diff --git a/templates/common/vsphere/files/coredns-corefile.yaml b/templates/common/vsphere/files/coredns-corefile.yaml
deleted file mode 100644
index 7c9f8ad2a8..0000000000
--- a/templates/common/vsphere/files/coredns-corefile.yaml
+++ /dev/null
@@ -1,24 +0,0 @@
-mode: 0644
-path: "/etc/kubernetes/static-pod-resources/coredns/Corefile.tmpl"
-contents:
-  inline: |
-    {{ if (onPremPlatformAPIServerInternalIP .) -}}
-    . {
-        errors
-        health :18080
-        mdns {{ .DNS.Spec.BaseDomain }} 0 {{`{{.Cluster.Name}}`}} {{`{{.NonVirtualIP}}`}}
-        forward . {{`{{- range $upstream := .DNSUpstreams}} {{$upstream}}{{- end}}`}}
-        cache 30
-        reload
-        hosts {
-            {{ onPremPlatformAPIServerInternalIP . }} api-int.{{ .DNS.Spec.BaseDomain }}
-            {{ onPremPlatformAPIServerInternalIP . }} api.{{ .DNS.Spec.BaseDomain }}
-            fallthrough
-        }
-        template IN A {{ .DNS.Spec.BaseDomain }} {
-            match .*.apps.{{ .DNS.Spec.BaseDomain }}
-            answer "{{`{{"{{ .Name }}"}}`}} 60 in a {{ onPremPlatformIngressIP . }}"
-            fallthrough
-        }
-    }
-    {{ end -}}
diff --git a/templates/common/vsphere/files/vsphere-NetworkManager-kni-conf.yaml b/templates/common/vsphere/files/vsphere-NetworkManager-kni-conf.yaml
new file mode 100644
index 0000000000..78dd1be349
--- /dev/null
+++ b/templates/common/vsphere/files/vsphere-NetworkManager-kni-conf.yaml
@@ -0,0 +1,19 @@
+mode: 0644
+path: "/etc/NetworkManager/conf.d/99-kni.conf"
+contents:
+  inline: |
+    {{ if .Infra -}}
+    {{ if .Infra.Status -}}
+    {{ if .Infra.Status.PlatformStatus -}}
+    {{ if .Infra.Status.PlatformStatus.VSphere -}}
+    {{ if .Infra.Status.PlatformStatus.VSphere.APIServerInternalIP -}}
+    [main]
+    rc-manager=unmanaged
+    [connection]
+    ipv6.dhcp-duid=ll
+    ipv6.dhcp-iaid=mac
+    {{ end -}}
+    {{ end -}}
+    {{ end -}}
+    {{ end -}}
+    {{ end -}}
diff --git a/templates/common/vsphere/files/vsphere-coredns-corefile.yaml b/templates/common/vsphere/files/vsphere-coredns-corefile.yaml
new file mode 100644
index 0000000000..c628002cf7
--- /dev/null
+++ b/templates/common/vsphere/files/vsphere-coredns-corefile.yaml
@@ -0,0 +1,32 @@
+mode: 0644
+path: "/etc/kubernetes/static-pod-resources/coredns/Corefile.tmpl"
+contents:
+  inline: |
+    {{ if .Infra -}}
+    {{ if .Infra.Status -}}
+    {{ if .Infra.Status.PlatformStatus -}}
+    {{ if .Infra.Status.PlatformStatus.VSphere -}}
+    {{ if .Infra.Status.PlatformStatus.VSphere.APIServerInternalIP -}}
+    . {
+        errors
+        health :18080
+        mdns {{ .DNS.Spec.BaseDomain }} 0 {{`{{.Cluster.Name}}`}} {{`{{.NonVirtualIP}}`}}
+        forward . {{`{{- range $upstream := .DNSUpstreams}} {{$upstream}}{{- end}}`}}
+        cache 30
+        reload
+        hosts {
+            {{ .Infra.Status.PlatformStatus.VSphere.APIServerInternalIP }} api-int.{{ .DNS.Spec.BaseDomain }}
+            {{ .Infra.Status.PlatformStatus.VSphere.APIServerInternalIP }} api.{{ .DNS.Spec.BaseDomain }}
+            fallthrough
+        }
+        template IN A {{ .DNS.Spec.BaseDomain }} {
+            match .*.apps.{{ .DNS.Spec.BaseDomain }}
+            answer "{{`{{"{{ .Name }}"}}`}} 60 in a {{ .Infra.Status.PlatformStatus.VSphere.IngressIP }}"
+            fallthrough
+        }
+    }
+    {{ end -}}
+    {{ end -}}
+    {{ end -}}
+    {{ end -}}
+    {{ end -}}
diff --git a/templates/common/vsphere/files/vsphere-coredns.yaml b/templates/common/vsphere/files/vsphere-coredns.yaml
new file mode 100644
index 0000000000..6b519af3c9
--- /dev/null
+++ b/templates/common/vsphere/files/vsphere-coredns.yaml
@@ -0,0 +1,128 @@
+mode: 0644
+path: "/etc/kubernetes/manifests/coredns.yaml"
+contents:
+  inline: |
+    {{ if .Infra -}}
+    {{ if .Infra.Status -}}
+    {{ if .Infra.Status.PlatformStatus -}}
+    {{ if .Infra.Status.PlatformStatus.VSphere -}}
+    {{ if .Infra.Status.PlatformStatus.VSphere.APIServerInternalIP -}}
+    kind: Pod
+    apiVersion: v1
+    metadata:
+      name: coredns
+      namespace: openshift-vsphere-infra
+      creationTimestamp:
+      deletionGracePeriodSeconds: 65
+      labels:
+        app: vsphere-infra-mdns
+    spec:
+      volumes:
+      - name: resource-dir
+        hostPath:
+          path: "/etc/kubernetes/static-pod-resources/coredns"
+      - name: kubeconfig
+        hostPath:
+          path: "/etc/kubernetes/kubeconfig"
+      - name: conf-dir
+        hostPath:
+          path: "/etc/coredns"
+      - name: nm-resolv
+        hostPath:
+          path: "/var/run/NetworkManager"
+      initContainers:
+      - name: render-config-coredns
+        image: {{ .Images.baremetalRuntimeCfgImage }}
+        command:
+        - runtimecfg
+        - render
+        - "/etc/kubernetes/kubeconfig"
+        - "--api-vip"
+        - "{{ .Infra.Status.PlatformStatus.VSphere.APIServerInternalIP }}"
+        - "--ingress-vip"
+        - "{{ .Infra.Status.PlatformStatus.VSphere.IngressIP }}"
+        - "/config"
+        - "--out-dir"
+        - "/etc/coredns"
+        resources: {}
+        volumeMounts:
+        - name: kubeconfig
+          mountPath: "/etc/kubernetes/kubeconfig"
+        - name: resource-dir
+          mountPath: "/config"
+        - name: conf-dir
+          mountPath: "/etc/coredns"
+        imagePullPolicy: IfNotPresent
+      containers:
+      - name: coredns
+        securityContext:
+          privileged: true
+        image: {{.Images.corednsImage}}
+        args:
+        - "--conf"
+        - "/etc/coredns/Corefile"
+        resources:
+          requests:
+            cpu: 100m
+            memory: 200Mi
+        volumeMounts:
+        - name: conf-dir
+          mountPath: "/etc/coredns"
+        readinessProbe:
+          httpGet:
+            path: /health
+            port: 18080
+            scheme: HTTP
+          initialDelaySeconds: 10
+          periodSeconds: 10
+          successThreshold: 1
+          failureThreshold: 3
+          timeoutSeconds: 10
+        livenessProbe:
+          httpGet:
+            path: /health
+            port: 18080
+            scheme: HTTP
+          initialDelaySeconds: 60
+          timeoutSeconds: 5
+          successThreshold: 1
+          failureThreshold: 5
+        terminationMessagePolicy: FallbackToLogsOnError
+        imagePullPolicy: IfNotPresent
+      - name: coredns-monitor
+        securityContext:
+          privileged: true
+        image: {{ .Images.baremetalRuntimeCfgImage }}
+        command:
+        - corednsmonitor
+        - "/etc/kubernetes/kubeconfig"
+        - "/config/Corefile.tmpl"
+        - "/etc/coredns/Corefile"
+        - "--api-vip"
+        - "{{ .Infra.Status.PlatformStatus.VSphere.APIServerInternalIP }}"
+        - "--ingress-vip"
+        - "{{ .Infra.Status.PlatformStatus.VSphere.IngressIP }}"
+        resources:
+          requests:
+            cpu: 100m
+            memory: 200Mi
+        volumeMounts:
+        - name: kubeconfig
+          mountPath: "/etc/kubernetes/kubeconfig"
+        - name: resource-dir
+          mountPath: "/config"
+        - name: conf-dir
+          mountPath: "/etc/coredns"
+        - name: nm-resolv
+          mountPath: "/var/run/NetworkManager"
+        imagePullPolicy: IfNotPresent
+      hostNetwork: true
+      tolerations:
+      - operator: Exists
+      priorityClassName: system-node-critical
+    status: {}
+    {{ end -}}
+    {{ end -}}
+    {{ end -}}
+    {{ end -}}
+    {{ end -}}
diff --git a/templates/common/vsphere/files/vsphere-keepalived.yaml b/templates/common/vsphere/files/vsphere-keepalived.yaml
new file mode 100644
index 0000000000..077e0b70e2
--- /dev/null
+++ b/templates/common/vsphere/files/vsphere-keepalived.yaml
@@ -0,0 +1,169 @@
+mode: 0644
+path: "/etc/kubernetes/manifests/keepalived.yaml"
+contents:
+  inline: |
+    {{ if .Infra -}}
+    {{ if .Infra.Status -}}
+    {{ if .Infra.Status.PlatformStatus -}}
+    {{ if .Infra.Status.PlatformStatus.VSphere -}}
+    {{ if .Infra.Status.PlatformStatus.VSphere.APIServerInternalIP -}}
+    kind: Pod
+    apiVersion: v1
+    metadata:
+      name: keepalived
+      namespace: openshift-vsphere-infra
+      creationTimestamp:
+      deletionGracePeriodSeconds: 65
+      labels:
+        app: vsphere-infra-vrrp
+    spec:
+      volumes:
+      - name: resource-dir
+        hostPath:
+          path: "/etc/kubernetes/static-pod-resources/keepalived"
+      - name: script-dir
+        hostPath:
+          path: "/etc/kubernetes/static-pod-resources/keepalived/scripts"
+      - name: kubeconfig
+        hostPath:
+          path: "/etc/kubernetes"
+      - name: kubeconfigvarlib
+        hostPath:
+          path: "/var/lib/kubelet"
+      - name: conf-dir
+        hostPath:
+          path: "/etc/keepalived"
+      - name: run-dir
+        empty-dir: {}
+      - name: chroot-host
+        hostPath:
+          path: "/"
+      initContainers:
+      - name: render-config-keepalived
+        image: {{ .Images.baremetalRuntimeCfgImage }}
+        command:
+        - runtimecfg
+        - render
+        - "/etc/kubernetes/kubeconfig"
+        - "--api-vip"
+        - "{{ .Infra.Status.PlatformStatus.VSphere.APIServerInternalIP }}"
+        - "--ingress-vip"
+        - "{{ .Infra.Status.PlatformStatus.VSphere.IngressIP }}"
+        - "/config"
+        - "--out-dir"
+        - "/etc/keepalived"
+        resources: {}
+        volumeMounts:
+        - name: kubeconfig
+          mountPath: "/etc/kubernetes"
+        - name: script-dir
+          mountPath: "/config"
+        - name: conf-dir
+          mountPath: "/etc/keepalived"
+        imagePullPolicy: IfNotPresent
+      containers:
+      - name: keepalived
+        securityContext:
+          privileged: true
+        image: {{.Images.keepalivedImage}}
+        env:
+        - name: NSS_SDB_USE_CACHE
+          value: "no"
+        command:
+        - /bin/bash
+        - -c
+        - |
+          #/bin/bash
+          reload_keepalived()
+          {
+            if pid=$(pgrep -o keepalived); then
+              kill -s SIGHUP "$pid"
+            else
+              /usr/sbin/keepalived -f /etc/keepalived/keepalived.conf --dont-fork --vrrp --log-detail --log-console &
+            fi
+          }
+
+          msg_handler()
+          {
+            while read -r line; do
+              echo "The client sent: $line" >&2
+              # currently only 'reload' msg is supported
+              if [ "$line" = reload ]; then
+                reload_keepalived
+              fi
+            done
+          }
+
+          set -ex
+          declare -r keepalived_sock="/var/run/keepalived/keepalived.sock"
+          export -f msg_handler
+          export -f reload_keepalived
+          if [ -s "/etc/keepalived/keepalived.conf" ]; then
+            /usr/sbin/keepalived -f /etc/keepalived/keepalived.conf --dont-fork --vrrp --log-detail --log-console &
+          fi
+
+          rm -f "$keepalived_sock"
+          socat UNIX-LISTEN:${keepalived_sock},fork system:'bash -c msg_handler'
+        resources:
+          requests:
+            cpu: 100m
+            memory: 200Mi
+        volumeMounts:
+        - name: conf-dir
+          mountPath: "/etc/keepalived"
+        - name: run-dir
+          mountPath: "/var/run/keepalived"
+        livenessProbe:
+          exec:
+            command:
+            - /bin/bash
+            - -c
+            - |
+              kill -s SIGUSR1 "$(pgrep -o keepalived)" && ! grep -q "State = FAULT" /tmp/keepalived.data
+          initialDelaySeconds: 20
+        terminationMessagePolicy: FallbackToLogsOnError
+        imagePullPolicy: IfNotPresent
+      - name: keepalived-monitor
+        securityContext:
+          privileged: true
+        image: {{ .Images.baremetalRuntimeCfgImage }}
+        env:
+        - name: ENABLE_UNICAST
+          value: "no"
+        - name: IS_BOOTSTRAP
+          value: "no"
+        command:
+        - dynkeepalived
+        - "/var/lib/kubelet/kubeconfig"
+        - "/config/keepalived.conf.tmpl"
+        - "/etc/keepalived/keepalived.conf"
+        - "--api-vip"
+        - "{{ .Infra.Status.PlatformStatus.VSphere.APIServerInternalIP }}"
+        - "--ingress-vip"
+        - "{{ .Infra.Status.PlatformStatus.VSphere.IngressIP }}"
+        resources:
+          requests:
+            cpu: 100m
+            memory: 200Mi
+        volumeMounts:
+        - name: resource-dir
+          mountPath: "/config"
+        - name: kubeconfigvarlib
+          mountPath: "/var/lib/kubelet"
+        - name: conf-dir
+          mountPath: "/etc/keepalived"
+        - name: run-dir
+          mountPath: "/var/run/keepalived"
+        - name: chroot-host
+          mountPath: "/host"
+        imagePullPolicy: IfNotPresent
+      hostNetwork: true
+      tolerations:
+      - operator: Exists
+      priorityClassName: system-node-critical
+    status: {}
+    {{ end -}}
+    {{ end -}}
+    {{ end -}}
+    {{ end -}}
+    {{ end -}}
diff --git a/templates/common/vsphere/files/vsphere-mdns-publisher.yaml b/templates/common/vsphere/files/vsphere-mdns-publisher.yaml
new file mode 100644
index 0000000000..11e0855c63
--- /dev/null
+++ b/templates/common/vsphere/files/vsphere-mdns-publisher.yaml
@@ -0,0 +1,113 @@
+mode: 0644
+path: "/etc/kubernetes/manifests/mdns-publisher.yaml"
+contents:
+  inline: |
+    {{ if .Infra -}}
+    {{ if .Infra.Status -}}
+    {{ if .Infra.Status.PlatformStatus -}}
+    {{ if .Infra.Status.PlatformStatus.VSphere -}}
+    {{ if .Infra.Status.PlatformStatus.VSphere.APIServerInternalIP -}}
+    kind: Pod
+    apiVersion: v1
+    metadata:
+      name: mdns-publisher
+      namespace: openshift-vsphere-infra
+      creationTimestamp:
+      deletionGracePeriodSeconds: 65
+      labels:
+        app: vsphere-infra-mdns
+    spec:
+      volumes:
+      - name: resource-dir
+        hostPath:
+          path: "/etc/kubernetes/static-pod-resources/mdns"
+      - name: kubeconfig
+        hostPath:
+          path: "/etc/kubernetes/kubeconfig"
+      - name: conf-dir
+        hostPath:
+          path: "/etc/mdns"
+      initContainers:
+      - name: verify-hostname
+        image: {{ .Images.baremetalRuntimeCfgImage }}
+        env:
+        - name: RUNTIMECFG_HOSTNAME_PATH
+          value: "/etc/mdns/hostname"
+        command:
+        - "/bin/bash"
+        - "-c"
+        - |
+          #!/bin/bash
+          set -xv
+          function get_hostname()
+          {
+            if [[ -s $RUNTIMECFG_HOSTNAME_PATH ]]; then
+              cat $RUNTIMECFG_HOSTNAME_PATH
+            else
+              # if hostname wasn't updated by NM script, read hostname
+              hostname
+            fi
+          }
+          while [[ "$(get_hostname)" =~ ^localhost(.localdomain)?$ ]]; do
+            echo "hostname is still set to a default value"
+            sleep 1
+          done
+        volumeMounts:
+        - name: conf-dir
+          mountPath: "/etc/mdns"
+      - name: render-config-mdns-publisher
+        image: {{ .Images.baremetalRuntimeCfgImage }}
+        env:
+        - name: RUNTIMECFG_HOSTNAME_PATH
+          value: "/etc/mdns/hostname"
+        command:
+        - runtimecfg
+        - render
+        - "/etc/kubernetes/kubeconfig"
+        - "--api-vip"
+        - "{{ .Infra.Status.PlatformStatus.VSphere.APIServerInternalIP }}"
+        - "--ingress-vip"
+        - "{{ .Infra.Status.PlatformStatus.VSphere.IngressIP }}"
+        - "/config"
+        - "--out-dir"
+        - "/etc/mdns"
+        - "--verbose"
+        resources: {}
+        volumeMounts:
+        - name: kubeconfig
+          mountPath: "/etc/kubernetes/kubeconfig"
+        - name: resource-dir
+          mountPath: "/config"
+        - name: conf-dir
+          mountPath: "/etc/mdns"
+        imagePullPolicy: IfNotPresent
+      containers:
+      - name: mdns-publisher
+        image: {{.Images.mdnsPublisherImage}}
+        args:
+        -
"--debug" + resources: + requests: + cpu: 100m + memory: 200Mi + volumeMounts: + - name: conf-dir + mountPath: "/etc/mdns" + livenessProbe: + exec: + command: + - pgrep + - mdns-publisher + initialDelaySeconds: 10 + terminationMessagePolicy: FallbackToLogsOnError + imagePullPolicy: IfNotPresent + hostNetwork: true + tolerations: + - operator: Exists + priorityClassName: system-node-critical + status: {} + {{ end -}} + {{ end -}} + {{ end -}} + {{ end -}} + {{ end -}} diff --git a/templates/common/vsphere/units/nodeip-configuration.service.yaml b/templates/common/vsphere/units/nodeip-configuration.service.yaml new file mode 100644 index 0000000000..b11dc42a29 --- /dev/null +++ b/templates/common/vsphere/units/nodeip-configuration.service.yaml @@ -0,0 +1,57 @@ +name: nodeip-configuration.service +enabled: true +contents: | + {{ if .Infra -}} + {{ if .Infra.Status -}} + {{ if .Infra.Status.PlatformStatus -}} + {{ if .Infra.Status.PlatformStatus.VSphere -}} + {{ if .Infra.Status.PlatformStatus.VSphere.APIServerInternalIP -}} + [Unit] + Description=Writes IP address configuration so that kubelet and crio services select a valid node IP + # This only applies to VIP managing environments where the kubelet and crio IP + # address picking logic is flawed and may end up selecting an address from a + # different subnet or a deprecated address + Wants=network-online.target + After=network-online.target ignition-firstboot-complete.service + Before=kubelet.service crio.service + + [Service] + # Need oneshot to delay kubelet + Type=oneshot + # Would prefer to do Restart=on-failure instead of this bash retry loop, but + # the version of systemd we have right now doesn't support it. It should be + # available in systemd v244 and higher. + ExecStart=/bin/bash -c " \ + until \ + /usr/bin/podman run --rm \ + --authfile /var/lib/kubelet/config.json \ + --net=host \ + --volume /etc/systemd/system:/etc/systemd/system:z \ + {{ .Images.baremetalRuntimeCfgImage }} \ + node-ip \ + set --retry-on-failure \ + {{.Infra.Status.PlatformStatus.VSphere.APIServerInternalIP }}; \ + do \ + sleep 5; \ + done" + ExecStart=/bin/systemctl daemon-reload + + {{if .Proxy -}} + {{if .Proxy.HTTPProxy -}} + Environment=HTTP_PROXY={{.Proxy.HTTPProxy}} + {{end -}} + {{if .Proxy.HTTPSProxy -}} + Environment=HTTPS_PROXY={{.Proxy.HTTPSProxy}} + {{end -}} + {{if .Proxy.NoProxy -}} + Environment=NO_PROXY={{.Proxy.NoProxy}} + {{end -}} + {{end -}} + + [Install] + WantedBy=multi-user.target + {{ end -}} + {{ end -}} + {{ end -}} + {{ end -}} + {{ end -}} diff --git a/templates/master/00-master/on-prem/files/haproxy-haproxy.yaml b/templates/master/00-master/baremetal/files/baremetal-haproxy-haproxy.yaml similarity index 100% rename from templates/master/00-master/on-prem/files/haproxy-haproxy.yaml rename to templates/master/00-master/baremetal/files/baremetal-haproxy-haproxy.yaml diff --git a/templates/master/00-master/on-prem/files/haproxy.yaml b/templates/master/00-master/baremetal/files/baremetal-haproxy.yaml similarity index 95% rename from templates/master/00-master/on-prem/files/haproxy.yaml rename to templates/master/00-master/baremetal/files/baremetal-haproxy.yaml index e24c26738f..f5d8bb05af 100644 --- a/templates/master/00-master/on-prem/files/haproxy.yaml +++ b/templates/master/00-master/baremetal/files/baremetal-haproxy.yaml @@ -6,11 +6,11 @@ contents: apiVersion: v1 metadata: name: haproxy - namespace: openshift-{{ onPremPlatformShortName . 
}}-infra + namespace: openshift-kni-infra creationTimestamp: deletionGracePeriodSeconds: 65 labels: - app: {{ onPremPlatformShortName . }}-infra-api-lb + app: kni-infra-api-lb spec: volumes: - name: resource-dir @@ -113,7 +113,7 @@ contents: - "-c" - | cp /host/etc/resolv.conf /etc/resolv.conf - monitor /var/lib/kubelet/kubeconfig /config/haproxy.cfg.tmpl /etc/haproxy/haproxy.cfg --api-vip {{ onPremPlatformAPIServerInternalIP . }} + monitor /var/lib/kubelet/kubeconfig /config/haproxy.cfg.tmpl /etc/haproxy/haproxy.cfg --api-vip {{ .Infra.Status.PlatformStatus.BareMetal.APIServerInternalIP }} resources: requests: cpu: 100m diff --git a/templates/master/00-master/on-prem/files/keepalived-keepalived.yaml b/templates/master/00-master/baremetal/files/baremetal-keepalived-keepalived.yaml similarity index 100% rename from templates/master/00-master/on-prem/files/keepalived-keepalived.yaml rename to templates/master/00-master/baremetal/files/baremetal-keepalived-keepalived.yaml diff --git a/templates/master/00-master/on-prem/files/keepalived-script-both.yaml b/templates/master/00-master/baremetal/files/baremetal-keepalived-script-both.yaml similarity index 100% rename from templates/master/00-master/on-prem/files/keepalived-script-both.yaml rename to templates/master/00-master/baremetal/files/baremetal-keepalived-script-both.yaml diff --git a/templates/master/00-master/on-prem/files/keepalived-script.yaml b/templates/master/00-master/baremetal/files/baremetal-keepalived-script.yaml similarity index 100% rename from templates/master/00-master/on-prem/files/keepalived-script.yaml rename to templates/master/00-master/baremetal/files/baremetal-keepalived-script.yaml diff --git a/templates/master/00-master/on-prem/files/mdns-config.yaml b/templates/master/00-master/baremetal/files/baremetal-mdns-config.yaml similarity index 100% rename from templates/master/00-master/on-prem/files/mdns-config.yaml rename to templates/master/00-master/baremetal/files/baremetal-mdns-config.yaml diff --git a/templates/master/00-master/openstack/files/openstack-haproxy-haproxy.yaml b/templates/master/00-master/openstack/files/openstack-haproxy-haproxy.yaml new file mode 100644 index 0000000000..0dc35d4b0d --- /dev/null +++ b/templates/master/00-master/openstack/files/openstack-haproxy-haproxy.yaml @@ -0,0 +1,39 @@ +mode: 0644 +path: "/etc/kubernetes/static-pod-resources/haproxy/haproxy.cfg.tmpl" +contents: + inline: | + defaults + maxconn 20000 + mode tcp + log /var/run/haproxy/haproxy-log.sock local0 + option dontlognull + retries 3 + timeout http-request 10s + timeout queue 1m + timeout connect 10s + timeout client 86400s + timeout server 86400s + timeout tunnel 86400s + frontend main + bind :::{{`{{ .LBConfig.LbPort }}`}} v4v6 + default_backend masters + listen health_check_http_url + bind :::50936 v4v6 + mode http + monitor-uri /haproxy_ready + option dontlognull + listen stats + bind localhost:{{`{{ .LBConfig.StatPort }}`}} + mode http + stats enable + stats hide-version + stats uri /haproxy_stats + stats refresh 30s + stats auth Username:Password + backend masters + option httpchk GET /readyz HTTP/1.0 + option log-health-checks + balance roundrobin + {{`{{- range .LBConfig.Backends }} + server {{ .Host }} {{ .Address }}:{{ .Port }} weight 1 verify none check check-ssl inter 1s fall 2 rise 3 + {{- end }}`}} diff --git a/templates/master/00-master/openstack/files/openstack-haproxy.yaml b/templates/master/00-master/openstack/files/openstack-haproxy.yaml new file mode 100644 index 0000000000..c28e20466f --- /dev/null +++ 
b/templates/master/00-master/openstack/files/openstack-haproxy.yaml @@ -0,0 +1,144 @@ +mode: 0644 +path: "/etc/kubernetes/manifests/haproxy.yaml" +contents: + inline: | + kind: Pod + apiVersion: v1 + metadata: + name: haproxy + namespace: openshift-openstack-infra + creationTimestamp: + deletionGracePeriodSeconds: 65 + labels: + app: openstack-infra-api-lb + spec: + volumes: + - name: resource-dir + hostPath: + path: "/etc/kubernetes/static-pod-resources/haproxy" + - name: kubeconfigvarlib + hostPath: + path: "/var/lib/kubelet" + - name: run-dir + empty-dir: {} + - name: conf-dir + hostPath: + path: "/etc/haproxy" + - name: chroot-host + hostPath: + path: "/" + containers: + - name: haproxy + image: {{.Images.haproxyImage}} + env: + - name: OLD_HAPROXY_PS_FORCE_DEL_TIMEOUT + value: "120" + command: + - "/bin/bash" + - "-c" + - | + #/bin/bash + verify_old_haproxy_ps_being_deleted() + { + local prev_pids + prev_pids="$1" + sleep $OLD_HAPROXY_PS_FORCE_DEL_TIMEOUT + cur_pids=$(pidof haproxy) + for val in $prev_pids; do + if [[ $cur_pids =~ (^|[[:space:]])"$val"($|[[:space:]]) ]] ; then + kill $val + fi + done + } + + reload_haproxy() + { + old_pids=$(pidof haproxy) + if [ -n "$old_pids" ]; then + /usr/sbin/haproxy -W -db -f /etc/haproxy/haproxy.cfg -p /var/lib/haproxy/run/haproxy.pid -x /var/lib/haproxy/run/haproxy.sock -sf $old_pids & + #There seem to be some cases where HAProxy doesn't drain properly. + #To handle that case, a SIGTERM signal is sent to old HAProxy processes which haven't terminated. + verify_old_haproxy_ps_being_deleted "$old_pids" & + else + /usr/sbin/haproxy -W -db -f /etc/haproxy/haproxy.cfg -p /var/lib/haproxy/run/haproxy.pid & + fi + } + + msg_handler() + { + while read -r line; do + echo "The client sent: $line" >&2 + # currently only 'reload' msg is supported + if [ "$line" = reload ]; then + reload_haproxy + fi + done + } + set -ex + declare -r haproxy_sock="/var/run/haproxy/haproxy-master.sock" + declare -r haproxy_log_sock="/var/run/haproxy/haproxy-log.sock" + export -f msg_handler + export -f reload_haproxy + export -f verify_old_haproxy_ps_being_deleted + rm -f "$haproxy_sock" "$haproxy_log_sock" + socat UNIX-RECV:${haproxy_log_sock} STDOUT & + if [ -s "/etc/haproxy/haproxy.cfg" ]; then + /usr/sbin/haproxy -W -db -f /etc/haproxy/haproxy.cfg -p /var/lib/haproxy/run/haproxy.pid & + fi + socat UNIX-LISTEN:${haproxy_sock},fork system:'bash -c msg_handler' + resources: + requests: + cpu: 100m + memory: 200Mi + volumeMounts: + - name: conf-dir + mountPath: "/etc/haproxy" + - name: run-dir + mountPath: "/var/run/haproxy" + livenessProbe: + initialDelaySeconds: 50 + httpGet: + path: /haproxy_ready + port: 50936 + terminationMessagePolicy: FallbackToLogsOnError + imagePullPolicy: IfNotPresent + - name: haproxy-monitor + securityContext: + privileged: true + image: {{ .Images.baremetalRuntimeCfgImage }} + command: + - "/bin/bash" + - "-c" + - | + cp /host/etc/resolv.conf /etc/resolv.conf + monitor /var/lib/kubelet/kubeconfig /config/haproxy.cfg.tmpl /etc/haproxy/haproxy.cfg --api-vip {{ .Infra.Status.PlatformStatus.OpenStack.APIServerInternalIP }} + resources: + requests: + cpu: 100m + memory: 200Mi + volumeMounts: + - name: conf-dir + mountPath: "/etc/haproxy" + - name: run-dir + mountPath: "/var/run/haproxy" + - name: resource-dir + mountPath: "/config" + - name: chroot-host + mountPath: "/host" + - name: kubeconfigvarlib + mountPath: "/var/lib/kubelet" + livenessProbe: + initialDelaySeconds: 10 + exec: + command: + - /bin/bash + - -c + - | + cmp /host/etc/resolv.conf 
/etc/resolv.conf + terminationMessagePolicy: FallbackToLogsOnError + imagePullPolicy: IfNotPresent + hostNetwork: true + tolerations: + - operator: Exists + priorityClassName: system-node-critical + status: {} diff --git a/templates/master/00-master/openstack/files/openstack-keepalived-keepalived.yaml b/templates/master/00-master/openstack/files/openstack-keepalived-keepalived.yaml new file mode 100644 index 0000000000..e7a6034830 --- /dev/null +++ b/templates/master/00-master/openstack/files/openstack-keepalived-keepalived.yaml @@ -0,0 +1,80 @@ +mode: 0644 +path: "/etc/kubernetes/static-pod-resources/keepalived/keepalived.conf.tmpl" +contents: + inline: | + global_defs { + enable_script_security + script_user root + } + + # These are separate checks to provide the following behavior: + # If the loadbalanced endpoint is responding then all is well regardless + # of what the local api status is. Both checks will return success and + # we'll have the maximum priority. This means as long as there is a node + # with a functional loadbalancer it will get the VIP. + # If all of the loadbalancers go down but the local api is still running, + # the _both check will still succeed and allow any node with a functional + # api to take the VIP. This isn't preferred because it means all api + # traffic will go through one node, but at least it keeps the api available. + vrrp_script chk_ocp_lb { + script "/usr/bin/timeout 1.9 /etc/keepalived/chk_ocp_script.sh" + interval 2 + weight 20 + rise 3 + fall 2 + } + + vrrp_script chk_ocp_both { + script "/usr/bin/timeout 1.9 /etc/keepalived/chk_ocp_script_both.sh" + interval 2 + # Use a smaller weight for this check so it won't trigger the move from + # bootstrap to master by itself. + weight 5 + rise 3 + fall 2 + } + + # TODO: Improve this check. The port is assumed to be alive. + # Need to assess what is the ramification if the port is not there. 
+ vrrp_script chk_ingress { + script "/usr/bin/timeout 0.9 /usr/bin/curl -o /dev/null -Lfs http://localhost:1936/healthz/ready" + interval 1 + weight 50 + } + + vrrp_instance {{`{{ .Cluster.Name }}`}}_API { + state BACKUP + interface {{`{{ .VRRPInterface }}`}} + virtual_router_id {{`{{ .Cluster.APIVirtualRouterID }}`}} + priority 40 + advert_int 1 + authentication { + auth_type PASS + auth_pass {{`{{ .Cluster.Name }}`}}_api_vip + } + virtual_ipaddress { + {{`{{ .Cluster.APIVIP }}`}}/{{`{{ .Cluster.VIPNetmask }}`}} + } + track_script { + chk_ocp_lb + chk_ocp_both + } + } + + vrrp_instance {{`{{ .Cluster.Name }}`}}_INGRESS { + state BACKUP + interface {{`{{ .VRRPInterface }}`}} + virtual_router_id {{`{{ .Cluster.IngressVirtualRouterID }}`}} + priority 40 + advert_int 1 + authentication { + auth_type PASS + auth_pass {{`{{ .Cluster.Name }}`}}_ingress_vip + } + virtual_ipaddress { + {{`{{ .Cluster.IngressVIP }}`}}/{{`{{ .Cluster.VIPNetmask }}`}} + } + track_script { + chk_ingress + } + } diff --git a/templates/master/00-master/openstack/files/openstack-keepalived-script-both.yaml b/templates/master/00-master/openstack/files/openstack-keepalived-script-both.yaml new file mode 100644 index 0000000000..1b4e597711 --- /dev/null +++ b/templates/master/00-master/openstack/files/openstack-keepalived-script-both.yaml @@ -0,0 +1,6 @@ +mode: 0755 +path: "/etc/kubernetes/static-pod-resources/keepalived/scripts/chk_ocp_script_both.sh.tmpl" +contents: + inline: | + #!/bin/bash + /usr/bin/curl -o /dev/null -kLfs https://localhost:{{`{{ .LBConfig.LbPort }}`}}/readyz && [ -e /var/run/keepalived/iptables-rule-exists ] || /usr/bin/curl -kLfs https://localhost:{{`{{ .LBConfig.ApiPort }}`}}/readyz diff --git a/templates/master/00-master/openstack/files/openstack-keepalived-script.yaml b/templates/master/00-master/openstack/files/openstack-keepalived-script.yaml new file mode 100644 index 0000000000..c500a60d2f --- /dev/null +++ b/templates/master/00-master/openstack/files/openstack-keepalived-script.yaml @@ -0,0 +1,6 @@ +mode: 0755 +path: "/etc/kubernetes/static-pod-resources/keepalived/scripts/chk_ocp_script.sh.tmpl" +contents: + inline: | + #!/bin/bash + /usr/bin/curl -o /dev/null -kLfs https://localhost:{{`{{ .LBConfig.LbPort }}`}}/readyz && [ -e /var/run/keepalived/iptables-rule-exists ] diff --git a/templates/master/00-master/openstack/files/openstack-mdns-config.yaml b/templates/master/00-master/openstack/files/openstack-mdns-config.yaml new file mode 100644 index 0000000000..ad82c2e45d --- /dev/null +++ b/templates/master/00-master/openstack/files/openstack-mdns-config.yaml @@ -0,0 +1,14 @@ +mode: 0644 +path: "/etc/kubernetes/static-pod-resources/mdns/config.hcl.tmpl" +contents: + inline: | + bind_address = "{{`{{ .NonVirtualIP }}`}}" + collision_avoidance = "hostname" + service { + name = "{{`{{ .Cluster.Name }}`}} Workstation" + host_name = "{{`{{ .ShortHostname }}`}}.local." + type = "_workstation._tcp" + domain = "local." 
+ port = 42424 + ttl = 3200 + } diff --git a/templates/master/00-master/ovirt/files/ovirt-haproxy-haproxy.yaml b/templates/master/00-master/ovirt/files/ovirt-haproxy-haproxy.yaml new file mode 100644 index 0000000000..d8eefab317 --- /dev/null +++ b/templates/master/00-master/ovirt/files/ovirt-haproxy-haproxy.yaml @@ -0,0 +1,39 @@ +mode: 0644 +path: "/etc/kubernetes/static-pod-resources/haproxy/haproxy.cfg.tmpl" +contents: + inline: | + defaults + maxconn 20000 + mode tcp + log /var/run/haproxy/haproxy-log.sock local0 + option dontlognull + retries 3 + timeout http-request 10s + timeout queue 1m + timeout connect 10s + timeout client 86400s + timeout server 86400s + timeout tunnel 86400s + frontend main + bind :::{{`{{ .LBConfig.LbPort }}`}} v4v6 + default_backend masters + listen health_check_http_url + bind :::50936 v4v6 + mode http + monitor-uri /readyz + option dontlognull + listen stats + bind 127.0.0.1:{{`{{ .LBConfig.StatPort }}`}} + mode http + stats enable + stats hide-version + stats uri /haproxy_stats + stats refresh 30s + stats auth Username:Password + backend masters + option httpchk GET /readyz HTTP/1.0 + option log-health-checks + balance roundrobin + {{`{{- range .LBConfig.Backends }} + server {{ .Host }} {{ .Address }}:{{ .Port }} weight 1 verify none check check-ssl inter 3s fall 2 rise 3 + {{- end }}`}} diff --git a/templates/master/00-master/ovirt/files/ovirt-haproxy.yaml b/templates/master/00-master/ovirt/files/ovirt-haproxy.yaml new file mode 100644 index 0000000000..1d06ad28d0 --- /dev/null +++ b/templates/master/00-master/ovirt/files/ovirt-haproxy.yaml @@ -0,0 +1,124 @@ +mode: 0644 +path: "/etc/kubernetes/manifests/haproxy.yaml" +contents: + inline: | + kind: Pod + apiVersion: v1 + metadata: + name: haproxy + namespace: openshift-ovirt-infra + creationTimestamp: + deletionGracePeriodSeconds: 65 + labels: + app: ovirt-infra-api-lb + spec: + volumes: + - name: resource-dir + hostPath: + path: "/etc/kubernetes/static-pod-resources/haproxy" + - name: kubeconfigvarlib + hostPath: + path: "/var/lib/kubelet" + - name: run-dir + empty-dir: {} + - name: conf-dir + hostPath: + path: "/etc/haproxy" + - name: chroot-host + hostPath: + path: "/" + containers: + - name: haproxy + image: {{.Images.haproxyImage}} + command: + - "/bin/bash" + - "-c" + - | + #/bin/bash + reload_haproxy() + { + old_pids=$(pidof haproxy) + if [ -n "$old_pids" ]; then + /usr/sbin/haproxy -W -db -f /etc/haproxy/haproxy.cfg -p /var/lib/haproxy/run/haproxy.pid -x /var/lib/haproxy/run/haproxy.sock -sf $old_pids & + else + /usr/sbin/haproxy -W -db -f /etc/haproxy/haproxy.cfg -p /var/lib/haproxy/run/haproxy.pid & + fi + } + + msg_handler() + { + while read -r line; do + echo "The client sent: $line" >&2 + # currently only 'reload' msg is supported + if [ "$line" = reload ]; then + reload_haproxy + fi + done + } + set -ex + declare -r haproxy_sock="/var/run/haproxy/haproxy-master.sock" + declare -r haproxy_log_sock="/var/run/haproxy/haproxy-log.sock" + export -f msg_handler + export -f reload_haproxy + rm -f "$haproxy_sock" "$haproxy_log_sock" + socat UNIX-RECV:${haproxy_log_sock} STDOUT & + if [ -s "/etc/haproxy/haproxy.cfg" ]; then + /usr/sbin/haproxy -W -db -f /etc/haproxy/haproxy.cfg -p /var/lib/haproxy/run/haproxy.pid & + fi + socat UNIX-LISTEN:${haproxy_sock},fork system:'bash -c msg_handler' + resources: + requests: + cpu: 100m + memory: 200Mi + volumeMounts: + - name: conf-dir + mountPath: "/etc/haproxy" + - name: run-dir + mountPath: "/var/run/haproxy" + livenessProbe: + initialDelaySeconds: 50 + 
httpGet: + path: /readyz + port: 50936 + terminationMessagePolicy: FallbackToLogsOnError + imagePullPolicy: IfNotPresent + - name: haproxy-monitor + securityContext: + privileged: true + image: {{ .Images.baremetalRuntimeCfgImage }} + command: + - "/bin/bash" + - "-c" + - | + cp /host/etc/resolv.conf /etc/resolv.conf + monitor /var/lib/kubelet/kubeconfig /config/haproxy.cfg.tmpl /etc/haproxy/haproxy.cfg --api-vip {{ .Infra.Status.PlatformStatus.Ovirt.APIServerInternalIP }} + resources: + requests: + cpu: 100m + memory: 200Mi + volumeMounts: + - name: conf-dir + mountPath: "/etc/haproxy" + - name: run-dir + mountPath: "/var/run/haproxy" + - name: resource-dir + mountPath: "/config" + - name: chroot-host + mountPath: "/host" + - name: kubeconfigvarlib + mountPath: "/var/lib/kubelet" + livenessProbe: + initialDelaySeconds: 10 + exec: + command: + - /bin/bash + - -c + - | + cmp /host/etc/resolv.conf /etc/resolv.conf + terminationMessagePolicy: FallbackToLogsOnError + imagePullPolicy: IfNotPresent + hostNetwork: true + tolerations: + - operator: Exists + priorityClassName: system-node-critical + status: {} diff --git a/templates/master/00-master/ovirt/files/ovirt-keepalived-keepalived.yaml b/templates/master/00-master/ovirt/files/ovirt-keepalived-keepalived.yaml new file mode 100644 index 0000000000..e879638f98 --- /dev/null +++ b/templates/master/00-master/ovirt/files/ovirt-keepalived-keepalived.yaml @@ -0,0 +1,58 @@ +mode: 0644 +path: "/etc/kubernetes/static-pod-resources/keepalived/keepalived.conf.tmpl" +contents: + inline: | + global_defs { + enable_script_security + script_user root + } + + vrrp_script chk_ocp { + script "/usr/bin/timeout 0.9 /etc/keepalived/chk_ocp_script.sh" + interval 1 + weight 50 + } + + # TODO: Improve this check. The port is assumed to be alive. + # Need to assess what is the ramification if the port is not there. 
+ vrrp_script chk_ingress { + script "/usr/bin/timeout 0.9 /usr/bin/curl -o /dev/null -Lfs http://localhost:1936/healthz/ready" + interval 1 + weight 50 + } + + vrrp_instance {{`{{ .Cluster.Name }}`}}_API { + state BACKUP + interface {{`{{ .VRRPInterface }}`}} + virtual_router_id {{`{{ .Cluster.APIVirtualRouterID }}`}} + priority 40 + advert_int 1 + authentication { + auth_type PASS + auth_pass {{`{{ .Cluster.Name }}`}}_api_vip + } + virtual_ipaddress { + {{`{{ .Cluster.APIVIP }}`}}/{{`{{ .Cluster.VIPNetmask }}`}} + } + track_script { + chk_ocp + } + } + + vrrp_instance {{`{{ .Cluster.Name }}`}}_INGRESS { + state BACKUP + interface {{`{{ .VRRPInterface }}`}} + virtual_router_id {{`{{ .Cluster.IngressVirtualRouterID }}`}} + priority 40 + advert_int 1 + authentication { + auth_type PASS + auth_pass {{`{{ .Cluster.Name }}`}}_ingress_vip + } + virtual_ipaddress { + {{`{{ .Cluster.IngressVIP }}`}}/{{`{{ .Cluster.VIPNetmask }}`}} + } + track_script { + chk_ingress + } + } diff --git a/templates/master/00-master/ovirt/files/ovirt-keepalived-script.yaml b/templates/master/00-master/ovirt/files/ovirt-keepalived-script.yaml new file mode 100644 index 0000000000..e0d45b34bf --- /dev/null +++ b/templates/master/00-master/ovirt/files/ovirt-keepalived-script.yaml @@ -0,0 +1,6 @@ +mode: 0755 +path: "/etc/keepalived/chk_ocp_script.sh" +contents: + inline: | + #!/bin/bash + /usr/bin/curl -o /dev/null -kLfs https://localhost:6443/readyz && /usr/bin/curl -o /dev/null -kLfs http://localhost:50936/readyz diff --git a/templates/worker/00-worker/on-prem/files/mdns-config.yaml b/templates/master/00-master/ovirt/files/ovirt-mdns-config.yaml similarity index 100% rename from templates/worker/00-worker/on-prem/files/mdns-config.yaml rename to templates/master/00-master/ovirt/files/ovirt-mdns-config.yaml diff --git a/templates/master/00-master/vsphere/files/vsphere-haproxy-haproxy.yaml b/templates/master/00-master/vsphere/files/vsphere-haproxy-haproxy.yaml new file mode 100644 index 0000000000..9aca92d169 --- /dev/null +++ b/templates/master/00-master/vsphere/files/vsphere-haproxy-haproxy.yaml @@ -0,0 +1,49 @@ +mode: 0644 +path: "/etc/kubernetes/static-pod-resources/haproxy/haproxy.cfg.tmpl" +contents: + inline: | + {{ if .Infra -}} + {{ if .Infra.Status -}} + {{ if .Infra.Status.PlatformStatus -}} + {{ if .Infra.Status.PlatformStatus.VSphere -}} + {{ if .Infra.Status.PlatformStatus.VSphere.APIServerInternalIP -}} + defaults + maxconn 20000 + mode tcp + log /var/run/haproxy/haproxy-log.sock local0 + option dontlognull + retries 3 + timeout http-request 10s + timeout queue 1m + timeout connect 10s + timeout client 86400s + timeout server 86400s + timeout tunnel 86400s + frontend main + bind :::{{`{{ .LBConfig.LbPort }}`}} v4v6 + default_backend masters + listen health_check_http_url + bind :::50936 v4v6 + mode http + monitor-uri /haproxy_ready + option dontlognull + listen stats + bind localhost:{{`{{ .LBConfig.StatPort }}`}} + mode http + stats enable + stats hide-version + stats uri /haproxy_stats + stats refresh 30s + stats auth Username:Password + backend masters + option httpchk GET /readyz HTTP/1.0 + option log-health-checks + balance roundrobin + {{`{{- range .LBConfig.Backends }} + server {{ .Host }} {{ .Address }}:{{ .Port }} weight 1 verify none check check-ssl inter 1s fall 2 rise 3 + {{- end }}`}} + {{ end -}} + {{ end -}} + {{ end -}} + {{ end -}} + {{ end -}} diff --git a/templates/master/00-master/vsphere/files/vsphere-haproxy.yaml b/templates/master/00-master/vsphere/files/vsphere-haproxy.yaml new file 
mode 100644 index 0000000000..cb4580a276 --- /dev/null +++ b/templates/master/00-master/vsphere/files/vsphere-haproxy.yaml @@ -0,0 +1,156 @@ +mode: 0644 +path: "/etc/kubernetes/manifests/haproxy.yaml" +contents: + inline: | + {{ if .Infra -}} + {{ if .Infra.Status -}} + {{ if .Infra.Status.PlatformStatus -}} + {{ if .Infra.Status.PlatformStatus.VSphere -}} + {{ if .Infra.Status.PlatformStatus.VSphere.APIServerInternalIP -}} + kind: Pod + apiVersion: v1 + metadata: + name: haproxy + namespace: openshift-vsphere-infra + creationTimestamp: + deletionGracePeriodSeconds: 65 + labels: + app: vsphere-infra-api-lb + spec: + volumes: + - name: resource-dir + hostPath: + path: "/etc/kubernetes/static-pod-resources/haproxy" + - name: kubeconfigvarlib + hostPath: + path: "/var/lib/kubelet" + - name: run-dir + empty-dir: {} + - name: conf-dir + hostPath: + path: "/etc/haproxy" + - name: chroot-host + hostPath: + path: "/" + containers: + - name: haproxy + image: {{.Images.haproxyImage}} + env: + - name: OLD_HAPROXY_PS_FORCE_DEL_TIMEOUT + value: "120" + command: + - "/bin/bash" + - "-c" + - | + #/bin/bash + verify_old_haproxy_ps_being_deleted() + { + local prev_pids + + prev_pids="$1" + sleep $OLD_HAPROXY_PS_FORCE_DEL_TIMEOUT + cur_pids=$(pidof haproxy) + + for val in $prev_pids; do + if [[ $cur_pids =~ (^|[[:space:]])"$val"($|[[:space:]]) ]] ; then + kill $val + fi + done + } + + reload_haproxy() + { + old_pids=$(pidof haproxy) + if [ -n "$old_pids" ]; then + /usr/sbin/haproxy -W -db -f /etc/haproxy/haproxy.cfg -p /var/lib/haproxy/run/haproxy.pid -x /var/lib/haproxy/run/haproxy.sock -sf $old_pids & + #There seem to be some cases where HAProxy doesn't drain properly. + #To handle that case, a SIGTERM signal is sent to old HAProxy processes which haven't terminated. 
+ verify_old_haproxy_ps_being_deleted "$old_pids" & + else + /usr/sbin/haproxy -W -db -f /etc/haproxy/haproxy.cfg -p /var/lib/haproxy/run/haproxy.pid & + fi + } + + msg_handler() + { + while read -r line; do + echo "The client sent: $line" >&2 + # currently only 'reload' msg is supported + if [ "$line" = reload ]; then + reload_haproxy + fi + done + } + set -ex + declare -r haproxy_sock="/var/run/haproxy/haproxy-master.sock" + declare -r haproxy_log_sock="/var/run/haproxy/haproxy-log.sock" + export -f msg_handler + export -f reload_haproxy + export -f verify_old_haproxy_ps_being_deleted + rm -f "$haproxy_sock" "$haproxy_log_sock" + socat UNIX-RECV:${haproxy_log_sock} STDOUT & + if [ -s "/etc/haproxy/haproxy.cfg" ]; then + /usr/sbin/haproxy -W -db -f /etc/haproxy/haproxy.cfg -p /var/lib/haproxy/run/haproxy.pid & + fi + socat UNIX-LISTEN:${haproxy_sock},fork system:'bash -c msg_handler' + resources: + requests: + cpu: 100m + memory: 200Mi + volumeMounts: + - name: conf-dir + mountPath: "/etc/haproxy" + - name: run-dir + mountPath: "/var/run/haproxy" + livenessProbe: + initialDelaySeconds: 50 + httpGet: + path: /haproxy_ready + port: 50936 + terminationMessagePolicy: FallbackToLogsOnError + imagePullPolicy: IfNotPresent + - name: haproxy-monitor + securityContext: + privileged: true + image: {{ .Images.baremetalRuntimeCfgImage }} + command: + - "/bin/bash" + - "-c" + - | + cp /host/etc/resolv.conf /etc/resolv.conf + monitor /var/lib/kubelet/kubeconfig /config/haproxy.cfg.tmpl /etc/haproxy/haproxy.cfg --api-vip {{ .Infra.Status.PlatformStatus.VSphere.APIServerInternalIP }} + resources: + requests: + cpu: 100m + memory: 200Mi + volumeMounts: + - name: conf-dir + mountPath: "/etc/haproxy" + - name: run-dir + mountPath: "/var/run/haproxy" + - name: resource-dir + mountPath: "/config" + - name: chroot-host + mountPath: "/host" + - name: kubeconfigvarlib + mountPath: "/var/lib/kubelet" + livenessProbe: + initialDelaySeconds: 10 + exec: + command: + - /bin/bash + - -c + - | + cmp /host/etc/resolv.conf /etc/resolv.conf + terminationMessagePolicy: FallbackToLogsOnError + imagePullPolicy: IfNotPresent + hostNetwork: true + tolerations: + - operator: Exists + priorityClassName: system-node-critical + status: {} + {{ end -}} + {{ end -}} + {{ end -}} + {{ end -}} + {{ end -}} diff --git a/templates/master/00-master/vsphere/files/vsphere-keepalived-keepalived.yaml b/templates/master/00-master/vsphere/files/vsphere-keepalived-keepalived.yaml new file mode 100644 index 0000000000..313eebe867 --- /dev/null +++ b/templates/master/00-master/vsphere/files/vsphere-keepalived-keepalived.yaml @@ -0,0 +1,109 @@ +mode: 0644 +path: "/etc/kubernetes/static-pod-resources/keepalived/keepalived.conf.tmpl" +contents: + inline: | + {{ if .Infra -}} + {{ if .Infra.Status -}} + {{ if .Infra.Status.PlatformStatus -}} + {{ if .Infra.Status.PlatformStatus.VSphere -}} + {{ if .Infra.Status.PlatformStatus.VSphere.APIServerInternalIP -}} + global_defs { + enable_script_security + script_user root + } + + # These are separate checks to provide the following behavior: + # If the loadbalanced endpoint is responding then all is well regardless + # of what the local api status is. Both checks will return success and + # we'll have the maximum priority. This means as long as there is a node + # with a functional loadbalancer it will get the VIP. + # If all of the loadbalancers go down but the local api is still running, + # the _both check will still succeed and allow any node with a functional + # api to take the VIP. 
This isn't preferred because it means all api + # traffic will go through one node, but at least it keeps the api available. + vrrp_script chk_ocp_lb { + script "/usr/bin/timeout 1.9 /etc/keepalived/chk_ocp_script.sh" + interval 2 + weight 20 + rise 3 + fall 2 + } + + vrrp_script chk_ocp_both { + script "/usr/bin/timeout 1.9 /etc/keepalived/chk_ocp_script_both.sh" + interval 2 + # Use a smaller weight for this check so it won't trigger the move from + # bootstrap to master by itself. + weight 5 + rise 3 + fall 2 + } + + # TODO: Improve this check. The port is assumed to be alive. + # Need to assess what is the ramification if the port is not there. + vrrp_script chk_ingress { + script "/usr/bin/timeout 0.9 /usr/bin/curl -o /dev/null -Lfs http://localhost:1936/healthz/ready" + interval 1 + weight 50 + } + + {{`{{$nonVirtualIP := .NonVirtualIP}}`}} + + vrrp_instance {{`{{ .Cluster.Name }}`}}_API { + state BACKUP + interface {{`{{ .VRRPInterface }}`}} + virtual_router_id {{`{{ .Cluster.APIVirtualRouterID }}`}} + priority 40 + advert_int 1 + {{`{{if .EnableUnicast}}`}} + unicast_src_ip {{`{{.NonVirtualIP}}`}} + unicast_peer { + {{`{{ .BootstrapIP }}`}} + {{`{{range .LBConfig.Backends}} + {{if ne $nonVirtualIP .Address}}{{.Address}}{{end}} + {{end}}`}} + } + {{`{{end}}`}} + authentication { + auth_type PASS + auth_pass {{`{{ .Cluster.Name }}`}}_api_vip + } + virtual_ipaddress { + {{`{{ .Cluster.APIVIP }}`}}/{{`{{ .Cluster.VIPNetmask }}`}} + } + track_script { + chk_ocp_lb + chk_ocp_both + } + } + + vrrp_instance {{`{{ .Cluster.Name }}`}}_INGRESS { + state BACKUP + interface {{`{{ .VRRPInterface }}`}} + virtual_router_id {{`{{ .Cluster.IngressVirtualRouterID }}`}} + priority 40 + advert_int 1 + {{`{{if .EnableUnicast}}`}} + unicast_src_ip {{`{{.NonVirtualIP}}`}} + unicast_peer { + {{`{{range .IngressConfig.Peers}} + {{if ne $nonVirtualIP .}}{{.}}{{end}} + {{end}}`}} + } + {{`{{end}}`}} + authentication { + auth_type PASS + auth_pass {{`{{ .Cluster.Name }}`}}_ingress_vip + } + virtual_ipaddress { + {{`{{ .Cluster.IngressVIP }}`}}/{{`{{ .Cluster.VIPNetmask }}`}} + } + track_script { + chk_ingress + } + } + {{ end -}} + {{ end -}} + {{ end -}} + {{ end -}} + {{ end -}} diff --git a/templates/master/00-master/vsphere/files/vsphere-keepalived-script-both.yaml b/templates/master/00-master/vsphere/files/vsphere-keepalived-script-both.yaml new file mode 100644 index 0000000000..1b4e597711 --- /dev/null +++ b/templates/master/00-master/vsphere/files/vsphere-keepalived-script-both.yaml @@ -0,0 +1,6 @@ +mode: 0755 +path: "/etc/kubernetes/static-pod-resources/keepalived/scripts/chk_ocp_script_both.sh.tmpl" +contents: + inline: | + #!/bin/bash + /usr/bin/curl -o /dev/null -kLfs https://localhost:{{`{{ .LBConfig.LbPort }}`}}/readyz && [ -e /var/run/keepalived/iptables-rule-exists ] || /usr/bin/curl -kLfs https://localhost:{{`{{ .LBConfig.ApiPort }}`}}/readyz diff --git a/templates/master/00-master/vsphere/files/vsphere-keepalived-script.yaml b/templates/master/00-master/vsphere/files/vsphere-keepalived-script.yaml new file mode 100644 index 0000000000..c500a60d2f --- /dev/null +++ b/templates/master/00-master/vsphere/files/vsphere-keepalived-script.yaml @@ -0,0 +1,6 @@ +mode: 0755 +path: "/etc/kubernetes/static-pod-resources/keepalived/scripts/chk_ocp_script.sh.tmpl" +contents: + inline: | + #!/bin/bash + /usr/bin/curl -o /dev/null -kLfs https://localhost:{{`{{ .LBConfig.LbPort }}`}}/readyz && [ -e /var/run/keepalived/iptables-rule-exists ] diff --git 
a/templates/master/00-master/vsphere/files/vsphere-mdns-config.yaml b/templates/master/00-master/vsphere/files/vsphere-mdns-config.yaml new file mode 100644 index 0000000000..082d8c2453 --- /dev/null +++ b/templates/master/00-master/vsphere/files/vsphere-mdns-config.yaml @@ -0,0 +1,25 @@ +mode: 0644 +path: "/etc/kubernetes/static-pod-resources/mdns/config.hcl.tmpl" +contents: + inline: | + {{ if .Infra -}} + {{ if .Infra.Status -}} + {{ if .Infra.Status.PlatformStatus -}} + {{ if .Infra.Status.PlatformStatus.VSphere -}} + {{ if .Infra.Status.PlatformStatus.VSphere.APIServerInternalIP -}} + bind_address = "{{`{{ .NonVirtualIP }}`}}" + collision_avoidance = "hostname" + + service { + name = "{{`{{ .Cluster.Name }}`}} Workstation" + host_name = "{{`{{ .ShortHostname }}`}}.local." + type = "_workstation._tcp" + domain = "local." + port = 42424 + ttl = 3200 + } + {{ end -}} + {{ end -}} + {{ end -}} + {{ end -}} + {{ end -}} diff --git a/templates/master/01-master-kubelet/on-prem/units/kubelet.service.yaml b/templates/master/01-master-kubelet/baremetal/units/kubelet.service.yaml similarity index 100% rename from templates/master/01-master-kubelet/on-prem/units/kubelet.service.yaml rename to templates/master/01-master-kubelet/baremetal/units/kubelet.service.yaml diff --git a/templates/master/01-master-kubelet/openstack/units/kubelet.service.yaml b/templates/master/01-master-kubelet/openstack/units/kubelet.service.yaml new file mode 100644 index 0000000000..726075348b --- /dev/null +++ b/templates/master/01-master-kubelet/openstack/units/kubelet.service.yaml @@ -0,0 +1,41 @@ +name: kubelet.service +enabled: true +contents: | + [Unit] + Description=Kubernetes Kubelet + Wants=rpc-statd.service network-online.target crio.service + After=network-online.target crio.service + + [Service] + Type=notify + ExecStartPre=/bin/mkdir --parents /etc/kubernetes/manifests + ExecStartPre=/bin/rm -f /var/lib/kubelet/cpu_manager_state + Environment="KUBELET_LOG_LEVEL=4" + EnvironmentFile=/etc/os-release + EnvironmentFile=-/etc/kubernetes/kubelet-workaround + EnvironmentFile=-/etc/kubernetes/kubelet-env + + ExecStart=/usr/bin/hyperkube \ + kubelet \ + --config=/etc/kubernetes/kubelet.conf \ + --bootstrap-kubeconfig=/etc/kubernetes/kubeconfig \ + --kubeconfig=/var/lib/kubelet/kubeconfig \ + --container-runtime=remote \ + --container-runtime-endpoint=/var/run/crio/crio.sock \ + --runtime-cgroups=/system.slice/crio.service \ + --node-labels=node-role.kubernetes.io/master,node.openshift.io/os_id=${ID} \ + --node-ip=${KUBELET_NODE_IP} \ + --address=${KUBELET_NODE_IP} \ + --minimum-container-ttl-duration=6m0s \ + --cloud-provider={{cloudProvider .}} \ + --volume-plugin-dir=/etc/kubernetes/kubelet-plugins/volume/exec \ + {{cloudConfigFlag . 
}} \ + --register-with-taints=node-role.kubernetes.io/master=:NoSchedule \ + --pod-infra-container-image={{.Images.infraImageKey}} \ + --v=${KUBELET_LOG_LEVEL} + + Restart=always + RestartSec=10 + + [Install] + WantedBy=multi-user.target diff --git a/templates/master/01-master-kubelet/vsphere/units/kubelet.service.yaml b/templates/master/01-master-kubelet/vsphere/units/kubelet.service.yaml new file mode 100644 index 0000000000..fae4c529ae --- /dev/null +++ b/templates/master/01-master-kubelet/vsphere/units/kubelet.service.yaml @@ -0,0 +1,51 @@ +name: kubelet.service +enabled: true +contents: | + [Unit] + Description=Kubernetes Kubelet + Wants=rpc-statd.service network-online.target crio.service + After=network-online.target crio.service + + [Service] + Type=notify + ExecStartPre=/bin/mkdir --parents /etc/kubernetes/manifests + ExecStartPre=/bin/rm -f /var/lib/kubelet/cpu_manager_state + Environment="KUBELET_LOG_LEVEL=4" + EnvironmentFile=/etc/os-release + EnvironmentFile=-/etc/kubernetes/kubelet-workaround + EnvironmentFile=-/etc/kubernetes/kubelet-env + + ExecStart=/usr/bin/hyperkube \ + kubelet \ + --config=/etc/kubernetes/kubelet.conf \ + --bootstrap-kubeconfig=/etc/kubernetes/kubeconfig \ + --kubeconfig=/var/lib/kubelet/kubeconfig \ + --container-runtime=remote \ + --container-runtime-endpoint=/var/run/crio/crio.sock \ + --runtime-cgroups=/system.slice/crio.service \ + --node-labels=node-role.kubernetes.io/master,node.openshift.io/os_id=${ID} \ + {{ if .Infra -}} + {{ if .Infra.Status -}} + {{ if .Infra.Status.PlatformStatus -}} + {{ if .Infra.Status.PlatformStatus.VSphere -}} + {{ if .Infra.Status.PlatformStatus.VSphere.APIServerInternalIP -}} + --node-ip=${KUBELET_NODE_IP} \ + --address=${KUBELET_NODE_IP} \ + {{ end -}} + {{ end -}} + {{ end -}} + {{ end -}} + {{ end -}} + --minimum-container-ttl-duration=6m0s \ + --cloud-provider={{cloudProvider .}} \ + --volume-plugin-dir=/etc/kubernetes/kubelet-plugins/volume/exec \ + {{cloudConfigFlag . }} \ + --register-with-taints=node-role.kubernetes.io/master=:NoSchedule \ + --pod-infra-container-image={{.Images.infraImageKey}} \ + --v=${KUBELET_LOG_LEVEL} + + Restart=always + RestartSec=10 + + [Install] + WantedBy=multi-user.target diff --git a/templates/worker/00-worker/on-prem/files/keepalived-keepalived.yaml b/templates/worker/00-worker/baremetal/files/baremetal-keepalived-keepalived.yaml similarity index 100% rename from templates/worker/00-worker/on-prem/files/keepalived-keepalived.yaml rename to templates/worker/00-worker/baremetal/files/baremetal-keepalived-keepalived.yaml diff --git a/templates/worker/00-worker/baremetal/files/baremetal-mdns-config.yaml b/templates/worker/00-worker/baremetal/files/baremetal-mdns-config.yaml new file mode 100644 index 0000000000..a050972ac6 --- /dev/null +++ b/templates/worker/00-worker/baremetal/files/baremetal-mdns-config.yaml @@ -0,0 +1,15 @@ +mode: 0644 +path: "/etc/kubernetes/static-pod-resources/mdns/config.hcl.tmpl" +contents: + inline: | + bind_address = "{{`{{ .NonVirtualIP }}`}}" + collision_avoidance = "hostname" + + service { + name = "{{`{{ .Cluster.Name }}`}} Workstation" + host_name = "{{`{{ .ShortHostname }}`}}.local." + type = "_workstation._tcp" + domain = "local." 
+ port = 42424 + ttl = 3200 + } diff --git a/templates/worker/00-worker/openstack/files/openstack-keepalived-keepalived.yaml b/templates/worker/00-worker/openstack/files/openstack-keepalived-keepalived.yaml new file mode 100644 index 0000000000..d5ec06da0a --- /dev/null +++ b/templates/worker/00-worker/openstack/files/openstack-keepalived-keepalived.yaml @@ -0,0 +1,28 @@ +mode: 0644 +path: "/etc/kubernetes/static-pod-resources/keepalived/keepalived.conf.tmpl" +contents: + inline: | + # TODO: Improve this check. The port is assumed to be alive. + # Need to assess what is the ramification if the port is not there. + vrrp_script chk_ingress { + script "/usr/bin/timeout 0.9 /usr/bin/curl -o /dev/null -Lfs http://localhost:1936/healthz/ready" + interval 1 + weight 50 + } + vrrp_instance {{`{{ .Cluster.Name }}`}}_INGRESS { + state BACKUP + interface {{`{{ .VRRPInterface }}`}} + virtual_router_id {{`{{ .Cluster.IngressVirtualRouterID }}`}} + priority 40 + advert_int 1 + authentication { + auth_type PASS + auth_pass {{`{{ .Cluster.Name }}`}}_ingress_vip + } + virtual_ipaddress { + {{`{{ .Cluster.IngressVIP }}`}}/{{`{{ .Cluster.VIPNetmask }}`}} + } + track_script { + chk_ingress + } + } diff --git a/templates/worker/00-worker/openstack/files/openstack-mdns-config.yaml b/templates/worker/00-worker/openstack/files/openstack-mdns-config.yaml new file mode 100644 index 0000000000..ad82c2e45d --- /dev/null +++ b/templates/worker/00-worker/openstack/files/openstack-mdns-config.yaml @@ -0,0 +1,14 @@ +mode: 0644 +path: "/etc/kubernetes/static-pod-resources/mdns/config.hcl.tmpl" +contents: + inline: | + bind_address = "{{`{{ .NonVirtualIP }}`}}" + collision_avoidance = "hostname" + service { + name = "{{`{{ .Cluster.Name }}`}} Workstation" + host_name = "{{`{{ .ShortHostname }}`}}.local." + type = "_workstation._tcp" + domain = "local." + port = 42424 + ttl = 3200 + } diff --git a/templates/worker/00-worker/ovirt/files/ovirt-keepalived-keepalived.yaml b/templates/worker/00-worker/ovirt/files/ovirt-keepalived-keepalived.yaml new file mode 100644 index 0000000000..37f64d04f7 --- /dev/null +++ b/templates/worker/00-worker/ovirt/files/ovirt-keepalived-keepalived.yaml @@ -0,0 +1,29 @@ +mode: 0644 +path: "/etc/kubernetes/static-pod-resources/keepalived/keepalived.conf.tmpl" +contents: + inline: | + # TODO: Improve this check. The port is assumed to be alive. + # Need to assess what is the ramification if the port is not there. 
+ vrrp_script chk_ingress { + script "/usr/bin/timeout 0.9 /usr/bin/curl -o /dev/null -Lfs http://localhost:1936/healthz/ready" + interval 1 + weight 50 + } + + vrrp_instance {{`{{ .Cluster.Name }}`}}_INGRESS { + state BACKUP + interface {{`{{ .VRRPInterface }}`}} + virtual_router_id {{`{{ .Cluster.IngressVirtualRouterID }}`}} + priority 40 + advert_int 1 + authentication { + auth_type PASS + auth_pass {{`{{ .Cluster.Name }}`}}_ingress_vip + } + virtual_ipaddress { + {{`{{ .Cluster.IngressVIP }}`}}/{{`{{ .Cluster.VIPNetmask }}`}} + } + track_script { + chk_ingress + } + } diff --git a/templates/worker/00-worker/ovirt/files/ovirt-mdns-config.yaml b/templates/worker/00-worker/ovirt/files/ovirt-mdns-config.yaml new file mode 100644 index 0000000000..a050972ac6 --- /dev/null +++ b/templates/worker/00-worker/ovirt/files/ovirt-mdns-config.yaml @@ -0,0 +1,15 @@ +mode: 0644 +path: "/etc/kubernetes/static-pod-resources/mdns/config.hcl.tmpl" +contents: + inline: | + bind_address = "{{`{{ .NonVirtualIP }}`}}" + collision_avoidance = "hostname" + + service { + name = "{{`{{ .Cluster.Name }}`}} Workstation" + host_name = "{{`{{ .ShortHostname }}`}}.local." + type = "_workstation._tcp" + domain = "local." + port = 42424 + ttl = 3200 + } diff --git a/templates/worker/00-worker/vsphere/files/vsphere-keepalived-keepalived.yaml b/templates/worker/00-worker/vsphere/files/vsphere-keepalived-keepalived.yaml new file mode 100644 index 0000000000..1a430edfcb --- /dev/null +++ b/templates/worker/00-worker/vsphere/files/vsphere-keepalived-keepalived.yaml @@ -0,0 +1,39 @@ +mode: 0644 +path: "/etc/kubernetes/static-pod-resources/keepalived/keepalived.conf.tmpl" +contents: + inline: | + {{ if .Infra -}} + {{ if .Infra.Status -}} + {{ if .Infra.Status.PlatformStatus -}} + {{ if .Infra.Status.PlatformStatus.VSphere -}} + {{ if .Infra.Status.PlatformStatus.VSphere.APIServerInternalIP -}} + # TODO: Improve this check. The port is assumed to be alive. + # Need to assess what is the ramification if the port is not there. + vrrp_script chk_ingress { + script "/usr/bin/timeout 0.9 /usr/bin/curl -o /dev/null -Lfs http://localhost:1936/healthz/ready" + interval 1 + weight 50 + } + + vrrp_instance {{`{{ .Cluster.Name }}`}}_INGRESS { + state BACKUP + interface {{`{{ .VRRPInterface }}`}} + virtual_router_id {{`{{ .Cluster.IngressVirtualRouterID }}`}} + priority 40 + advert_int 1 + authentication { + auth_type PASS + auth_pass {{`{{ .Cluster.Name }}`}}_ingress_vip + } + virtual_ipaddress { + {{`{{ .Cluster.IngressVIP }}`}}/{{`{{ .Cluster.VIPNetmask }}`}} + } + track_script { + chk_ingress + } + } + {{ end -}} + {{ end -}} + {{ end -}} + {{ end -}} + {{ end -}} diff --git a/templates/worker/00-worker/vsphere/files/vsphere-mdns-config.yaml b/templates/worker/00-worker/vsphere/files/vsphere-mdns-config.yaml new file mode 100644 index 0000000000..082d8c2453 --- /dev/null +++ b/templates/worker/00-worker/vsphere/files/vsphere-mdns-config.yaml @@ -0,0 +1,25 @@ +mode: 0644 +path: "/etc/kubernetes/static-pod-resources/mdns/config.hcl.tmpl" +contents: + inline: | + {{ if .Infra -}} + {{ if .Infra.Status -}} + {{ if .Infra.Status.PlatformStatus -}} + {{ if .Infra.Status.PlatformStatus.VSphere -}} + {{ if .Infra.Status.PlatformStatus.VSphere.APIServerInternalIP -}} + bind_address = "{{`{{ .NonVirtualIP }}`}}" + collision_avoidance = "hostname" + + service { + name = "{{`{{ .Cluster.Name }}`}} Workstation" + host_name = "{{`{{ .ShortHostname }}`}}.local." + type = "_workstation._tcp" + domain = "local." 
+ port = 42424 + ttl = 3200 + } + {{ end -}} + {{ end -}} + {{ end -}} + {{ end -}} + {{ end -}} diff --git a/templates/worker/01-worker-kubelet/on-prem/units/kubelet.service.yaml b/templates/worker/01-worker-kubelet/baremetal/units/kubelet.service.yaml similarity index 100% rename from templates/worker/01-worker-kubelet/on-prem/units/kubelet.service.yaml rename to templates/worker/01-worker-kubelet/baremetal/units/kubelet.service.yaml diff --git a/templates/worker/01-worker-kubelet/openstack/units/kubelet.service.yaml b/templates/worker/01-worker-kubelet/openstack/units/kubelet.service.yaml new file mode 100644 index 0000000000..00375bc825 --- /dev/null +++ b/templates/worker/01-worker-kubelet/openstack/units/kubelet.service.yaml @@ -0,0 +1,40 @@ +name: kubelet.service +enabled: true +contents: | + [Unit] + Description=Kubernetes Kubelet + Wants=rpc-statd.service network-online.target crio.service + After=network-online.target crio.service + + [Service] + Type=notify + ExecStartPre=/bin/mkdir --parents /etc/kubernetes/manifests + ExecStartPre=/bin/rm -f /var/lib/kubelet/cpu_manager_state + Environment="KUBELET_LOG_LEVEL=4" + EnvironmentFile=/etc/os-release + EnvironmentFile=-/etc/kubernetes/kubelet-workaround + EnvironmentFile=-/etc/kubernetes/kubelet-env + + ExecStart=/usr/bin/hyperkube \ + kubelet \ + --config=/etc/kubernetes/kubelet.conf \ + --bootstrap-kubeconfig=/etc/kubernetes/kubeconfig \ + --kubeconfig=/var/lib/kubelet/kubeconfig \ + --container-runtime=remote \ + --container-runtime-endpoint=/var/run/crio/crio.sock \ + --runtime-cgroups=/system.slice/crio.service \ + --node-labels=node-role.kubernetes.io/worker,node.openshift.io/os_id=${ID} \ + --node-ip=${KUBELET_NODE_IP} \ + --address=${KUBELET_NODE_IP} \ + --minimum-container-ttl-duration=6m0s \ + --volume-plugin-dir=/etc/kubernetes/kubelet-plugins/volume/exec \ + --cloud-provider={{cloudProvider .}} \ + {{cloudConfigFlag . 
}} \ + --pod-infra-container-image={{.Images.infraImageKey}} \ + --v=${KUBELET_LOG_LEVEL} + + Restart=always + RestartSec=10 + + [Install] + WantedBy=multi-user.target diff --git a/templates/worker/01-worker-kubelet/vsphere/units/kubelet.service.yaml b/templates/worker/01-worker-kubelet/vsphere/units/kubelet.service.yaml new file mode 100644 index 0000000000..281a0183c7 --- /dev/null +++ b/templates/worker/01-worker-kubelet/vsphere/units/kubelet.service.yaml @@ -0,0 +1,50 @@ +name: kubelet.service +enabled: true +contents: | + [Unit] + Description=Kubernetes Kubelet + Wants=rpc-statd.service network-online.target crio.service + After=network-online.target crio.service + + [Service] + Type=notify + ExecStartPre=/bin/mkdir --parents /etc/kubernetes/manifests + ExecStartPre=/bin/rm -f /var/lib/kubelet/cpu_manager_state + Environment="KUBELET_LOG_LEVEL=4" + EnvironmentFile=/etc/os-release + EnvironmentFile=-/etc/kubernetes/kubelet-workaround + EnvironmentFile=-/etc/kubernetes/kubelet-env + + ExecStart=/usr/bin/hyperkube \ + kubelet \ + --config=/etc/kubernetes/kubelet.conf \ + --bootstrap-kubeconfig=/etc/kubernetes/kubeconfig \ + --kubeconfig=/var/lib/kubelet/kubeconfig \ + --container-runtime=remote \ + --container-runtime-endpoint=/var/run/crio/crio.sock \ + --runtime-cgroups=/system.slice/crio.service \ + --node-labels=node-role.kubernetes.io/worker,node.openshift.io/os_id=${ID} \ + {{ if .Infra -}} + {{ if .Infra.Status -}} + {{ if .Infra.Status.PlatformStatus -}} + {{ if .Infra.Status.PlatformStatus.VSphere -}} + {{ if .Infra.Status.PlatformStatus.VSphere.APIServerInternalIP -}} + --node-ip=${KUBELET_NODE_IP} \ + --address=${KUBELET_NODE_IP} \ + {{ end -}} + {{ end -}} + {{ end -}} + {{ end -}} + {{ end -}} + --minimum-container-ttl-duration=6m0s \ + --volume-plugin-dir=/etc/kubernetes/kubelet-plugins/volume/exec \ + --cloud-provider={{cloudProvider .}} \ + {{cloudConfigFlag . }} \ + --pod-infra-container-image={{.Images.infraImageKey}} \ + --v=${KUBELET_LOG_LEVEL} + + Restart=always + RestartSec=10 + + [Install] + WantedBy=multi-user.target
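A note on the reload protocol shared by the haproxy and keepalived static pods restored above: the main container runs socat on a UNIX socket and treats the literal message "reload" as a request to re-exec (haproxy) or SIGHUP (keepalived) the daemon against the freshly rendered config, while the corresponding *-monitor sidecar writes the new config and then sends that message. A minimal sketch of the exchange, assuming a shell inside the pod; the socket paths come from the manifests above, everything else is illustrative:

    # haproxy: after the monitor renders /etc/haproxy/haproxy.cfg, ask for a reload
    echo reload | socat - UNIX-CONNECT:/var/run/haproxy/haproxy-master.sock

    # keepalived: same one-word protocol on its own socket
    echo reload | socat - UNIX-CONNECT:/var/run/keepalived/keepalived.sock

Any other message is only echoed to stderr, since 'reload' is the sole command the msg_handler functions recognize.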
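The nameserver-limiting sed in the NetworkManager resolv-prepender script is dense: ':a $!{N; ba}' slurps the whole file into the pattern space, and the '4g' flag (a GNU sed extension: substitute from the fourth match onward) comments out every nameserver entry past the third, which is all the glibc resolver consults anyway. An illustrative run with made-up addresses:

    # demonstrate the behavior on throwaway data, not a live resolv.conf
    printf 'nameserver 10.0.0.%s\n' 1 2 3 4 5 > /tmp/resolv.tmp
    sed -i ':a $!{N; ba}; s/\(^\|\n\)nameserver/\n# nameserver/4g' /tmp/resolv.tmp
    cat /tmp/resolv.tmp
    # the first three entries are untouched; the last two come back
    # commented out as '# nameserver 10.0.0.4' and '# nameserver 10.0.0.5'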
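The weights in the openstack and vsphere keepalived templates encode a preference order that is easier to see with the arithmetic written out: every node starts at VRRP priority 40, chk_ocp_lb adds 20, and chk_ocp_both adds 5 (it passes whenever chk_ocp_lb does, since it also accepts a healthy local apiserver). A back-of-the-envelope check using only the numbers from the configs above:

    base=40 lb=20 both=5
    echo "no healthy endpoint:       $base"                  # 40
    echo "local apiserver only:      $((base + both))"       # 45
    echo "load-balanced API healthy: $((base + lb + both))"  # 65

So a node whose load-balanced endpoint answers always outranks one that can only reach its local apiserver, matching the intent described in the template comments.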