diff --git a/cmd/gcp-routes-controller/README.md b/cmd/gcp-routes-controller/README.md new file mode 100644 index 0000000000..86db108248 --- /dev/null +++ b/cmd/gcp-routes-controller/README.md @@ -0,0 +1,23 @@ +# gcp-routes-controller + +## Background + +Google cloud load balancer is a L3LB that is special. It doesn't do DNAT; instead, it +just redirects traffic to backends and preserves the VIP as the destination IP. + +So, an agent exists on the node. It programs the node (either via iptables or routing tables) to +accept traffic destined for the VIP. However, this has a problem: all hairpin traffic +to the balanced service is *always* handled by that backend, even if it is down +or otherwise out of rotation. + +We want to withdraw the internal API service from google-routes redirection when +it's down, or else the node (i.e. kubelet) loses access to the apiserver VIP +and becomes unmanageable. + +## Functionality + +The gcp-routes-controller is installed on all the masters and monitors the +apiserver process /readyz. + +When /readyz fails, stops the VIP routing by writing `/run/gcp-routes/VIP.down`, +which tells openshift-gcp-routes to skip that vip diff --git a/cmd/gcp-routes-controller/run.go b/cmd/gcp-routes-controller/run.go index be2255d01b..cda8ea9c6b 100644 --- a/cmd/gcp-routes-controller/run.go +++ b/cmd/gcp-routes-controller/run.go @@ -4,11 +4,13 @@ import ( "crypto/tls" "flag" "fmt" + "net" "net/http" "net/url" "os" "os/exec" "os/signal" + "path" "sync" "syscall" "time" @@ -32,16 +34,33 @@ var ( runOpts struct { gcpRoutesService string rootMount string - - healthCheckURL string + healthCheckURL string + vip string } ) +// downFileDir is the directory in which gcp-routes will look for a flag-file that +// indicates the route to the VIP should be withdrawn. 
+const downFileDir = "/run/gcp-routes" + func init() { rootCmd.AddCommand(runCmd) - runCmd.PersistentFlags().StringVar(&runOpts.gcpRoutesService, "gcp-routes-service", "gcp-routes.service", "The name for the service controlling gcp routes on host") - runCmd.PersistentFlags().StringVar(&runOpts.rootMount, "root-mount", "/rootfs", "where the nodes root filesystem is mounted for chroot and file manipulation.") + runCmd.PersistentFlags().StringVar(&runOpts.gcpRoutesService, "gcp-routes-service", "openshift-gcp-routes.service", "The name for the service controlling gcp routes on host") + runCmd.PersistentFlags().StringVar(&runOpts.rootMount, "root-mount", "/rootfs", "where the nodes root filesystem is mounted for writing down files or chrooting.") runCmd.PersistentFlags().StringVar(&runOpts.healthCheckURL, "health-check-url", "", "HTTP(s) URL for the health check") + runCmd.PersistentFlags().StringVar(&runOpts.vip, "vip", "", "The VIP to remove if the health check fails. Determined from URL if not provided") +} + +type downMode int + +const ( + modeStopService = iota + modeDownFile +) + +type handler struct { + mode downMode + vip string } func runRunCmd(cmd *cobra.Command, args []string) error { @@ -51,18 +70,6 @@ func runRunCmd(cmd *cobra.Command, args []string) error { // To help debugging, immediately log version glog.Infof("Version: %+v (%s)", version.Raw, version.Hash) - if runOpts.rootMount != "" { - glog.Infof(`Calling chroot("%s")`, runOpts.rootMount) - if err := syscall.Chroot(runOpts.rootMount); err != nil { - return fmt.Errorf("Unable to chroot to %s: %s", runOpts.rootMount, err) - } - - glog.V(2).Infof("Moving to / inside the chroot") - if err := os.Chdir("/"); err != nil { - return fmt.Errorf("Unable to change directory to /: %s", err) - } - } - uri, err := url.Parse(runOpts.healthCheckURL) if err != nil { return fmt.Errorf("failed to parse health-check-url: %v", err) @@ -71,6 +78,14 @@ func runRunCmd(cmd *cobra.Command, args []string) error { return 
fmt.Errorf("invalid URI %q (no scheme)", uri) } + handler, err := newHandler(uri) + if err != nil { + return err + } + + // The health check should always connect to localhost, not be load-balanced + uri.Host = net.JoinHostPort("localhost", uri.Port()) + httpCheck, err := checkers.NewHTTP(&checkers.HTTPConfig{ URL: uri, Client: &http.Client{Transport: &http.Transport{ @@ -82,22 +97,27 @@ func runRunCmd(cmd *cobra.Command, args []string) error { if err != nil { return fmt.Errorf("failed to create httpCheck: %v", err) } - errCh := make(chan error) + + // careful: the timing here needs to correspond to the load balancer's + // parameters. We need to remove routes just after we've been removed + // as a backend in the load-balancer, and add routes before we've been + // re-added. + // see openshift/installer/data/data/gcp/network/lb-private.tf tracker := &healthTracker{ state: unknownTrackerState, ErrCh: errCh, - SuccessThreshold: 2, - FailureThreshold: 10, - OnFailure: func() error { return exec.Command("systemctl", "stop", runOpts.gcpRoutesService).Run() }, - OnSuccess: func() error { return exec.Command("systemctl", "start", runOpts.gcpRoutesService).Run() }, + SuccessThreshold: 1, + FailureThreshold: 8, // LB = 6 seconds, plus 10 seconds for propagation + OnFailure: handler.onFailure, + OnSuccess: handler.onSuccess, } h := health.New() h.AddChecks([]*health.Config{{ Name: "dependency-check", Checker: httpCheck, - Interval: time.Duration(5) * time.Second, + Interval: time.Duration(2) * time.Second, Fatal: true, OnComplete: tracker.OnComplete, }}) @@ -111,11 +131,10 @@ func runRunCmd(cmd *cobra.Command, args []string) error { go func() { for sig := range c { glog.Infof("Signal %s received: shutting down gcp routes service", sig) - if err := exec.Command("systemctl", "stop", runOpts.gcpRoutesService).Run(); err != nil { - glog.Infof("Failed to terminate gcp routes service on signal: %s", err) - } else { - break + if err := handler.onFailure(); err != nil { + 
glog.Infof("Failed to mark service down on signal: %s", err) } + os.Exit(0) } }() @@ -129,6 +148,90 @@ func runRunCmd(cmd *cobra.Command, args []string) error { } } +func newHandler(uri *url.URL) (*handler, error) { + h := handler{} + + // determine mode: if /run/gcp-routes exists, we can use the downfile mode + realPath := path.Join(runOpts.rootMount, downFileDir) + fi, err := os.Stat(realPath) + if err == nil && fi.IsDir() { + glog.Infof("%s exists, starting in downfile mode", realPath) + h.mode = modeDownFile + } else { + glog.Infof("%s not accessible, will stop gcp-routes.service on health failure", realPath) + h.mode = modeStopService + } + + // if StopService mode and rootfs specified, chroot + if h.mode == modeStopService && runOpts.rootMount != "" { + glog.Infof(`Calling chroot("%s")`, runOpts.rootMount) + if err := syscall.Chroot(runOpts.rootMount); err != nil { + return nil, fmt.Errorf("unable to chroot to %s: %s", runOpts.rootMount, err) + } + + glog.V(2).Infof("Moving to / inside the chroot") + if err := os.Chdir("/"); err != nil { + return nil, fmt.Errorf("unable to change directory to /: %s", err) + } + } + + // otherwise, resolve vip + if h.mode == modeDownFile { + if runOpts.vip != "" { + h.vip = runOpts.vip + } else { + addrs, err := net.LookupHost(uri.Hostname()) + if err != nil { + return nil, fmt.Errorf("failed to lookup host %s: %v", uri.Hostname(), err) + } + if len(addrs) != 1 { + return nil, fmt.Errorf("hostname %s has %d addresses, expected 1 - aborting", uri.Hostname(), len(addrs)) + } + h.vip = addrs[0] + glog.Infof("Using VIP %s", h.vip) + } + } + + return &h, nil +} + +// onFailure: either stop the routes service, or write downfile +func (h *handler) onFailure() error { + if h.mode == modeDownFile { + downFile := path.Join(runOpts.rootMount, downFileDir, fmt.Sprintf("%s.down", h.vip)) + fp, err := os.OpenFile(downFile, os.O_CREATE, 0644) + if err != nil { + return fmt.Errorf("failed to create downfile (%s): %v", downFile, err) + } + _ = 
fp.Close() + glog.Infof("healthcheck failed, created downfile %s", downFile) + } else { + if err := exec.Command("systemctl", "stop", runOpts.gcpRoutesService).Run(); err != nil { + return fmt.Errorf("Failed to terminate gcp routes service %v", err) + } + glog.Infof("healthcheck failed, stopped %s", runOpts.gcpRoutesService) + } + return nil +} + +// onSuccess: either start routes service, or remove down file +func (h *handler) onSuccess() error { + if h.mode == modeDownFile { + downFile := path.Join(runOpts.rootMount, downFileDir, fmt.Sprintf("%s.down", h.vip)) + err := os.Remove(downFile) + if err != nil && !os.IsNotExist(err) { + return fmt.Errorf("failed to remove downfile (%s): %v", downFile, err) + } + glog.Infof("healthcheck succeeded, removed downfile %s", downFile) + } else { + if err := exec.Command("systemctl", "start", runOpts.gcpRoutesService).Run(); err != nil { + return fmt.Errorf("Failed to start gcp routes service %v", err) + } + glog.Infof("healthcheck succeeded, started %s", runOpts.gcpRoutesService) + } + return nil +} + type trackerState int const ( diff --git a/pkg/controller/template/test_data/controller_config_aws.yaml b/pkg/controller/template/test_data/controller_config_aws.yaml index 6ccc2e141b..bf6583e45e 100644 --- a/pkg/controller/template/test_data/controller_config_aws.yaml +++ b/pkg/controller/template/test_data/controller_config_aws.yaml @@ -15,3 +15,11 @@ spec: setupEtcdEnv: image/setupEtcdEnv:1 infraImage: image/infraImage:1 kubeClientAgentImage: image/kubeClientAgentImage:1 + infra: + apiVersion: config.openshift.io/v1 + kind: Infrastructure + status: + apiServerInternalURI: https://api-int.my-test-cluster.installer.team.coreos.systems:6443 + apiServerURL: https://api.my-test-cluster.installer.team.coreos.systems:6443 + etcdDiscoveryDomain: my-test-cluster.installer.team.coreos.systems + infrastructureName: my-test-cluster diff --git a/pkg/controller/template/test_data/controller_config_baremetal.yaml 
b/pkg/controller/template/test_data/controller_config_baremetal.yaml index 6853694942..17102dd35d 100644 --- a/pkg/controller/template/test_data/controller_config_baremetal.yaml +++ b/pkg/controller/template/test_data/controller_config_baremetal.yaml @@ -16,7 +16,17 @@ spec: infraImage: image/infraImage:1 kubeClientAgentImage: image/kubeClientAgentImage:1 infra: + apiVersion: config.openshift.io/v1 + kind: Infrastructure + spec: + cloudConfig: + key: config + name: cloud-provider-config status: + apiServerInternalURI: https://api-int.my-test-cluster.installer.team.coreos.systems:6443 + apiServerURL: https://api.my-test-cluster.installer.team.coreos.systems:6443 + etcdDiscoveryDomain: my-test-cluster.installer.team.coreos.systems + infrastructureName: my-test-cluster platformStatus: baremetal: apiServerInternalIP: 10.0.0.1 diff --git a/pkg/controller/template/test_data/controller_config_gcp.yaml b/pkg/controller/template/test_data/controller_config_gcp.yaml index 4341bebca0..5c0d984e6f 100644 --- a/pkg/controller/template/test_data/controller_config_gcp.yaml +++ b/pkg/controller/template/test_data/controller_config_gcp.yaml @@ -14,4 +14,12 @@ spec: etcd: image/etcd:1 setupEtcdEnv: image/setupEtcdEnv:1 infraImage: image/infraImage:1 - kubeClientAgentImage: image/kubeClientAgentImage:1 \ No newline at end of file + kubeClientAgentImage: image/kubeClientAgentImage:1 + infra: + apiVersion: config.openshift.io/v1 + kind: Infrastructure + status: + apiServerInternalURI: https://api-int.my-test-cluster.installer.team.coreos.systems:6443 + apiServerURL: https://api.my-test-cluster.installer.team.coreos.systems:6443 + etcdDiscoveryDomain: my-test-cluster.installer.team.coreos.systems + infrastructureName: my-test-cluster diff --git a/pkg/controller/template/test_data/controller_config_libvirt.yaml b/pkg/controller/template/test_data/controller_config_libvirt.yaml index 20e29a9a3c..adb4f36a19 100644 --- a/pkg/controller/template/test_data/controller_config_libvirt.yaml +++ 
b/pkg/controller/template/test_data/controller_config_libvirt.yaml @@ -15,3 +15,11 @@ spec: setupEtcdEnv: image/setupEtcdEnv:1 infraImage: image/infraImage:1 kubeClientAgentImage: image/kubeClientAgentImage:1 + infra: + apiVersion: config.openshift.io/v1 + kind: Infrastructure + status: + apiServerInternalURI: https://api-int.my-test-cluster.installer.team.coreos.systems:6443 + apiServerURL: https://api.my-test-cluster.installer.team.coreos.systems:6443 + etcdDiscoveryDomain: my-test-cluster.installer.team.coreos.systems + infrastructureName: my-test-cluster diff --git a/pkg/controller/template/test_data/controller_config_none.yaml b/pkg/controller/template/test_data/controller_config_none.yaml index e5b9812b74..8efb03f85a 100644 --- a/pkg/controller/template/test_data/controller_config_none.yaml +++ b/pkg/controller/template/test_data/controller_config_none.yaml @@ -15,3 +15,11 @@ spec: setupEtcdEnv: image/setupEtcdEnv:1 infraImage: image/infraImage:1 kubeClientAgentImage: image/kubeClientAgentImage:1 + infra: + apiVersion: config.openshift.io/v1 + kind: Infrastructure + status: + apiServerInternalURI: https://api-int.my-test-cluster.installer.team.coreos.systems:6443 + apiServerURL: https://api.my-test-cluster.installer.team.coreos.systems:6443 + etcdDiscoveryDomain: my-test-cluster.installer.team.coreos.systems + infrastructureName: my-test-cluster diff --git a/pkg/controller/template/test_data/controller_config_openstack.yaml b/pkg/controller/template/test_data/controller_config_openstack.yaml index 22e7d9cd28..ea6940ecd9 100644 --- a/pkg/controller/template/test_data/controller_config_openstack.yaml +++ b/pkg/controller/template/test_data/controller_config_openstack.yaml @@ -20,7 +20,13 @@ spec: infraImage: image/infraImage:1 kubeClientAgentImage: image/kubeClientAgentImage:1 infra: + apiVersion: config.openshift.io/v1 + kind: Infrastructure status: + apiServerInternalURI: https://api-int.my-test-cluster.installer.team.coreos.systems:6443 + apiServerURL: 
https://api.my-test-cluster.installer.team.coreos.systems:6443 + etcdDiscoveryDomain: my-test-cluster.installer.team.coreos.systems + infrastructureName: my-test-cluster platformStatus: openstack: apiServerInternalIP: 10.0.0.1 diff --git a/pkg/controller/template/test_data/controller_config_ovirt.yaml b/pkg/controller/template/test_data/controller_config_ovirt.yaml index 5e634fa952..5ad3e1133c 100644 --- a/pkg/controller/template/test_data/controller_config_ovirt.yaml +++ b/pkg/controller/template/test_data/controller_config_ovirt.yaml @@ -19,3 +19,11 @@ spec: setupEtcdEnv: image/setupEtcdEnv:1 infraImage: image/infraImage:1 kubeClientAgentImage: image/kubeClientAgentImage:1 + infra: + apiVersion: config.openshift.io/v1 + kind: Infrastructure + status: + apiServerInternalURI: https://api-int.my-test-cluster.installer.team.coreos.systems:6443 + apiServerURL: https://api.my-test-cluster.installer.team.coreos.systems:6443 + etcdDiscoveryDomain: my-test-cluster.installer.team.coreos.systems + infrastructureName: my-test-cluster diff --git a/pkg/controller/template/test_data/controller_config_vsphere.yaml b/pkg/controller/template/test_data/controller_config_vsphere.yaml index 90f2b8b30a..9b2f8ba903 100644 --- a/pkg/controller/template/test_data/controller_config_vsphere.yaml +++ b/pkg/controller/template/test_data/controller_config_vsphere.yaml @@ -20,7 +20,13 @@ spec: infraImage: image/infraImage:1 kubeClientAgentImage: image/kubeClientAgentImage:1 infra: + apiVersion: config.openshift.io/v1 + kind: Infrastructure status: + apiServerInternalURI: https://api-int.my-test-cluster.installer.team.coreos.systems:6443 + apiServerURL: https://api.my-test-cluster.installer.team.coreos.systems:6443 + etcdDiscoveryDomain: my-test-cluster.installer.team.coreos.systems + infrastructureName: my-test-cluster platformStatus: vsphere: apiServerInternalIP: 10.0.0.1 diff --git a/templates/master/00-master/gcp/files/etc-kubernetes-manifests-gcp-routes-controller.yaml 
b/templates/master/00-master/gcp/files/etc-kubernetes-manifests-gcp-routes-controller.yaml index 602bc22130..cc7fb63087 100644 --- a/templates/master/00-master/gcp/files/etc-kubernetes-manifests-gcp-routes-controller.yaml +++ b/templates/master/00-master/gcp/files/etc-kubernetes-manifests-gcp-routes-controller.yaml @@ -15,7 +15,7 @@ contents: command: ["gcp-routes-controller"] args: - "run" - - "--health-check-url=https://127.0.0.1:6443/readyz" + - "--health-check-url={{.Infra.Status.APIServerInternalURL}}/readyz" resources: requests: cpu: 20m diff --git a/templates/master/00-master/gcp/files/opt-libexec-openshift-gcp-routes-sh.yaml b/templates/master/00-master/gcp/files/opt-libexec-openshift-gcp-routes-sh.yaml new file mode 100644 index 0000000000..87c1d4a1a6 --- /dev/null +++ b/templates/master/00-master/gcp/files/opt-libexec-openshift-gcp-routes-sh.yaml @@ -0,0 +1,165 @@ +filesystem: "root" +mode: 0755 +path: "/opt/libexec/openshift-gcp-routes.sh" +contents: + inline: | + #!/bin/bash + + # Update iptables rules based on google cloud load balancer VIPS + # + # This is needed because the GCP L3 load balancer doesn't actually do DNAT; + # the destination IP address is still the VIP. Normally, there is an agent that + # adds the vip to the local routing table, tricking the kernel in to thinking + # it's a local IP and allowing processes doing an accept(0.0.0.0) to receive + # the packets. Clever. + # + # We don't do that. Instead, we DNAT with conntrack. This is so we don't break + # existing connections when the vip is removed. This is useful for draining + # connections - take ourselves out of the vip, but service existing conns. + # + # Additionally, clients can write a file to /run/gcp-routes/$IP.down to force + # a VIP as down. This is useful for graceful shutdown / upgrade. 
+ # + # ~cdc~ + + set -e + + # the list of load balancer IPs that are assigned to this node + declare -A vips + + curler() { + curl --silent -L -H "Metadata-Flavor: Google" "http://metadata.google.internal/computeMetadata/v1/instance/${1}" + } + + CHAIN_NAME="gcp-vips" + RUN_DIR="/run/gcp-routes" + + # Create a chain if it doesn't exist + ensure_chain() { + local table="${1}" + local chain="${2}" + + if ! iptables -w -t "${table}" -S "${chain}" &> /dev/null ; then + iptables -w -t "${table}" -N "${chain}"; + fi; + } + + ensure_rule() { + local table="${1}" + local chain="${2}" + shift 2 + + if ! iptables -w -t "${table}" -C "${chain}" "$@" &> /dev/null; then + iptables -w -t "${table}" -A "${chain}" "$@" + fi + } + + # set the chain, ensure entry rules, ensure ESTABLISHED rule + initialize() { + ensure_chain nat "${CHAIN_NAME}" + ensure_chain nat "${CHAIN_NAME}-local" + ensure_rule nat PREROUTING -m comment --comment 'gcp LB vip DNAT' -j ${CHAIN_NAME} + ensure_rule nat OUTPUT -m comment --comment 'gcp LB vip DNAT for local clients' -j ${CHAIN_NAME}-local + + # Need this so that existing flows (with an entry in conntrack) continue to be + # balanced, even if the DNAT entry is removed + ensure_rule filter INPUT -m comment --comment 'gcp LB vip existing' -m addrtype ! 
--dst-type LOCAL -m state --state ESTABLISHED,RELATED -j ACCEPT + + mkdir -p "${RUN_DIR}" + } + + remove_stale() { + ## find extra iptables rules + for ipt_vip in $(iptables -w -t nat -S "${CHAIN_NAME}" | awk '$4{print $4}' | awk -F/ '{print $1}'); do + if [[ -z "${vips[${ipt_vip}]}" ]]; then + echo removing stale vip "${ipt_vip}" for external clients + iptables -w -t nat -D "${CHAIN_NAME}" --dst "${ipt_vip}" -j REDIRECT + fi + done + for ipt_vip in $(iptables -w -t nat -S "${CHAIN_NAME}-local" | awk '$4{print $4}' | awk -F/ '{print $1}'); do + if [[ -z "${vips[${ipt_vip}]}" ]] || [[ "${vips[${ipt_vip}]}" = down ]]; then + echo removing stale vip "${ipt_vip}" for local clients + iptables -w -t nat -D "${CHAIN_NAME}-local" --dst "${ipt_vip}" -j REDIRECT + fi + done + } + + add_rules() { + for vip in "${!vips[@]}"; do + echo "ensuring rule for ${vip} for external clients" + ensure_rule nat "${CHAIN_NAME}" --dst "${vip}" -j REDIRECT + + if [[ "${vips[${vip}]}" != down ]]; then + echo "ensuring rule for ${vip} for internal clients" + ensure_rule nat "${CHAIN_NAME}-local" --dst "${vip}" -j REDIRECT + fi + done + } + + clear_rules() { + iptables -t nat -F "${CHAIN_NAME}" || true + iptables -t nat -F "${CHAIN_NAME}-local" || true + } + + # out parameter: vips + list_lb_ips() { + for k in "${!vips[@]}"; do + unset vips["${k}"] + done + + local net_path="network-interfaces/" + for vif in $(curler ${net_path}); do + local hw_addr; hw_addr=$(curler "${net_path}${vif}mac") + local fwip_path; fwip_path="${net_path}${vif}forwarded-ips/" + for level in $(curler "${fwip_path}"); do + for fwip in $(curler "${fwip_path}${level}"); do + if [[ -e "${RUN_DIR}/${fwip}.down" ]]; then + echo "${fwip} is manually marked as down, skipping for internal clients..." 
+ vips[${fwip}]="down" + else + echo "Processing route for NIC ${vif}${hw_addr} for ${fwip}" + vips[${fwip}]="${fwip}" + fi + done + done + done + } + + sleep_or_watch() { + if hash inotifywait &> /dev/null; then + inotifywait -t 30 -r "${RUN_DIR}" &> /dev/null || true + else + # no inotify, need to manually poll + for i in {0..5}; do + for vip in "${!vips[@]}"; do + if [[ "${vips[${vip}]}" != down ]] && [[ -e "${RUN_DIR}/${vip}.down" ]]; then + echo "new downfile detected" + break 2 + elif [[ "${vips[${vip}]}" = down ]] && ! [[ -e "${RUN_DIR}/${vip}.down" ]]; then + echo "downfile disappeared" + break 2 + fi + done + sleep 5 + done + fi + } + + case "$1" in + start) + initialize + while :; do + list_lb_ips + remove_stale + add_rules + echo "done applying vip rules" + sleep_or_watch + done + ;; + cleanup) + clear_rules + ;; + *) + echo $"Usage: $0 {start|cleanup}" + exit 1 + esac diff --git a/templates/master/00-master/gcp/units/gcp-routes.service b/templates/master/00-master/gcp/units/gcp-routes.service new file mode 100644 index 0000000000..bd069de7d0 --- /dev/null +++ b/templates/master/00-master/gcp/units/gcp-routes.service @@ -0,0 +1,6 @@ +name: gcp-routes.service +dropins: +- name: mco-disabled.conf + contents: | + [Unit] + ConditionPathExists=/enoent diff --git a/templates/master/00-master/gcp/units/openshift-gcp-routes.service b/templates/master/00-master/gcp/units/openshift-gcp-routes.service new file mode 100644 index 0000000000..1d5d8ba3ed --- /dev/null +++ b/templates/master/00-master/gcp/units/openshift-gcp-routes.service @@ -0,0 +1,19 @@ +name: "openshift-gcp-routes.service" +enabled: true +contents: | + [Unit] + Description=Update GCP routes for forwarded IPs. 
+ ConditionKernelCommandLine=|ignition.platform.id=gce + ConditionKernelCommandLine=|ignition.platform.id=gcp + After=network.target + + [Service] + Type=simple + ExecStart=/bin/bash /opt/libexec/openshift-gcp-routes.sh start + ExecStopPost=/bin/bash /opt/libexec/openshift-gcp-routes.sh cleanup + User=root + RestartSec=30 + Restart=always + + [Install] + WantedBy=multi-user.target