diff --git a/data/.gitignore b/data/.gitignore new file mode 100644 index 00000000000..2dfc18167aa --- /dev/null +++ b/data/.gitignore @@ -0,0 +1 @@ +/assets_vfsdata.go diff --git a/data/BUILD.bazel b/data/BUILD.bazel new file mode 100644 index 00000000000..975c83922b4 --- /dev/null +++ b/data/BUILD.bazel @@ -0,0 +1,17 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = [ + "assets.go", + "unpack.go", + ], + importpath = "github.com/openshift/installer/data", + visibility = ["//visibility:public"], +) + +go_test( + name = "go_default_test", + srcs = ["unpack_test.go"], + embed = [":go_default_library"], +) diff --git a/data/assets.go b/data/assets.go new file mode 100644 index 00000000000..eea87cb48f4 --- /dev/null +++ b/data/assets.go @@ -0,0 +1,20 @@ +// +build !release +//go:generate go run assets_generate.go + +package data + +import ( + "net/http" + "os" +) + +// Assets contains project assets. +var Assets http.FileSystem + +func init() { + dir := os.Getenv("OPENSHIFT_INSTALL_DATA") + if dir == "" { + dir = "data" + } + Assets = http.Dir(dir) +} diff --git a/data/assets_generate.go b/data/assets_generate.go new file mode 100644 index 00000000000..5163175e35e --- /dev/null +++ b/data/assets_generate.go @@ -0,0 +1,21 @@ +// +build ignore + +package main + +import ( + "log" + + "github.com/openshift/installer/data" + "github.com/shurcooL/vfsgen" +) + +func main() { + err := vfsgen.Generate(data.Assets, vfsgen.Options{ + PackageName: "data", + BuildTags: "release", + VariableName: "Assets", + }) + if err != nil { + log.Fatalln(err) + } +} diff --git a/data/data/config.tf b/data/data/config.tf new file mode 100644 index 00000000000..a70992438f3 --- /dev/null +++ b/data/data/config.tf @@ -0,0 +1,336 @@ +terraform { + required_version = ">= 0.10.7" +} + +provider "archive" { + version = "1.0.0" +} + +provider "external" { + version = "1.0.0" +} + +provider "ignition" { + version = "1.0.0" +} + 
+provider "local" { + version = "1.0.0" +} + +provider "null" { + version = "1.0.0" +} + +provider "random" { + version = "1.0.0" +} + +provider "template" { + version = "1.0.0" +} + +provider "tls" { + version = "1.0.1" +} + +variable "tectonic_config_version" { + description = < +data "ignition_file" "manifest_file_list" { + count = "${length(var.manifest_names)}" + filesystem = "root" + mode = "0644" + + path = "/opt/tectonic/manifests/${var.manifest_names[count.index]}" + + content { + content = "${data.template_file.manifest_file_list.*.rendered[count.index]}" + } +} + +# Log the generated manifest files to disk for debugging and user visibility +# Dest: ./generated/manifests/ +resource "local_file" "manifest_files" { + count = "${length(var.manifest_names)}" + filename = "./generated/manifests/${var.manifest_names[count.index]}" + content = "${data.template_file.manifest_file_list.*.rendered[count.index]}" +} diff --git a/data/data/modules/bootkube/outputs.tf b/data/data/modules/bootkube/outputs.tf new file mode 100644 index 00000000000..90f9412960b --- /dev/null +++ b/data/data/modules/bootkube/outputs.tf @@ -0,0 +1,30 @@ +output "kubeconfig-kubelet" { + value = "${data.template_file.kubeconfig-kubelet.rendered}" +} + +output "systemd_service_id" { + value = "${data.ignition_systemd_unit.bootkube_service.id}" +} + +output "kube_dns_service_ip" { + value = "${cidrhost(var.service_cidr, 10)}" +} + +output "kubeconfig_rendered" { + value = "${data.template_file.kubeconfig.rendered}" +} + +output "kubeconfig-kubelet_rendered" { + value = "${data.template_file.kubeconfig-kubelet.rendered}" +} + +output "ignition_file_id_list" { + value = ["${flatten(list( + list( + data.ignition_file.bootkube_sh.id, + data.ignition_file.kubeconfig.id, + data.ignition_file.kubeconfig-kubelet.id, + ), + data.ignition_file.manifest_file_list.*.id, + ))}"] +} diff --git a/data/data/modules/bootkube/resources/bootkube.service b/data/data/modules/bootkube/resources/bootkube.service new 
file mode 100644 index 00000000000..9f9231ba8ae --- /dev/null +++ b/data/data/modules/bootkube/resources/bootkube.service @@ -0,0 +1,12 @@ +[Unit] +Description=Bootstrap a Kubernetes cluster +Wants=kubelet.service +After=kubelet.service + +[Service] +WorkingDirectory=/opt/tectonic + +ExecStart=/opt/tectonic/bootkube.sh + +Restart=on-failure +RestartSec=5s diff --git a/data/data/modules/bootkube/resources/bootkube.sh b/data/data/modules/bootkube/resources/bootkube.sh new file mode 100644 index 00000000000..a8bfa0567f2 --- /dev/null +++ b/data/data/modules/bootkube/resources/bootkube.sh @@ -0,0 +1,121 @@ +#!/usr/bin/env bash +set -e + +mkdir --parents /etc/kubernetes/manifests/ + +if [ ! -d kco-bootstrap ] +then + echo "Rendering Kubernetes core manifests..." + + # shellcheck disable=SC2154 + podman run \ + --volume "$PWD:/assets:z" \ + --volume /etc/kubernetes:/etc/kubernetes:z \ + "${kube_core_renderer_image}" \ + --config=/assets/kco-config.yaml \ + --output=/assets/kco-bootstrap + + cp --recursive kco-bootstrap/bootstrap-configs /etc/kubernetes/bootstrap-configs + cp --recursive kco-bootstrap/bootstrap-manifests . + cp --recursive kco-bootstrap/manifests . +fi + +if [ ! -d "mco-bootstrap" ] +then + echo "Rendering MCO manifests..." + + # shellcheck disable=SC2154 + podman run \ + --user 0 \ + --volume "$PWD:/assets:z" \ + "${machine_config_operator_image}" \ + bootstrap \ + --etcd-ca=/assets/tls/etcd-client-ca.crt \ + --root-ca=/assets/tls/root-ca.crt \ + --config-file=/assets/manifests/cluster-config.yaml \ + --dest-dir=/assets/mco-bootstrap \ + --images-json-configmap=/assets/manifests/machine-config-operator-01-images-configmap.yaml + + # Bootstrap MachineConfigController uses /etc/mcc/bootstrap/manifests/ dir to + # 1. read the controller config rendered by MachineConfigOperator + # 2. read the default MachineConfigPools rendered by MachineConfigOperator + # 3. read any additional MachineConfigs that are needed for the default MachineConfigPools. 
+ mkdir --parents /etc/mcc/bootstrap/ + cp --recursive mco-bootstrap/manifests /etc/mcc/bootstrap/manifests + cp mco-bootstrap/machineconfigoperator-bootstrap-pod.yaml /etc/kubernetes/manifests/ + + # /etc/ssl/mcs/tls.{crt, key} are locations for MachineConfigServer's tls assets. + mkdir --parents /etc/ssl/mcs/ + cp tls/machine-config-server.crt /etc/ssl/mcs/tls.crt + cp tls/machine-config-server.key /etc/ssl/mcs/tls.key +fi + +# We originally wanted to run the etcd cert signer as +# a static pod, but kubelet could't remove static pod +# when API server is not up, so we have to run this as +# podman container. +# See https://github.com/kubernetes/kubernetes/issues/43292 + +echo "Starting etcd certificate signer..." + +trap "podman rm --force etcd-signer" ERR + +# shellcheck disable=SC2154 +podman run \ + --name etcd-signer \ + --detach \ + --tmpfs /tmp \ + --volume /opt/tectonic/tls:/opt/tectonic/tls:ro,z \ + --network host \ + "${etcd_cert_signer_image}" \ + serve \ + --cacrt=/opt/tectonic/tls/etcd-client-ca.crt \ + --cakey=/opt/tectonic/tls/etcd-client-ca.key \ + --servcrt=/opt/tectonic/tls/apiserver.crt \ + --servkey=/opt/tectonic/tls/apiserver.key \ + --address=0.0.0.0:6443 \ + --csrdir=/tmp \ + --peercertdur=26280h \ + --servercertdur=26280h + +echo "Waiting for etcd cluster..." + +# Wait for the etcd cluster to come up. +set +e +# shellcheck disable=SC2154,SC2086 +until podman run \ + --rm \ + --network host \ + --name etcdctl \ + --env ETCDCTL_API=3 \ + --volume /opt/tectonic/tls:/opt/tectonic/tls:ro,z \ + "${etcdctl_image}" \ + /usr/local/bin/etcdctl \ + --dial-timeout=10m \ + --cacert=/opt/tectonic/tls/etcd-client-ca.crt \ + --cert=/opt/tectonic/tls/etcd-client.crt \ + --key=/opt/tectonic/tls/etcd-client.key \ + --endpoints=${etcd_cluster} \ + endpoint health +do + echo "etcdctl failed. Retrying in 5 seconds..." + sleep 5 +done +set -e + +echo "etcd cluster up. Killing etcd certificate signer..." 
+ +podman rm --force etcd-signer +rm --force /etc/kubernetes/manifests/machineconfigoperator-bootstrap-pod.yaml + +echo "Starting bootkube..." + +# shellcheck disable=SC2154 +podman run \ + --rm \ + --volume "$PWD:/assets:z" \ + --volume /etc/kubernetes:/etc/kubernetes:z \ + --network=host \ + --entrypoint=/bootkube \ + "${bootkube_image}" \ + start --asset-dir=/assets diff --git a/data/data/modules/bootkube/resources/kubeconfig b/data/data/modules/bootkube/resources/kubeconfig new file mode 100644 index 00000000000..c7f674750ac --- /dev/null +++ b/data/data/modules/bootkube/resources/kubeconfig @@ -0,0 +1,16 @@ +apiVersion: v1 +kind: Config +clusters: +- name: ${cluster_name} + cluster: + server: ${server} + certificate-authority-data: ${root_ca_cert} +users: +- name: admin + user: + client-certificate-data: ${admin_cert} + client-key-data: ${admin_key} +contexts: +- context: + cluster: ${cluster_name} + user: admin diff --git a/data/data/modules/bootkube/resources/kubeconfig-kubelet b/data/data/modules/bootkube/resources/kubeconfig-kubelet new file mode 100644 index 00000000000..321029fd640 --- /dev/null +++ b/data/data/modules/bootkube/resources/kubeconfig-kubelet @@ -0,0 +1,16 @@ +apiVersion: v1 +kind: Config +clusters: +- name: ${cluster_name} + cluster: + server: ${server} + certificate-authority-data: ${root_ca_cert} +users: +- name: kubelet + user: + client-certificate-data: ${client_cert} + client-key-data: ${client_key} +contexts: +- context: + cluster: ${cluster_name} + user: kubelet diff --git a/data/data/modules/bootkube/resources/manifests/01-tectonic-namespace.yaml b/data/data/modules/bootkube/resources/manifests/01-tectonic-namespace.yaml new file mode 100644 index 00000000000..34ae8d95f3f --- /dev/null +++ b/data/data/modules/bootkube/resources/manifests/01-tectonic-namespace.yaml @@ -0,0 +1,7 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: tectonic-system # Create the namespace first. 
+ labels: # network policy can only select by labels + name: tectonic-system + openshift.io/run-level: "1" diff --git a/data/data/modules/bootkube/resources/manifests/02-ingress-namespace.yaml b/data/data/modules/bootkube/resources/manifests/02-ingress-namespace.yaml new file mode 100644 index 00000000000..b02ff7d5c2c --- /dev/null +++ b/data/data/modules/bootkube/resources/manifests/02-ingress-namespace.yaml @@ -0,0 +1,10 @@ +apiVersion: v1 +kind: Namespace +metadata: + # This is the namespace used to hold the tectonic ingress controllers + name: openshift-ingress + # Give the namespace a label, so we can select for it in networkpolicy + labels: + kubernetes.io/ingress.class: tectonic + name: openshift-ingress + openshift.io/run-level: "1" diff --git a/data/data/modules/bootkube/resources/manifests/03-openshift-web-console-namespace.yaml b/data/data/modules/bootkube/resources/manifests/03-openshift-web-console-namespace.yaml new file mode 100644 index 00000000000..cbea3467240 --- /dev/null +++ b/data/data/modules/bootkube/resources/manifests/03-openshift-web-console-namespace.yaml @@ -0,0 +1,8 @@ +apiVersion: v1 +kind: Namespace +metadata: + # This is the namespace used to hold the openshift console. + # They require openshift console run in this namespace. 
+ name: openshift-web-console + labels: + name: openshift-web-console diff --git a/data/data/modules/bootkube/resources/manifests/04-openshift-machine-config-operator.yaml b/data/data/modules/bootkube/resources/manifests/04-openshift-machine-config-operator.yaml new file mode 100644 index 00000000000..83c8515e59a --- /dev/null +++ b/data/data/modules/bootkube/resources/manifests/04-openshift-machine-config-operator.yaml @@ -0,0 +1,7 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: openshift-machine-config-operator + labels: + name: openshift-machine-config-operator + openshift.io/run-level: "1" diff --git a/data/data/modules/bootkube/resources/manifests/05-openshift-cluster-api-namespace.yaml b/data/data/modules/bootkube/resources/manifests/05-openshift-cluster-api-namespace.yaml new file mode 100644 index 00000000000..7dabda5f37a --- /dev/null +++ b/data/data/modules/bootkube/resources/manifests/05-openshift-cluster-api-namespace.yaml @@ -0,0 +1,8 @@ +apiVersion: v1 +kind: Namespace +metadata: + # This is the namespace used to hold cluster-api components. 
+ name: openshift-cluster-api + labels: + name: openshift-cluster-api + openshift.io/run-level: "1" diff --git a/data/data/modules/bootkube/resources/manifests/app-version-kind.yaml b/data/data/modules/bootkube/resources/manifests/app-version-kind.yaml new file mode 100644 index 00000000000..ebcd04e9ebe --- /dev/null +++ b/data/data/modules/bootkube/resources/manifests/app-version-kind.yaml @@ -0,0 +1,10 @@ +apiVersion: "apiextensions.k8s.io/v1beta1" +kind: "CustomResourceDefinition" +metadata: + name: "appversions.tco.coreos.com" +spec: + group: "tco.coreos.com" + version: "v1" + names: + plural: "appversions" + kind: "AppVersion" diff --git a/data/data/modules/bootkube/resources/manifests/app-version-mao.yaml b/data/data/modules/bootkube/resources/manifests/app-version-mao.yaml new file mode 100644 index 00000000000..050b991cc58 --- /dev/null +++ b/data/data/modules/bootkube/resources/manifests/app-version-mao.yaml @@ -0,0 +1,15 @@ +apiVersion: tco.coreos.com/v1 +kind: AppVersion +metadata: + name: machine-api + namespace: tectonic-system + labels: + managed-by-channel-operator: "true" +spec: + desiredVersion: + paused: false +status: + currentVersion: + paused: false +upgradereq: 1 +upgradecomp: 0 diff --git a/data/data/modules/bootkube/resources/manifests/app-version-tectonic-network.yaml b/data/data/modules/bootkube/resources/manifests/app-version-tectonic-network.yaml new file mode 100644 index 00000000000..08a54f655ba --- /dev/null +++ b/data/data/modules/bootkube/resources/manifests/app-version-tectonic-network.yaml @@ -0,0 +1,15 @@ +apiVersion: tco.coreos.com/v1 +kind: AppVersion +metadata: + name: tectonic-network + namespace: kube-system + labels: + managed-by-channel-operator: "true" +spec: + desiredVersion: + paused: false +status: + currentVersion: + paused: false +upgradereq: 1 +upgradecomp: 0 diff --git a/data/data/modules/bootkube/resources/manifests/cluster-apiserver-certs.yaml 
b/data/data/modules/bootkube/resources/manifests/cluster-apiserver-certs.yaml new file mode 100644 index 00000000000..3dc79fd3413 --- /dev/null +++ b/data/data/modules/bootkube/resources/manifests/cluster-apiserver-certs.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +kind: Secret +type: kubernetes.io/tls +metadata: + name: cluster-apiserver-certs + namespace: openshift-cluster-api + labels: + api: clusterapi + apiserver: "true" +data: + tls.crt: ${clusterapi_ca_cert} + tls.key: ${clusterapi_ca_key} diff --git a/data/data/modules/bootkube/resources/manifests/ign-config.yaml b/data/data/modules/bootkube/resources/manifests/ign-config.yaml new file mode 100644 index 00000000000..1e914fbadee --- /dev/null +++ b/data/data/modules/bootkube/resources/manifests/ign-config.yaml @@ -0,0 +1,8 @@ +apiVersion: v1 +kind: Secret +metadata: + name: ignition-worker + namespace: openshift-cluster-api +type: Opaque +data: + userData: ${worker_ign_config} diff --git a/data/data/modules/bootkube/resources/manifests/kube-apiserver-secret.yaml b/data/data/modules/bootkube/resources/manifests/kube-apiserver-secret.yaml new file mode 100644 index 00000000000..9e13eb8736f --- /dev/null +++ b/data/data/modules/bootkube/resources/manifests/kube-apiserver-secret.yaml @@ -0,0 +1,24 @@ +apiVersion: v1 +kind: Secret +metadata: + name: kube-apiserver + namespace: kube-system +type: Opaque +data: + aggregator-ca.crt: ${aggregator_ca_cert} + aggregator-ca.key: ${aggregator_ca_key} + apiserver.key: ${apiserver_key} + apiserver.crt: ${apiserver_cert} + apiserver-proxy.key: ${apiserver_proxy_key} + apiserver-proxy.crt: ${apiserver_proxy_cert} + service-account.pub: ${serviceaccount_pub} + service-account.key: ${serviceaccount_key} + root-ca.crt: ${root_ca_cert} + kube-ca.crt: ${kube_ca_cert} + etcd-client-ca.crt: ${etcd_ca_cert} + etcd-client.crt: ${etcd_client_cert} + etcd-client.key: ${etcd_client_key} + oidc-ca.crt: ${oidc_ca_cert} + service-serving-ca.crt: ${service_serving_ca_cert} + 
service-serving-ca.key: ${service_serving_ca_key} + kubeconfig: ${openshift_loopback_kubeconfig} diff --git a/data/data/modules/bootkube/resources/manifests/kube-cloud-config.yaml b/data/data/modules/bootkube/resources/manifests/kube-cloud-config.yaml new file mode 100644 index 00000000000..02d0846a9ab --- /dev/null +++ b/data/data/modules/bootkube/resources/manifests/kube-cloud-config.yaml @@ -0,0 +1,8 @@ +apiVersion: v1 +kind: Secret +metadata: + name: kube-cloud-cfg + namespace: kube-system +type: Opaque +data: + config: ${base64encode(cloud_provider_config)} diff --git a/data/data/modules/bootkube/resources/manifests/kube-controller-manager-secret.yaml b/data/data/modules/bootkube/resources/manifests/kube-controller-manager-secret.yaml new file mode 100644 index 00000000000..7a3c83dc970 --- /dev/null +++ b/data/data/modules/bootkube/resources/manifests/kube-controller-manager-secret.yaml @@ -0,0 +1,11 @@ +apiVersion: v1 +kind: Secret +metadata: + name: kube-controller-manager + namespace: kube-system +type: Opaque +data: + service-account.key: ${serviceaccount_key} + root-ca.crt: ${root_ca_cert} + kube-ca.crt: ${kube_ca_cert} + kube-ca.key: ${kube_ca_key} diff --git a/data/data/modules/bootkube/resources/manifests/machine-api-operator.yaml b/data/data/modules/bootkube/resources/manifests/machine-api-operator.yaml new file mode 100644 index 00000000000..125b870bc3d --- /dev/null +++ b/data/data/modules/bootkube/resources/manifests/machine-api-operator.yaml @@ -0,0 +1,54 @@ +apiVersion: apps/v1beta2 +kind: Deployment +metadata: + name: machine-api-operator + namespace: kube-system + labels: + k8s-app: machine-api-operator + managed-by-channel-operator: "true" +spec: + replicas: 1 + selector: + matchLabels: + k8s-app: machine-api-operator + template: + metadata: + labels: + k8s-app: machine-api-operator + tectonic-app-version-name: machine-api + spec: + containers: + - name: machine-api-operator + image: quay.io/coreos/machine-api-operator:b6a04c2 + command: + - 
"/machine-api-operator" + resources: + limits: + cpu: 20m + memory: 50Mi + requests: + cpu: 20m + memory: 50Mi + volumeMounts: + - name: cluster-config + mountPath: /etc/mao-config + imagePullSecrets: + - name: coreos-pull-secret + nodeSelector: + node-role.kubernetes.io/master: "" + restartPolicy: Always + securityContext: + runAsNonRoot: true + runAsUser: 65534 + tolerations: + - key: "node-role.kubernetes.io/master" + operator: "Exists" + effect: "NoSchedule" + volumes: + - name: cluster-config + configMap: + name: cluster-config-v1 + items: + - key: mao-config + path: config + diff --git a/data/data/modules/bootkube/resources/manifests/machine-config-operator-00-config-crd.yaml b/data/data/modules/bootkube/resources/manifests/machine-config-operator-00-config-crd.yaml new file mode 100644 index 00000000000..99ec6196194 --- /dev/null +++ b/data/data/modules/bootkube/resources/manifests/machine-config-operator-00-config-crd.yaml @@ -0,0 +1,24 @@ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + # name must match the spec fields below, and be in the form: . + name: mcoconfigs.machineconfiguration.openshift.io +spec: + # group name to use for REST API: /apis// + group: machineconfiguration.openshift.io + # list of versions supported by this CustomResourceDefinition + versions: + - name: v1 + # Each version can be enabled/disabled by Served flag. + served: true + # One and only one version must be marked as the storage version. + storage: true + # either Namespaced or Cluster + scope: Namespaced + names: + # plural name to be used in the URL: /apis/// + plural: mcoconfigs + # singular name to be used as an alias on the CLI and for display + singular: mcoconfig + # kind is normally the CamelCased singular type. Your resource manifests use this. 
+ kind: MCOConfig diff --git a/data/data/modules/bootkube/resources/manifests/machine-config-operator-01-images-configmap.yaml b/data/data/modules/bootkube/resources/manifests/machine-config-operator-01-images-configmap.yaml new file mode 100644 index 00000000000..79bed0f86d2 --- /dev/null +++ b/data/data/modules/bootkube/resources/manifests/machine-config-operator-01-images-configmap.yaml @@ -0,0 +1,7 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: machine-config-operator-images + namespace: openshift-machine-config-operator +data: + images.json: '{"machineConfigController": "docker.io/openshift/origin-machine-config-controller:v4.0.0", "machineConfigDaemon": "docker.io/openshift/origin-machine-config-daemon:v4.0.0", "machineConfigServer": "docker.io/openshift/origin-machine-config-server:v4.0.0"}' diff --git a/data/data/modules/bootkube/resources/manifests/machine-config-operator-02-rbac.yaml b/data/data/modules/bootkube/resources/manifests/machine-config-operator-02-rbac.yaml new file mode 100644 index 00000000000..cd69091915e --- /dev/null +++ b/data/data/modules/bootkube/resources/manifests/machine-config-operator-02-rbac.yaml @@ -0,0 +1,13 @@ +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1beta1 +metadata: + name: default-account-openshift-machine-config-operator +subjects: +- kind: ServiceAccount + name: default + namespace: openshift-machine-config-operator +roleRef: + kind: ClusterRole + name: cluster-admin + apiGroup: rbac.authorization.k8s.io diff --git a/data/data/modules/bootkube/resources/manifests/machine-config-operator-03-deployment.yaml b/data/data/modules/bootkube/resources/manifests/machine-config-operator-03-deployment.yaml new file mode 100644 index 00000000000..10343fe9155 --- /dev/null +++ b/data/data/modules/bootkube/resources/manifests/machine-config-operator-03-deployment.yaml @@ -0,0 +1,57 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: machine-config-operator + namespace: 
openshift-machine-config-operator + labels: + k8s-app: machine-config-operator +spec: + replicas: 1 + selector: + matchLabels: + k8s-app: machine-config-operator + template: + metadata: + labels: + k8s-app: machine-config-operator + spec: + containers: + - name: machine-config-operator + image: ${machine_config_operator_image} + args: + - "start" + - "--images-json=/etc/mco/images/images.json" + resources: + limits: + cpu: 20m + memory: 50Mi + requests: + cpu: 20m + memory: 50Mi + volumeMounts: + - name: root-ca + mountPath: /etc/ssl/kubernetes/ca.crt + - name: etcd-ca + mountPath: /etc/ssl/etcd/ca.crt + - name: images + mountPath: /etc/mco/images + nodeSelector: + node-role.kubernetes.io/master: "" + restartPolicy: Always + securityContext: + runAsNonRoot: true + runAsUser: 65534 + tolerations: + - key: "node-role.kubernetes.io/master" + operator: "Exists" + effect: "NoSchedule" + volumes: + - name: images + configMap: + name: machine-config-operator-images + - name: etcd-ca + hostPath: + path: /etc/ssl/etcd/ca.crt + - name: root-ca + hostPath: + path: /etc/kubernetes/ca.crt diff --git a/data/data/modules/bootkube/resources/manifests/machine-config-server-tls-secret.yaml b/data/data/modules/bootkube/resources/manifests/machine-config-server-tls-secret.yaml new file mode 100644 index 00000000000..5856850b5d1 --- /dev/null +++ b/data/data/modules/bootkube/resources/manifests/machine-config-server-tls-secret.yaml @@ -0,0 +1,9 @@ +apiVersion: v1 +kind: Secret +metadata: + name: machine-config-server-tls + namespace: openshift-machine-config-operator +type: Opaque +data: + tls.crt: ${mcs_tls_cert} + tls.key: ${mcs_tls_key} diff --git a/data/data/modules/bootkube/resources/manifests/openshift-apiserver-secret.yaml b/data/data/modules/bootkube/resources/manifests/openshift-apiserver-secret.yaml new file mode 100644 index 00000000000..a45f61587c7 --- /dev/null +++ b/data/data/modules/bootkube/resources/manifests/openshift-apiserver-secret.yaml @@ -0,0 +1,26 @@ 
+apiVersion: v1 +kind: Secret +metadata: + name: openshift-apiserver + namespace: kube-system +type: Opaque +data: + aggregator-ca.crt: ${aggregator_ca_cert} + aggregator-ca.key: ${aggregator_ca_key} + apiserver.key: ${apiserver_key} + apiserver.crt: ${apiserver_cert} + openshift-apiserver.key: ${openshift_apiserver_key} + openshift-apiserver.crt: ${openshift_apiserver_cert} + apiserver-proxy.key: ${apiserver_proxy_key} + apiserver-proxy.crt: ${apiserver_proxy_cert} + service-account.pub: ${serviceaccount_pub} + service-account.key: ${serviceaccount_key} + root-ca.crt: ${root_ca_cert} + kube-ca.crt: ${kube_ca_cert} + etcd-client-ca.crt: ${etcd_ca_cert} + etcd-client.crt: ${etcd_client_cert} + etcd-client.key: ${etcd_client_key} + oidc-ca.crt: ${oidc_ca_cert} + service-serving-ca.crt: ${service_serving_ca_cert} + service-serving-ca.key: ${service_serving_ca_key} + kubeconfig: ${openshift_loopback_kubeconfig} diff --git a/data/data/modules/bootkube/resources/manifests/operatorstatus-crd.yaml b/data/data/modules/bootkube/resources/manifests/operatorstatus-crd.yaml new file mode 100644 index 00000000000..c560f5c3b6f --- /dev/null +++ b/data/data/modules/bootkube/resources/manifests/operatorstatus-crd.yaml @@ -0,0 +1,24 @@ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + # name must match the spec fields below, and be in the form: . + name: operatorstatuses.clusterversion.openshift.io +spec: + # group name to use for REST API: /apis// + group: clusterversion.openshift.io + # list of versions supported by this CustomResourceDefinition + versions: + - name: v1 + # Each version can be enabled/disabled by Served flag. + served: true + # One and only one version must be marked as the storage version. 
+ storage: true + # either Namespaced or Cluster + scope: Namespaced + names: + # plural name to be used in the URL: /apis/// + plural: operatorstatuses + # singular name to be used as an alias on the CLI and for display + singular: operatorstatus + # kind is normally the CamelCased singular type. Your resource manifests use this. + kind: OperatorStatus diff --git a/data/data/modules/bootkube/resources/manifests/pull.json b/data/data/modules/bootkube/resources/manifests/pull.json new file mode 100644 index 00000000000..a442286df29 --- /dev/null +++ b/data/data/modules/bootkube/resources/manifests/pull.json @@ -0,0 +1,12 @@ +{ + "apiVersion": "v1", + "kind": "Secret", + "type": "kubernetes.io/dockerconfigjson", + "metadata": { + "namespace": "kube-system", + "name": "coreos-pull-secret" + }, + "data": { + ".dockerconfigjson": "${pull_secret}" + } +} diff --git a/data/data/modules/bootkube/resources/manifests/tectonic-network-operator.yaml b/data/data/modules/bootkube/resources/manifests/tectonic-network-operator.yaml new file mode 100644 index 00000000000..c05c7023a23 --- /dev/null +++ b/data/data/modules/bootkube/resources/manifests/tectonic-network-operator.yaml @@ -0,0 +1,59 @@ +apiVersion: apps/v1beta2 +kind: DaemonSet +metadata: + name: tectonic-network-operator + namespace: kube-system + labels: + k8s-app: tectonic-network-operator + managed-by-channel-operator: "true" +spec: + selector: + matchLabels: + k8s-app: tectonic-network-operator + updateStrategy: + rollingUpdate: + maxUnavailable: 1 + type: RollingUpdate + template: + metadata: + labels: + k8s-app: tectonic-network-operator + tectonic-app-version-name: tectonic-network + spec: + containers: + - name: tectonic-network-operator + image: ${tectonic_network_operator_image} + resources: + limits: + cpu: 20m + memory: 50Mi + requests: + cpu: 20m + memory: 50Mi + volumeMounts: + - name: cluster-config + mountPath: /etc/cluster-config + hostNetwork: true + restartPolicy: Always + imagePullSecrets: + - name: 
coreos-pull-secret + securityContext: + runAsNonRoot: true + runAsUser: 65534 + volumes: + - name: cluster-config + configMap: + name: cluster-config-v1 + items: + - key: network-config + path: network-config + nodeSelector: + node-role.kubernetes.io/master: "" + tolerations: + - key: "node-role.kubernetes.io/master" + operator: "Exists" + effect: "NoSchedule" + updateStrategy: + rollingUpdate: + maxUnavailable: 1 + type: RollingUpdate diff --git a/data/data/modules/bootkube/variables.tf b/data/data/modules/bootkube/variables.tf new file mode 100644 index 00000000000..0935eda3443 --- /dev/null +++ b/data/data/modules/bootkube/variables.tf @@ -0,0 +1,168 @@ +variable "apiserver_cert_pem" { + type = "string" + description = "The API server certificate in PEM format." +} + +variable "apiserver_key_pem" { + type = "string" + description = "The API server key in PEM format." +} + +variable "openshift_apiserver_cert_pem" { + type = "string" + description = "The Openshift API server certificate in PEM format." +} + +variable "openshift_apiserver_key_pem" { + type = "string" + description = "The Openshift API server key in PEM format." +} + +variable "apiserver_proxy_cert_pem" { + type = "string" + description = "The API server proxy certificate in PEM format." +} + +variable "apiserver_proxy_key_pem" { + type = "string" + description = "The API server proxy key in PEM format." +} + +variable "cloud_provider_config" { + description = "Content of cloud provider config" + type = "string" + default = "" +} + +variable "cluster_name" { + type = "string" +} + +variable "container_images" { + description = "Container images to use" + type = "map" +} + +variable "etcd_ca_cert_pem" { + type = "string" + description = "The etcd CA certificate in PEM format." +} + +variable "etcd_client_cert_pem" { + type = "string" + description = "The etcd client certificate in PEM format." 
+} + +variable "etcd_client_key_pem" { + type = "string" + description = "The etcd client key in PEM format." +} + +variable "etcd_endpoints" { + description = "List of etcd endpoints to connect with (hostnames/IPs only)" + type = "list" +} + +variable "kube_apiserver_url" { + description = "URL used to reach kube-apiserver" + type = "string" +} + +variable "root_ca_cert_pem" { + type = "string" + description = "The Root CA in PEM format." +} + +variable "aggregator_ca_cert_pem" { + type = "string" + description = "The Aggregated API Server CA in PEM format." +} + +variable "aggregator_ca_key_pem" { + type = "string" + description = "The Aggregated API Server CA key in PEM format." +} + +variable "kube_ca_cert_pem" { + type = "string" + description = "The Kubernetes CA in PEM format." +} + +variable "kube_ca_key_pem" { + type = "string" + description = "The Kubernetes CA key in PEM format." +} + +variable "service_serving_ca_cert_pem" { + type = "string" + description = "The Service Serving CA in PEM format." +} + +variable "service_serving_ca_key_pem" { + type = "string" + description = "The Service Serving CA key in PEM format." +} + +variable "admin_cert_pem" { + type = "string" + description = "The admin certificate in PEM format." +} + +variable "admin_key_pem" { + type = "string" + description = "The admin key in PEM format." +} + +variable "kubelet_cert_pem" { + type = "string" + description = "The kubelet certificate in PEM format." +} + +variable "kubelet_key_pem" { + type = "string" + description = "The kubelet key in PEM format." 
+} + +variable "mcs_cert_pem" { + type = "string" +} + +variable "mcs_key_pem" { + type = "string" +} + +variable "service_account_public_key_pem" { + type = "string" +} + +variable "service_account_private_key_pem" { + type = "string" +} + +variable "oidc_ca_cert" { + type = "string" +} + +variable "clusterapi_ca_cert_pem" { + type = "string" +} + +variable "clusterapi_ca_key_pem" { + type = "string" +} + +variable "service_cidr" { + description = "A CIDR notation IP range from which to assign service cluster IPs" + type = "string" +} + +variable "pull_secret" { + type = "string" + description = "Your pull secret. Obtain this from your Tectonic Account: https://account.coreos.com." +} + +variable "worker_ign_config" { + description = "Worker ignition config" + type = "string" + default = "" +} diff --git a/data/data/modules/dns/route53/master.tf b/data/data/modules/dns/route53/master.tf new file mode 100644 index 00000000000..bed1331372b --- /dev/null +++ b/data/data/modules/dns/route53/master.tf @@ -0,0 +1,8 @@ +resource "aws_route53_record" "master_nodes" { + count = "${var.elb_alias_enabled ? 
0 : var.master_count}" + zone_id = "${data.aws_route53_zone.tectonic.zone_id}" + name = "${var.cluster_name}-master-${count.index}" + type = "A" + ttl = "60" + records = ["${var.master_ip_addresses[count.index]}"] +} diff --git a/data/data/modules/dns/route53/outputs.tf b/data/data/modules/dns/route53/outputs.tf new file mode 100644 index 00000000000..13d7fc887b3 --- /dev/null +++ b/data/data/modules/dns/route53/outputs.tf @@ -0,0 +1,35 @@ +output "worker_nodes" { + value = "${aws_route53_record.worker_nodes.*.fqdn}" +} + +output "worker_nodes_public" { + value = "${aws_route53_record.worker_nodes_public.*.fqdn}" +} + +output "master_nodes" { + value = "${aws_route53_record.master_nodes.*.fqdn}" +} + +output "ingress_external_fqdn" { + value = "${element(concat(aws_route53_record.tectonic_ingress_public.*.name, list("")), 0)}" +} + +output "ingress_internal_fqdn" { + value = "${element(concat(aws_route53_record.tectonic_ingress_private.*.name, list("")), 0)}" +} + +output "routes_external_fqdn" { + value = "${element(concat(aws_route53_record.routes_ingress_public.*.name, list("")), 0)}" +} + +output "routes_internal_fqdn" { + value = "${element(concat(aws_route53_record.routes_ingress_private.*.name, list("")), 0)}" +} + +output "api_external_fqdn" { + value = "${element(concat(aws_route53_record.tectonic_api_external.*.name, list("")), 0)}" +} + +output "api_internal_fqdn" { + value = "${element(concat(aws_route53_record.tectonic_api_internal.*.name, list("")), 0)}" +} diff --git a/data/data/modules/dns/route53/tectonic.tf b/data/data/modules/dns/route53/tectonic.tf new file mode 100644 index 00000000000..78772f46331 --- /dev/null +++ b/data/data/modules/dns/route53/tectonic.tf @@ -0,0 +1,112 @@ +locals { + public_endpoints_count = "${var.public_endpoints ? 1 : 0}" + private_endpoints_count = "${var.private_endpoints ? 
1 : 0}" +} + +data "aws_route53_zone" "tectonic" { + name = "${var.base_domain}" +} + +locals { + public_zone_id = "${join("", data.aws_route53_zone.tectonic.*.zone_id)}" + + zone_id = "${var.private_endpoints ? + var.private_zone_id : + local.public_zone_id}" +} + +resource "aws_route53_record" "tectonic_api" { + count = "${var.elb_alias_enabled ? 0 : 1}" + zone_id = "${local.public_zone_id}" + name = "${var.cluster_name}-k8s" + type = "A" + ttl = "60" + records = ["${var.api_ip_addresses}"] +} + +resource "aws_route53_record" "tectonic_api_external" { + count = "${var.elb_alias_enabled ? local.public_endpoints_count : 0}" + zone_id = "${local.public_zone_id}" + name = "${var.cluster_name}-api.${var.base_domain}" + type = "A" + + alias { + name = "${var.api_external_elb_dns_name}" + zone_id = "${var.api_external_elb_zone_id}" + evaluate_target_health = true + } +} + +resource "aws_route53_record" "tectonic_api_internal" { + count = "${var.elb_alias_enabled ? local.private_endpoints_count : 0}" + zone_id = "${var.private_zone_id}" + name = "${var.cluster_name}-api.${var.base_domain}" + type = "A" + + alias { + name = "${var.api_internal_elb_dns_name}" + zone_id = "${var.api_internal_elb_zone_id}" + evaluate_target_health = true + } +} + +resource "aws_route53_record" "tectonic-console" { + count = "${var.elb_alias_enabled ? 0 : 1}" + zone_id = "${local.public_zone_id}" + name = "${var.cluster_name}" + type = "A" + ttl = "60" + records = ["${var.worker_ip_addresses}"] +} + +resource "aws_route53_record" "tectonic_ingress_public" { + count = "${var.elb_alias_enabled ? local.public_endpoints_count : 0}" + zone_id = "${local.public_zone_id}" + name = "${var.cluster_name}.${var.base_domain}" + type = "A" + + alias { + name = "${var.console_elb_dns_name}" + zone_id = "${var.console_elb_zone_id}" + evaluate_target_health = true + } +} + +resource "aws_route53_record" "tectonic_ingress_private" { + count = "${var.elb_alias_enabled ? 
local.private_endpoints_count : 0}" + zone_id = "${var.private_zone_id}" + name = "${var.cluster_name}.${var.base_domain}" + type = "A" + + alias { + name = "${var.console_elb_dns_name}" + zone_id = "${var.console_elb_zone_id}" + evaluate_target_health = true + } +} + +resource "aws_route53_record" "routes_ingress_public" { + count = "${var.elb_alias_enabled ? local.public_endpoints_count : 0}" + zone_id = "${local.public_zone_id}" + name = "*.${var.cluster_name}.${var.base_domain}" + type = "A" + + alias { + name = "${var.console_elb_dns_name}" + zone_id = "${var.console_elb_zone_id}" + evaluate_target_health = true + } +} + +resource "aws_route53_record" "routes_ingress_private" { + count = "${var.elb_alias_enabled ? local.private_endpoints_count : 0}" + zone_id = "${var.private_zone_id}" + name = "*.${var.cluster_name}.${var.base_domain}" + type = "A" + + alias { + name = "${var.console_elb_dns_name}" + zone_id = "${var.console_elb_zone_id}" + evaluate_target_health = true + } +} diff --git a/data/data/modules/dns/route53/variables.tf b/data/data/modules/dns/route53/variables.tf new file mode 100644 index 00000000000..9bc319fd715 --- /dev/null +++ b/data/data/modules/dns/route53/variables.tf @@ -0,0 +1,131 @@ +variable "cluster_id" { + description = "Cluster ID" + type = "string" +} + +variable "cluster_name" { + description = "The name of the cluster" + type = "string" +} + +variable "base_domain" { + description = "The base domain used in records" + type = "string" +} + +variable "master_count" { + description = "The number of masters" + type = "string" +} + +variable "worker_count" { + description = "The number of workers" + type = "string" + default = "0" +} + +variable "master_ip_addresses" { + description = "List of string IPs for masters" + type = "list" + default = [] +} + +variable "worker_ip_addresses" { + description = "List of string IPs for workers" + type = "list" + default = [] +} + +variable "worker_public_ips" { + description = "(optional) List 
of string public IPs for workers" + type = "list" + default = [] +} + +// hack: worker_public_ips_enabled is a workaround for https://github.com/hashicorp/terraform/issues/10857 +variable "worker_public_ips_enabled" { + description = "Worker nodes have public IPs assigned. worker_public_ips must be provided if true." + default = true +} + +variable "api_ip_addresses" { + description = "List of string IPs for k8s API" + type = "list" +} + +variable "extra_tags" { + type = "map" + description = "Extra tags to be applied to created resources." +} + +// AWS specific internal zone variables + +variable "elb_alias_enabled" { + description = < /etc/kubernetes/ca.crt" +Environment=KUBELET_RUNTIME_REQUEST_TIMEOUT=10m +EnvironmentFile=-/etc/kubernetes/kubelet-env + +ExecStart=/usr/bin/hyperkube \ + kubelet \ + --bootstrap-kubeconfig=/etc/kubernetes/kubeconfig \ + --kubeconfig=/var/lib/kubelet/kubeconfig \ + --rotate-certificates \ + --container-runtime=remote \ + --container-runtime-endpoint=/var/run/crio/crio.sock \ + --runtime-request-timeout=$${KUBELET_RUNTIME_REQUEST_TIMEOUT} \ + --lock-file=/var/run/lock/kubelet.lock \ + --exit-on-lock-contention \ + --pod-manifest-path=/etc/kubernetes/manifests \ + --allow-privileged \ + --node-labels=${node_label} \ + --minimum-container-ttl-duration=6m0s \ + --cluster-dns=${cluster_dns_ip} \ + --cluster-domain=cluster.local \ + --client-ca-file=/etc/kubernetes/ca.crt \ + --cloud-provider=${cloud_provider} \ + --anonymous-auth=false \ + --cgroup-driver=systemd \ + --serialize-image-pulls=false \ + ${cloud_provider_config} \ + ${debug_config} \ + ${node_taints_param} \ + +Restart=always +RestartSec=10 + +[Install] +WantedBy=multi-user.target diff --git a/data/data/modules/ignition/variables.tf b/data/data/modules/ignition/variables.tf new file mode 100644 index 00000000000..9c697fc4e3c --- /dev/null +++ b/data/data/modules/ignition/variables.tf @@ -0,0 +1,71 @@ +variable "container_images" { + description = "Container images to use" + 
type = "map" +} + +variable "image_re" { + description = < +data "ignition_file" "tectonic_manifest_list" { + count = "${length(var.manifest_names)}" + filesystem = "root" + mode = "0644" + + path = "/opt/tectonic/tectonic/${var.manifest_names[count.index]}" + + content { + content = "${data.template_file.manifest_file_list.*.rendered[count.index]}" + } +} + +# Log the generated manifest files to disk for debugging and user visibility +# Dest: ./generated/tectonic/ +resource "local_file" "manifest_files" { + count = "${length(var.manifest_names)}" + filename = "./generated/tectonic/${var.manifest_names[count.index]}" + content = "${data.template_file.manifest_file_list.*.rendered[count.index]}" +} diff --git a/data/data/modules/tectonic/output.tf b/data/data/modules/tectonic/output.tf new file mode 100644 index 00000000000..2c0438f89cb --- /dev/null +++ b/data/data/modules/tectonic/output.tf @@ -0,0 +1,21 @@ +output "systemd_service_id" { + value = "${data.ignition_systemd_unit.tectonic_service.id}" +} + +output "ignition_file_id_list" { + value = ["${concat( + list(data.ignition_file.tectonic_sh.id), + data.ignition_file.tectonic_manifest_list.*.id, + )}"] +} + +output "cluster_id" { + value = "${format( + "%s-%s-%s-%s-%s", + substr(random_id.cluster_id.hex, 0, 8), + substr(random_id.cluster_id.hex, 8, 4), + substr(random_id.cluster_id.hex, 12, 4), + substr(random_id.cluster_id.hex, 16, 4), + substr(random_id.cluster_id.hex, 20, 12) + )}" +} diff --git a/data/data/modules/tectonic/resources/manifests/ingress/README.md b/data/data/modules/tectonic/resources/manifests/ingress/README.md new file mode 100644 index 00000000000..60ef4987e9b --- /dev/null +++ b/data/data/modules/tectonic/resources/manifests/ingress/README.md @@ -0,0 +1,2 @@ +tectonic-ingress-controller-operator is a special case, since it is in its own +namespace and reads its own config. 
diff --git a/data/data/modules/tectonic/resources/manifests/ingress/cluster-config.yaml b/data/data/modules/tectonic/resources/manifests/ingress/cluster-config.yaml new file mode 100644 index 00000000000..13192da85bf --- /dev/null +++ b/data/data/modules/tectonic/resources/manifests/ingress/cluster-config.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: cluster-config-v1 + namespace: openshift-ingress +data: + ingress-config: | + apiVersion: v1 + kind: TectonicIngressOperatorConfig + type: ${ingress_kind} + statsPassword: ${ingress_status_password} + statsUsername: admin diff --git a/data/data/modules/tectonic/resources/manifests/ingress/pull.json b/data/data/modules/tectonic/resources/manifests/ingress/pull.json new file mode 100644 index 00000000000..410066261f5 --- /dev/null +++ b/data/data/modules/tectonic/resources/manifests/ingress/pull.json @@ -0,0 +1,12 @@ +{ + "apiVersion": "v1", + "kind": "Secret", + "type": "kubernetes.io/dockerconfigjson", + "metadata": { + "namespace": "openshift-ingress", + "name": "coreos-pull-secret" + }, + "data": { + ".dockerconfigjson": "${pull_secret}" + } +} diff --git a/data/data/modules/tectonic/resources/manifests/ingress/svc-account.yaml b/data/data/modules/tectonic/resources/manifests/ingress/svc-account.yaml new file mode 100644 index 00000000000..bfebbd07690 --- /dev/null +++ b/data/data/modules/tectonic/resources/manifests/ingress/svc-account.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: tectonic-ingress-controller-operator + namespace: openshift-ingress diff --git a/data/data/modules/tectonic/resources/manifests/rbac/binding-admin.yaml b/data/data/modules/tectonic/resources/manifests/rbac/binding-admin.yaml new file mode 100644 index 00000000000..c0f3294fc04 --- /dev/null +++ b/data/data/modules/tectonic/resources/manifests/rbac/binding-admin.yaml @@ -0,0 +1,15 @@ +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1beta1 +metadata: + name: 
admin-user +subjects: + - kind: ServiceAccount + namespace: tectonic-system + name: default + - kind: ServiceAccount + namespace: openshift-ingress + name: tectonic-ingress-controller-operator +roleRef: + kind: ClusterRole + name: cluster-admin + apiGroup: rbac.authorization.k8s.io diff --git a/data/data/modules/tectonic/resources/manifests/rbac/binding-discovery.yaml b/data/data/modules/tectonic/resources/manifests/rbac/binding-discovery.yaml new file mode 100644 index 00000000000..79154a8aff8 --- /dev/null +++ b/data/data/modules/tectonic/resources/manifests/rbac/binding-discovery.yaml @@ -0,0 +1,13 @@ +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1beta1 +metadata: + name: discovery +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:discovery +subjects: +- kind: Group + name: 'system:unauthenticated' +- kind: Group + name: 'system:authenticated' diff --git a/data/data/modules/tectonic/resources/manifests/rbac/role-admin.yaml b/data/data/modules/tectonic/resources/manifests/rbac/role-admin.yaml new file mode 100644 index 00000000000..11968feba20 --- /dev/null +++ b/data/data/modules/tectonic/resources/manifests/rbac/role-admin.yaml @@ -0,0 +1,10 @@ +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1beta1 +metadata: + name: admin +rules: + - apiGroups: ["*"] + resources: ["*"] + verbs: ["*"] + - nonResourceURLs: ["*"] + verbs: ["*"] diff --git a/data/data/modules/tectonic/resources/manifests/rbac/role-user.yaml b/data/data/modules/tectonic/resources/manifests/rbac/role-user.yaml new file mode 100644 index 00000000000..a2ade705b53 --- /dev/null +++ b/data/data/modules/tectonic/resources/manifests/rbac/role-user.yaml @@ -0,0 +1,67 @@ +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1beta1 +metadata: + name: user +rules: + - apiGroups: [""] + resources: [ + "bindings", "configmaps", "events", "pods", "replicationcontrollers", + "secrets", "services", "serviceaccounts", + "pods/attach", + 
"pods/binding", + "pods/exec", + "pods/log", + "pods/portforward", + "pods/proxy", + "pods/status", + "replicationcontrollers/scale", + "replicationcontrollers/status", + "services/proxy", + "services/status" + ] + verbs: ["*"] + nonResourceURLs: [] + + - apiGroups: [""] + resources: [ + "componentstatuses", "endpoints", "limitranges", "nodes", "nodes/proxy", "nodes/status", + "namespaces", "namespaces/status", "namespaces/finalize", + "persistentvolumeclaims", "persistentvolumeclaims/status", "persistentvolumes", "resourcequotas", + "resourcequotas/status" + ] + verbs: ["get", "list", "watch", "proxy", "redirect"] + nonResourceURLs: [] + + - apiGroups: ["apps", "batch", "autoscaling", "policy"] + resources: ["*"] + verbs: ["*"] + nonResourceURLs: [] + + - apiGroups: ["extensions"] + resources: [ + "daemonsets", "deployments", "horizontalpodautoscalers", "ingresses", + "jobs", "replicasets", "replicationcontrollers", + + "daemonsets/status", + "deployments/rollback", + "deployments/scale", + "deployments/status", + "horizontalpodautoscalers/status", + "ingresses/status", + "jobs/status", + "replicasets/scale", + "replicasets/status", + "replicationcontrollers/scale" + ] + verbs: ["*"] + nonResourceURLs: [] + + - apiGroups: ["extensions"] + resources: ["networkpolicies", "thirdpartyresources"] + verbs: ["get", "list", "watch", "proxy", "redirect"] + nonResourceURLs: [] + + - apiGroups: ["rbac.authorization.k8s.io"] + resources: ["*"] + verbs: ["get", "list", "watch", "proxy", "redirect"] + nonResourceURLs: [] diff --git a/data/data/modules/tectonic/resources/manifests/secrets/ca-cert.yaml b/data/data/modules/tectonic/resources/manifests/secrets/ca-cert.yaml new file mode 100644 index 00000000000..88f71093b85 --- /dev/null +++ b/data/data/modules/tectonic/resources/manifests/secrets/ca-cert.yaml @@ -0,0 +1,8 @@ +apiVersion: v1 +kind: Secret +metadata: + name: tectonic-ca-cert-secret + namespace: tectonic-system +type: Opaque +data: + ca-cert: ${ingress_ca_cert} diff 
--git a/data/data/modules/tectonic/resources/manifests/secrets/ingress-tls.yaml b/data/data/modules/tectonic/resources/manifests/secrets/ingress-tls.yaml new file mode 100644 index 00000000000..7898800cc04 --- /dev/null +++ b/data/data/modules/tectonic/resources/manifests/secrets/ingress-tls.yaml @@ -0,0 +1,10 @@ +apiVersion: v1 +kind: Secret +metadata: + name: tectonic-ingress-tls + namespace: openshift-ingress +type: Opaque +data: + tls.crt: ${ingress_tls_cert} + tls.key: ${ingress_tls_key} + bundle.crt: ${ingress_tls_bundle} \ No newline at end of file diff --git a/data/data/modules/tectonic/resources/manifests/secrets/pull.json b/data/data/modules/tectonic/resources/manifests/secrets/pull.json new file mode 100644 index 00000000000..0e8d1bacf74 --- /dev/null +++ b/data/data/modules/tectonic/resources/manifests/secrets/pull.json @@ -0,0 +1,12 @@ +{ + "apiVersion": "v1", + "kind": "Secret", + "type": "kubernetes.io/dockerconfigjson", + "metadata": { + "namespace": "tectonic-system", + "name": "coreos-pull-secret" + }, + "data": { + ".dockerconfigjson": "${pull_secret}" + } +} diff --git a/data/data/modules/tectonic/resources/manifests/security/priviledged-scc-tectonic.yaml b/data/data/modules/tectonic/resources/manifests/security/priviledged-scc-tectonic.yaml new file mode 100644 index 00000000000..89975bfddc5 --- /dev/null +++ b/data/data/modules/tectonic/resources/manifests/security/priviledged-scc-tectonic.yaml @@ -0,0 +1,31 @@ +apiVersion: security.openshift.io/v1 +kind: SecurityContextConstraints +metadata: + annotations: + kubernetes.io/description: "privileged-tectonic temporarily for running tectonic assets." 
+ name: privileged-tectonic +allowHostDirVolumePlugin: true +allowHostIPC: true +allowHostNetwork: true +allowHostPID: true +allowHostPorts: true +allowPrivilegedContainer: true +allowedCapabilities: +- "*" +fsGroup: + type: RunAsAny +groups: +- system:serviceaccounts:tectonic-system +- system:serviceaccounts:openshift-ingress +readOnlyRootFilesystem: false +runAsUser: + type: RunAsAny +seLinuxContext: + type: RunAsAny +seccompProfiles: +- "*" +supplementalGroups: + type: RunAsAny +users: [] +volumes: +- "*" diff --git a/data/data/modules/tectonic/resources/manifests/updater/app-version-kind.yaml b/data/data/modules/tectonic/resources/manifests/updater/app-version-kind.yaml new file mode 100644 index 00000000000..ebcd04e9ebe --- /dev/null +++ b/data/data/modules/tectonic/resources/manifests/updater/app-version-kind.yaml @@ -0,0 +1,10 @@ +apiVersion: "apiextensions.k8s.io/v1beta1" +kind: "CustomResourceDefinition" +metadata: + name: "appversions.tco.coreos.com" +spec: + group: "tco.coreos.com" + version: "v1" + names: + plural: "appversions" + kind: "AppVersion" diff --git a/data/data/modules/tectonic/resources/manifests/updater/app_versions/app-version-kube-addon.yaml b/data/data/modules/tectonic/resources/manifests/updater/app_versions/app-version-kube-addon.yaml new file mode 100644 index 00000000000..ac36ad9c142 --- /dev/null +++ b/data/data/modules/tectonic/resources/manifests/updater/app_versions/app-version-kube-addon.yaml @@ -0,0 +1,15 @@ +apiVersion: tco.coreos.com/v1 +kind: AppVersion +metadata: + name: kube-addon + namespace: tectonic-system + labels: + managed-by-channel-operator: "true" +spec: + desiredVersion: + paused: false +status: + currentVersion: + paused: false +upgradereq: 1 +upgradecomp: 0 diff --git a/data/data/modules/tectonic/resources/manifests/updater/app_versions/app-version-kube-core.yaml b/data/data/modules/tectonic/resources/manifests/updater/app_versions/app-version-kube-core.yaml new file mode 100644 index 00000000000..0f6042ddb0a 
--- /dev/null +++ b/data/data/modules/tectonic/resources/manifests/updater/app_versions/app-version-kube-core.yaml @@ -0,0 +1,13 @@ +apiVersion: tco.coreos.com/v1 +kind: AppVersion +metadata: + name: kube-core + namespace: tectonic-system + labels: + managed-by-channel-operator: "true" +spec: + paused: false +status: + paused: false +upgradereq: 0 +upgradecomp: 0 diff --git a/data/data/modules/tectonic/resources/manifests/updater/app_versions/app-version-tectonic-alm.yaml b/data/data/modules/tectonic/resources/manifests/updater/app_versions/app-version-tectonic-alm.yaml new file mode 100644 index 00000000000..1327041ebe9 --- /dev/null +++ b/data/data/modules/tectonic/resources/manifests/updater/app_versions/app-version-tectonic-alm.yaml @@ -0,0 +1,12 @@ +apiVersion: tco.coreos.com/v1 +kind: AppVersion +metadata: + name: tectonic-alm-operator + namespace: tectonic-system + labels: + managed-by-channel-operator: "true" + annotations: + tectonic-operators.coreos.com/upgrade-behaviour: "CreateOrUpgrade" +spec: + desiredVersion: ${tectonic_alm_operator_version} + paused: false diff --git a/data/data/modules/tectonic/resources/manifests/updater/app_versions/app-version-tectonic-cluster.yaml b/data/data/modules/tectonic/resources/manifests/updater/app_versions/app-version-tectonic-cluster.yaml new file mode 100644 index 00000000000..82cefbd5b3a --- /dev/null +++ b/data/data/modules/tectonic/resources/manifests/updater/app_versions/app-version-tectonic-cluster.yaml @@ -0,0 +1,13 @@ +apiVersion: tco.coreos.com/v1 +kind: AppVersion +metadata: + name: tectonic-cluster + namespace: tectonic-system + labels: + managed-by-channel-operator: "true" +spec: + desiredVersion: ${tectonic_version} + paused: false +status: + currentVersion: ${tectonic_version} + paused: false diff --git a/data/data/modules/tectonic/resources/manifests/updater/app_versions/app-version-tectonic-ingress.yaml 
b/data/data/modules/tectonic/resources/manifests/updater/app_versions/app-version-tectonic-ingress.yaml new file mode 100644 index 00000000000..bb97f74a806 --- /dev/null +++ b/data/data/modules/tectonic/resources/manifests/updater/app_versions/app-version-tectonic-ingress.yaml @@ -0,0 +1,14 @@ +apiVersion: tco.coreos.com/v1 +kind: AppVersion +metadata: + name: tectonic-ingress + namespace: tectonic-system + labels: + managed-by-channel-operator: "true" +spec: + desiredVersion: + paused: false +status: + paused: false +upgradereq: 1 +upgradecomp: 0 diff --git a/data/data/modules/tectonic/resources/manifests/updater/app_versions/app-version-tectonic-utility.yaml b/data/data/modules/tectonic/resources/manifests/updater/app_versions/app-version-tectonic-utility.yaml new file mode 100644 index 00000000000..47839385507 --- /dev/null +++ b/data/data/modules/tectonic/resources/manifests/updater/app_versions/app-version-tectonic-utility.yaml @@ -0,0 +1,14 @@ +apiVersion: tco.coreos.com/v1 +kind: AppVersion +metadata: + name: tectonic-utility + namespace: tectonic-system + labels: + managed-by-channel-operator: "true" +spec: + desiredVersion: + paused: false +status: + paused: false +upgradereq: 1 +upgradecomp: 0 diff --git a/data/data/modules/tectonic/resources/manifests/updater/migration-status-kind.yaml b/data/data/modules/tectonic/resources/manifests/updater/migration-status-kind.yaml new file mode 100644 index 00000000000..546baa04e91 --- /dev/null +++ b/data/data/modules/tectonic/resources/manifests/updater/migration-status-kind.yaml @@ -0,0 +1,10 @@ +apiVersion: "apiextensions.k8s.io/v1beta1" +kind: "CustomResourceDefinition" +metadata: + name: "migrationstatuses.kvo.coreos.com" +spec: + group: "kvo.coreos.com" + version: "v1" + names: + plural: "migrationstatuses" + kind: "MigrationStatus" diff --git a/data/data/modules/tectonic/resources/manifests/updater/operators/kube-addon-operator.yaml 
b/data/data/modules/tectonic/resources/manifests/updater/operators/kube-addon-operator.yaml new file mode 100644 index 00000000000..7ff12e0d257 --- /dev/null +++ b/data/data/modules/tectonic/resources/manifests/updater/operators/kube-addon-operator.yaml @@ -0,0 +1,51 @@ +apiVersion: apps/v1beta2 +kind: Deployment +metadata: + name: kube-addon-operator + namespace: tectonic-system + labels: + k8s-app: kube-addon-operator + managed-by-channel-operator: "true" +spec: + replicas: 1 + selector: + matchLabels: + k8s-app: kube-addon-operator + template: + metadata: + labels: + k8s-app: kube-addon-operator + tectonic-app-version-name: kube-addon + spec: + containers: + - name: kube-addon-operator + image: ${kube_addon_operator_image} + resources: + limits: + cpu: 20m + memory: 50Mi + requests: + cpu: 20m + memory: 50Mi + volumeMounts: + - name: cluster-config + mountPath: /etc/cluster-config + imagePullSecrets: + - name: coreos-pull-secret + nodeSelector: + node-role.kubernetes.io/master: "" + restartPolicy: Always + securityContext: + runAsNonRoot: true + runAsUser: 65534 + tolerations: + - key: "node-role.kubernetes.io/master" + operator: "Exists" + effect: "NoSchedule" + volumes: + - name: cluster-config + configMap: + name: cluster-config-v1 + items: + - key: addon-config + path: addon-config diff --git a/data/data/modules/tectonic/resources/manifests/updater/operators/kube-core-operator.yaml b/data/data/modules/tectonic/resources/manifests/updater/operators/kube-core-operator.yaml new file mode 100644 index 00000000000..5edef2fe376 --- /dev/null +++ b/data/data/modules/tectonic/resources/manifests/updater/operators/kube-core-operator.yaml @@ -0,0 +1,54 @@ +apiVersion: apps/v1beta2 +kind: Deployment +metadata: + name: kube-core-operator + namespace: kube-system + labels: + k8s-app: kube-core-operator + managed-by-channel-operator: "true" +spec: + replicas: 1 + selector: + matchLabels: + k8s-app: kube-core-operator + template: + metadata: + labels: + k8s-app: 
kube-core-operator + tectonic-app-version-name: kube-core + spec: + containers: + - name: kube-core-operator + image: ${kube_core_operator_image} + imagePullPolicy: Always + args: + - --config=/etc/cluster-config/kco-config.yaml + resources: + limits: + cpu: 20m + memory: 50Mi + requests: + cpu: 20m + memory: 50Mi + volumeMounts: + - name: cluster-config + mountPath: /etc/cluster-config + imagePullSecrets: + - name: coreos-pull-secret + nodeSelector: + node-role.kubernetes.io/master: "" + restartPolicy: Always + securityContext: + runAsNonRoot: true + runAsUser: 65534 + tolerations: + - key: "node-role.kubernetes.io/master" + operator: "Exists" + effect: "NoSchedule" + volumes: + - name: cluster-config + configMap: + name: cluster-config-v1 + items: + - key: kco-config + path: kco-config.yaml diff --git a/data/data/modules/tectonic/resources/manifests/updater/operators/tectonic-alm-operator.yaml b/data/data/modules/tectonic/resources/manifests/updater/operators/tectonic-alm-operator.yaml new file mode 100644 index 00000000000..df2cf2ca017 --- /dev/null +++ b/data/data/modules/tectonic/resources/manifests/updater/operators/tectonic-alm-operator.yaml @@ -0,0 +1,43 @@ +apiVersion: apps/v1beta2 +kind: Deployment +metadata: + name: tectonic-alm-operator + namespace: tectonic-system + labels: + k8s-app: tectonic-alm-operator + managed-by-channel-operator: "true" + annotations: + tectonic-operators.coreos.com/upgrade-behaviour: 'CreateOrUpgrade' +spec: + replicas: 1 + strategy: + type: RollingUpdate + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + k8s-app: tectonic-alm-operator + template: + metadata: + labels: + k8s-app: tectonic-alm-operator + spec: + containers: + - name: tectonic-alm-operator + image: ${tectonic_alm_operator_image} + args: + - --manifest-dir=/manifests + - --operator-name=tectonic-alm-operator + - --appversion-name=tectonic-alm-operator + - --v=2 + imagePullSecrets: + - name: coreos-pull-secret + nodeSelector: + 
node-role.kubernetes.io/master: "" + tolerations: + - key: "node-role.kubernetes.io/master" + operator: "Exists" + effect: "NoSchedule" + securityContext: + runAsNonRoot: true + runAsUser: 65534 diff --git a/data/data/modules/tectonic/resources/manifests/updater/operators/tectonic-channel-operator.yaml b/data/data/modules/tectonic/resources/manifests/updater/operators/tectonic-channel-operator.yaml new file mode 100644 index 00000000000..b031b30a4fd --- /dev/null +++ b/data/data/modules/tectonic/resources/manifests/updater/operators/tectonic-channel-operator.yaml @@ -0,0 +1,54 @@ +apiVersion: apps/v1beta2 +kind: Deployment +metadata: + name: tectonic-channel-operator + namespace: tectonic-system + labels: + k8s-app: tectonic-channel-operator + managed-by-channel-operator: "true" +spec: + replicas: 1 + selector: + matchLabels: + k8s-app: tectonic-channel-operator + template: + metadata: + labels: + k8s-app: tectonic-channel-operator + tectonic-app-version-name: tectonic-cluster + spec: + containers: + - name: tectonic-channel-operator + image: ${tectonic_channel_operator_image} + env: + - name: CLUSTER_ID + valueFrom: + configMapKeyRef: + name: tectonic-config + key: clusterID + resources: + limits: + cpu: 20m + memory: 50Mi + requests: + cpu: 20m + memory: 50Mi + volumeMounts: + - name: certs + mountPath: /etc/ssl/certs + imagePullSecrets: + - name: coreos-pull-secret + nodeSelector: + node-role.kubernetes.io/master: "" + restartPolicy: Always + securityContext: + runAsNonRoot: true + runAsUser: 65534 + tolerations: + - key: "node-role.kubernetes.io/master" + operator: "Exists" + effect: "NoSchedule" + volumes: + - name: certs + hostPath: + path: /etc/ssl/certs diff --git a/data/data/modules/tectonic/resources/manifests/updater/operators/tectonic-ingress-controller-operator.yaml b/data/data/modules/tectonic/resources/manifests/updater/operators/tectonic-ingress-controller-operator.yaml new file mode 100644 index 00000000000..4a7a4e59087 --- /dev/null +++ 
b/data/data/modules/tectonic/resources/manifests/updater/operators/tectonic-ingress-controller-operator.yaml @@ -0,0 +1,52 @@ +apiVersion: apps/v1beta2 +kind: Deployment +metadata: + name: tectonic-ingress-controller-operator + namespace: openshift-ingress + labels: + k8s-app: tectonic-ingress-controller-operator + managed-by-channel-operator: "true" +spec: + replicas: 1 + selector: + matchLabels: + k8s-app: tectonic-ingress-controller-operator + template: + metadata: + labels: + k8s-app: tectonic-ingress-controller-operator + tectonic-app-version-name: tectonic-ingress + spec: + containers: + - name: tectonic-ingress-controller-operator + image: ${tectonic_ingress_controller_operator_image} + resources: + limits: + cpu: 20m + memory: 50Mi + requests: + cpu: 20m + memory: 50Mi + volumeMounts: + - name: cluster-config + mountPath: /etc/cluster-config + imagePullSecrets: + - name: coreos-pull-secret + nodeSelector: + node-role.kubernetes.io/master: "" + restartPolicy: Always + securityContext: + runAsNonRoot: true + runAsUser: 65534 + serviceAccount: tectonic-ingress-controller-operator + tolerations: + - key: "node-role.kubernetes.io/master" + operator: "Exists" + effect: "NoSchedule" + volumes: + - name: cluster-config + configMap: + name: cluster-config-v1 + items: + - key: ingress-config + path: ingress-config diff --git a/data/data/modules/tectonic/resources/manifests/updater/operators/tectonic-utility-operator.yaml b/data/data/modules/tectonic/resources/manifests/updater/operators/tectonic-utility-operator.yaml new file mode 100644 index 00000000000..eb4fc403c7d --- /dev/null +++ b/data/data/modules/tectonic/resources/manifests/updater/operators/tectonic-utility-operator.yaml @@ -0,0 +1,51 @@ +apiVersion: apps/v1beta2 +kind: Deployment +metadata: + name: tectonic-utility-operator + namespace: tectonic-system + labels: + k8s-app: tectonic-utility-operator + managed-by-channel-operator: "true" +spec: + replicas: 1 + selector: + matchLabels: + k8s-app: 
tectonic-utility-operator + template: + metadata: + labels: + k8s-app: tectonic-utility-operator + tectonic-app-version-name: tectonic-utility + spec: + containers: + - name: tectonic-utility-operator + image: ${tectonic_utility_operator_image} + resources: + limits: + cpu: 20m + memory: 50Mi + requests: + cpu: 20m + memory: 50Mi + volumeMounts: + - name: cluster-config + mountPath: /etc/cluster-config + imagePullSecrets: + - name: coreos-pull-secret + nodeSelector: + node-role.kubernetes.io/master: "" + restartPolicy: Always + securityContext: + runAsNonRoot: true + runAsUser: 65534 + tolerations: + - key: "node-role.kubernetes.io/master" + operator: "Exists" + effect: "NoSchedule" + volumes: + - name: cluster-config + configMap: + name: cluster-config-v1 + items: + - key: utility-config + path: utility-config diff --git a/data/data/modules/tectonic/resources/manifests/updater/tectonic-channel-operator-config.yaml b/data/data/modules/tectonic/resources/manifests/updater/tectonic-channel-operator-config.yaml new file mode 100644 index 00000000000..5fab4614c94 --- /dev/null +++ b/data/data/modules/tectonic/resources/manifests/updater/tectonic-channel-operator-config.yaml @@ -0,0 +1,12 @@ +apiVersion: tco.coreos.com/v1 +kind: ChannelOperatorConfig +metadata: + name: default + namespace: tectonic-system +server: ${update_server} +channel: ${update_channel} +appID: ${update_app_id} +automaticUpdate: false +triggerUpdate: false +triggerUpdateCheck: false +updateCheckInterval: 2700 diff --git a/data/data/modules/tectonic/resources/manifests/updater/tectonic-channel-operator-kind.yaml b/data/data/modules/tectonic/resources/manifests/updater/tectonic-channel-operator-kind.yaml new file mode 100644 index 00000000000..a92fa942810 --- /dev/null +++ b/data/data/modules/tectonic/resources/manifests/updater/tectonic-channel-operator-kind.yaml @@ -0,0 +1,10 @@ +apiVersion: "apiextensions.k8s.io/v1beta1" +kind: "CustomResourceDefinition" +metadata: + name: 
"channeloperatorconfigs.tco.coreos.com" +spec: + group: "tco.coreos.com" + version: "v1" + names: + plural: "channeloperatorconfigs" + kind: "ChannelOperatorConfig" diff --git a/data/data/modules/tectonic/resources/tectonic.service b/data/data/modules/tectonic/resources/tectonic.service new file mode 100644 index 00000000000..85fe18e3335 --- /dev/null +++ b/data/data/modules/tectonic/resources/tectonic.service @@ -0,0 +1,15 @@ +[Unit] +Description=Bootstrap a Tectonic cluster +Wants=bootkube.service +After=bootkube.service + +[Service] +WorkingDirectory=/opt/tectonic/tectonic + +ExecStart=/opt/tectonic/tectonic.sh /opt/tectonic/auth/kubeconfig + +Restart=on-failure +RestartSec=5s + +[Install] +WantedBy=multi-user.target diff --git a/data/data/modules/tectonic/resources/tectonic.sh b/data/data/modules/tectonic/resources/tectonic.sh new file mode 100755 index 00000000000..db6dc3ec7f1 --- /dev/null +++ b/data/data/modules/tectonic/resources/tectonic.sh @@ -0,0 +1,107 @@ +#!/usr/bin/env bash +set -e + +KUBECONFIG="$1" + +kubectl() { + echo "Executing kubectl $*" >&2 + while true + do + set +e + out=$(oc --config="$KUBECONFIG" "$@" 2>&1) + status=$? + set -e + + if grep --quiet "AlreadyExists" <<< "$out" + then + echo "$out, skipping" >&2 + return + fi + + echo "$out" + if [ "$status" -eq 0 ] + then + return + fi + + echo "kubectl $* failed. Retrying in 5 seconds..." >&2 + sleep 5 + done +} + +wait_for_pods() { + echo "Waiting for pods in namespace $1..." + while true + do + out=$(kubectl --namespace "$1" get pods --output custom-columns=STATUS:.status.phase,NAME:.metadata.name --no-headers=true) + echo "$out" + + # make sure kubectl returns at least one status + if [ "$(wc --lines <<< "$out")" -eq 0 ] + then + echo "No pods were found. Waiting for 5 seconds..." + sleep 5 + continue + fi + + if ! grep --invert-match '^Running' <<< "$out" + then + return + fi + + echo "Not all pods available yet. Waiting for 5 seconds..." 
+ sleep 5 + done + set -e +} + +# Wait for Kubernetes pods +wait_for_pods kube-system + +echo "Creating initial roles..." +kubectl delete --filename rbac/role-admin.yaml + +kubectl create --filename ingress/svc-account.yaml +kubectl create --filename rbac/role-admin.yaml +kubectl create --filename rbac/role-user.yaml +kubectl create --filename rbac/binding-admin.yaml +kubectl create --filename rbac/binding-discovery.yaml + +echo "Creating cluster config for Tectonic..." +kubectl create --filename cluster-config.yaml +kubectl create --filename ingress/cluster-config.yaml + +echo "Creating Tectonic secrets..." +kubectl create --filename secrets/pull.json +kubectl create --filename secrets/ingress-tls.yaml +kubectl create --filename secrets/ca-cert.yaml +kubectl create --filename ingress/pull.json + +echo "Creating operators..." +kubectl create --filename security/priviledged-scc-tectonic.yaml +kubectl create --filename updater/tectonic-channel-operator-kind.yaml +kubectl create --filename updater/app-version-kind.yaml +kubectl create --filename updater/migration-status-kind.yaml + +kubectl --namespace=tectonic-system get customresourcedefinition channeloperatorconfigs.tco.coreos.com +kubectl create --filename updater/tectonic-channel-operator-config.yaml + +kubectl create --filename updater/operators/kube-core-operator.yaml +kubectl create --filename updater/operators/tectonic-channel-operator.yaml +kubectl create --filename updater/operators/kube-addon-operator.yaml +kubectl create --filename updater/operators/tectonic-alm-operator.yaml +kubectl create --filename updater/operators/tectonic-utility-operator.yaml +kubectl create --filename updater/operators/tectonic-ingress-controller-operator.yaml + +kubectl --namespace=tectonic-system get customresourcedefinition appversions.tco.coreos.com +kubectl create --filename updater/app_versions/app-version-tectonic-cluster.yaml +kubectl create --filename updater/app_versions/app-version-kube-core.yaml +kubectl create 
--filename updater/app_versions/app-version-kube-addon.yaml +kubectl create --filename updater/app_versions/app-version-tectonic-alm.yaml +kubectl create --filename updater/app_versions/app-version-tectonic-utility.yaml +kubectl create --filename updater/app_versions/app-version-tectonic-ingress.yaml + +# Wait for Tectonic pods +wait_for_pods tectonic-system + +echo "Tectonic installation is done" diff --git a/data/data/modules/tectonic/variables.tf b/data/data/modules/tectonic/variables.tf new file mode 100644 index 00000000000..afd4e76561e --- /dev/null +++ b/data/data/modules/tectonic/variables.tf @@ -0,0 +1,65 @@ +variable "container_images" { + description = "Container images to use. Leave blank for defaults." + type = "map" +} + +variable "container_base_images" { + description = "Container base images to use. Leave blank for defaults." + type = "map" +} + +variable "versions" { + description = "Versions of the components to use. Leave blank for defaults." + type = "map" +} + +variable "platform" { + description = "Platform on which Tectonic is being installed. Example: aws or libvirt." + type = "string" +} + +variable "ingress_kind" { + description = "Type of Ingress mapping to use. Example: HostPort or NodePort." + type = "string" +} + +variable "pull_secret" { + type = "string" + description = "Your pull secret. Obtain this from your Tectonic Account: https://account.coreos.com." +} + +variable "base_address" { + description = "Base address used to access the Tectonic Console, without protocol nor trailing forward slash (may contain a port). Example: console.example.com:30000." + type = "string" +} + +variable "update_server" { + description = "Server contacted to request Tectonic software updates. Leave blank for defaults." + type = "string" +} + +variable "update_channel" { + description = "Release channel used to request Tectonic software updates. Leave blank for defaults. 
Example: Tectonic-1.5" + type = "string" +} + +variable "update_app_id" { + description = "Application identifier used to request Tectonic software updates. Leave blank for defaults." + type = "string" +} + +variable "ingress_ca_cert_pem" { + type = "string" +} + +variable "ingress_cert_pem" { + type = "string" +} + +variable "ingress_key_pem" { + type = "string" +} + +variable "ingress_bundle_pem" { + type = "string" +} diff --git a/data/data/steps/infra/aws/inputs.tf b/data/data/steps/infra/aws/inputs.tf new file mode 100644 index 00000000000..408a8c9a65d --- /dev/null +++ b/data/data/steps/infra/aws/inputs.tf @@ -0,0 +1,15 @@ +data "terraform_remote_state" "assets" { + backend = "local" + + config { + path = "${path.cwd}/assets.tfstate" + } + + defaults { + ignition_bootstrap = "" + } +} + +locals { + ignition_bootstrap = "${var.ignition_bootstrap != "" ? var.ignition_bootstrap : data.terraform_remote_state.assets.ignition_bootstrap}" +} diff --git a/data/data/steps/infra/aws/main.tf b/data/data/steps/infra/aws/main.tf new file mode 100644 index 00000000000..fc5b8dce50e --- /dev/null +++ b/data/data/steps/infra/aws/main.tf @@ -0,0 +1,149 @@ +locals { + private_endpoints = "${var.tectonic_aws_endpoints == "public" ? false : true}" + public_endpoints = "${var.tectonic_aws_endpoints == "private" ? false : true}" + private_zone_id = "${var.tectonic_aws_external_private_zone != "" ? var.tectonic_aws_external_private_zone : join("", aws_route53_zone.tectonic_int.*.zone_id)}" +} + +provider "aws" { + region = "${var.tectonic_aws_region}" + profile = "${var.tectonic_aws_profile}" + version = "1.8.0" + + assume_role { + role_arn = "${var.tectonic_aws_installer_role == "" ? 
"" : "${var.tectonic_aws_installer_role}"}" + session_name = "TECTONIC_INSTALLER_${var.tectonic_cluster_name}" + } +} + +module "bootstrap" { + source = "../../../modules/aws/bootstrap" + + ami = "${var.tectonic_aws_ec2_ami_override}" + associate_public_ip_address = "${var.tectonic_aws_endpoints != "private"}" + bucket = "${aws_s3_bucket.tectonic.bucket}" + cluster_name = "${var.tectonic_cluster_name}" + elbs = "${module.vpc.aws_lbs}" + elbs_length = "${module.vpc.aws_lbs_length}" + iam_role = "${var.tectonic_aws_master_iam_role_name}" + ignition = "${local.ignition_bootstrap}" + subnet_id = "${module.vpc.master_subnet_ids[0]}" + vpc_security_group_ids = ["${concat(var.tectonic_aws_master_extra_sg_ids, list(module.vpc.master_sg_id))}"] + + tags = "${merge(map( + "Name", "${var.tectonic_cluster_name}-bootstrap", + "tectonicClusterID", "${var.tectonic_cluster_id}" + ), var.tectonic_aws_extra_tags)}" +} + +module "masters" { + source = "../../../modules/aws/master" + + elb_api_internal_id = "${module.vpc.aws_elb_api_internal_id}" + elb_api_external_id = "${module.vpc.aws_elb_api_external_id}" + elb_console_id = "${module.vpc.aws_elb_console_id}" + base_domain = "${var.tectonic_base_domain}" + cluster_id = "${var.tectonic_cluster_id}" + cluster_name = "${var.tectonic_cluster_name}" + container_images = "${var.tectonic_container_images}" + ec2_type = "${var.tectonic_aws_master_ec2_type}" + extra_tags = "${var.tectonic_aws_extra_tags}" + instance_count = "${var.tectonic_master_count}" + master_iam_role = "${var.tectonic_aws_master_iam_role_name}" + master_sg_ids = "${concat(var.tectonic_aws_master_extra_sg_ids, list(module.vpc.master_sg_id))}" + private_endpoints = "${local.private_endpoints}" + public_endpoints = "${local.public_endpoints}" + root_volume_iops = "${var.tectonic_aws_master_root_volume_iops}" + root_volume_size = "${var.tectonic_aws_master_root_volume_size}" + root_volume_type = "${var.tectonic_aws_master_root_volume_type}" + subnet_ids = 
"${module.vpc.master_subnet_ids}" + ec2_ami = "${var.tectonic_aws_ec2_ami_override}" + user_data_igns = ["${var.ignition_masters}"] +} + +module "iam" { + source = "../../../modules/aws/iam" + + cluster_name = "${var.tectonic_cluster_name}" + worker_iam_role = "${var.tectonic_aws_worker_iam_role_name}" +} + +module "dns" { + source = "../../../modules/dns/route53" + + api_external_elb_dns_name = "${module.vpc.aws_elb_api_external_dns_name}" + api_external_elb_zone_id = "${module.vpc.aws_elb_api_external_zone_id}" + api_internal_elb_dns_name = "${module.vpc.aws_elb_api_internal_dns_name}" + api_internal_elb_zone_id = "${module.vpc.aws_elb_api_internal_zone_id}" + api_ip_addresses = "${module.vpc.aws_lbs}" + base_domain = "${var.tectonic_base_domain}" + cluster_id = "${var.tectonic_cluster_id}" + cluster_name = "${var.tectonic_cluster_name}" + console_elb_dns_name = "${module.vpc.aws_console_dns_name}" + console_elb_zone_id = "${module.vpc.aws_elb_console_zone_id}" + elb_alias_enabled = true + master_count = "${var.tectonic_master_count}" + private_zone_id = "${local.private_zone_id}" + external_vpc_id = "${module.vpc.vpc_id}" + extra_tags = "${var.tectonic_aws_extra_tags}" + private_endpoints = "${local.private_endpoints}" + public_endpoints = "${local.public_endpoints}" +} + +module "vpc" { + source = "../../../modules/aws/vpc" + + base_domain = "${var.tectonic_base_domain}" + cidr_block = "${var.tectonic_aws_vpc_cidr_block}" + cluster_id = "${var.tectonic_cluster_id}" + cluster_name = "${var.tectonic_cluster_name}" + external_vpc_id = "${var.tectonic_aws_external_vpc_id}" + + external_master_subnet_ids = "${compact(var.tectonic_aws_external_master_subnet_ids)}" + external_worker_subnet_ids = "${compact(var.tectonic_aws_external_worker_subnet_ids)}" + extra_tags = "${var.tectonic_aws_extra_tags}" + + // empty map subnet_configs will have the vpc module creating subnets in all availabile AZs + new_master_subnet_configs = "${var.tectonic_aws_master_custom_subnets}" + 
new_worker_subnet_configs = "${var.tectonic_aws_worker_custom_subnets}" + + private_master_endpoints = "${local.private_endpoints}" + public_master_endpoints = "${local.public_endpoints}" +} + +resource "aws_route53_record" "etcd_a_nodes" { + count = "${var.tectonic_master_count}" + type = "A" + ttl = "60" + zone_id = "${local.private_zone_id}" + name = "${var.tectonic_cluster_name}-etcd-${count.index}" + records = ["${module.masters.ip_addresses[count.index]}"] +} + +resource "aws_route53_zone" "tectonic_int" { + count = "${local.private_endpoints ? "${var.tectonic_aws_external_private_zone == "" ? 1 : 0 }" : 0}" + vpc_id = "${module.vpc.vpc_id}" + name = "${var.tectonic_base_domain}" + force_destroy = true + + tags = "${merge(map( + "Name", "${var.tectonic_cluster_name}_tectonic_int", + "KubernetesCluster", "${var.tectonic_cluster_name}", + "tectonicClusterID", "${var.tectonic_cluster_id}" + ), var.tectonic_aws_extra_tags)}" +} + +resource "aws_s3_bucket" "tectonic" { + bucket = "${lower(var.tectonic_cluster_name)}.${var.tectonic_base_domain}" + + acl = "private" + + tags = "${merge(map( + "Name", "${var.tectonic_cluster_name}-tectonic", + "KubernetesCluster", "${var.tectonic_cluster_name}", + "tectonicClusterID", "${var.tectonic_cluster_id}" + ), var.tectonic_aws_extra_tags)}" + + lifecycle { + ignore_changes = ["*"] + } +} diff --git a/data/data/steps/infra/aws/variables-aws.tf b/data/data/steps/infra/aws/variables-aws.tf new file mode 100644 index 00000000000..59d396e9403 --- /dev/null +++ b/data/data/steps/infra/aws/variables-aws.tf @@ -0,0 +1,262 @@ +variable "tectonic_aws_config_version" { + description = <&2 + exit 1 +esac + +CGO_ENABLED=0 go build -tags "${TAGS}" -o ./bin/openshift-install ./cmd/openshift-install diff --git a/pkg/asset/cluster/BUILD.bazel b/pkg/asset/cluster/BUILD.bazel index 8fa808439e1..cf4b674de42 100644 --- a/pkg/asset/cluster/BUILD.bazel +++ b/pkg/asset/cluster/BUILD.bazel @@ -11,6 +11,7 @@ go_library( importpath = 
"github.com/openshift/installer/pkg/asset/cluster", visibility = ["//visibility:public"], deps = [ + "//data:go_default_library", "//pkg/asset:go_default_library", "//pkg/asset/ignition:go_default_library", "//pkg/asset/installconfig:go_default_library", diff --git a/pkg/asset/cluster/cluster.go b/pkg/asset/cluster/cluster.go index 5764374a0e9..f489c452cdf 100644 --- a/pkg/asset/cluster/cluster.go +++ b/pkg/asset/cluster/cluster.go @@ -2,6 +2,7 @@ package cluster import ( "encoding/json" + "errors" "fmt" "io/ioutil" "os" @@ -9,6 +10,7 @@ import ( log "github.com/sirupsen/logrus" + "github.com/openshift/installer/data" "github.com/openshift/installer/pkg/asset" "github.com/openshift/installer/pkg/terraform" "github.com/openshift/installer/pkg/types/config" @@ -42,11 +44,6 @@ func (c *Cluster) Dependencies() []asset.Asset { // Generate launches the cluster and generates the terraform state file on disk. func (c *Cluster) Generate(parents map[asset.Asset]*asset.State) (*asset.State, error) { - dir, err := terraform.BaseLocation() - if err != nil { - return nil, fmt.Errorf("error finding baselocation for terraform: %v", err) - } - state, ok := parents[c.tfvars] if !ok { return nil, fmt.Errorf("failed to get terraform.tfvar state in the parent asset states") @@ -68,11 +65,24 @@ func (c *Cluster) Generate(parents map[asset.Asset]*asset.State) (*asset.State, return nil, fmt.Errorf("failed to unmarshal terraform tfvars file: %v", err) } - templateDir, err := terraform.FindStepTemplates(dir, terraform.InfraStep, tfvars.Platform) + if err := data.Unpack(tmpDir); err != nil { + return nil, err + } + + templateDir, err := terraform.FindStepTemplates(tmpDir, terraform.InfraStep, tfvars.Platform) if err != nil { + if os.IsNotExist(err) { + return nil, errors.New("infra step not found; set OPENSHIFT_INSTALL_DATA to point to the data directory") + } return nil, fmt.Errorf("error finding terraform templates: %v", err) } + // take advantage of the new installer only having one step. 
+ err = os.Rename(filepath.Join(tmpDir, "config.tf"), filepath.Join(templateDir, "config.tf")) + if err != nil { + return nil, err + } + // This runs the terraform in a temp directory, the tfstate file will be returned // to the asset store to persist it on the disk. if err := terraform.Init(tmpDir, templateDir); err != nil { diff --git a/pkg/destroy/BUILD.bazel b/pkg/destroy/BUILD.bazel index 0a1eb5b3a84..88ead3d58b7 100644 --- a/pkg/destroy/BUILD.bazel +++ b/pkg/destroy/BUILD.bazel @@ -2,7 +2,10 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library") go_library( name = "go_default_library", - srcs = ["destroyer.go"], + srcs = [ + "destroyer.go", + "doc.go", + ], importpath = "github.com/openshift/installer/pkg/destroy", visibility = ["//visibility:public"], deps = [ diff --git a/vendor/github.com/aws/aws-sdk-go/aws/version.go b/vendor/github.com/aws/aws-sdk-go/aws/version.go index 13eb1060a51..4ae7a5c4d7d 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/version.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/version.go @@ -5,4 +5,4 @@ package aws const SDKName = "aws-sdk-go" // SDKVersion is the version of this SDK -const SDKVersion = "1.15.41" +const SDKVersion = "1.15.42" diff --git a/vendor/github.com/aws/aws-sdk-go/service/ec2/api.go b/vendor/github.com/aws/aws-sdk-go/service/ec2/api.go index 495f5321afd..5d4ffc3d48c 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/ec2/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/ec2/api.go @@ -27990,7 +27990,8 @@ type CreateFlowLogsInput struct { // can also specify a subfolder in the bucket. To specify a subfolder in the // bucket, use the following ARN format: bucket_ARN/subfolder_name/. For example, // to specify a subfolder named my-logs in a bucket named my-bucket, use the - // following ARN: arn:aws:s3:::my-bucket/my-logs/. + // following ARN: arn:aws:s3:::my-bucket/my-logs/. You cannot use AWSLogs as + // a subfolder name. This is a reserved term. 
LogDestination *string `type:"string"` // Specifies the type of destination to which the flow log data is to be published. @@ -28271,7 +28272,9 @@ func (s *CreateFpgaImageOutput) SetFpgaImageId(v string) *CreateFpgaImageOutput type CreateImageInput struct { _ struct{} `type:"structure"` - // Information about one or more block device mappings. + // Information about one or more block device mappings. This parameter cannot + // be used to modify the encryption status of existing volumes or snapshots. + // To create an AMI with encrypted snapshots, use the CopyImage action. BlockDeviceMappings []*BlockDeviceMapping `locationName:"blockDeviceMapping" locationNameList:"BlockDeviceMapping" type:"list"` // A description for the new image. @@ -39952,6 +39955,14 @@ type DescribeRouteTablesInput struct { // * vpc-id - The ID of the VPC for the route table. Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + // The maximum number of results to return in a single call. To retrieve the + // remaining results, make another call with the returned NextToken value. This + // value can be between 5 and 100. + MaxResults *int64 `type:"integer"` + + // The token to retrieve the next page of results. + NextToken *string `type:"string"` + // One or more route table IDs. // // Default: Describes all your route tables. @@ -39980,15 +39991,32 @@ func (s *DescribeRouteTablesInput) SetFilters(v []*Filter) *DescribeRouteTablesI return s } +// SetMaxResults sets the MaxResults field's value. +func (s *DescribeRouteTablesInput) SetMaxResults(v int64) *DescribeRouteTablesInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeRouteTablesInput) SetNextToken(v string) *DescribeRouteTablesInput { + s.NextToken = &v + return s +} + // SetRouteTableIds sets the RouteTableIds field's value. 
func (s *DescribeRouteTablesInput) SetRouteTableIds(v []*string) *DescribeRouteTablesInput { s.RouteTableIds = v return s } +// Contains the output of DescribeRouteTables. type DescribeRouteTablesOutput struct { _ struct{} `type:"structure"` + // The token to use to retrieve the next page of results. This value is null + // when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` + // Information about one or more route tables. RouteTables []*RouteTable `locationName:"routeTableSet" locationNameList:"item" type:"list"` } @@ -40003,6 +40031,12 @@ func (s DescribeRouteTablesOutput) GoString() string { return s.String() } +// SetNextToken sets the NextToken field's value. +func (s *DescribeRouteTablesOutput) SetNextToken(v string) *DescribeRouteTablesOutput { + s.NextToken = &v + return s +} + // SetRouteTables sets the RouteTables field's value. func (s *DescribeRouteTablesOutput) SetRouteTables(v []*RouteTable) *DescribeRouteTablesOutput { s.RouteTables = v @@ -45018,9 +45052,14 @@ type EbsBlockDevice struct { DeleteOnTermination *bool `locationName:"deleteOnTermination" type:"boolean"` // Indicates whether the EBS volume is encrypted. Encrypted volumes can only - // be attached to instances that support Amazon EBS encryption. If you are creating - // a volume from a snapshot, you can't specify an encryption value. This is - // because only blank volumes can be encrypted on creation. + // be attached to instances that support Amazon EBS encryption. + // + // If you are creating a volume from a snapshot, you cannot specify an encryption + // value. This is because only blank volumes can be encrypted on creation. If + // you are creating a snapshot from an existing EBS volume, you cannot specify + // an encryption value that differs from that of the EBS volume. We recommend + // that you omit the encryption value from the block device mappings when creating + // an image from an instance. 
Encrypted *bool `locationName:"encrypted" type:"boolean"` // The number of I/O operations per second (IOPS) that the volume supports. diff --git a/vendor/github.com/shurcooL/vfsgen/BUILD.bazel b/vendor/github.com/shurcooL/vfsgen/BUILD.bazel new file mode 100644 index 00000000000..d606414d749 --- /dev/null +++ b/vendor/github.com/shurcooL/vfsgen/BUILD.bazel @@ -0,0 +1,16 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "commentwriter.go", + "doc.go", + "generator.go", + "options.go", + "stringwriter.go", + ], + importmap = "installer/vendor/github.com/shurcooL/vfsgen", + importpath = "github.com/shurcooL/vfsgen", + visibility = ["//visibility:public"], + deps = ["//vendor/github.com/shurcooL/httpfs/vfsutil:go_default_library"], +) diff --git a/vendor/github.com/shurcooL/vfsgen/commentwriter.go b/vendor/github.com/shurcooL/vfsgen/commentwriter.go new file mode 100644 index 00000000000..b6847f52b0a --- /dev/null +++ b/vendor/github.com/shurcooL/vfsgen/commentwriter.go @@ -0,0 +1,45 @@ +package vfsgen + +import "io" + +// commentWriter writes a Go comment to the underlying io.Writer, +// using line comment form (//). +type commentWriter struct { + W io.Writer + wroteSlashes bool // Wrote "//" at the beginning of the current line. 
+} + +func (c *commentWriter) Write(p []byte) (int, error) { + var n int + for i, b := range p { + if !c.wroteSlashes { + s := "//" + if b != '\n' { + s = "// " + } + if _, err := io.WriteString(c.W, s); err != nil { + return n, err + } + c.wroteSlashes = true + } + n0, err := c.W.Write(p[i : i+1]) + n += n0 + if err != nil { + return n, err + } + if b == '\n' { + c.wroteSlashes = false + } + } + return len(p), nil +} + +func (c *commentWriter) Close() error { + if !c.wroteSlashes { + if _, err := io.WriteString(c.W, "//"); err != nil { + return err + } + c.wroteSlashes = true + } + return nil +} diff --git a/vendor/github.com/shurcooL/vfsgen/doc.go b/vendor/github.com/shurcooL/vfsgen/doc.go new file mode 100644 index 00000000000..46f28504e31 --- /dev/null +++ b/vendor/github.com/shurcooL/vfsgen/doc.go @@ -0,0 +1,15 @@ +/* +Package vfsgen takes an http.FileSystem (likely at `go generate` time) and +generates Go code that statically implements the provided http.FileSystem. + +Features: + +- Efficient generated code without unneccessary overhead. + +- Uses gzip compression internally (selectively, only for files that compress well). + +- Enables direct access to internal gzip compressed bytes via an optional interface. + +- Outputs `gofmt`ed Go code. +*/ +package vfsgen diff --git a/vendor/github.com/shurcooL/vfsgen/generator.go b/vendor/github.com/shurcooL/vfsgen/generator.go new file mode 100644 index 00000000000..5782693ebbd --- /dev/null +++ b/vendor/github.com/shurcooL/vfsgen/generator.go @@ -0,0 +1,485 @@ +package vfsgen + +import ( + "bytes" + "compress/gzip" + "errors" + "fmt" + "io" + "io/ioutil" + "net/http" + "os" + pathpkg "path" + "sort" + "strconv" + "text/template" + "time" + + "github.com/shurcooL/httpfs/vfsutil" +) + +// Generate Go code that statically implements input filesystem, +// write the output to a file specified in opt. 
+func Generate(input http.FileSystem, opt Options) error { + opt.fillMissing() + + // Use an in-memory buffer to generate the entire output. + buf := new(bytes.Buffer) + + err := t.ExecuteTemplate(buf, "Header", opt) + if err != nil { + return err + } + + var toc toc + err = findAndWriteFiles(buf, input, &toc) + if err != nil { + return err + } + + err = t.ExecuteTemplate(buf, "DirEntries", toc.dirs) + if err != nil { + return err + } + + err = t.ExecuteTemplate(buf, "Trailer", toc) + if err != nil { + return err + } + + // Write output file (all at once). + fmt.Println("writing", opt.Filename) + err = ioutil.WriteFile(opt.Filename, buf.Bytes(), 0644) + return err +} + +type toc struct { + dirs []*dirInfo + + HasCompressedFile bool // There's at least one compressedFile. + HasFile bool // There's at least one uncompressed file. +} + +// fileInfo is a definition of a file. +type fileInfo struct { + Path string + Name string + ModTime time.Time + UncompressedSize int64 +} + +// dirInfo is a definition of a directory. +type dirInfo struct { + Path string + Name string + ModTime time.Time + Entries []string +} + +// findAndWriteFiles recursively finds all the file paths in the given directory tree. +// They are added to the given map as keys. Values will be safe function names +// for each file, which will be used when generating the output code. +func findAndWriteFiles(buf *bytes.Buffer, fs http.FileSystem, toc *toc) error { + walkFn := func(path string, fi os.FileInfo, r io.ReadSeeker, err error) error { + if err != nil { + // Consider all errors reading the input filesystem as fatal. + return err + } + + switch fi.IsDir() { + case false: + file := &fileInfo{ + Path: path, + Name: pathpkg.Base(path), + ModTime: fi.ModTime().UTC(), + UncompressedSize: fi.Size(), + } + + marker := buf.Len() + + // Write CompressedFileInfo. 
+ err = writeCompressedFileInfo(buf, file, r) + switch err { + default: + return err + case nil: + toc.HasCompressedFile = true + // If compressed file is not smaller than original, revert and write original file. + case errCompressedNotSmaller: + _, err = r.Seek(0, io.SeekStart) + if err != nil { + return err + } + + buf.Truncate(marker) + + // Write FileInfo. + err = writeFileInfo(buf, file, r) + if err != nil { + return err + } + toc.HasFile = true + } + case true: + entries, err := readDirPaths(fs, path) + if err != nil { + return err + } + + dir := &dirInfo{ + Path: path, + Name: pathpkg.Base(path), + ModTime: fi.ModTime().UTC(), + Entries: entries, + } + + toc.dirs = append(toc.dirs, dir) + + // Write DirInfo. + err = t.ExecuteTemplate(buf, "DirInfo", dir) + if err != nil { + return err + } + } + + return nil + } + + err := vfsutil.WalkFiles(fs, "/", walkFn) + return err +} + +// readDirPaths reads the directory named by dirname and returns +// a sorted list of directory paths. +func readDirPaths(fs http.FileSystem, dirname string) ([]string, error) { + fis, err := vfsutil.ReadDir(fs, dirname) + if err != nil { + return nil, err + } + paths := make([]string, len(fis)) + for i := range fis { + paths[i] = pathpkg.Join(dirname, fis[i].Name()) + } + sort.Strings(paths) + return paths, nil +} + +// writeCompressedFileInfo writes CompressedFileInfo. +// It returns errCompressedNotSmaller if compressed file is not smaller than original. 
+func writeCompressedFileInfo(w io.Writer, file *fileInfo, r io.Reader) error { + err := t.ExecuteTemplate(w, "CompressedFileInfo-Before", file) + if err != nil { + return err + } + sw := &stringWriter{Writer: w} + gw := gzip.NewWriter(sw) + _, err = io.Copy(gw, r) + if err != nil { + return err + } + err = gw.Close() + if err != nil { + return err + } + if sw.N >= file.UncompressedSize { + return errCompressedNotSmaller + } + err = t.ExecuteTemplate(w, "CompressedFileInfo-After", file) + return err +} + +var errCompressedNotSmaller = errors.New("compressed file is not smaller than original") + +// Write FileInfo. +func writeFileInfo(w io.Writer, file *fileInfo, r io.Reader) error { + err := t.ExecuteTemplate(w, "FileInfo-Before", file) + if err != nil { + return err + } + sw := &stringWriter{Writer: w} + _, err = io.Copy(sw, r) + if err != nil { + return err + } + err = t.ExecuteTemplate(w, "FileInfo-After", file) + return err +} + +var t = template.Must(template.New("").Funcs(template.FuncMap{ + "quote": strconv.Quote, + "comment": func(s string) (string, error) { + var buf bytes.Buffer + cw := &commentWriter{W: &buf} + _, err := io.WriteString(cw, s) + if err != nil { + return "", err + } + err = cw.Close() + return buf.String(), err + }, +}).Parse(`{{define "Header"}}// Code generated by vfsgen; DO NOT EDIT. + +{{with .BuildTags}}// +build {{.}} + +{{end}}package {{.PackageName}} + +import ( + "bytes" + "compress/gzip" + "fmt" + "io" + "io/ioutil" + "net/http" + "os" + pathpkg "path" + "time" +) + +{{comment .VariableComment}} +var {{.VariableName}} = func() http.FileSystem { + fs := vfsgen۰FS{ +{{end}} + + + +{{define "CompressedFileInfo-Before"}} {{quote .Path}}: &vfsgen۰CompressedFileInfo{ + name: {{quote .Name}}, + modTime: {{template "Time" .ModTime}}, + uncompressedSize: {{.UncompressedSize}}, +{{/* This blank line separating compressedContent is neccessary to prevent potential gofmt issues. See issue #19. 
*/}} + compressedContent: []byte("{{end}}{{define "CompressedFileInfo-After"}}"), + }, +{{end}} + + + +{{define "FileInfo-Before"}} {{quote .Path}}: &vfsgen۰FileInfo{ + name: {{quote .Name}}, + modTime: {{template "Time" .ModTime}}, + content: []byte("{{end}}{{define "FileInfo-After"}}"), + }, +{{end}} + + + +{{define "DirInfo"}} {{quote .Path}}: &vfsgen۰DirInfo{ + name: {{quote .Name}}, + modTime: {{template "Time" .ModTime}}, + }, +{{end}} + + + +{{define "DirEntries"}} } +{{range .}}{{if .Entries}} fs[{{quote .Path}}].(*vfsgen۰DirInfo).entries = []os.FileInfo{{"{"}}{{range .Entries}} + fs[{{quote .}}].(os.FileInfo),{{end}} + } +{{end}}{{end}} + return fs +}() +{{end}} + + + +{{define "Trailer"}} +type vfsgen۰FS map[string]interface{} + +func (fs vfsgen۰FS) Open(path string) (http.File, error) { + path = pathpkg.Clean("/" + path) + f, ok := fs[path] + if !ok { + return nil, &os.PathError{Op: "open", Path: path, Err: os.ErrNotExist} + } + + switch f := f.(type) {{"{"}}{{if .HasCompressedFile}} + case *vfsgen۰CompressedFileInfo: + gr, err := gzip.NewReader(bytes.NewReader(f.compressedContent)) + if err != nil { + // This should never happen because we generate the gzip bytes such that they are always valid. + panic("unexpected error reading own gzip compressed bytes: " + err.Error()) + } + return &vfsgen۰CompressedFile{ + vfsgen۰CompressedFileInfo: f, + gr: gr, + }, nil{{end}}{{if .HasFile}} + case *vfsgen۰FileInfo: + return &vfsgen۰File{ + vfsgen۰FileInfo: f, + Reader: bytes.NewReader(f.content), + }, nil{{end}} + case *vfsgen۰DirInfo: + return &vfsgen۰Dir{ + vfsgen۰DirInfo: f, + }, nil + default: + // This should never happen because we generate only the above types. + panic(fmt.Sprintf("unexpected type %T", f)) + } +} +{{if .HasCompressedFile}} +// vfsgen۰CompressedFileInfo is a static definition of a gzip compressed file. 
+type vfsgen۰CompressedFileInfo struct { + name string + modTime time.Time + compressedContent []byte + uncompressedSize int64 +} + +func (f *vfsgen۰CompressedFileInfo) Readdir(count int) ([]os.FileInfo, error) { + return nil, fmt.Errorf("cannot Readdir from file %s", f.name) +} +func (f *vfsgen۰CompressedFileInfo) Stat() (os.FileInfo, error) { return f, nil } + +func (f *vfsgen۰CompressedFileInfo) GzipBytes() []byte { + return f.compressedContent +} + +func (f *vfsgen۰CompressedFileInfo) Name() string { return f.name } +func (f *vfsgen۰CompressedFileInfo) Size() int64 { return f.uncompressedSize } +func (f *vfsgen۰CompressedFileInfo) Mode() os.FileMode { return 0444 } +func (f *vfsgen۰CompressedFileInfo) ModTime() time.Time { return f.modTime } +func (f *vfsgen۰CompressedFileInfo) IsDir() bool { return false } +func (f *vfsgen۰CompressedFileInfo) Sys() interface{} { return nil } + +// vfsgen۰CompressedFile is an opened compressedFile instance. +type vfsgen۰CompressedFile struct { + *vfsgen۰CompressedFileInfo + gr *gzip.Reader + grPos int64 // Actual gr uncompressed position. + seekPos int64 // Seek uncompressed position. +} + +func (f *vfsgen۰CompressedFile) Read(p []byte) (n int, err error) { + if f.grPos > f.seekPos { + // Rewind to beginning. + err = f.gr.Reset(bytes.NewReader(f.compressedContent)) + if err != nil { + return 0, err + } + f.grPos = 0 + } + if f.grPos < f.seekPos { + // Fast-forward. 
+ _, err = io.CopyN(ioutil.Discard, f.gr, f.seekPos-f.grPos) + if err != nil { + return 0, err + } + f.grPos = f.seekPos + } + n, err = f.gr.Read(p) + f.grPos += int64(n) + f.seekPos = f.grPos + return n, err +} +func (f *vfsgen۰CompressedFile) Seek(offset int64, whence int) (int64, error) { + switch whence { + case io.SeekStart: + f.seekPos = 0 + offset + case io.SeekCurrent: + f.seekPos += offset + case io.SeekEnd: + f.seekPos = f.uncompressedSize + offset + default: + panic(fmt.Errorf("invalid whence value: %v", whence)) + } + return f.seekPos, nil +} +func (f *vfsgen۰CompressedFile) Close() error { + return f.gr.Close() +} +{{else}} +// We already imported "compress/gzip" and "io/ioutil", but ended up not using them. Avoid unused import error. +var _ = gzip.Reader{} +var _ = ioutil.Discard +{{end}}{{if .HasFile}} +// vfsgen۰FileInfo is a static definition of an uncompressed file (because it's not worth gzip compressing). +type vfsgen۰FileInfo struct { + name string + modTime time.Time + content []byte +} + +func (f *vfsgen۰FileInfo) Readdir(count int) ([]os.FileInfo, error) { + return nil, fmt.Errorf("cannot Readdir from file %s", f.name) +} +func (f *vfsgen۰FileInfo) Stat() (os.FileInfo, error) { return f, nil } + +func (f *vfsgen۰FileInfo) NotWorthGzipCompressing() {} + +func (f *vfsgen۰FileInfo) Name() string { return f.name } +func (f *vfsgen۰FileInfo) Size() int64 { return int64(len(f.content)) } +func (f *vfsgen۰FileInfo) Mode() os.FileMode { return 0444 } +func (f *vfsgen۰FileInfo) ModTime() time.Time { return f.modTime } +func (f *vfsgen۰FileInfo) IsDir() bool { return false } +func (f *vfsgen۰FileInfo) Sys() interface{} { return nil } + +// vfsgen۰File is an opened file instance. +type vfsgen۰File struct { + *vfsgen۰FileInfo + *bytes.Reader +} + +func (f *vfsgen۰File) Close() error { + return nil +} +{{else if not .HasCompressedFile}} +// We already imported "bytes", but ended up not using it. Avoid unused import error. 
+var _ = bytes.Reader{} +{{end}} +// vfsgen۰DirInfo is a static definition of a directory. +type vfsgen۰DirInfo struct { + name string + modTime time.Time + entries []os.FileInfo +} + +func (d *vfsgen۰DirInfo) Read([]byte) (int, error) { + return 0, fmt.Errorf("cannot Read from directory %s", d.name) +} +func (d *vfsgen۰DirInfo) Close() error { return nil } +func (d *vfsgen۰DirInfo) Stat() (os.FileInfo, error) { return d, nil } + +func (d *vfsgen۰DirInfo) Name() string { return d.name } +func (d *vfsgen۰DirInfo) Size() int64 { return 0 } +func (d *vfsgen۰DirInfo) Mode() os.FileMode { return 0755 | os.ModeDir } +func (d *vfsgen۰DirInfo) ModTime() time.Time { return d.modTime } +func (d *vfsgen۰DirInfo) IsDir() bool { return true } +func (d *vfsgen۰DirInfo) Sys() interface{} { return nil } + +// vfsgen۰Dir is an opened dir instance. +type vfsgen۰Dir struct { + *vfsgen۰DirInfo + pos int // Position within entries for Seek and Readdir. +} + +func (d *vfsgen۰Dir) Seek(offset int64, whence int) (int64, error) { + if offset == 0 && whence == io.SeekStart { + d.pos = 0 + return 0, nil + } + return 0, fmt.Errorf("unsupported Seek in directory %s", d.name) +} + +func (d *vfsgen۰Dir) Readdir(count int) ([]os.FileInfo, error) { + if d.pos >= len(d.entries) && count > 0 { + return nil, io.EOF + } + if count <= 0 || count > len(d.entries)-d.pos { + count = len(d.entries) - d.pos + } + e := d.entries[d.pos : d.pos+count] + d.pos += count + return e, nil +} +{{end}} + + + +{{define "Time"}} +{{- if .IsZero -}} + time.Time{} +{{- else -}} + time.Date({{.Year}}, {{printf "%d" .Month}}, {{.Day}}, {{.Hour}}, {{.Minute}}, {{.Second}}, {{.Nanosecond}}, time.UTC) +{{- end -}} +{{end}} +`)) diff --git a/vendor/github.com/shurcooL/vfsgen/options.go b/vendor/github.com/shurcooL/vfsgen/options.go new file mode 100644 index 00000000000..d10d348e709 --- /dev/null +++ b/vendor/github.com/shurcooL/vfsgen/options.go @@ -0,0 +1,45 @@ +package vfsgen + +import ( + "fmt" + "strings" +) + +// Options 
for vfsgen code generation.
+type Options struct {
+	// Filename of the generated Go code output (including extension).
+	// If left empty, it defaults to "{{toLower .VariableName}}_vfsdata.go".
+	Filename string
+
+	// PackageName is the name of the package in the generated code.
+	// If left empty, it defaults to "main".
+	PackageName string
+
+	// BuildTags are the optional build tags in the generated code.
+	// The build tags syntax is specified by the go tool.
+	BuildTags string
+
+	// VariableName is the name of the http.FileSystem variable in the generated code.
+	// If left empty, it defaults to "assets".
+	VariableName string
+
+	// VariableComment is the comment of the http.FileSystem variable in the generated code.
+	// If left empty, it defaults to "{{.VariableName}} statically implements the virtual filesystem provided to vfsgen.".
+	VariableComment string
+}
+
+// fillMissing sets default values for mandatory options that are left empty.
+func (opt *Options) fillMissing() {
+	if opt.PackageName == "" {
+		opt.PackageName = "main"
+	}
+	if opt.VariableName == "" {
+		opt.VariableName = "assets"
+	}
+	if opt.Filename == "" { // must run after the VariableName default above: the filename is derived from it
+		opt.Filename = fmt.Sprintf("%s_vfsdata.go", strings.ToLower(opt.VariableName))
+	}
+	if opt.VariableComment == "" { // likewise derived from VariableName, so ordering matters
+		opt.VariableComment = fmt.Sprintf("%s statically implements the virtual filesystem provided to vfsgen.", opt.VariableName)
+	}
+}
diff --git a/vendor/github.com/shurcooL/vfsgen/stringwriter.go b/vendor/github.com/shurcooL/vfsgen/stringwriter.go
new file mode 100644
index 00000000000..a781efdc6dd
--- /dev/null
+++ b/vendor/github.com/shurcooL/vfsgen/stringwriter.go
@@ -0,0 +1,27 @@
+package vfsgen
+
+import (
+	"io"
+)
+
+// stringWriter writes given bytes to underlying io.Writer as a Go interpreted string literal value,
+// not including double quotes. It tracks the total number of bytes written.
+type stringWriter struct {
+	io.Writer
+	N int64 // Total bytes written.
+}
+
+func (sw *stringWriter) Write(p []byte) (n int, err error) {
+	const hex = "0123456789abcdef" // nibble -> hex digit lookup table
+	buf := []byte{'\\', 'x', 0, 0} // reusable 4-byte scratch holding one "\xNN" escape; digits overwritten per input byte
+	for _, b := range p {
+		buf[2], buf[3] = hex[b/16], hex[b%16] // high nibble, low nibble
+		_, err = sw.Writer.Write(buf)
+		if err != nil {
+			return n, err
+		}
+		n++ // n counts bytes of p consumed (not bytes emitted), per the io.Writer contract
+		sw.N++
+	}
+	return n, nil
+}