From 48df5cfaeb455382f62d29ea470ae9980258a33d Mon Sep 17 00:00:00 2001 From: Cole Mickens Date: Mon, 23 May 2016 17:49:18 -0700 Subject: [PATCH] min-turnup: azure --- min-turnup/Dockerfile | 53 ++++ min-turnup/Makefile | 11 +- min-turnup/README-azure | 12 + min-turnup/default.config | 31 -- min-turnup/phase1/Kconfig | 10 +- min-turnup/phase1/azure/.gitignore | 3 + min-turnup/phase1/azure/Kconfig | 76 +++++ min-turnup/phase1/azure/all.jsonnet | 4 + min-turnup/phase1/azure/azure.json | 11 + min-turnup/phase1/azure/configure-vm.sh | 69 +++++ min-turnup/phase1/azure/gen | 8 + min-turnup/phase1/azure/lib/azure.jsonnet | 284 ++++++++++++++++++ min-turnup/phase2/Kconfig | 8 +- min-turnup/phase2/ansible/Makefile | 3 +- .../roles/master/tasks/manifests.yml | 22 +- .../roles/master/templates/etcd.json.j2 | 49 +++ .../roles/master/templates/etcd.jsonnet | 70 ----- .../master/templates/kube-apiserver.json.j2 | 102 +++++++ .../master/templates/kube-apiserver.jsonnet | 94 ------ .../templates/kube-controller-manager.json.j2 | 87 ++++++ .../templates/kube-controller-manager.jsonnet | 77 ----- .../master/templates/kube-scheduler.json.j2 | 41 +++ .../master/templates/kube-scheduler.jsonnet | 41 --- .../roles/master/templates/kubeconfig.jsonnet | 26 -- .../playbooks/roles/node/tasks/kubelet.yml | 1 + .../playbooks/roles/node/tasks/main.yml | 1 - .../roles/node/templates/kubelet.service.j2 | 24 +- min-turnup/util/config_to_json | 2 +- 28 files changed, 852 insertions(+), 368 deletions(-) create mode 100644 min-turnup/Dockerfile create mode 100644 min-turnup/README-azure delete mode 100644 min-turnup/default.config create mode 100644 min-turnup/phase1/azure/.gitignore create mode 100644 min-turnup/phase1/azure/Kconfig create mode 100644 min-turnup/phase1/azure/all.jsonnet create mode 100644 min-turnup/phase1/azure/azure.json create mode 100644 min-turnup/phase1/azure/configure-vm.sh create mode 100755 min-turnup/phase1/azure/gen create mode 100644 
min-turnup/phase1/azure/lib/azure.jsonnet create mode 100644 min-turnup/phase2/ansible/playbooks/roles/master/templates/etcd.json.j2 delete mode 100644 min-turnup/phase2/ansible/playbooks/roles/master/templates/etcd.jsonnet create mode 100644 min-turnup/phase2/ansible/playbooks/roles/master/templates/kube-apiserver.json.j2 delete mode 100644 min-turnup/phase2/ansible/playbooks/roles/master/templates/kube-apiserver.jsonnet create mode 100644 min-turnup/phase2/ansible/playbooks/roles/master/templates/kube-controller-manager.json.j2 delete mode 100644 min-turnup/phase2/ansible/playbooks/roles/master/templates/kube-controller-manager.jsonnet create mode 100644 min-turnup/phase2/ansible/playbooks/roles/master/templates/kube-scheduler.json.j2 delete mode 100644 min-turnup/phase2/ansible/playbooks/roles/master/templates/kube-scheduler.jsonnet delete mode 100644 min-turnup/phase2/ansible/playbooks/roles/master/templates/kubeconfig.jsonnet diff --git a/min-turnup/Dockerfile b/min-turnup/Dockerfile new file mode 100644 index 000000000..e54c441ce --- /dev/null +++ b/min-turnup/Dockerfile @@ -0,0 +1,53 @@ +FROM docker.io/buildpack-deps:xenial + +RUN bash -c "\ + apt-get update && apt-get -y upgrade && \ + apt-get install -y make unzip jq && \ + rm -rf /var/lib/apt/lists/*" + +ENV TERRAFORM_VERSION 0.7.0-rc1 +RUN bash -c "\ + wget -q -O /tmp/terraform.zip \"https://releases.hashicorp.com/terraform/${TERRAFORM_VERSION}/terraform_${TERRAFORM_VERSION}_linux_amd64.zip\" && \ + unzip /tmp/terraform.zip -d /usr/local/bin/ && \ + rm /tmp/terraform.zip" + +################################################################################################ +# TEMP: For now get terraform from source so we get ipv4 forwarding for Azure +################################################################################################ +#RUN bash -c "\ +# apt-get update && apt-get -y upgrade && \ +# apt-get install -y nodejs npm golang-go && \ +# rm -rf /var/lib/apt/lists/*" +#ENV GOPATH 
"/root/gopkgs" +#ENV PATH "$GOPATH/bin:$PATH" +#RUN bash -c "mkdir -p $GOPATH/src/github.com/hashicorp/ && \ +# git clone https://github.com/hashicorp/terraform $GOPATH/src/github.com/hashicorp/terraform && \ +# cd $GOPATH/src/github.com/hashicorp/terraform; make dev" + +################################################################################################ +# Azure - Required dependencies +################################################################################################ +RUN bash -c "\ + apt-get update && apt-get -y upgrade && \ + apt-get install -y nodejs npm && \ + rm -rf /var/lib/apt/lists/*" +RUN bash -c "npm install -g azure-cli" + +################################################################################################ + +################################################################################################ +# Jsonnet +################################################################################################ + +RUN bash -c "\ + cd /tmp; \ + git clone https://github.com/google/jsonnet; \ + cd jsonnet; \ + make; \ + cp jsonnet /usr/bin/jsonnet;" + +WORKDIR /opt/kube-deploy + +ADD . /opt/kube-deploy/ + +CMD make diff --git a/min-turnup/Makefile b/min-turnup/Makefile index fcdfa1599..cc37800a5 100644 --- a/min-turnup/Makefile +++ b/min-turnup/Makefile @@ -1,6 +1,8 @@ SHELL=/bin/bash -.SHELLFLAGS="-O extglob -o errexit -o pipefail -o nounset -c" + +# TODO: things break with this uncommented +#.SHELLFLAGS="-O extglob -o errexit -o pipefail -o nounset -c" .PHONY: config echo-config @@ -17,6 +19,13 @@ endif CONF_TOOL_VERSION = 4.6 KCONFIG_FILES = $(shell find . -name 'Kconfig') + +docker-build: + docker build -t kube-min-turnup . 
+ +docker-run: docker-build + docker run -it --net=host -v `pwd`:/opt/kube-min-turnup kube-min-turnup /bin/bash + default: $(MAKE) config diff --git a/min-turnup/README-azure b/min-turnup/README-azure new file mode 100644 index 000000000..89e149aab --- /dev/null +++ b/min-turnup/README-azure @@ -0,0 +1,12 @@ +Things: + +1. Hardcodes assumptions about ubuntu+systemd +2. terraform is flaky around the subnet +3. terraform is flaky around kubelet.tar sometimes +4. kubelet.service is very flaky and can irreparably hang nodes +5. phases are fairly tightly coupled + +Other things: + +1. This copies root ca private key to the nodes which is not needed + diff --git a/min-turnup/default.config b/min-turnup/default.config deleted file mode 100644 index c5257d2c5..000000000 --- a/min-turnup/default.config +++ /dev/null @@ -1,31 +0,0 @@ -# -# Automatically generated file; DO NOT EDIT. -# Kubernetes Minimal Turnup Configuration -# - -# -# Phase 1: Cluster Resource Provisioning -# -phase1.num_nodes=10 -phase1.cloud_provider="gce" - -# -# GCE configuration -# -phase1.gce.os_image="gci" - -# -# Phase 1b: Cryptographic Asset Provisioning -# -phase1b.extra_api_sans="10.0.0.1" -phase1b.extra_api_dns_names="kubernetes-master" - -# -# Phase 2: Node Bootstrapping -# -phase2.systemd=y - -# -# Phase 3: Deploying Addons -# -phase3.dashboard=y diff --git a/min-turnup/phase1/Kconfig b/min-turnup/phase1/Kconfig index 81b333416..2089abee0 100644 --- a/min-turnup/phase1/Kconfig +++ b/min-turnup/phase1/Kconfig @@ -10,20 +10,24 @@ config phase1.num_nodes config phase1.instance_prefix string "instance prefix" - default "kuberentes" + default "k0" help A prefix to append to all instance names. menuconfig phase1.cloud_provider - string "cloud proivder" + string "cloud provider" default "gce" help The cloud provider you would like to deploy to. - Valid options are (gce). + Valid options are (gce, azure).
if phase1.cloud_provider = "gce" source "phase1/gce/Kconfig" endif +if phase1.cloud_provider = "azure" + source "phase1/azure/Kconfig" +endif + endmenu diff --git a/min-turnup/phase1/azure/.gitignore b/min-turnup/phase1/azure/.gitignore new file mode 100644 index 000000000..7cb9a0a86 --- /dev/null +++ b/min-turnup/phase1/azure/.gitignore @@ -0,0 +1,3 @@ +terraform.tfstate +account.json +terraform.tfstate.backup diff --git a/min-turnup/phase1/azure/Kconfig b/min-turnup/phase1/azure/Kconfig new file mode 100644 index 000000000..ae5962f86 --- /dev/null +++ b/min-turnup/phase1/azure/Kconfig @@ -0,0 +1,76 @@ +menu "Azure configuration" + +config phase1.azure.image_publisher + string "Base Virtual Machine OS Image" + default "canonical" + help + The publisher of the base image used for the VirtualMachines. +config phase1.azure.image_offer + string "Base Virtual Machine OS Image" + default "ubuntuserver" + help + The offer of the base image used for the VirtualMachines. +config phase1.azure.image_sku + string "Base Virtual Machine OS Image" + default "16.04.0-LTS" + help + The sku of the base image used for the VirtualMachines. +config phase1.azure.image_version + string "Base Virtual Machine OS Image" + default "latest" + help + The version of the base image used for the VirtualMachines. + +config phase1.azure.master_vm_size + string "Virtual Machine Size (Master)" + default "Standard_D1_v2" + help + The size of VirtualMachine to deploy. + +config phase1.azure.node_vm_size + string "Virtual Machine Size (Node)" + default "Standard_D1_v2" + help + The size of VirtualMachine to deploy. + +config phase1.azure.master_private_ip + string "Private IP address of Master" + default "10.0.1.4" + help + The private ip address of master + +config phase1.azure.location + string "Resource Location" + default "westus" + help + The Azure location to use. 
+ +config phase1.azure.admin_username + string "Virtual Machine Admin Username" + default "kube" + +config phase1.azure.admin_password + string "Virtual Machine Admin Password" + default "AzureKubernet3s!" + +config phase1.azure.tenant_id + string "ActiveDirectory TenantID" + help + The TenantID of the ActiveDirectory tenant containing the Service Account to be used by the cluster components. + +config phase1.azure.subscription_id + string "Azure SubscriptionID" + help + The SubscriptionID of the Azure subscription to deploy the cluster into. + +config phase1.azure.client_id + string "ActiveDirectory ServicePrincipal ClientID" + help + The ClientID of the Service Account to be used by the cluster components. + +config phase1.azure.client_secret + string "ActiveDirectory ServicePrincipal ClientSecret" + help + The ClientSecret of the Service Account to be used by the cluster components. + +endmenu diff --git a/min-turnup/phase1/azure/all.jsonnet b/min-turnup/phase1/azure/all.jsonnet new file mode 100644 index 000000000..2c8f2478f --- /dev/null +++ b/min-turnup/phase1/azure/all.jsonnet @@ -0,0 +1,4 @@ +local cfg = import "../../.config.json"; +{ + "azure.tf": (import "lib/azure.jsonnet")(cfg), +} diff --git a/min-turnup/phase1/azure/azure.json b/min-turnup/phase1/azure/azure.json new file mode 100644 index 000000000..530c4182b --- /dev/null +++ b/min-turnup/phase1/azure/azure.json @@ -0,0 +1,11 @@ +{ + "tenantId": "${tenantId}", + "subscriptionId": "${subscriptionId}", + "adClientId": "${adClientId}", + "adClientSecret": "${adClientSecret}", + "resourceGroup": "${resourceGroup}", + "location": "${location}", + "subnetName": "${subnetName}", + "securityGroupName": "${securityGroupName}", + "vnetName": "${vnetName}" +} diff --git a/min-turnup/phase1/azure/configure-vm.sh b/min-turnup/phase1/azure/configure-vm.sh new file mode 100644 index 000000000..39a44d9db --- /dev/null +++ b/min-turnup/phase1/azure/configure-vm.sh @@ -0,0 +1,69 @@ +#!
/bin/bash + +set -x +set -o errexit +set -o pipefail +set -o nounset + +mkdir -p /etc/systemd/system/docker.service.d/ +cat < /etc/systemd/system/docker.service.d/clear_mount_propagtion_flags.conf +[Service] +MountFlags=shared +EOF +cat < /etc/systemd/system/docker.service.d/overlay.conf +[Service] +ExecStart= +ExecStart=/usr/bin/docker daemon -H fd:// --storage-driver=overlay +EOF + +curl -sSL https://get.docker.com/ | sh + +apt-get update +#apt-get dist-upgrade -y +apt-get install -y jq + +systemctl start docker || true + +ROLE="node" +if [[ $(hostname) = *master* ]]; then + ROLE="master" +fi + +azure_file="/etc/kubernetes/azure.json" +config_file="/etc/kubernetes/k8s_config.json" + +mkdir /etc/kubernetes +# these get filled in from terraform +echo -n "${azure_json}" | base64 -d > "$azure_file" +echo -n "${k8s_config}" | base64 -d > "$config_file" +echo -n "${kubelet_tar}" | base64 -d > "/etc/kubernetes/kubelet.tar" +echo -n "${root_tar}" | base64 -d > "/etc/kubernetes/root.tar" +echo -n "${apiserver_tar}" | base64 -d > "/etc/kubernetes/apiserver.tar" + +MASTER_IP="$(cat "$config_file" | jq -r '.phase1.azure.master_private_ip')" + +jq ". 
+ {\"role\": \"$ROLE\", \"master_ip\": \"$MASTER_IP\"}" "$config_file" > /etc/kubernetes/k8s_config.new; cp /etc/kubernetes/k8s_config.new "$config_file" + +mkdir -p /srv/kubernetes +for bundle in root kubelet apiserver; do + cat "/etc/kubernetes/$bundle.tar" | sudo tar xv -C /srv/kubernetes +done; + +installer_container_spec="$(cat "$config_file" | jq -r '.phase2.installer_container_spec')" + +cat << EOF > /etc/kubernetes/install.sh +systemctl stop docker +systemctl start docker +docker pull "$installer_container_spec" +docker run \ + --net=host \ + -v /:/host_root \ + -v /etc/kubernetes/k8s_config.json:/opt/playbooks/config.json:ro \ + "$installer_container_spec" \ + /opt/do_role.sh "$ROLE" +EOF + +chmod +x /etc/kubernetes/install.sh +/etc/kubernetes/install.sh + +#sudo reboot diff --git a/min-turnup/phase1/azure/gen b/min-turnup/phase1/azure/gen new file mode 100755 index 000000000..776e9167b --- /dev/null +++ b/min-turnup/phase1/azure/gen @@ -0,0 +1,8 @@ +#! /bin/bash + +set -x +set -o errexit +set -o pipefail +set -o nounset + +jsonnet --multi out/ all.jsonnet diff --git a/min-turnup/phase1/azure/lib/azure.jsonnet b/min-turnup/phase1/azure/lib/azure.jsonnet new file mode 100644 index 000000000..799f55a90 --- /dev/null +++ b/min-turnup/phase1/azure/lib/azure.jsonnet @@ -0,0 +1,284 @@ +function(cfg1) + local cfg = cfg1.phase1; + local master_private_ip = cfg.azure.master_private_ip; + local names = { + resource_group: "%(instance_prefix)s" % cfg, + master_public_ip: "%(instance_prefix)s-master-pip" % cfg, + availability_set: "%(instance_prefix)s-as" % cfg, + storage_account: "%(instance_prefix)sstrg" % cfg, + storage_container: "kube0000%(instance_prefix)s" % cfg, + vnet: "%(instance_prefix)s-vnet" % cfg, + subnet: "%(instance_prefix)s-subnet" % cfg, + route_table: "%(instance_prefix)s" % cfg, + security_group: "%(instance_prefix)s-nsg" % cfg, + master_nic: "%(instance_prefix)s-master-nic" % cfg, + master_vm: "%(instance_prefix)s-master" % cfg, + node_nic: 
"%(instance_prefix)s-node-nic" % cfg, + node_vm: "%(instance_prefix)s-node" % cfg, + }; + { + variable: { + subscription_id: { default: cfg.azure.subscription_id }, + tenant_id: { default: cfg.azure.tenant_id }, + client_id: { default: cfg.azure.client_id }, + client_secret: { default: cfg.azure.client_secret }, + }, + provider: { + azurerm: { + subscription_id: "${var.subscription_id}", + tenant_id: "${var.tenant_id}", + client_id: "${var.client_id}", + client_secret: "${var.client_secret}", + } + }, + resource: { + azurerm_resource_group: { + rg: { + name: names.resource_group, + location: cfg.azure.location, + } + }, + azurerm_storage_account: { + sa: { + resource_group_name: "${azurerm_resource_group.rg.name}", + name: names.storage_account, + location: "${azurerm_resource_group.rg.location}", + account_type: "Standard_LRS" + } + }, + azurerm_storage_container: { + sc: { + resource_group_name: "${azurerm_resource_group.rg.name}", + storage_account_name: "${azurerm_storage_account.sa.name}", + name: names.storage_container, + container_access_type: "private" + } + }, + azurerm_availability_set: { + as: { + resource_group_name: "${azurerm_resource_group.rg.name}", + name: names.availability_set, + location: "${azurerm_resource_group.rg.location}" + } + }, + azurerm_virtual_network: { + vnet: { + resource_group_name: "${azurerm_resource_group.rg.name}", + location: "${azurerm_resource_group.rg.location}", + name: names.vnet, + address_space: ["10.0.0.0/8"], + } + }, + azurerm_route_table: { + rt: { + resource_group_name: "${azurerm_resource_group.rg.name}", + location: "${azurerm_resource_group.rg.location}", + name: names.route_table, + } + }, + azurerm_subnet: { + subnet: { + resource_group_name: "${azurerm_resource_group.rg.name}", + name: names.subnet, + virtual_network_name: "${azurerm_virtual_network.vnet.name}", + address_prefix: "10.0.0.0/16", + network_security_group_id: "${azurerm_network_security_group.sg.id}", + route_table_id: 
"${azurerm_route_table.rt.id}", + } + }, + azurerm_network_security_group: { + sg: { + resource_group_name: "${azurerm_resource_group.rg.name}", + location: cfg.azure.location, + name: names.security_group, + }, + }, + azurerm_network_security_rule: { + [cfg.instance_prefix+"-master-ssh"]: { + name: "%(instance_prefix)s-master-ssh" % cfg, + priority: 100, + direction: "Inbound", + access: "Allow", + protocol: "Tcp", + source_port_range: "*", + destination_port_range: "22", + source_address_prefix: "*", + destination_address_prefix: "*", + resource_group_name: "${azurerm_resource_group.rg.name}", + network_security_group_name: "${azurerm_network_security_group.sg.name}", + }, + [cfg.instance_prefix+"-master-ssl"]: { + name: "%(instance_prefix)s-master-ssl" % cfg, + priority: 110, + direction: "Inbound", + access: "Allow", + protocol: "Tcp", + source_port_range: "*", + destination_port_range: "443", + source_address_prefix: "*", + destination_address_prefix: "*", + resource_group_name: "${azurerm_resource_group.rg.name}", + network_security_group_name: "${azurerm_network_security_group.sg.name}", + }, + }, + azurerm_public_ip: { + pip: { + resource_group_name: "${azurerm_resource_group.rg.name}", + location: cfg.azure.location, + name: names.master_public_ip, + public_ip_address_allocation: "static", + provisioner: [{ + "local-exec": { + command: ||| + cat <<EOF > ../../phase1b/crypto/san-extras + DNS.1 = kubernetes + DNS.2 = kubernetes.default + DNS.3 = kubernetes.default.svc + DNS.4 = kubernetes.default.svc.cluster.local + DNS.5 = %s + IP.1 = ${azurerm_public_ip.pip.ip_address} + IP.2 = %s + EOF + ||| % [names.master_vm, master_private_ip] + } + }] + } + }, + azurerm_network_interface: { + master_nic: { + resource_group_name: "${azurerm_resource_group.rg.name}", + location: "${azurerm_resource_group.rg.location}", + name: names.master_nic, + ip_configuration: { + name: "ipconfig", + subnet_id: "${azurerm_subnet.subnet.id}", + private_ip_address_allocation: "static", +
private_ip_address: master_private_ip, + public_ip_address_id: "${azurerm_public_ip.pip.id}", + }, + enable_ip_forwarding: true, + }, + node_nic: { + resource_group_name: "${azurerm_resource_group.rg.name}", + location: "${azurerm_resource_group.rg.location}", + name: names.node_nic+"-${count.index}", + ip_configuration: { + name: "ipconfig", + subnet_id: "${azurerm_subnet.subnet.id}", + private_ip_address_allocation: "Dynamic", + }, + enable_ip_forwarding: true, + count: cfg.num_nodes + } + }, + template_file: { + azure_json: { + template: "${file(\"azure.json\")}", + vars: { + tenantId: "${var.tenant_id}", + subscriptionId: "${var.subscription_id}", + adClientId: "${var.client_id}", + adClientSecret: "${var.client_secret}", + resourceGroup: "${azurerm_resource_group.rg.name}", + location: "${azurerm_resource_group.rg.location}", + subnetName: "${azurerm_subnet.subnet.name}", + securityGroupName: "${azurerm_network_security_group.sg.name}", + vnetName: "${azurerm_virtual_network.vnet.name}", + }, + }, + configure_vm: { + template: "${file(\"configure-vm.sh\")}", + vars: { + apiserver_tar: "${base64encode(file(\"../../phase1b/crypto/apiserver.tar\"))}", + root_tar: "${base64encode(file(\"../../phase1b/crypto/root.tar\"))}", + kubelet_tar: "${base64encode(file(\"../../phase1b/crypto/kubelet.tar\"))}", + k8s_config: "${base64encode(file(\"../../.config.json\"))}", + azure_json: "${base64encode(template_file.azure_json.rendered)}", + }, + } + }, + azurerm_virtual_machine: { + master_vm: { + resource_group_name: names.resource_group, + location: "${azurerm_resource_group.rg.location}", + name: names.master_vm, + network_interface_ids: ["${azurerm_network_interface.master_nic.id}"], + vm_size: cfg.azure.master_vm_size, + availability_set_id: "${azurerm_availability_set.as.id}", + + storage_image_reference: { + publisher: cfg.azure.image_publisher, + offer: cfg.azure.image_offer, + sku: cfg.azure.image_sku, + version: cfg.azure.image_version + }, + + storage_os_disk: { + 
name: names.master_vm+"-osdisk", + vhd_uri: "${azurerm_storage_account.sa.primary_blob_endpoint}${azurerm_storage_container.sc.name}/"+names.master_vm+"-osdisk.vhd", + caching: "ReadWrite", + create_option: "FromImage" + }, + + os_profile: { + computer_name: names.master_vm, + admin_username: cfg.azure.admin_username, + admin_password: cfg.azure.admin_password, + custom_data: "${base64encode(template_file.configure_vm.rendered)}", + }, + + os_profile_linux_config: { + disable_password_authentication: false + }, + }, + node_vm: { + resource_group_name: names.resource_group, + location: "${azurerm_resource_group.rg.location}", + name: names.node_vm+"-${count.index}", + network_interface_ids: ["${element(azurerm_network_interface.node_nic.*.id, count.index)}"], + vm_size: cfg.azure.node_vm_size, + availability_set_id: "${azurerm_availability_set.as.id}", + + storage_image_reference: { + publisher: cfg.azure.image_publisher, + offer: cfg.azure.image_offer, + sku: cfg.azure.image_sku, + version: cfg.azure.image_version + }, + + storage_os_disk: { + name: names.node_vm+"-${count.index}-osdisk", + vhd_uri: "${azurerm_storage_account.sa.primary_blob_endpoint}${azurerm_storage_container.sc.name}/"+names.node_vm+"${count.index}-osdisk.vhd", + caching: "ReadWrite", + create_option: "FromImage" + }, + + os_profile: { + computer_name: names.node_vm+"-${count.index}", + admin_username: cfg.azure.admin_username, + admin_password: cfg.azure.admin_password, + custom_data: "${base64encode(template_file.configure_vm.rendered)}", + }, + + os_profile_linux_config: { + disable_password_authentication: false + }, + + count: cfg.num_nodes + } + }, + null_resource: { + crypto_assets: { + depends_on: [ + "azurerm_public_ip.pip", + ], + provisioner: [{ + "local-exec": { + # clean is covering up a bug, perhaps in the makefile? 
+ command: "make -C ../../phase1b/crypto clean && make -C ../../phase1b/crypto" + }, + }], + }, + }, + }, + } diff --git a/min-turnup/phase2/Kconfig b/min-turnup/phase2/Kconfig index 492cd830b..4987ac860 100644 --- a/min-turnup/phase2/Kconfig +++ b/min-turnup/phase2/Kconfig @@ -3,14 +3,18 @@ menu "Phase 2: Node Bootstrapping" config phase2.docker_registry string "docker registry" - default "gcr.io/google-containers" + default "docker.io/colemickens" help The docker registry to pull cluster components from. config phase2.kubernetes_version string "kubernetes version" - default "v1.2.4" + default "v1.3.0-azure" help The version of Kubernetes to deploy. +config phase2.installer_container_spec + string "installer container" + default "docker.io/colemickens/install-k8s:v1" + endmenu diff --git a/min-turnup/phase2/ansible/Makefile b/min-turnup/phase2/ansible/Makefile index d886c1f5e..ecc1f1046 100644 --- a/min-turnup/phase2/ansible/Makefile +++ b/min-turnup/phase2/ansible/Makefile @@ -1,5 +1,6 @@ TAG=v1 -IMAGE=gcr.io/mikedanese-k8s/install-k8s +#IMAGE=gcr.io/mikedanese-k8s/install-k8s +IMAGE=docker.io/colemickens/install-k8s build: docker build -t "$(IMAGE):$(TAG)" . 
diff --git a/min-turnup/phase2/ansible/playbooks/roles/master/tasks/manifests.yml b/min-turnup/phase2/ansible/playbooks/roles/master/tasks/manifests.yml index 21ee6941c..637bb88be 100644 --- a/min-turnup/phase2/ansible/playbooks/roles/master/tasks/manifests.yml +++ b/min-turnup/phase2/ansible/playbooks/roles/master/tasks/manifests.yml @@ -1,10 +1,12 @@ -- name: master manfiests - jsonnet: - src: "{{ item }}.jsonnet" - dest: "/etc/kubernetes/manifests/{{ item }}.json" - mode: 0644 - with_items: - - etcd - - kube-scheduler - - kube-apiserver - - kube-controller-manager +- template: + src: etcd.json.j2 + dest: /etc/kubernetes/manifests/etcd.json +- template: + src: kube-apiserver.json.j2 + dest: /etc/kubernetes/manifests/kube-apiserver.json +- template: + src: kube-controller-manager.json.j2 + dest: /etc/kubernetes/manifests/kube-controller-manager.json +- template: + src: kube-scheduler.json.j2 + dest: /etc/kubernetes/manifests/kube-scheduler.json diff --git a/min-turnup/phase2/ansible/playbooks/roles/master/templates/etcd.json.j2 b/min-turnup/phase2/ansible/playbooks/roles/master/templates/etcd.json.j2 new file mode 100644 index 000000000..daf7a892e --- /dev/null +++ b/min-turnup/phase2/ansible/playbooks/roles/master/templates/etcd.json.j2 @@ -0,0 +1,49 @@ +{ + "apiVersion": "v1", + "kind": "Pod", + "metadata": { + "name": "etcd-server", + "namespace": "kube-system" + }, + "spec": { + "hostNetwork": true, + "containers": [ + { + "name": "etcd-container", + "image": "gcr.io/google_containers/etcd:2.2.1", + "resources": { + "requests": { + "cpu": "200m" + } + }, + "command": [ + "/usr/local/bin/etcd", + "--listen-peer-urls=http://127.0.0.1:2380", + "--addr=127.0.0.1:2379", + "--bind-addr=127.0.0.1:2379" + ], + "livenessProbe": { + "httpGet": { + "host": "127.0.0.1", + "port": 2379, + "path": "/health" + }, + "initialDelaySeconds": 15, + "timeoutSeconds": 15 + }, + "ports": [ + { + "name": "serverport", + "containerPort": 2380, + "hostPort": 2380 + }, + { + "name": 
"clientport", + "containerPort": 2379, + "hostPort": 2379 + } + ] + } + ] + } +} diff --git a/min-turnup/phase2/ansible/playbooks/roles/master/templates/etcd.jsonnet b/min-turnup/phase2/ansible/playbooks/roles/master/templates/etcd.jsonnet deleted file mode 100644 index 676baa065..000000000 --- a/min-turnup/phase2/ansible/playbooks/roles/master/templates/etcd.jsonnet +++ /dev/null @@ -1,70 +0,0 @@ -function(cfg) - { - apiVersion: "v1", - kind: "Pod", - metadata: { - name: "etcd-server", - namespace: "kube-system", - }, - spec: { - hostNetwork: true, - containers: [ - { - name: "etcd-container", - image: "gcr.io/google_containers/etcd:2.2.1", - resources: { - requests: { - cpu: "200m", - }, - }, - command: [ - "/bin/sh", - "-c", - ||| - /usr/local/bin/etcd \ - --listen-peer-urls http://127.0.0.1:2380 \ - --addr 127.0.0.1:2379 \ - --bind-addr 127.0.0.1:2379 \ - --data-dir /var/etcd/data - |||, - ], - livenessProbe: { - httpGet: { - host: "127.0.0.1", - port: 2379, - path: "/health", - }, - initialDelaySeconds: 15, - timeoutSeconds: 15, - }, - ports: [ - { - name: "serverport", - containerPort: 2380, - hostPort: 2380, - }, - { - name: "clientport", - containerPort: 2379, - hostPort: 2379, - }, - ], - volumeMounts: [ - { - name: "varetcd", - mountPath: "/var/etcd", - readOnly: false, - }, - ], - }, - ], - volumes: [ - { - name: "varetcd", - hostPath: { - path: "/mnt/master-pd/var/etcd", - }, - }, - ], - }, - } diff --git a/min-turnup/phase2/ansible/playbooks/roles/master/templates/kube-apiserver.json.j2 b/min-turnup/phase2/ansible/playbooks/roles/master/templates/kube-apiserver.json.j2 new file mode 100644 index 000000000..901fc4d05 --- /dev/null +++ b/min-turnup/phase2/ansible/playbooks/roles/master/templates/kube-apiserver.json.j2 @@ -0,0 +1,102 @@ +{ + "apiVersion": "v1", + "kind": "Pod", + "metadata": { + "name": "kube-apiserver", + "namespace": "kube-system", + "labels": { + "tier": "control-plane", + "component": "kube-apiserver" + } + }, + "spec": { + 
"hostNetwork": true, + "containers": [ + { + "name": "kube-apiserver", + "image": "{{ phase2['docker_registry'] }}/hyperkube-amd64:{{ phase2['kubernetes_version'] }}", + "resources": { + "requests": { + "cpu": "250m" + } + }, + "command": [ + "/hyperkube", + "apiserver", + "--address=127.0.0.1", + "--etcd-servers=http://127.0.0.1:2379", + "--cloud-provider={{ phase1['cloud_provider'] }}", +{% if phase1['cloud_provider'] == "azure" %} + "--cloud-config=/etc/kubernetes/azure.json", +{% endif %} + "--admission-control=NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeLabel,ResourceQuota", + "--service-cluster-ip-range=10.3.0.0/16", + "--client-ca-file=/srv/kubernetes/ca.pem", + "--tls-cert-file=/srv/kubernetes/apiserver.pem", + "--tls-private-key-file=/srv/kubernetes/apiserver-key.pem", + "--secure-port=443", + "--allow-privileged", + "--v=4" + ], + "livenessProbe": { + "httpGet": { + "host": "127.0.0.1", + "port": 8080, + "path": "/healthz" + }, + "initialDelaySeconds": 15, + "timeoutSeconds": 15 + }, + "ports": [ + { + "name": "https", + "containerPort": 443, + "hostPort": 443 + }, + { + "name": "local", + "containerPort": 8080, + "hostPort": 8080 + } + ], + "volumeMounts": [ + { + "name": "etckube", + "mountPath": "/etc/kubernetes", + "readOnly": true + }, + { + "name": "srvkube", + "mountPath": "/srv/kubernetes", + "readOnly": true + }, + { + "name": "etcssl", + "mountPath": "/etc/ssl", + "readOnly": true + } + ] + } + ], + "volumes": [ + { + "name": "etckube", + "hostPath": { + "path": "/etc/kubernetes" + } + }, + { + "name": "srvkube", + "hostPath": { + "path": "/srv/kubernetes" + } + }, + { + "name": "etcssl", + "hostPath": { + "path": "/etc/ssl" + } + } + ] + } +} diff --git a/min-turnup/phase2/ansible/playbooks/roles/master/templates/kube-apiserver.jsonnet b/min-turnup/phase2/ansible/playbooks/roles/master/templates/kube-apiserver.jsonnet deleted file mode 100644 index 7fdf7131f..000000000 --- 
a/min-turnup/phase2/ansible/playbooks/roles/master/templates/kube-apiserver.jsonnet +++ /dev/null @@ -1,94 +0,0 @@ -function(cfg) - { - apiVersion: "v1", - kind: "Pod", - metadata: { - name: "kube-apiserver", - namespace: "kube-system", - labels: { - tier: "control-plane", - component: "kube-apiserver", - }, - }, - spec: { - hostNetwork: true, - containers: [ - { - name: "kube-apiserver", - image: "%(docker_registry)s/kube-apiserver:%(kubernetes_version)s" % cfg.phase2, - resources: { - requests: { - cpu: "250m", - }, - }, - command: [ - "/bin/sh", - "-c", - ||| - /usr/local/bin/kube-apiserver \ - --address=127.0.0.1 \ - --etcd-servers=http://127.0.0.1:2379 \ - --cloud-provider=%(cloud_provider)s \ - --admission-control=NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeLabel,ResourceQuota \ - --service-cluster-ip-range=10.0.0.0/16 \ - --client-ca-file=/srv/kubernetes/ca.pem \ - --tls-cert-file=/srv/kubernetes/apiserver.pem \ - --tls-private-key-file=/srv/kubernetes/apiserver-key.pem \ - --secure-port=443 \ - --allow-privileged \ - --v=4 - ||| % cfg.phase1, - # --basic-auth-file=/srv/kubernetes/basic_auth.csv \ - # --token-auth-file=/srv/kubernetes/known_tokens.csv \ - ], - livenessProbe: { - httpGet: { - host: "127.0.0.1", - port: 8080, - path: "/healthz", - }, - initialDelaySeconds: 15, - timeoutSeconds: 15, - }, - ports: [ - { - name: "https", - containerPort: 443, - hostPort: 443, - }, - { - name: "local", - containerPort: 8080, - hostPort: 8080, - }, - ], - volumeMounts: [ - { - name: "srvkube", - mountPath: "/srv/kubernetes", - readOnly: true, - }, - { - name: "etcssl", - mountPath: "/etc/ssl", - readOnly: true, - }, - ], - }, - ], - volumes: [ - { - name: "srvkube", - hostPath: { - path: "/srv/kubernetes", - }, - }, - { - name: "etcssl", - hostPath: { - path: "/etc/ssl", - }, - }, - ], - }, - } diff --git a/min-turnup/phase2/ansible/playbooks/roles/master/templates/kube-controller-manager.json.j2 
b/min-turnup/phase2/ansible/playbooks/roles/master/templates/kube-controller-manager.json.j2 new file mode 100644 index 000000000..ab55c45f5 --- /dev/null +++ b/min-turnup/phase2/ansible/playbooks/roles/master/templates/kube-controller-manager.json.j2 @@ -0,0 +1,87 @@ +{ + "apiVersion": "v1", + "kind": "Pod", + "metadata": { + "name": "kube-controller-manager", + "namespace": "kube-system", + "labels": { + "tier": "control-plane", + "component": "kube-controller-manager" + } + }, + "spec": { + "hostNetwork": true, + "containers": [ + { + "name": "kube-controller-manager", + "image": "{{ phase2['docker_registry'] }}/hyperkube-amd64:{{ phase2['kubernetes_version'] }}", + "resources": { + "requests": { + "cpu": "200m" + } + }, + "command": [ + "/hyperkube", + "controller-manager", + "--master=127.0.0.1:8080", + "--cluster-name={{ phase1['instance_prefix'] }}", + "--cluster-cidr=10.244.0.0/16", + "--allocate-node-cidrs=true", + "--cloud-provider={{ phase1['cloud_provider'] }}", +{% if phase1['cloud_provider'] == "azure" %} + "--cloud-config=/etc/kubernetes/azure.json", +{% endif %} + "--service-account-private-key-file=/srv/kubernetes/apiserver-key.pem", + "--root-ca-file=/srv/kubernetes/ca.pem", + "--v=2" + ], + "livenessProbe": { + "httpGet": { + "host": "127.0.0.1", + "port": 10252, + "path": "/healthz" + }, + "initialDelaySeconds": 15, + "timeoutSeconds": 15 + }, + "volumeMounts": [ + { + "name": "etckube", + "mountPath": "/etc/kubernetes", + "readOnly": true + }, + { + "name": "srvkube", + "mountPath": "/srv/kubernetes", + "readOnly": true + }, + { + "name": "etcssl", + "mountPath": "/etc/ssl", + "readOnly": true + } + ] + } + ], + "volumes": [ + { + "name": "etckube", + "hostPath": { + "path": "/etc/kubernetes" + } + }, + { + "name": "srvkube", + "hostPath": { + "path": "/srv/kubernetes" + } + }, + { + "name": "etcssl", + "hostPath": { + "path": "/etc/ssl" + } + } + ] + } +} diff --git 
a/min-turnup/phase2/ansible/playbooks/roles/master/templates/kube-controller-manager.jsonnet b/min-turnup/phase2/ansible/playbooks/roles/master/templates/kube-controller-manager.jsonnet deleted file mode 100644 index 58e79f547..000000000 --- a/min-turnup/phase2/ansible/playbooks/roles/master/templates/kube-controller-manager.jsonnet +++ /dev/null @@ -1,77 +0,0 @@ -function(cfg) - { - apiVersion: "v1", - kind: "Pod", - metadata: { - name: "kube-controller-manager", - namespace: "kube-system", - labels: { - tier: "control-plane", - component: "kube-controller-manager", - }, - }, - spec: { - hostNetwork: true, - containers: [ - { - name: "kube-controller-manager", - image: "%(docker_registry)s/kube-controller-manager:%(kubernetes_version)s" % cfg.phase2, - resources: { - requests: { - cpu: "200m", - }, - }, - command: [ - "/bin/sh", - "-c", - ||| - /usr/local/bin/kube-controller-manager \ - --master=127.0.0.1:8080 \ - --cluster-name=k-1 \ - --cluster-cidr=10.244.0.0/16 \ - --allocate-node-cidrs=true \ - --cloud-provider=%(cloud_provider)s \ - --service-account-private-key-file=/srv/kubernetes/apiserver-key.pem \ - --root-ca-file=/srv/kubernetes/ca.pem \ - --v=2 - ||| % cfg.phase1, - ], - livenessProbe: { - httpGet: { - host: "127.0.0.1", - port: 10252, - path: "/healthz", - }, - initialDelaySeconds: 15, - timeoutSeconds: 15, - }, - volumeMounts: [ - { - name: "srvkube", - mountPath: "/srv/kubernetes", - readOnly: true, - }, - { - name: "etcssl", - mountPath: "/etc/ssl", - readOnly: true, - }, - ], - }, - ], - volumes: [ - { - name: "srvkube", - hostPath: { - path: "/srv/kubernetes", - }, - }, - { - name: "etcssl", - hostPath: { - path: "/etc/ssl", - }, - }, - ], - }, - } diff --git a/min-turnup/phase2/ansible/playbooks/roles/master/templates/kube-scheduler.json.j2 b/min-turnup/phase2/ansible/playbooks/roles/master/templates/kube-scheduler.json.j2 new file mode 100644 index 000000000..ff7edf306 --- /dev/null +++ 
b/min-turnup/phase2/ansible/playbooks/roles/master/templates/kube-scheduler.json.j2 @@ -0,0 +1,41 @@ +{ + "apiVersion": "v1", + "kind": "Pod", + "metadata": { + "name": "kube-scheduler", + "namespace": "kube-system", + "labels": { + "tier": "control-plane", + "component": "kube-scheduler" + } + }, + "spec": { + "hostNetwork": true, + "containers": [ + { + "name": "kube-scheduler", + "image": "{{ phase2['docker_registry'] }}/hyperkube-amd64:{{ phase2['kubernetes_version'] }}", + "resources": { + "requests": { + "cpu": "100m" + } + }, + "command": [ + "/hyperkube", + "scheduler", + "--master=127.0.0.1:8080", + "--v=2" + ], + "livenessProbe": { + "httpGet": { + "host": "127.0.0.1", + "port": 10251, + "path": "/healthz" + }, + "initialDelaySeconds": 15, + "timeoutSeconds": 15 + } + } + ] + } +} diff --git a/min-turnup/phase2/ansible/playbooks/roles/master/templates/kube-scheduler.jsonnet b/min-turnup/phase2/ansible/playbooks/roles/master/templates/kube-scheduler.jsonnet deleted file mode 100644 index 4f63b7439..000000000 --- a/min-turnup/phase2/ansible/playbooks/roles/master/templates/kube-scheduler.jsonnet +++ /dev/null @@ -1,41 +0,0 @@ -function(cfg) - { - apiVersion: "v1", - kind: "Pod", - metadata: { - name: "kube-scheduler", - namespace: "kube-system", - labels: { - tier: "control-plane", - component: "kube-scheduler", - }, - }, - spec: { - hostNetwork: true, - containers: [ - { - name: "kube-scheduler", - image: "%(docker_registry)s/kube-scheduler:%(kubernetes_version)s" % cfg.phase2, - resources: { - requests: { - cpu: "100m", - }, - }, - command: [ - "/bin/sh", - "-c", - "/usr/local/bin/kube-scheduler --master=127.0.0.1:8080 --v=2", - ], - livenessProbe: { - httpGet: { - host: "127.0.0.1", - port: 10251, - path: "/healthz", - }, - initialDelaySeconds: 15, - timeoutSeconds: 15, - }, - }, - ], - }, - } diff --git a/min-turnup/phase2/ansible/playbooks/roles/master/templates/kubeconfig.jsonnet 
b/min-turnup/phase2/ansible/playbooks/roles/master/templates/kubeconfig.jsonnet deleted file mode 100644 index 80464c7be..000000000 --- a/min-turnup/phase2/ansible/playbooks/roles/master/templates/kubeconfig.jsonnet +++ /dev/null @@ -1,26 +0,0 @@ -function(cfg) - { - apiVersion: "v1", - kind: "Config", - users: [{ - name: "kubelet", - user: { - "client-certificate-data": std.base64(cfg.kubelet_pem.stdout), - "client-key-data": std.base64(cfg.kubelet_key_pem.stdout), - }, - }], - clusters: [{ - name: "local", - cluster: { - "certificate-authority-data": std.base64(cfg.ca_pem.stdout), - }, - }], - contexts: [{ - context: { - cluster: "local", - user: "kubelet", - }, - name: "service-account-context", - }], - "current-context": "service-account-context", - } diff --git a/min-turnup/phase2/ansible/playbooks/roles/node/tasks/kubelet.yml b/min-turnup/phase2/ansible/playbooks/roles/node/tasks/kubelet.yml index a10dcd3e1..0c4f9bc4d 100644 --- a/min-turnup/phase2/ansible/playbooks/roles/node/tasks/kubelet.yml +++ b/min-turnup/phase2/ansible/playbooks/roles/node/tasks/kubelet.yml @@ -5,3 +5,4 @@ - service: name: kubelet state: started + enabled: yes diff --git a/min-turnup/phase2/ansible/playbooks/roles/node/tasks/main.yml b/min-turnup/phase2/ansible/playbooks/roles/node/tasks/main.yml index 21707f31e..f07c85ba8 100644 --- a/min-turnup/phase2/ansible/playbooks/roles/node/tasks/main.yml +++ b/min-turnup/phase2/ansible/playbooks/roles/node/tasks/main.yml @@ -1,4 +1,3 @@ - include: kubeconfig.yml - include: etc.yml - include: kubelet.yml -- include: kubectl.yml diff --git a/min-turnup/phase2/ansible/playbooks/roles/node/templates/kubelet.service.j2 b/min-turnup/phase2/ansible/playbooks/roles/node/templates/kubelet.service.j2 index f050aee67..77517d88e 100644 --- a/min-turnup/phase2/ansible/playbooks/roles/node/templates/kubelet.service.j2 +++ b/min-turnup/phase2/ansible/playbooks/roles/node/templates/kubelet.service.j2 @@ -15,29 +15,33 @@ ExecStart=/usr/bin/docker run \ -v 
/var/lib/docker/:/var/lib/docker:rw \ -v /var/lib/kubelet/:/var/lib/kubelet:shared \ -v /srv/kubernetes:/srv/kubernetes:ro \ - -v /etc/kubernetes/manifests:/etc/kubernetes/manifests:ro \ + -v /etc/kubernetes:/etc/kubernetes:ro \ {{ phase2['docker_registry'] }}/hyperkube-amd64:{{ phase2['kubernetes_version'] }} \ /hyperkube kubelet \ --address=0.0.0.0 \ --allow-privileged=true \ --cloud-provider={{ phase1['cloud_provider'] }} \ +{% if phase1['cloud_provider'] == "azure" %} + --cloud-config="/etc/kubernetes/azure.json" \ +{% endif %} --enable-server \ -{% if role == "master" %} - --api-servers=http://localhost:8080 \ -{% elif role == "node" %} + --register-node=true \ + --register-schedulable={{ role == "node" }} \ --enable-debugging-handlers \ --api-servers=https://{{ master_ip }} \ - --hairpin-mode=promiscuous-bridge \ --kubeconfig=/srv/kubernetes/kubeconfig.json \ +{% if role == "master" %} + --config=/etc/kubernetes/manifests \ +{% elif role == "node" %} + --hairpin-mode=promiscuous-bridge \ --network-plugin=kubenet \ --reconcile-cidr \ {% endif %} - --config=/etc/kubernetes/manifests \ - --cluster-dns=10.0.0.10 \ + --cluster-dns=10.3.0.10 \ --cluster-domain=cluster.local \ - --v=2 -Restart=always -KillMode=process + --v=9 +#Restart=always +#KillMode=process [Install] WantedBy=multi-user.target diff --git a/min-turnup/util/config_to_json b/min-turnup/util/config_to_json index fd535739c..ba06842bd 100755 --- a/min-turnup/util/config_to_json +++ b/min-turnup/util/config_to_json @@ -22,7 +22,7 @@ while read line; do field=$(echo "${line}" \ | cut -f1 -d"=") value=$(echo "${line}" \ - | cut -f2 -d"=" \ + | cut -f2- -d"=" \ | sed \ -e 's/^y$/true/' \ -e 's/^n$/false/')