-
Notifications
You must be signed in to change notification settings - Fork 136
/
Copy path00_infra.sh
executable file
·131 lines (111 loc) · 3.88 KB
/
00_infra.sh
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
#!/usr/bin/env bash
# This script sets up a local development cluster. It's roughly equivalent to
# a managed K8s setup.
# For ease of development and to save disk space we pipe a local container
# registry through to kind.
#
# See https://kind.sigs.k8s.io/docs/user/local-registry/.
set -xeuo pipefail

# Local registry coordinates; read by every later step that touches the
# registry (node config, ConfigMap advertisement).
readonly reg_name='kind-registry'
readonly reg_port='5001'

# Start the registry container unless one is already running.
# The `|| true` keeps the probe from tripping `set -e` when the container
# does not exist yet; inspect's error output is intentionally discarded.
if [ "$(docker inspect -f '{{.State.Running}}' "${reg_name}" 2>/dev/null || true)" != 'true' ]; then
  docker run \
    -d --restart=always -p "127.0.0.1:${reg_port}:5000" --network bridge --name "${reg_name}" \
    registry:2
fi
# Start a basic cluster. We use cilium's CNI and eBPF kube-proxy replacement.
SRC_ROOT=$(git rev-parse --show-toplevel)
# Feed the cluster spec straight to kind via a heredoc (no `cat` pipe needed).
# Every node mounts the repo root so in-cluster workloads can reach the source
# tree at /mnt/src_root. The default CNI and kube-proxy are disabled because
# cilium (installed below) replaces both.
kind create cluster --config - <<EOF
---
kind: Cluster
apiVersion: kind.x-k8s.io/v1alpha4
nodes:
- role: control-plane
  extraMounts:
  - hostPath: $SRC_ROOT
    containerPath: /mnt/src_root
- role: worker
  extraMounts:
  - hostPath: $SRC_ROOT
    containerPath: /mnt/src_root
- role: worker
  extraMounts:
  - hostPath: $SRC_ROOT
    containerPath: /mnt/src_root
networking:
  disableDefaultCNI: true
  kubeProxyMode: none
containerdConfigPatches:
- |-
  [plugins."io.containerd.grpc.v1.cri".registry]
    config_path = "/etc/containerd/certs.d"
EOF
# Enable the registry on the nodes.
REGISTRY_DIR="/etc/containerd/certs.d/localhost:${reg_port}"
# Read node names line-by-line so they are never word-split or glob-expanded
# (kind prints one node name per line).
while IFS= read -r node; do
  docker exec "${node}" mkdir -p "${REGISTRY_DIR}"
  # Install hosts.toml in a single exec: the heredoc arrives on stdin and
  # `cp /dev/stdin` writes it to the target path inside the node container.
  docker exec -i "${node}" cp /dev/stdin "${REGISTRY_DIR}/hosts.toml" <<EOF
[host."http://${reg_name}:5000"]
EOF
done < <(kind get nodes)
# Connect the registry to the cluster network.
# If the registry container has no endpoint on the "kind" network yet,
# the inspect template renders the JSON literal `null`.
registry_kind_net=$(docker inspect -f='{{json .NetworkSettings.Networks.kind}}' "${reg_name}")
if [ "${registry_kind_net}" = 'null' ]; then
  docker network connect "kind" "${reg_name}"
fi
# Advertise the registry location.
# This well-known ConfigMap tells cluster tooling where the local registry
# lives; see https://kind.sigs.k8s.io/docs/user/local-registry/.
# The heredoc is fed straight to kubectl (no `cat` pipe needed).
kubectl apply -f - <<EOF
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: local-registry-hosting
  namespace: kube-public
data:
  localRegistryHosting.v1: |
    host: "localhost:${reg_port}"
    help: "https://kind.sigs.k8s.io/docs/user/local-registry/"
EOF
# Prepare Gateway API CRDs. These MUST be available before we start cilium.
kubectl apply -f https://github.com/kubernetes-sigs/gateway-api/releases/download/v1.0.0/experimental-install.yaml
# Block until every CRD that cilium's Gateway API support depends on has been
# accepted by the API server (same waits as before, expressed as a loop).
for crd in gatewayclasses gateways httproutes tlsroutes grpcroutes referencegrants; do
  kubectl wait --for condition=Established "crd/${crd}.gateway.networking.k8s.io"
done
# Start cilium.
helm repo add cilium https://helm.cilium.io
helm repo update cilium
# Chart settings: point the agents at the kind API server, replace kube-proxy
# entirely with eBPF, and enable Gateway API plus L2 announcements.
cilium_settings=(
  --set k8sServiceHost=kind-control-plane
  --set k8sServicePort=6443
  --set kubeProxyReplacement=strict
  --set gatewayAPI.enabled=true
  --set l2announcements.enabled=true
)
helm upgrade \
  --install cilium cilium/cilium \
  --version 1.14.5 \
  --namespace kube-system \
  "${cilium_settings[@]}" \
  --wait
# Kind's nodes are containers running on the local docker network. We reuse that
# network for LB-IPAM so that LoadBalancers are available via "real" local IPs.
KIND_NET_CIDR=$(docker network inspect kind -f '{{(index .IPAM.Config 0).Subnet}}')
# Carve a /28 out of the top of the kind /16 (e.g. 172.18.0.0/16 ->
# 172.18.255.0/28). Bash pattern substitution replaces the old `echo | sed`
# pipeline: no subshell, no unquoted expansion, and the dots are matched
# literally instead of as regex wildcards.
CILIUM_IP_CIDR="${KIND_NET_CIDR/0.0\/16/255.0/28}"
# Announce LoadBalancer/external IPs over L2 and hand cilium the carved-out
# pool to allocate from.
kubectl apply -f - <<EOF
---
apiVersion: cilium.io/v2alpha1
kind: CiliumL2AnnouncementPolicy
metadata:
  name: l2-announcements
spec:
  externalIPs: true
  loadBalancerIPs: true
---
apiVersion: cilium.io/v2alpha1
kind: CiliumLoadBalancerIPPool
metadata:
  name: default-pool
spec:
  cidrs:
  - cidr: ${CILIUM_IP_CIDR}
EOF
# At this point we have a similar setup to the one that we'd get with a cloud
# provider. Move on to `01_operations.sh` for the cluster setup.