contrib: Set IPv6 for Kubernetes nodeIP on dual-stack dev VM
The development setup using the provided Vagrantfile gives a dual-stack
Kubernetes cluster in the dev VM, but a hostNetwork pod deployed on it
does not get an IPv6 address.

This is because the kubelet is started with an incomplete `--node-ip`
parameter: to enable dual-stack mode, both the IPv4 and the IPv6 address
must be passed to the kubelet via `--node-ip`.

This commit completes the `--node-ip` parameter and fixes the issue.
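
For illustration, kubelet accepts a comma-separated IPv4/IPv6 pair in
`--node-ip` when running dual-stack; the addresses below are placeholders,
not the ones assigned to the dev VM:

    # single-stack: a single address
    kubelet --node-ip=192.168.61.11 ...
    # dual-stack: one address per family, comma-separated
    kubelet --node-ip=192.168.61.11,fd04::11 ...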

Fixes: cilium#23503

Signed-off-by: Zhichuan Liang <[email protected]>
jschwinger233 authored and pchaigno committed Feb 22, 2023
1 parent 3aa42ea commit 1731f2a
Showing 3 changed files with 26 additions and 9 deletions.
15 changes: 8 additions & 7 deletions Vagrantfile
@@ -201,6 +201,7 @@ Vagrant.configure(2) do |config|
master_vm_name = "#{$vm_base_name}1#{$build_id_name}#{$vm_kernel}"
config.vm.define master_vm_name, primary: true do |cm|
node_ip = "#{$master_ip}"
+ node_ipv6 = "#{$master_ipv6}"
cm.vm.network "forwarded_port", guest: 6443, host: 7443, auto_correct: true
cm.vm.network "forwarded_port", guest: 9081, host: 9081, auto_correct: true
# 2345 is the default delv server port
@@ -241,7 +242,7 @@ Vagrant.configure(2) do |config|
cm.vm.provision "k8s-install-master-part-1",
type: "shell",
run: "always",
env: {"node_ip" => node_ip},
env: {"node_ip" => node_ip, "node_ipv6" => node_ipv6},
privileged: true,
path: k8sinstall
end
@@ -255,7 +256,7 @@ Vagrant.configure(2) do |config|
cm.vm.provision "k8s-install-master-part-2",
type: "shell",
run: "always",
env: {"node_ip" => node_ip},
env: {"node_ip" => node_ip, "node_ipv6" => node_ipv6},
privileged: true,
path: k8sinstall
end
@@ -268,16 +269,16 @@ Vagrant.configure(2) do |config|
node_hostname = "#{$vm_base_name}#{n+2}"
config.vm.define node_vm_name do |node|
node_ip = $workers_ipv4_addrs[n]
+ node_ipv6 = $workers_ipv6_addrs[n]
node.vm.network "private_network", ip: "#{node_ip}",
virtualbox__intnet: "cilium-test-#{$build_id}"
nfs_ipv4_addr = $workers_ipv4_addrs_nfs[n]
- ipv6_addr = $workers_ipv6_addrs[n]
node.vm.network "private_network", ip: "#{nfs_ipv4_addr}", bridge: "enp0s9"
# Add IPv6 address this way or we get hit by a virtualbox bug
node.vm.provision "ipv6-config",
type: "shell",
run: "always",
inline: "ip -6 a a #{ipv6_addr}/16 dev enp0s9"
inline: "ip -6 a a #{node_ipv6}/16 dev enp0s9"

# Interface for the IPv6 NAT Service. The IP address doesn't matter
# as it won't be used. We use an IPv4 address as newer versions of
@@ -294,7 +295,7 @@ Vagrant.configure(2) do |config|
inline: "ip -6 r a default via fd17:625c:f037:2::1 dev enp0s10 || true"

if ENV["IPV6_EXT"] then
node_ip = "#{ipv6_addr}"
node_ip = "#{node_ipv6}"
end
node.vm.hostname = "#{node_hostname}"
if ENV['CILIUM_TEMP'] then
@@ -303,7 +304,7 @@ Vagrant.configure(2) do |config|
node.vm.provision "k8s-install-node-part-1",
type: "shell",
run: "always",
env: {"node_ip" => node_ip},
env: {"node_ip" => node_ip, "node_ipv6" => node_ipv6},
privileged: true,
path: k8sinstall
end
@@ -314,7 +315,7 @@ Vagrant.configure(2) do |config|
node.vm.provision "k8s-install-node-part-2",
type: "shell",
run: "always",
env: {"node_ip" => node_ip},
env: {"node_ip" => node_ip, "node_ipv6" => node_ipv6},
privileged: true,
path: k8sinstall
end
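
The `env` hash passed to the shell provisioner is exported to the
provisioning script as environment variables, which is how the install
scripts below see both addresses. A minimal sketch of the script side,
with illustrative echo lines that are not part of the repo:

    # Inside a provisioning script (e.g. 03-install-kubernetes-worker.sh),
    # the keys of the Vagrant "env" hash are available as environment variables.
    echo "node_ip=${node_ip}"
    echo "node_ipv6=${node_ipv6:-<empty when IPv6 is disabled>}"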
2 changes: 1 addition & 1 deletion contrib/k8s/k8s-extract-clustermesh-nodeport-secret.sh
@@ -7,7 +7,7 @@ set -e

NAMESPACE=$(kubectl get pod -l k8s-app=clustermesh-apiserver -o jsonpath='{.items[0].metadata.namespace}' --all-namespaces)
NODE_NAME=$(kubectl -n $NAMESPACE get pod -l k8s-app=clustermesh-apiserver -o jsonpath='{.items[0].spec.nodeName}')
- NODE_IP=$(kubectl -n $NAMESPACE get node $NODE_NAME -o jsonpath='{.status.addresses[?(@.type=="InternalIP")].address}')
+ NODE_IP=$(kubectl -n $NAMESPACE get node $NODE_NAME -o jsonpath='{.status.addresses[?(@.type=="InternalIP")].address}' | awk '{print $1}')
NODE_PORT=$(kubectl -n $NAMESPACE get svc clustermesh-apiserver -o jsonpath='{.spec.ports[0].nodePort}')
CLUSTER_NAME=$(kubectl -n $NAMESPACE get cm cilium-config -o jsonpath='{.data.cluster-name}')
# TODO: once v1.10 is the minimum version supported, we can replace the
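
On a dual-stack node the jsonpath query returns both InternalIP addresses
separated by a space, so without the added `awk '{print $1}'` NODE_IP would
contain two addresses. A rough illustration with placeholder values:

    # On a dual-stack node the query may print:
    #   192.168.61.12 fd04::12
    # awk keeps only the first field:
    echo "192.168.61.12 fd04::12" | awk '{print $1}'   # -> 192.168.61.12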
18 changes: 17 additions & 1 deletion contrib/vagrant/scripts/03-install-kubernetes-worker.sh
@@ -65,6 +65,22 @@ EOF
sudo docker ps
}

+ # node_ip_addresses returns the value for the kubelet --node-ip parameter.
+ # It needs to cover 3 scenarios:
+ # 1. $node_ipv6 == "", which happens when IPv6 is disabled
+ # 2. $node_ip == $node_ipv6, which happens when IPV6_EXT=1
+ # 3. $node_ip != $node_ipv6 && $node_ipv6 != ""
+ # In scenario 3 we concatenate the two variables; otherwise we return the non-empty one.
+ function node_ip_addresses() {
+     if [[ -z "$node_ipv6" ]]; then
+         echo -n $node_ip
+     elif [[ "$node_ipv6" == "$node_ip" ]]; then
+         echo -n $node_ipv6
+     else
+         echo -n "$node_ip,$node_ipv6"
+     fi
+ }

log "Installing kubernetes worker components..."

set -e
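
A quick sanity check of the `node_ip_addresses` helper above, using
placeholder addresses:

    # Scenario 1: IPv6 disabled, only the IPv4 address is emitted
    node_ip=192.168.61.12 node_ipv6=""; node_ip_addresses        # 192.168.61.12
    # Scenario 2: IPV6_EXT=1, node_ip already holds the IPv6 address
    node_ip=fd04::12 node_ipv6=fd04::12; node_ip_addresses       # fd04::12
    # Scenario 3: dual stack, comma-separated pair for --node-ip
    node_ip=192.168.61.12 node_ipv6=fd04::12; node_ip_addresses  # 192.168.61.12,fd04::12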
@@ -335,7 +351,7 @@ ExecStart=/usr/bin/kubelet \\
--kubeconfig=/var/lib/kubelet/kubelet.kubeconfig \\
--fail-swap-on=false \\
--make-iptables-util-chains=false \\
- --node-ip=${node_ip} \\
+ --node-ip=$(node_ip_addresses) \\
--register-node=true \\
--serialize-image-pulls=false \\
--tls-cert-file=/var/lib/kubelet/kubelet-kubelet-${hostname}.pem \\
