Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
36 commits
Select commit Hold shift + click to select a range
3051cca
Fixes accidental override of file conf settings with defaults
trozet Feb 4, 2020
a158c45
Remove assumptions on defaults for specific types
trozet Feb 10, 2020
a07b1a0
Clustered database (aka RAFT) support for OVN DBs
girishmg Feb 12, 2020
570e865
remove unused method from KClient
Feb 12, 2020
963a84a
ci: always export logs
dcbw Feb 12, 2020
5b33ab8
Merge pull request #1051 from girishmg/us_raft
dcbw Feb 12, 2020
5914b37
factory: fix race between queued informer initial add and other events
dcbw Feb 3, 2020
9b06a4e
pkg/util: make GetNodeIP and GetNodeHostname return API values
Jan 23, 2020
b5da6a3
Merge pull request #1054 from dcbw/ci-always-export-logs
dcbw Feb 13, 2020
e9946e6
Downgrade OVN and see if CI passes
trozet Feb 14, 2020
f4ba4b9
use the macros defined to work with pre-split and post-split OVN
girishmg Feb 13, 2020
267b2e8
cluster: Make GatewayReady() IPv6/dual-stack safe
danwinship Feb 12, 2020
130c96c
cluster: fix "Gateway and ManagementPort are Ready" message
danwinship Feb 13, 2020
841be69
ensure logs are always exported
as-com Feb 14, 2020
2db1e1d
Merge pull request #1065 from as-com/as-com-fix-log-ci
dcbw Feb 14, 2020
0fe8868
Merge pull request #1053 from moshe010/remove_unused
dcbw Feb 15, 2020
173e25e
Remove kube-proxy in KIND deployment
trozet Feb 17, 2020
3d5049c
Fix delete order for network policy
trozet Feb 17, 2020
925c6b0
Merge pull request #1067 from trozet/remove_kube_proxy
dcbw Feb 17, 2020
355c519
Merge pull request #1068 from trozet/fix_np_delete
dcbw Feb 17, 2020
3bbdf51
Merge pull request #1044 from trozet/dont_override_with_defaults
dcbw Feb 18, 2020
90a7829
config: don't let environment affect test results
danwinship Feb 18, 2020
3cad53e
ovn: fix use of other-config:subnet vs other-config:ipv6_prefix
danwinship Feb 17, 2020
433867b
add the ability for ovn-kubernetes to not assign hostsubnet by label
JacobTanenbaum Feb 5, 2020
ad583b0
Merge pull request #1048 from JacobTanenbaum/ignore-node-label
dcbw Feb 18, 2020
4e846e6
Merge pull request #1071 from danwinship/make-test-env-vars
dcbw Feb 18, 2020
6911561
config: OVN only supports /64 hostsubnets for IPv6
danwinship Feb 18, 2020
9fac27e
Merge pull request #1072 from danwinship/fix-subnet-ipv6-prefix
dcbw Feb 18, 2020
49d8436
kube: add node annotator
dcbw Sep 26, 2019
4de927d
util: add SetExecWithoutOVS() variant
dcbw Oct 9, 2019
edf871d
vendor: update k8s.io/client-go to get fix for kubernetes #78743
dcbw Dec 21, 2019
dfd7e69
hybrid-overlay: framework for extending the OVN network via VXLAN tun…
dcbw Oct 11, 2019
f0f3421
hybrid-overlay: initial HNS-based Windows node implementation
JocelynBerrendonner Aug 20, 2019
7de2075
ovnkube.sh: add support for extensions
dcbw Sep 24, 2019
af32c8c
Merge remote-tracking branch 'dcbwupstream/extensions'
dcbw Feb 18, 2020
d2e186b
hack: bump master loglevel to 5 for debugging
dcbw Feb 19, 2020
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
6 changes: 3 additions & 3 deletions .github/workflow-templates/test.yml
Original file line number Diff line number Diff line change
Expand Up @@ -83,15 +83,15 @@ jobs:
kubetest --ginkgo-parallel=2 --provider=local --deployment=kind --kind-cluster-name=${KIND_CLUSTER_NAME} --test --test_args='--ginkgo.focus=\[sig-network\].*Conformance --disable-log-dump=false --ginkgo.skip=\[Serial\]'
kubetest --ginkgo-parallel=2 --provider=local --deployment=kind --kind-cluster-name=${KIND_CLUSTER_NAME} --test --test_args='--ginkgo.focus=\[sig-network\].*NetworkPolicy --disable-log-dump=false --ginkgo.skip=ingress\saccess|multiple\segress\spolicies|allow\segress\saccess|\[Serial\]'

- name: Export logs on failure
- name: Export logs
if: always()
run: |
mkdir -p /tmp/kind/logs
kind export logs --name ${KIND_CLUSTER_NAME} /tmp/kind/logs
working-directory: ${{ env.WORKDIR }}
if: failure()
- name: Upload logs
if: always()
uses: actions/upload-artifact@v1
if: failure()
with:
name: kind-logs
path: /tmp/kind/logs
6 changes: 3 additions & 3 deletions .github/workflows/test_generated.yml
Original file line number Diff line number Diff line change
Expand Up @@ -90,14 +90,14 @@ jobs:
export NODE_NAMES=${MASTER_NAME}
kubetest --ginkgo-parallel=2 --provider=local --deployment=kind --kind-cluster-name=${KIND_CLUSTER_NAME} --test --test_args='--ginkgo.focus=\[sig-network\].*Conformance --disable-log-dump=false --ginkgo.skip=\[Serial\]'
kubetest --ginkgo-parallel=2 --provider=local --deployment=kind --kind-cluster-name=${KIND_CLUSTER_NAME} --test --test_args='--ginkgo.focus=\[sig-network\].*NetworkPolicy --disable-log-dump=false --ginkgo.skip=ingress\saccess|multiple\segress\spolicies|allow\segress\saccess|\[Serial\]'
- name: Export logs on failure
- name: Export logs
if: always()
run: "mkdir -p /tmp/kind/logs \nkind export logs --name ${KIND_CLUSTER_NAME}
/tmp/kind/logs\n"
working-directory: "${{ env.WORKDIR }}"
if: failure()
- name: Upload logs
if: always()
uses: actions/upload-artifact@v1
if: failure()
with:
name: kind-logs
path: "/tmp/kind/logs"
1 change: 1 addition & 0 deletions contrib/kind.sh
Original file line number Diff line number Diff line change
Expand Up @@ -48,6 +48,7 @@ kubectl create -f ovnkube-db.yaml
kubectl create -f ovnkube-master.yaml
kubectl create -f ovnkube-node.yaml
popd
kubectl -n kube-system delete ds kube-proxy
kind get clusters
kind get nodes --name ${CLUSTER_NAME}

Expand Down
1 change: 1 addition & 0 deletions dist/images/Dockerfile
Original file line number Diff line number Diff line change
Expand Up @@ -62,6 +62,7 @@ COPY ovn-k8s-cni-overlay /usr/libexec/cni/ovn-k8s-cni-overlay
# variables to direct operation and configure ovn
COPY ovnkube.sh /root/
COPY ovn-debug.sh /root/
COPY ovndb-raft-functions /root/
# override the rpm's ovn_k8s.conf with this local copy
COPY ovn_k8s.conf /etc/openvswitch/ovn_k8s.conf

Expand Down
13 changes: 4 additions & 9 deletions dist/images/Dockerfile.fedora
Original file line number Diff line number Diff line change
Expand Up @@ -25,6 +25,9 @@ RUN INSTALL_PKGS=" \
dnf install --refresh -y --setopt=tsflags=nodocs $INSTALL_PKGS && \
dnf clean all && rm -rf /var/cache/dnf/*

# REMOVE ME when ovn is fixed
RUN dnf -y downgrade ovn

RUN mkdir -p /var/run/openvswitch && \
mkdir -p /usr/libexec/cni/

Expand All @@ -38,15 +41,7 @@ COPY git_info /root
# variables to direct operation and configure ovn
COPY ovnkube.sh /root/
COPY ovn-debug.sh /root/

# iptables wrappers
COPY ./iptables-scripts/iptables /usr/sbin/
COPY ./iptables-scripts/iptables-save /usr/sbin/
COPY ./iptables-scripts/iptables-restore /usr/sbin/
COPY ./iptables-scripts/ip6tables /usr/sbin/
COPY ./iptables-scripts/ip6tables-save /usr/sbin/
COPY ./iptables-scripts/ip6tables-restore /usr/sbin/
COPY ./iptables-scripts/iptables /usr/sbin/
COPY ovndb-raft-functions /root/

LABEL io.k8s.display-name="ovn-kubernetes" \
io.k8s.description="This is a Kubernetes network plugin that provides an overlay network using OVN." \
Expand Down
1 change: 1 addition & 0 deletions dist/images/Dockerfile.ubuntu
Original file line number Diff line number Diff line change
Expand Up @@ -39,6 +39,7 @@ COPY ovn-k8s-cni-overlay /usr/libexec/cni/ovn-k8s-cni-overlay
# variables to direct operation and configure ovn
COPY ovnkube.sh /root/
COPY ovn-debug.sh /root/
COPY ovndb-raft-functions /root/
# override the pkg's ovn_k8s.conf with this local copy
COPY ovn_k8s.conf /etc/openvswitch/ovn_k8s.conf

Expand Down
5 changes: 5 additions & 0 deletions dist/images/daemonset.sh
Original file line number Diff line number Diff line change
Expand Up @@ -121,6 +121,8 @@ ovn_db_replicas=${OVN_DB_REPLICAS:-3}
echo "ovn_db_replicas: ${ovn_db_replicas}"
ovn_db_vip=${OVN_DB_VIP}
echo "ovn_db_vip: ${ovn_db_vip}"
ovn_db_minAvailable=$(((${ovn_db_replicas} + 1) / 2))
echo "ovn_db_minAvailable: ${ovn_db_minAvailable}"

ovn_image=${image} ovn_image_pull_policy=${policy} kind=${KIND} ovn_gateway_mode=${ovn_gateway_mode} \
ovn_gateway_opts=${ovn_gateway_opts} j2 ../templates/ovnkube-node.yaml.j2 -o ../yaml/ovnkube-node.yaml
Expand All @@ -133,6 +135,9 @@ ovn_image=${image} ovn_image_pull_policy=${policy} j2 ../templates/ovnkube-db.ya
ovn_db_vip_image=${ovn_db_vip_image} ovn_image_pull_policy=${policy} ovn_db_replicas=${ovn_db_replicas} \
ovn_db_vip=${ovn_db_vip} j2 ../templates/ovnkube-db-vip.yaml.j2 -o ../yaml/ovnkube-db-vip.yaml

ovn_image=${image} ovn_image_pull_policy=${policy} ovn_db_replicas=${ovn_db_replicas} \
ovn_db_minAvailable=${ovn_db_minAvailable} j2 ../templates/ovnkube-db-raft.yaml.j2 > ../yaml/ovnkube-db-raft.yaml

# ovn-setup.yaml
# net_cidr=10.128.0.0/14/23
# svc_cidr=172.30.0.0/16
Expand Down
135 changes: 135 additions & 0 deletions dist/images/ovndb-raft-functions
Original file line number Diff line number Diff line change
@@ -0,0 +1,135 @@
#!/bin/bash
#set -euo pipefail

# Validate that the ovnkube-db statefulset can form a usable RAFT cluster.
# RAFT quorum requires an odd member count of at least 3.
# Reads:  K8S_APISERVER, k8s_token, K8S_CACERT
# Writes: replicas (deliberately global; reused by the other raft helpers)
# Exits the container with status 1 on an unusable replica count.
verify-ovsdb-raft () {
  check_ovn_daemonset_version "3"

  replicas=$(kubectl --server=${K8S_APISERVER} --token=${k8s_token} --certificate-authority=${K8S_CACERT} \
    get statefulset -n ovn-kubernetes ovnkube-db -o=jsonpath='{.spec.replicas}')
  # An empty/unset value arithmetically evaluates to 0 and is rejected below,
  # matching the original [[ -lt ]] behavior.
  if (( replicas < 3 || replicas % 2 == 0 )); then
    echo "at least 3 nodes need to be configured, and it must be odd number of nodes"
    exit 1
  fi
}

# OVN DB must be up in the first DB node
# This waits for ovnkube-db-0 POD to come up
# OVN DB must be up in the first DB node.
# This waits for the ovnkube-db-0 POD to come up: returns 0 once that pod
# has an IP and its ovsdb-server answers `list-dbs` on ${port}; returns 1
# otherwise so the caller (wait_for_event) retries.
# Arguments: $1 - db name ("nb" or "sb"), $2 - DB TCP port
# NOTE: init_ip is intentionally NOT local — ovsdb-raft() later uses it as
# the --db-${db}-cluster-remote-addr to join the cluster.
ready_to_join_cluster () {
  # See if ep is available ...
  local db=${1}
  local port=${2}

  init_ip="$(kubectl --server=${K8S_APISERVER} --token=${k8s_token} --certificate-authority=${K8S_CACERT} \
    get pod -n ovn-kubernetes ovnkube-db-0 -o=jsonpath='{.status.podIP}')"
  if [[ $? != 0 ]]; then
    return 1
  fi
  # The pod can exist (kubectl succeeds) before an IP is assigned; without
  # this guard we would probe the malformed address "tcp::${port}".
  if [[ -z "${init_ip}" ]]; then
    return 1
  fi
  ovsdb-client list-dbs tcp:${init_ip}:${port} > /dev/null 2>&1
  if [[ $? != 0 ]] ; then
    return 1
  fi
  return 0
}

# Probe a single OVSDB instance at addr:port.
# Returns 0 iff the server answers `ovsdb-client list-dbs`, 1 otherwise.
# Arguments: $1 - pod IP address, $2 - DB TCP port
check_ovnkube_db_ep () {
  local addr=${1}
  local tcp_port=${2}

  # TODO: Right now only checks for NB ovsdb instances
  echo "======= checking ${addr}:${tcp_port} OVSDB instance ==============="
  if ovsdb-client list-dbs tcp:${addr}:${tcp_port} > /dev/null 2>&1 ; then
    return 0
  fi
  return 1
}

# Create the ovnkube-db endpoint once every statefulset pod is up and serving.
# Collects the pod IPs of all ${replicas} ovnkube-db pods; if all are present
# and no endpoint exists yet, waits for each instance to answer ovsdb-client
# and then publishes the endpoint. Exits the container (status 10) if any pod
# is missing its IP.
# Arguments: $1 - DB TCP port to probe
# Reads globals: replicas (set by verify-ovsdb-raft), K8S_APISERVER,
#   k8s_token, K8S_CACERT
check_and_apply_ovnkube_db_ep () {
local port=${1}

# Get IPs of all ovnkube-db PODs
ips=()
for (( i=0; i<${replicas}; i++ )); do
ip=$(kubectl --server=${K8S_APISERVER} --token=${k8s_token} --certificate-authority=${K8S_CACERT} \
get pod -n ovn-kubernetes ovnkube-db-${i} -o=jsonpath='{.status.podIP}' 2>/dev/null)
if [[ ${ip} == "" ]]; then
break
fi
ips+=(${ip})
done

# NOTE: ${i} deliberately leaks out of the for-loop above; it equals
# ${replicas} only when every pod reported an IP.
if [[ ${i} -eq ${replicas} ]]; then
# Number of POD IPs is same as number of statefulset replicas. Now, if the number of ovnkube-db endpoints
# is 0, then we are applying the endpoint for the first time. So, we need to make sure that each of the
# pod IP responds to the `ovsdb-client list-dbs` call before we set the endpoint. If they don't, retry several
# times and then give up.

# Get the current set of ovnkube-db endpoints, if any
IFS=" " read -a old_ips <<< "$(kubectl --server=${K8S_APISERVER} --token=${k8s_token} --certificate-authority=${K8S_CACERT} \
get ep -n ovn-kubernetes ovnkube-db -o=jsonpath='{range .subsets[0].addresses[*]}{.ip}{" "}' 2>/dev/null)"
# Endpoint already populated by a previous run: nothing to do.
if [[ ${#old_ips[@]} -ne 0 ]]; then
return
fi

# Wait (up to 10 attempts each) for every DB instance to answer before
# publishing, so the endpoint never points at a dead server.
for ip in ${ips[@]} ; do
wait_for_event attempts=10 check_ovnkube_db_ep ${ip} ${port}
done
set_ovnkube_db_ep ${ips[@]}
else
# ideally shouldn't happen
echo "Not all the pods in the statefulset are up. Expecting ${replicas} pods, but found ${i} pods."
echo "Exiting...."
exit 10
fi
}

# v3 - create nb_ovsdb/sb_ovsdb cluster in a separate container
# v3 - create nb_ovsdb/sb_ovsdb cluster in a separate container
# Runs one clustered (RAFT) ovsdb-server for the given DB and blocks for the
# container's lifetime. Pod ovnkube-db-0 bootstraps the cluster; every other
# pod joins via the IP of ovnkube-db-0 (init_ip, set as a side effect of
# ready_to_join_cluster). The last pod of the statefulset also publishes the
# ovnkube-db endpoint once all instances are serving.
# Arguments: $1 - db name ("nb" or "sb"), $2 - DB TCP port
# Reads globals: POD_NAME, OVN_RUNDIR, OVN_ETCDIR, OVN_LOGDIR, OVNCTL_PATH,
#   ovn_log_nb/ovn_log_sb, and replicas (via verify-ovsdb-raft)
ovsdb-raft () {
# Reap background jobs (ovsdb-server, tail) on pod termination.
trap 'kill $(jobs -p); exit 0' TERM

local db=${1}
local port=${2}

ovn_db_pidfile=${OVN_RUNDIR}/ovn${db}_db.pid
# Indirect lookup: picks up ovn_log_nb or ovn_log_sb depending on ${db}.
eval ovn_log_db=\$ovn_log_${db}
ovn_db_file=${OVN_ETCDIR}/ovn${db}.db

rm -f ${ovn_db_pidfile}
verify-ovsdb-raft
# First non-loopback IPv4 of this pod; advertised as the cluster-local addr.
local_ip=$(getent ahostsv4 $(hostname) | grep -v "^127\." | head -1 | awk '{ print $1 }')
echo "=============== run ${db}-ovsdb-raft pod ${POD_NAME} =========="

if [[ "${POD_NAME}" == "ovnkube-db-0" ]]; then
# First pod bootstraps a brand-new cluster.
run_as_ovs_user_if_needed \
${OVNCTL_PATH} run_${db}_ovsdb --no-monitor \
--db-${db}-create-insecure-remote=yes --db-${db}-cluster-local-addr=${local_ip} \
--ovn-${db}-log="${ovn_log_db}" &
else
# join the remote cluster node if the DB is not created
if [[ ! -e ${ovn_db_file} ]] || ovsdb-tool db-is-standalone ${ovn_db_file} ; then
# Blocks until ovnkube-db-0 is serving; also sets init_ip (global).
wait_for_event ready_to_join_cluster ${db} ${port}
fi
run_as_ovs_user_if_needed \
${OVNCTL_PATH} run_${db}_ovsdb --no-monitor \
--db-${db}-create-insecure-remote=yes --db-${db}-cluster-local-addr=${local_ip} \
--db-${db}-cluster-remote-addr=${init_ip} \
--ovn-${db}-log="${ovn_log_db}" &
fi

wait_for_event process_ready ovn${db}_db
echo "=============== ${db}-ovsdb-raft ========== RUNNING"
sleep 3

last_node_index=$(expr ${replicas} - 1)
# Create endpoints only if all ovnkube-db pods have started and are running. We do this
# from the last pod of the statefulset.
if [[ ${db} == "nb" && "${POD_NAME}" == "ovnkube-db-"${last_node_index} ]]; then
check_and_apply_ovnkube_db_ep ${port}
fi

# Stream the server log to container stdout; --follow=name survives rotation.
tail --follow=name ${OVN_LOGDIR}/ovsdb-server-${db}.log &
ovn_tail_pid=$!

# Blocks while the DB process stays healthy; returning ends the container.
process_healthy ovn${db}_db ${ovn_tail_pid}
echo "=============== run ${db}_ovsdb-raft ========== terminated"
}
Loading