Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion FAQ.md
Original file line number Diff line number Diff line change
Expand Up @@ -21,7 +21,7 @@ need to be made aware of it by following the instructions below.
OVN CNI requires several TCP and UDP ports to be opened on each of the nodes
that are part of the K8s cluster.

1. The node on which ovnkube-master or ovnkube-network-controller-manager runs, open following ports:
1. On the node where ovnkube-master or ovnkube-controller runs, open the following ports:
```text
TCP:
port 9409 (prometheus port to export ovnkube-master metrics)
Expand Down
2 changes: 1 addition & 1 deletion README.md
Original file line number Diff line number Diff line change
Expand Up @@ -76,7 +76,7 @@ kubectl create -f $HOME/work/src/github.com/ovn-org/ovn-kubernetes/dist/yaml/k8s
kubectl create -f $HOME/work/src/github.com/ovn-org/ovn-kubernetes/dist/yaml/ovnkube-db.yaml

# Run ovnkube-master deployment
# To run ovnkube-master deployment with both cluster manager and network controller manager as one container)
# To run ovnkube-master deployment with both cluster manager and ovnkube controller as one container
kubectl create -f $HOME/work/src/github.com/ovn-org/ovn-kubernetes/dist/yaml/ovnkube-master.yaml

# Run ovnkube daemonset for nodes
Expand Down
34 changes: 17 additions & 17 deletions dist/images/ovnkube.sh
Original file line number Diff line number Diff line change
Expand Up @@ -465,7 +465,7 @@ process_healthy() {
check_health() {
ctl_file=""
case ${1} in
"ovnkube" | "ovnkube-master" | "ovn-dbchecker" | "ovnkube-cluster-manager" | "ovnkube-network-controller-manager")
"ovnkube" | "ovnkube-master" | "ovn-dbchecker" | "ovnkube-cluster-manager" | "ovnkube-controller")
# just check for presence of pid
;;
"ovnnb_db" | "ovnsb_db")
Expand Down Expand Up @@ -987,7 +987,7 @@ run-ovn-northd() {
exit 8
}

# v3 - run ovnkube --master (both cluster-manager and network-controller-manager)
# v3 - run ovnkube --master (both cluster-manager and ovnkube-controller)
ovn-master() {
trap 'kill $(jobs -p); exit 0' TERM
check_ovn_daemonset_version "3"
Expand Down Expand Up @@ -1178,21 +1178,21 @@ ovn-master() {
exit 9
}

# v3 - run ovnkube --network-controller-manager
ovn-network-controller-manager() {
# v3 - run ovnkube --init-ovnkube-controller
ovnkube-controller() {
trap 'kill $(jobs -p); exit 0' TERM
check_ovn_daemonset_version "3"
rm -f ${OVN_RUNDIR}/ovnkube-network-controller-manager.pid
rm -f ${OVN_RUNDIR}/ovnkube-controller.pid

echo "=============== ovn-network-controller-manager (wait for ready_to_start_node) =========="
echo "=============== ovnkube-controller (wait for ready_to_start_node) =========="
wait_for_event ready_to_start_node
echo "ovn_nbdb ${ovn_nbdb} ovn_sbdb ${ovn_sbdb}"

# wait for northd to start
wait_for_event process_ready ovn-northd

# wait for ovs-servers to start since ovn-master sets some fields in OVS DB
echo "=============== ovn-network-controller-manager - (wait for ovs)"
echo "=============== ovnkube-controller - (wait for ovs)"
wait_for_event ovs_ready

hybrid_overlay_flags=
Expand Down Expand Up @@ -1316,7 +1316,7 @@ ovn-network-controller-manager() {
echo "ovnkube_config_duration_enable_flag: ${ovnkube_config_duration_enable_flag}"

ovn_zone=$(get_node_zone)
echo "ovn-network-controller-manager's configured zone is ${ovn_zone}"
echo "ovnkube-controller's configured zone is ${ovn_zone}"

ovn_dbs=""
if [[ $ovn_nbdb != "local" ]]; then
Expand All @@ -1332,9 +1332,9 @@ ovn-network-controller-manager() {
fi
echo "ovnkube_enable_interconnect_flag: ${ovnkube_enable_interconnect_flag}"

echo "=============== ovn-network-controller-manager ========== MASTER ONLY"
echo "=============== ovnkube-controller ========== MASTER ONLY"
/usr/bin/ovnkube \
--init-network-controller-manager ${K8S_NODE} \
--init-ovnkube-controller ${K8S_NODE} \
--cluster-subnets ${net_cidr} --k8s-service-cidr=${svc_cidr} \
${ovn_dbs} \
--gateway-mode=${ovn_gateway_mode} \
Expand All @@ -1347,8 +1347,8 @@ ovn-network-controller-manager() {
${empty_lb_events_flag} \
${ovn_v4_join_subnet_opt} \
${ovn_v6_join_subnet_opt} \
--pidfile ${OVN_RUNDIR}/ovnkube-network-controller-manager.pid \
--logfile /var/log/ovn-kubernetes/ovnkube-network-controller-manager.log \
--pidfile ${OVN_RUNDIR}/ovnkube-controller.pid \
--logfile /var/log/ovn-kubernetes/ovnkube-controller.log \
${ovn_master_ssl_opts} \
${ovnkube_metrics_tls_opts} \
${multicast_enabled_flag} \
Expand All @@ -1365,10 +1365,10 @@ ovn-network-controller-manager() {
--metrics-bind-address ${ovnkube_master_metrics_bind_address} \
--host-network-namespace ${ovn_host_network_namespace} &

echo "=============== ovn-network-controller-manager ========== running"
wait_for_event attempts=3 process_ready ovnkube-network-controller-manager
echo "=============== ovnkube-controller ========== running"
wait_for_event attempts=3 process_ready ovnkube-controller

process_healthy ovnkube-network-controller-manager
process_healthy ovnkube-controller
exit 9
}

Expand Down Expand Up @@ -1876,8 +1876,8 @@ case ${cmd} in
"ovn-master") # pod ovnkube-master container ovnkube-master
ovn-master
;;
"ovn-network-controller-manager") # pod ovnkube-master container ovnkube-network-controller-manager
ovn-network-controller-manager
"ovnkube-controller") # pod ovnkube-master container ovnkube-controller
ovnkube-controller
;;
"ovn-cluster-manager") # pod ovnkube-master container ovnkube-cluster-manager
ovn-cluster-manager
Expand Down
2 changes: 1 addition & 1 deletion dist/templates/ovnkube-monitor.yaml.j2
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
# define ServiceMontior and Service resources for ovnkube-cluster-manager,
# ovnkube-master (or ovnkube-network-controller-manager), ovnkube-node and ovnkube-db (required for prometheus monitoring)
# ovnkube-master (or ovnkube-controller), ovnkube-node and ovnkube-db (required for prometheus monitoring)

apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
Expand Down
2 changes: 1 addition & 1 deletion dist/templates/ovnkube-single-node-zone.yaml.j2
Original file line number Diff line number Diff line change
Expand Up @@ -241,7 +241,7 @@ spec:
image: "{{ ovn_image | default('docker.io/ovnkube/ovn-daemonset:latest') }}"
imagePullPolicy: "{{ ovn_image_pull_policy | default('IfNotPresent') }}"

command: ["/root/ovnkube.sh", "ovn-network-controller-manager"]
command: ["/root/ovnkube.sh", "ovnkube-controller"]

securityContext:
runAsUser: 0
Expand Down
4 changes: 2 additions & 2 deletions dist/templates/ovnkube-zone-controller.yaml.j2
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
---
# ovnkube-zone-controller
# daemonset version 3
# starts zone controller daemons - ovn dbs, ovn-northd and ovnkube-network-controller-manager containers
# starts zone controller daemons - ovn dbs, ovn-northd and ovnkube-controller containers
kind: DaemonSet
apiVersion: apps/v1
metadata:
Expand Down Expand Up @@ -257,7 +257,7 @@ spec:
image: "{{ ovn_image | default('docker.io/ovnkube/ovn-daemonset:latest') }}"
imagePullPolicy: "{{ ovn_image_pull_policy | default('IfNotPresent') }}"

command: ["/root/ovnkube.sh", "ovn-network-controller-manager"]
command: ["/root/ovnkube.sh", "ovnkube-controller"]

securityContext:
runAsUser: 0
Expand Down
14 changes: 7 additions & 7 deletions docs/ha.md
Original file line number Diff line number Diff line change
Expand Up @@ -76,11 +76,11 @@ sudo ovs-appctl -t /var/run/openvswitch/ovnsb_db.ctl \

## ovnkube master HA setup

ovnkube master has 2 main components - cluster-manager and network-controller-manager.
ovnkube master has 2 main components - cluster-manager and ovnkube-controller.

Starting ovnkube with '-init-master' runs both components. It is also possible
to run these components individually by starting two ovnkube processes, one with '-init-cluster-manager'
and the other with '-init-network-controller-manager'.
and the other with '-init-ovnkube-controller'.

On the master nodes, we can either
* start ovnkube with '-init-master'
Expand All @@ -105,25 +105,25 @@ nohup sudo ovnkube -k8s-kubeconfig kubeconfig.yaml \
-nb-address="${ovn_nb}" \
-sb-address="${ovn_sb}" 2>&1 &

* start 'ovnkube -init-cluster-manager' and 'ovnkube -init-network-controller-manager'
* start 'ovnkube -init-cluster-manager' and 'ovnkube -init-ovnkube-controller'
This should be a deployment with these two as containers

Eg.


ovnkube master supports running in 3 modes.
init-master mode, init-cluster-manager mode or init-network-controller-manager
init-master mode, init-cluster-manager mode or init-ovnkube-controller
mode. If ovnkube is run with "-init-master" mode, then there is
no need to run the other modes because master mode enables both cluster-manager
and network-controller-manager. If the user desires to run cluster-manager
and network-controller-manager separately, then it is possible to do
and ovnkube-controller. If the user desires to run cluster-manager
and ovnkube-controller separately, then it is possible to do
so by running

nohup sudo ovnkube -k8s-kubeconfig kubeconfig.yaml \
-loglevel=4 \
-k8s-apiserver="http://$K8S_APISERVER_IP:8080" \
-logfile="/var/log/openvswitch/ovnkube.log" \
-init-network-controller-manager="$NODENAME" -cluster-subnets="$CLUSTER_IP_SUBNET" \
-init-ovnkube-controller="$NODENAME" -cluster-subnets="$CLUSTER_IP_SUBNET" \
-init-node="$NODENAME" \
-k8s-service-cidr="$SERVICE_IP_SUBNET" \
-k8s-token="$TOKEN" \
Expand Down
16 changes: 8 additions & 8 deletions go-controller/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -39,11 +39,11 @@ Usage:
-cluster-subnets string
cluster wide IP subnet to use (default: 11.11.0.0/16)
-init-master string
initialize master which enables both cluster manager (allocates node subnets) and network controller manager (which watches pods/nodes/services/policies and creates OVN db resources), requires the hostname as argument
initialize master which enables both cluster manager (allocates node subnets) and ovnkube controller (which watches pods/nodes/services/policies and creates OVN db resources), requires the hostname as argument
-init-cluster-manager string
initialize cluster manager that watches nodes (allocates subnet for each node from the cluster-subnets), requires the hostname as argument and doesn't connect to the OVN dbs.
-init-network-controller-manager string
initialize network-controller-manager (which watches pods/nodes/services/policies and create OVN db resources), requires the hostname as argument.
-init-ovnkube-controller string
initialize ovnkube-controller (which watches pods/nodes/services/policies and creates OVN db resources), requires the hostname as argument.
-init-node string
initialize node, requires the name that node is registered with in kubernetes cluster
-cleanup-node string
Expand Down Expand Up @@ -154,7 +154,7 @@ server-cacert=path/to/server-ca.crt

## Example

#### Initialize the master (both cluster manager and network controller manager)
#### Initialize the master (both cluster manager and ovnkube controller)

```
ovnkube --init-master <master-host-name> \
Expand All @@ -165,14 +165,14 @@ ovnkube --init-master <master-host-name> \
```

The aforementioned master ovnkube controller will enable both the cluster manager (which watches nodes and allocates node subnets)
and network controller manager which initialize the central master logical router and establish the watcher loops for the following:
and the ovnkube controller, which initializes the central master logical router and establishes the watcher loops for the following:
- nodes: as new nodes are born and init-node is called, the logical switches will be created automatically by giving out IPAM for the respective nodes
- pods: as new pods are born, allocate the logical port with dynamic addressing from the switch it belongs to
- services/endpoints: as new endpoints of services are born, create/update the logical load balancer on all logical switches
- network policies and a few other k8s resources


#### Initialize the cluster manager and network controller manager separately
#### Initialize the cluster manager and ovnkube controller separately

```
ovnkube --init-cluster-manager <master-host-name> \
Expand All @@ -186,14 +186,14 @@ The aforementioned ovnkube cluster manager will establish the watcher loops for
- nodes: as new nodes are born and init-node is called, the subnet IPAM is allocated for the respective nodes

```
ovnkube --init-network-controller-manager <master-host-name> \
ovnkube --init-ovnkube-controller <master-host-name> \
--k8s-cacert <path to the cacert file> \
--k8s-token <token string for authentication with kube apiserver> \
--k8s-apiserver <url to the kube apiserver e.g. https://10.11.12.13.8443> \
--cluster-subnets <cidr representing the global pod network e.g. 192.168.0.0/16>
```

The aforementioned ovnkube network controller manager will initialize the central master logical router and establish the watcher loops for the following:
The aforementioned ovnkube controller will initialize the central master logical router and establish the watcher loops for the following:
- nodes: as new nodes are born and init-node is called, the logical switches will be created automatically by giving out IPAM for the respective nodes
- pods: as new pods are born, allocate the logical port with dynamic addressing from the switch it belongs to
- services/endpoints: as new endpoints of services are born, create/update the logical load balancer on all logical switches
Expand Down
Loading