diff --git a/contrib/kind.sh b/contrib/kind.sh index e30be610bc..3e2afd97aa 100755 --- a/contrib/kind.sh +++ b/contrib/kind.sh @@ -579,6 +579,7 @@ create_ovn_kube_manifests() { --ovn-loglevel-controller="${OVN_LOG_LEVEL_CONTROLLER}" \ --egress-ip-enable=true \ --egress-firewall-enable=true \ + --egress-qos-enable=true \ --v4-join-subnet="${JOIN_SUBNET_IPV4}" \ --v6-join-subnet="${JOIN_SUBNET_IPV6}" \ --ex-gw-network-interface="${OVN_EX_GW_NETWORK_INTERFACE}" @@ -606,6 +607,7 @@ install_ovn() { run_kubectl apply -f k8s.ovn.org_egressfirewalls.yaml run_kubectl apply -f k8s.ovn.org_egressips.yaml + run_kubectl apply -f k8s.ovn.org_egressqoses.yaml run_kubectl apply -f ovn-setup.yaml MASTER_NODES=$(kind get nodes --name "${KIND_CLUSTER_NAME}" | sort | head -n "${KIND_NUM_MASTER}") # We want OVN HA not Kubernetes HA diff --git a/dist/images/daemonset.sh b/dist/images/daemonset.sh index 3a0424f08a..1703f8833e 100755 --- a/dist/images/daemonset.sh +++ b/dist/images/daemonset.sh @@ -60,6 +60,7 @@ OVN_EMPTY_LB_EVENTS="" OVN_MULTICAST_ENABLE="" OVN_EGRESSIP_ENABLE= OVN_EGRESSFIREWALL_ENABLE= +OVN_EGRESSQOS_ENABLE= OVN_DISABLE_OVN_IFACE_ID_VER="false" OVN_V4_JOIN_SUBNET="" OVN_V6_JOIN_SUBNET="" @@ -204,6 +205,9 @@ while [ "$1" != "" ]; do --egress-firewall-enable) OVN_EGRESSFIREWALL_ENABLE=$VALUE ;; + --egress-qos-enable) + OVN_EGRESSQOS_ENABLE=$VALUE + ;; --v4-join-subnet) OVN_V4_JOIN_SUBNET=$VALUE ;; @@ -302,6 +306,8 @@ ovn_egress_ip_enable=${OVN_EGRESSIP_ENABLE} echo "ovn_egress_ip_enable: ${ovn_egress_ip_enable}" ovn_egress_firewall_enable=${OVN_EGRESSFIREWALL_ENABLE} echo "ovn_egress_firewall_enable: ${ovn_egress_firewall_enable}" +ovn_egress_qos_enable=${OVN_EGRESSQOS_ENABLE} +echo "ovn_egress_qos_enable: ${ovn_egress_qos_enable}" ovn_disable_ovn_iface_id_ver=${OVN_DISABLE_OVN_IFACE_ID_VER} echo "ovn_disable_ovn_iface_id_ver: ${ovn_disable_ovn_iface_id_ver}" ovn_hybrid_overlay_net_cidr=${OVN_HYBRID_OVERLAY_NET_CIDR} @@ -448,6 +454,7 @@ ovn_image=${image} \ 
ovn_multicast_enable=${ovn_multicast_enable} \ ovn_egress_ip_enable=${ovn_egress_ip_enable} \ ovn_egress_firewall_enable=${ovn_egress_firewall_enable} \ + ovn_egress_qos_enable=${ovn_egress_qos_enable} \ ovn_ssl_en=${ovn_ssl_en} \ ovn_master_count=${ovn_master_count} \ ovn_gateway_mode=${ovn_gateway_mode} \ @@ -506,5 +513,6 @@ net_cidr=${net_cidr} svc_cidr=${svc_cidr} \ cp ../templates/ovnkube-monitor.yaml.j2 ${output_dir}/ovnkube-monitor.yaml cp ../templates/k8s.ovn.org_egressfirewalls.yaml.j2 ${output_dir}/k8s.ovn.org_egressfirewalls.yaml cp ../templates/k8s.ovn.org_egressips.yaml.j2 ${output_dir}/k8s.ovn.org_egressips.yaml +cp ../templates/k8s.ovn.org_egressqoses.yaml.j2 ${output_dir}/k8s.ovn.org_egressqoses.yaml exit 0 diff --git a/dist/images/ovnkube.sh b/dist/images/ovnkube.sh index 4deeff0a34..7f54ce76e7 100755 --- a/dist/images/ovnkube.sh +++ b/dist/images/ovnkube.sh @@ -73,6 +73,7 @@ fi # OVN_LFLOW_CACHE_LIMIT_KB - maximum size of the logical flow cache of ovn-controller # OVN_EGRESSIP_ENABLE - enable egress IP for ovn-kubernetes # OVN_EGRESSFIREWALL_ENABLE - enable egressFirewall for ovn-kubernetes +# OVN_EGRESSQOS_ENABLE - enable egress QoS for ovn-kubernetes # OVN_UNPRIVILEGED_MODE - execute CNI ovs/netns commands from host (default no) # OVNKUBE_NODE_MODE - ovnkube node mode of operation, one of: full, dpu, dpu-host (default: full) # OVNKUBE_NODE_MGMT_PORT_NETDEV - ovnkube node management port netdev. 
valid when ovnkube node mode is: dpu, dpu-host @@ -209,6 +210,8 @@ ovn_multicast_enable=${OVN_MULTICAST_ENABLE:-} ovn_egressip_enable=${OVN_EGRESSIP_ENABLE:-false} #OVN_EGRESSFIREWALL_ENABLE - enable egressFirewall for ovn-kubernetes ovn_egressfirewall_enable=${OVN_EGRESSFIREWALL_ENABLE:-false} +#OVN_EGRESSQOS_ENABLE - enable egress QoS for ovn-kubernetes +ovn_egressqos_enable=${OVN_EGRESSQOS_ENABLE:-false} #OVN_DISABLE_OVN_IFACE_ID_VER - disable usage of the OVN iface-id-ver option ovn_disable_ovn_iface_id_ver=${OVN_DISABLE_OVN_IFACE_ID_VER:-false} ovn_acl_logging_rate_limit=${OVN_ACL_LOGGING_RATE_LIMIT:-"20"} @@ -946,8 +949,19 @@ ovn-master() { egressfirewall_enabled_flag="--enable-egress-firewall" fi echo "egressfirewall_enabled_flag=${egressfirewall_enabled_flag}" + egressqos_enabled_flag= + if [[ ${ovn_egressqos_enable} == "true" ]]; then + egressqos_enabled_flag="--enable-egress-qos" + fi ovnkube_master_metrics_bind_address="${metrics_endpoint_ip}:9409" + local ovnkube_metrics_tls_opts="" + if [[ ${OVNKUBE_METRICS_PK} != "" && ${OVNKUBE_METRICS_CERT} != "" ]]; then + ovnkube_metrics_tls_opts=" + --node-server-privkey ${OVNKUBE_METRICS_PK} + --node-server-cert ${OVNKUBE_METRICS_CERT} + " + fi echo "=============== ovn-master ========== MASTER ONLY" /usr/bin/ovnkube \ @@ -967,10 +981,12 @@ ovn-master() { --pidfile ${OVN_RUNDIR}/ovnkube-master.pid \ --logfile /var/log/ovn-kubernetes/ovnkube-master.log \ ${ovn_master_ssl_opts} \ + ${ovnkube_metrics_tls_opts} \ ${multicast_enabled_flag} \ ${ovn_acl_logging_rate_limit_flag} \ ${egressip_enabled_flag} \ ${egressfirewall_enabled_flag} \ + ${egressqos_enabled_flag} \ --metrics-bind-address ${ovnkube_master_metrics_bind_address} \ --host-network-namespace ${ovn_host_network_namespace} & @@ -1187,6 +1203,14 @@ ovn-node() { ovn_metrics_bind_address="${metrics_endpoint_ip}:9476" ovnkube_node_metrics_bind_address="${metrics_endpoint_ip}:9410" + local ovnkube_metrics_tls_opts="" + if [[ ${OVNKUBE_METRICS_PK} != "" && 
${OVNKUBE_METRICS_CERT} != "" ]]; then + ovnkube_metrics_tls_opts=" + --node-server-privkey ${OVNKUBE_METRICS_PK} + --node-server-cert ${OVNKUBE_METRICS_CERT} + " + fi + echo "=============== ovn-node --init-node" /usr/bin/ovnkube --init-node ${K8S_NODE} \ --cluster-subnets ${net_cidr} --k8s-service-cidr=${svc_cidr} \ @@ -1208,6 +1232,7 @@ ovn-node() { --pidfile ${OVN_RUNDIR}/ovnkube.pid \ --logfile /var/log/ovn-kubernetes/ovnkube.log \ ${ovn_node_ssl_opts} \ + ${ovnkube_metrics_tls_opts} \ --inactivity-probe=${ovn_remote_probe_interval} \ ${monitor_all} \ ${enable_lflow_cache} \ diff --git a/dist/templates/k8s.ovn.org_egressqoses.yaml.j2 b/dist/templates/k8s.ovn.org_egressqoses.yaml.j2 new file mode 100644 index 0000000000..beb0e3f762 --- /dev/null +++ b/dist/templates/k8s.ovn.org_egressqoses.yaml.j2 @@ -0,0 +1,128 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.8.0 + creationTimestamp: null + name: egressqoses.k8s.ovn.org +spec: + group: k8s.ovn.org + names: + kind: EgressQoS + listKind: EgressQoSList + plural: egressqoses + singular: egressqos + scope: Namespaced + versions: + - name: v1 + schema: + openAPIV3Schema: + description: EgressQoS is a CRD that allows the user to define a DSCP value + for pods egress traffic on its namespace to specified CIDRs. Traffic from + these pods will be checked against each EgressQoSRule in the namespace's + EgressQoS, and if there is a match the traffic is marked with the relevant + DSCP value. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + properties: + name: + type: string + pattern: ^default$ + spec: + description: EgressQoSSpec defines the desired state of EgressQoS + properties: + egress: + description: a collection of Egress QoS rule objects + items: + properties: + dscp: + description: DSCP marking value for matching pods' traffic. + maximum: 63 + minimum: 0 + type: integer + dstCIDR: + description: DstCIDR specifies the destination's CIDR. Only + traffic heading to this CIDR will be marked with the DSCP + value. This field is optional, and in case it is not set the + rule is applied to all egress traffic regardless of the destination. + type: string + podSelector: + description: PodSelector applies the QoS rule only to the pods + in the namespace whose label matches this definition. This + field is optional, and in case it is not set results in the + rule being applied to all pods in the namespace. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector + that contains values, a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are In, NotIn, + Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. 
+ If the operator is In or NotIn, the values array + must be non-empty. If the operator is Exists or + DoesNotExist, the values array must be empty. This + array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. + A single {key,value} in the matchLabels map is equivalent + to an element of matchExpressions, whose key field is + "key", the operator is "In", and the values array contains + only "value". The requirements are ANDed. + type: object + type: object + required: + - dscp + type: object + type: array + required: + - egress + type: object + status: + description: EgressQoSStatus defines the observed state of EgressQoS + type: object + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/dist/templates/ovn-setup.yaml.j2 b/dist/templates/ovn-setup.yaml.j2 index 31f897a849..68d838bd79 100644 --- a/dist/templates/ovn-setup.yaml.j2 +++ b/dist/templates/ovn-setup.yaml.j2 @@ -81,6 +81,7 @@ rules: resources: - egressfirewalls - egressips + - egressqoses verbs: ["list", "get", "watch", "update", "patch"] - apiGroups: - apiextensions.k8s.io diff --git a/dist/templates/ovnkube-master.yaml.j2 b/dist/templates/ovnkube-master.yaml.j2 index 91e419abac..fc3f6b84d3 100644 --- a/dist/templates/ovnkube-master.yaml.j2 +++ b/dist/templates/ovnkube-master.yaml.j2 @@ -195,6 +195,8 @@ spec: value: "{{ ovn_egress_ip_enable }}" - name: OVN_EGRESSFIREWALL_ENABLE value: "{{ ovn_egress_firewall_enable }}" + - name: OVN_EGRESSQOS_ENABLE + value: "{{ ovn_egress_qos_enable }}" - name: OVN_HYBRID_OVERLAY_NET_CIDR value: "{{ ovn_hybrid_overlay_net_cidr }}" - name: OVN_DISABLE_SNAT_MULTIPLE_GWS diff --git a/go-controller/cmd/ovnkube/ovnkube.go 
b/go-controller/cmd/ovnkube/ovnkube.go index b76f7adf43..4df5e884b9 100644 --- a/go-controller/cmd/ovnkube/ovnkube.go +++ b/go-controller/cmd/ovnkube/ovnkube.go @@ -279,7 +279,8 @@ func runOvnKube(ctx *cli.Context) error { // now that ovnkube master/node are running, lets expose the metrics HTTP endpoint if configured // start the prometheus server to serve OVN K8s Metrics (default master port: 9409, node port: 9410) if config.Metrics.BindAddress != "" { - metrics.StartMetricsServer(config.Metrics.BindAddress, config.Metrics.EnablePprof) + metrics.StartMetricsServer(config.Metrics.BindAddress, config.Metrics.EnablePprof, + config.Metrics.NodeServerCert, config.Metrics.NodeServerPrivKey) } // start the prometheus server to serve OVS and OVN Metrics (default port: 9476) @@ -289,7 +290,8 @@ func runOvnKube(ctx *cli.Context) error { metrics.RegisterOvsMetricsWithOvnMetrics() } metrics.RegisterOvnMetrics(ovnClientset.KubeClient, node) - metrics.StartOVNMetricsServer(config.Metrics.OVNMetricsBindAddress) + metrics.StartOVNMetricsServer(config.Metrics.OVNMetricsBindAddress, + config.Metrics.NodeServerCert, config.Metrics.NodeServerPrivKey) } // run until cancelled diff --git a/go-controller/hack/boilerplate.go.txt b/go-controller/hack/boilerplate.go.txt new file mode 100644 index 0000000000..767efde981 --- /dev/null +++ b/go-controller/hack/boilerplate.go.txt @@ -0,0 +1,15 @@ +/* + + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ \ No newline at end of file diff --git a/go-controller/hack/update-codegen.sh b/go-controller/hack/update-codegen.sh index ca9fb45d5e..0b2162c4d8 100755 --- a/go-controller/hack/update-codegen.sh +++ b/go-controller/hack/update-codegen.sh @@ -40,6 +40,7 @@ fi for crd in ${crds}; do echo "Generating deepcopy funcs for $crd" deepcopy-gen \ + --go-header-file hack/boilerplate.go.txt \ --input-dirs github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/$crd/v1 \ -O zz_generated.deepcopy \ --bounding-dirs github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd @@ -47,6 +48,7 @@ for crd in ${crds}; do echo "Generating clientset for $crd" client-gen \ + --go-header-file hack/boilerplate.go.txt \ --clientset-name "${CLIENTSET_NAME_VERSIONED:-versioned}" \ --input-base "" \ --input github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/$crd/v1 \ @@ -55,12 +57,14 @@ for crd in ${crds}; do echo "Generating listers for $crd" lister-gen \ + --go-header-file hack/boilerplate.go.txt \ --input-dirs github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/$crd/v1 \ --output-package github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/$crd/v1/apis/listers \ "$@" echo "Generating informers for $crd" informer-gen \ + --go-header-file hack/boilerplate.go.txt \ --input-dirs github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/$crd/v1 \ --versioned-clientset-package github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/$crd/v1/apis/clientset/versioned \ --listers-package github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/$crd/v1/apis/listers \ @@ -82,3 +86,8 @@ sed -i -e':begin;$!N;s/.*metadata:\n.*type: object/&\n properties:\n ## adding validation to objects only to the fields sed -i -e ':begin;$!N;s/ type: string\n.*type: object/&\n minProperties: 1\n maxProperties: 1/;P;D' \ _output/crds/k8s.ovn.org_egressfirewalls.yaml + +echo "Editing EgressQoS CRD" +## We desire that only EgressQoS with the name "default" are accepted by the apiserver. 
+sed -i -e':begin;$!N;s/.*metadata:\n.*type: object/&\n properties:\n name:\n type: string\n pattern: ^default$/;P;D' \ + _output/crds/k8s.ovn.org_egressqoses.yaml \ No newline at end of file diff --git a/go-controller/pkg/config/config.go b/go-controller/pkg/config/config.go index ecdbe3915d..4b2382676a 100644 --- a/go-controller/pkg/config/config.go +++ b/go-controller/pkg/config/config.go @@ -309,12 +309,15 @@ type MetricsConfig struct { OVNMetricsBindAddress string `gcfg:"ovn-metrics-bind-address"` ExportOVSMetrics bool `gcfg:"export-ovs-metrics"` EnablePprof bool `gcfg:"enable-pprof"` + NodeServerPrivKey string `gcfg:"node-server-privkey"` + NodeServerCert string `gcfg:"node-server-cert"` } // OVNKubernetesFeatureConfig holds OVN-Kubernetes feature enhancement config file parameters and command-line overrides type OVNKubernetesFeatureConfig struct { EnableEgressIP bool `gcfg:"enable-egress-ip"` EnableEgressFirewall bool `gcfg:"enable-egress-firewall"` + EnableEgressQoS bool `gcfg:"enable-egress-qos"` } // GatewayMode holds the node gateway mode @@ -834,6 +837,12 @@ var OVNK8sFeatureFlags = []cli.Flag{ Destination: &cliConfig.OVNKubernetesFeature.EnableEgressFirewall, Value: OVNKubernetesFeature.EnableEgressFirewall, }, + &cli.BoolFlag{ + Name: "enable-egress-qos", + Usage: "Configure to use EgressQoS CRD feature with ovn-kubernetes.", + Destination: &cliConfig.OVNKubernetesFeature.EnableEgressQoS, + Value: OVNKubernetesFeature.EnableEgressQoS, + }, } // K8sFlags capture Kubernetes-related options @@ -944,6 +953,16 @@ var MetricsFlags = []cli.Flag{ Destination: &cliConfig.Metrics.EnablePprof, Value: Metrics.EnablePprof, }, + &cli.StringFlag{ + Name: "node-server-privkey", + Usage: "Private key that the OVN node K8s metrics server uses to serve metrics over TLS.", + Destination: &cliConfig.Metrics.NodeServerPrivKey, + }, + &cli.StringFlag{ + Name: "node-server-cert", + Usage: "Certificate that the OVN node K8s metrics server uses to serve metrics over TLS.", + 
Destination: &cliConfig.Metrics.NodeServerCert, + }, } // OvnNBFlags capture OVN northbound database options diff --git a/go-controller/pkg/config/config_test.go b/go-controller/pkg/config/config_test.go index 3505273715..8c82deeae5 100644 --- a/go-controller/pkg/config/config_test.go +++ b/go-controller/pkg/config/config_test.go @@ -154,6 +154,8 @@ bind-address=1.1.1.1:8080 ovn-metrics-bind-address=1.1.1.2:8081 export-ovs-metrics=true enable-pprof=true +node-server-privkey=/path/to/node-metrics-private.key +node-server-cert=/path/to/node-metrics.crt [logging] loglevel=5 @@ -278,6 +280,8 @@ var _ = Describe("Config Operations", func() { gomega.Expect(Kubernetes.APIServer).To(gomega.Equal(DefaultAPIServer)) gomega.Expect(Kubernetes.RawServiceCIDRs).To(gomega.Equal("172.16.1.0/24")) gomega.Expect(Kubernetes.RawNoHostSubnetNodes).To(gomega.Equal("")) + gomega.Expect(Metrics.NodeServerPrivKey).To(gomega.Equal("")) + gomega.Expect(Metrics.NodeServerCert).To(gomega.Equal("")) gomega.Expect(Default.ClusterSubnets).To(gomega.Equal([]CIDRNetworkEntry{ {ovntest.MustParseIPNet("10.128.0.0/14"), 23}, })) @@ -561,6 +565,8 @@ var _ = Describe("Config Operations", func() { gomega.Expect(Metrics.OVNMetricsBindAddress).To(gomega.Equal("1.1.1.2:8081")) gomega.Expect(Metrics.ExportOVSMetrics).To(gomega.Equal(true)) gomega.Expect(Metrics.EnablePprof).To(gomega.Equal(true)) + gomega.Expect(Metrics.NodeServerPrivKey).To(gomega.Equal("/path/to/node-metrics-private.key")) + gomega.Expect(Metrics.NodeServerCert).To(gomega.Equal("/path/to/node-metrics.crt")) gomega.Expect(OvnNorth.Scheme).To(gomega.Equal(OvnDBSchemeSSL)) gomega.Expect(OvnNorth.PrivKey).To(gomega.Equal("/path/to/nb-client-private.key")) @@ -640,6 +646,8 @@ var _ = Describe("Config Operations", func() { gomega.Expect(Metrics.OVNMetricsBindAddress).To(gomega.Equal("2.2.2.3:8081")) gomega.Expect(Metrics.ExportOVSMetrics).To(gomega.Equal(true)) gomega.Expect(Metrics.EnablePprof).To(gomega.Equal(true)) + 
gomega.Expect(Metrics.NodeServerPrivKey).To(gomega.Equal("/tls/nodeprivkey")) + gomega.Expect(Metrics.NodeServerCert).To(gomega.Equal("/tls/nodecert")) gomega.Expect(OvnNorth.Scheme).To(gomega.Equal(OvnDBSchemeSSL)) gomega.Expect(OvnNorth.PrivKey).To(gomega.Equal("/client/privkey")) @@ -699,6 +707,8 @@ var _ = Describe("Config Operations", func() { "-sb-client-cert=/client/cert2", "-sb-client-cacert=/client/cacert2", "-sb-cert-common-name=testsbcommonname", + "-node-server-privkey=/tls/nodeprivkey", + "-node-server-cert=/tls/nodecert", "-gateway-mode=shared", "-nodeport", "-gateway-v4-join-subnet=100.63.0.0/16", diff --git a/go-controller/pkg/crd/egressqos/v1/apis/clientset/versioned/clientset.go b/go-controller/pkg/crd/egressqos/v1/apis/clientset/versioned/clientset.go new file mode 100644 index 0000000000..ecf178e853 --- /dev/null +++ b/go-controller/pkg/crd/egressqos/v1/apis/clientset/versioned/clientset.go @@ -0,0 +1,97 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package versioned + +import ( + "fmt" + + k8sv1 "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1/apis/clientset/versioned/typed/egressqos/v1" + discovery "k8s.io/client-go/discovery" + rest "k8s.io/client-go/rest" + flowcontrol "k8s.io/client-go/util/flowcontrol" +) + +type Interface interface { + Discovery() discovery.DiscoveryInterface + K8sV1() k8sv1.K8sV1Interface +} + +// Clientset contains the clients for groups. Each group has exactly one +// version included in a Clientset. +type Clientset struct { + *discovery.DiscoveryClient + k8sV1 *k8sv1.K8sV1Client +} + +// K8sV1 retrieves the K8sV1Client +func (c *Clientset) K8sV1() k8sv1.K8sV1Interface { + return c.k8sV1 +} + +// Discovery retrieves the DiscoveryClient +func (c *Clientset) Discovery() discovery.DiscoveryInterface { + if c == nil { + return nil + } + return c.DiscoveryClient +} + +// NewForConfig creates a new Clientset for the given config. +// If config's RateLimiter is not set and QPS and Burst are acceptable, +// NewForConfig will generate a rate-limiter in configShallowCopy. +func NewForConfig(c *rest.Config) (*Clientset, error) { + configShallowCopy := *c + if configShallowCopy.RateLimiter == nil && configShallowCopy.QPS > 0 { + if configShallowCopy.Burst <= 0 { + return nil, fmt.Errorf("burst is required to be greater than 0 when RateLimiter is not set and QPS is set to greater than 0") + } + configShallowCopy.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(configShallowCopy.QPS, configShallowCopy.Burst) + } + var cs Clientset + var err error + cs.k8sV1, err = k8sv1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + + cs.DiscoveryClient, err = discovery.NewDiscoveryClientForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + return &cs, nil +} + +// NewForConfigOrDie creates a new Clientset for the given config and +// panics if there is an error in the config. 
+func NewForConfigOrDie(c *rest.Config) *Clientset { + var cs Clientset + cs.k8sV1 = k8sv1.NewForConfigOrDie(c) + + cs.DiscoveryClient = discovery.NewDiscoveryClientForConfigOrDie(c) + return &cs +} + +// New creates a new Clientset for the given RESTClient. +func New(c rest.Interface) *Clientset { + var cs Clientset + cs.k8sV1 = k8sv1.New(c) + + cs.DiscoveryClient = discovery.NewDiscoveryClient(c) + return &cs +} diff --git a/go-controller/pkg/crd/egressqos/v1/apis/clientset/versioned/doc.go b/go-controller/pkg/crd/egressqos/v1/apis/clientset/versioned/doc.go new file mode 100644 index 0000000000..41721ca52d --- /dev/null +++ b/go-controller/pkg/crd/egressqos/v1/apis/clientset/versioned/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated clientset. +package versioned diff --git a/go-controller/pkg/crd/egressqos/v1/apis/clientset/versioned/fake/clientset_generated.go b/go-controller/pkg/crd/egressqos/v1/apis/clientset/versioned/fake/clientset_generated.go new file mode 100644 index 0000000000..b042e5f0d8 --- /dev/null +++ b/go-controller/pkg/crd/egressqos/v1/apis/clientset/versioned/fake/clientset_generated.go @@ -0,0 +1,85 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + clientset "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1/apis/clientset/versioned" + k8sv1 "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1/apis/clientset/versioned/typed/egressqos/v1" + fakek8sv1 "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1/apis/clientset/versioned/typed/egressqos/v1/fake" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/discovery" + fakediscovery "k8s.io/client-go/discovery/fake" + "k8s.io/client-go/testing" +) + +// NewSimpleClientset returns a clientset that will respond with the provided objects. +// It's backed by a very simple object tracker that processes creates, updates and deletions as-is, +// without applying any validations and/or defaults. It shouldn't be considered a replacement +// for a real clientset and is mostly useful in simple unit tests. 
+func NewSimpleClientset(objects ...runtime.Object) *Clientset { + o := testing.NewObjectTracker(scheme, codecs.UniversalDecoder()) + for _, obj := range objects { + if err := o.Add(obj); err != nil { + panic(err) + } + } + + cs := &Clientset{tracker: o} + cs.discovery = &fakediscovery.FakeDiscovery{Fake: &cs.Fake} + cs.AddReactor("*", "*", testing.ObjectReaction(o)) + cs.AddWatchReactor("*", func(action testing.Action) (handled bool, ret watch.Interface, err error) { + gvr := action.GetResource() + ns := action.GetNamespace() + watch, err := o.Watch(gvr, ns) + if err != nil { + return false, nil, err + } + return true, watch, nil + }) + + return cs +} + +// Clientset implements clientset.Interface. Meant to be embedded into a +// struct to get a default implementation. This makes faking out just the method +// you want to test easier. +type Clientset struct { + testing.Fake + discovery *fakediscovery.FakeDiscovery + tracker testing.ObjectTracker +} + +func (c *Clientset) Discovery() discovery.DiscoveryInterface { + return c.discovery +} + +func (c *Clientset) Tracker() testing.ObjectTracker { + return c.tracker +} + +var ( + _ clientset.Interface = &Clientset{} + _ testing.FakeClient = &Clientset{} +) + +// K8sV1 retrieves the K8sV1Client +func (c *Clientset) K8sV1() k8sv1.K8sV1Interface { + return &fakek8sv1.FakeK8sV1{Fake: &c.Fake} +} diff --git a/go-controller/pkg/crd/egressqos/v1/apis/clientset/versioned/fake/doc.go b/go-controller/pkg/crd/egressqos/v1/apis/clientset/versioned/fake/doc.go new file mode 100644 index 0000000000..9b99e71670 --- /dev/null +++ b/go-controller/pkg/crd/egressqos/v1/apis/clientset/versioned/fake/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated fake clientset. +package fake diff --git a/go-controller/pkg/crd/egressqos/v1/apis/clientset/versioned/fake/register.go b/go-controller/pkg/crd/egressqos/v1/apis/clientset/versioned/fake/register.go new file mode 100644 index 0000000000..a8c27c14a3 --- /dev/null +++ b/go-controller/pkg/crd/egressqos/v1/apis/clientset/versioned/fake/register.go @@ -0,0 +1,56 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package fake + +import ( + k8sv1 "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + schema "k8s.io/apimachinery/pkg/runtime/schema" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" +) + +var scheme = runtime.NewScheme() +var codecs = serializer.NewCodecFactory(scheme) + +var localSchemeBuilder = runtime.SchemeBuilder{ + k8sv1.AddToScheme, +} + +// AddToScheme adds all types of this clientset into the given scheme. This allows composition +// of clientsets, like in: +// +// import ( +// "k8s.io/client-go/kubernetes" +// clientsetscheme "k8s.io/client-go/kubernetes/scheme" +// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" +// ) +// +// kclientset, _ := kubernetes.NewForConfig(c) +// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) +// +// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types +// correctly. +var AddToScheme = localSchemeBuilder.AddToScheme + +func init() { + v1.AddToGroupVersion(scheme, schema.GroupVersion{Version: "v1"}) + utilruntime.Must(AddToScheme(scheme)) +} diff --git a/go-controller/pkg/crd/egressqos/v1/apis/clientset/versioned/scheme/doc.go b/go-controller/pkg/crd/egressqos/v1/apis/clientset/versioned/scheme/doc.go new file mode 100644 index 0000000000..7dc3756168 --- /dev/null +++ b/go-controller/pkg/crd/egressqos/v1/apis/clientset/versioned/scheme/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package contains the scheme of the automatically generated clientset. +package scheme diff --git a/go-controller/pkg/crd/egressqos/v1/apis/clientset/versioned/scheme/register.go b/go-controller/pkg/crd/egressqos/v1/apis/clientset/versioned/scheme/register.go new file mode 100644 index 0000000000..b1ff884313 --- /dev/null +++ b/go-controller/pkg/crd/egressqos/v1/apis/clientset/versioned/scheme/register.go @@ -0,0 +1,56 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package scheme + +import ( + k8sv1 "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + schema "k8s.io/apimachinery/pkg/runtime/schema" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" +) + +var Scheme = runtime.NewScheme() +var Codecs = serializer.NewCodecFactory(Scheme) +var ParameterCodec = runtime.NewParameterCodec(Scheme) +var localSchemeBuilder = runtime.SchemeBuilder{ + k8sv1.AddToScheme, +} + +// AddToScheme adds all types of this clientset into the given scheme. This allows composition +// of clientsets, like in: +// +// import ( +// "k8s.io/client-go/kubernetes" +// clientsetscheme "k8s.io/client-go/kubernetes/scheme" +// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" +// ) +// +// kclientset, _ := kubernetes.NewForConfig(c) +// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) +// +// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types +// correctly. +var AddToScheme = localSchemeBuilder.AddToScheme + +func init() { + v1.AddToGroupVersion(Scheme, schema.GroupVersion{Version: "v1"}) + utilruntime.Must(AddToScheme(Scheme)) +} diff --git a/go-controller/pkg/crd/egressqos/v1/apis/clientset/versioned/typed/egressqos/v1/doc.go b/go-controller/pkg/crd/egressqos/v1/apis/clientset/versioned/typed/egressqos/v1/doc.go new file mode 100644 index 0000000000..3af5d054f1 --- /dev/null +++ b/go-controller/pkg/crd/egressqos/v1/apis/clientset/versioned/typed/egressqos/v1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. +package v1 diff --git a/go-controller/pkg/crd/egressqos/v1/apis/clientset/versioned/typed/egressqos/v1/egressqos.go b/go-controller/pkg/crd/egressqos/v1/apis/clientset/versioned/typed/egressqos/v1/egressqos.go new file mode 100644 index 0000000000..dcdd2b77b1 --- /dev/null +++ b/go-controller/pkg/crd/egressqos/v1/apis/clientset/versioned/typed/egressqos/v1/egressqos.go @@ -0,0 +1,195 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package v1 + +import ( + "context" + "time" + + v1 "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1" + scheme "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1/apis/clientset/versioned/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// EgressQoSesGetter has a method to return a EgressQoSInterface. +// A group's client should implement this interface. +type EgressQoSesGetter interface { + EgressQoSes(namespace string) EgressQoSInterface +} + +// EgressQoSInterface has methods to work with EgressQoS resources. +type EgressQoSInterface interface { + Create(ctx context.Context, egressQoS *v1.EgressQoS, opts metav1.CreateOptions) (*v1.EgressQoS, error) + Update(ctx context.Context, egressQoS *v1.EgressQoS, opts metav1.UpdateOptions) (*v1.EgressQoS, error) + UpdateStatus(ctx context.Context, egressQoS *v1.EgressQoS, opts metav1.UpdateOptions) (*v1.EgressQoS, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.EgressQoS, error) + List(ctx context.Context, opts metav1.ListOptions) (*v1.EgressQoSList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.EgressQoS, err error) + EgressQoSExpansion +} + +// egressQoSes implements EgressQoSInterface +type egressQoSes struct { + client rest.Interface + ns string +} + +// newEgressQoSes returns a EgressQoSes +func newEgressQoSes(c *K8sV1Client, namespace string) *egressQoSes { + return &egressQoSes{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the egressQoS, and returns 
the corresponding egressQoS object, and an error if there is any. +func (c *egressQoSes) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.EgressQoS, err error) { + result = &v1.EgressQoS{} + err = c.client.Get(). + Namespace(c.ns). + Resource("egressqoses"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of EgressQoSes that match those selectors. +func (c *egressQoSes) List(ctx context.Context, opts metav1.ListOptions) (result *v1.EgressQoSList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1.EgressQoSList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("egressqoses"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested egressQoSes. +func (c *egressQoSes) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("egressqoses"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a egressQoS and creates it. Returns the server's representation of the egressQoS, and an error, if there is any. +func (c *egressQoSes) Create(ctx context.Context, egressQoS *v1.EgressQoS, opts metav1.CreateOptions) (result *v1.EgressQoS, err error) { + result = &v1.EgressQoS{} + err = c.client.Post(). + Namespace(c.ns). + Resource("egressqoses"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(egressQoS). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a egressQoS and updates it. 
Returns the server's representation of the egressQoS, and an error, if there is any. +func (c *egressQoSes) Update(ctx context.Context, egressQoS *v1.EgressQoS, opts metav1.UpdateOptions) (result *v1.EgressQoS, err error) { + result = &v1.EgressQoS{} + err = c.client.Put(). + Namespace(c.ns). + Resource("egressqoses"). + Name(egressQoS.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(egressQoS). + Do(ctx). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *egressQoSes) UpdateStatus(ctx context.Context, egressQoS *v1.EgressQoS, opts metav1.UpdateOptions) (result *v1.EgressQoS, err error) { + result = &v1.EgressQoS{} + err = c.client.Put(). + Namespace(c.ns). + Resource("egressqoses"). + Name(egressQoS.Name). + SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(egressQoS). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the egressQoS and deletes it. Returns an error if one occurs. +func (c *egressQoSes) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("egressqoses"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *egressQoSes) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("egressqoses"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched egressQoS. 
+func (c *egressQoSes) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.EgressQoS, err error) { + result = &v1.EgressQoS{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("egressqoses"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/go-controller/pkg/crd/egressqos/v1/apis/clientset/versioned/typed/egressqos/v1/egressqos_client.go b/go-controller/pkg/crd/egressqos/v1/apis/clientset/versioned/typed/egressqos/v1/egressqos_client.go new file mode 100644 index 0000000000..ddfc4afa51 --- /dev/null +++ b/go-controller/pkg/crd/egressqos/v1/apis/clientset/versioned/typed/egressqos/v1/egressqos_client.go @@ -0,0 +1,89 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1/apis/clientset/versioned/scheme" + rest "k8s.io/client-go/rest" +) + +type K8sV1Interface interface { + RESTClient() rest.Interface + EgressQoSesGetter +} + +// K8sV1Client is used to interact with features provided by the k8s.ovn.org group. 
+type K8sV1Client struct { + restClient rest.Interface +} + +func (c *K8sV1Client) EgressQoSes(namespace string) EgressQoSInterface { + return newEgressQoSes(c, namespace) +} + +// NewForConfig creates a new K8sV1Client for the given config. +func NewForConfig(c *rest.Config) (*K8sV1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &K8sV1Client{client}, nil +} + +// NewForConfigOrDie creates a new K8sV1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *K8sV1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new K8sV1Client for the given RESTClient. +func New(c rest.Interface) *K8sV1Client { + return &K8sV1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *K8sV1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/go-controller/pkg/crd/egressqos/v1/apis/clientset/versioned/typed/egressqos/v1/fake/doc.go b/go-controller/pkg/crd/egressqos/v1/apis/clientset/versioned/typed/egressqos/v1/fake/doc.go new file mode 100644 index 0000000000..16f4439906 --- /dev/null +++ b/go-controller/pkg/crd/egressqos/v1/apis/clientset/versioned/typed/egressqos/v1/fake/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// Package fake has the automatically generated clients. +package fake diff --git a/go-controller/pkg/crd/egressqos/v1/apis/clientset/versioned/typed/egressqos/v1/fake/fake_egressqos.go b/go-controller/pkg/crd/egressqos/v1/apis/clientset/versioned/typed/egressqos/v1/fake/fake_egressqos.go new file mode 100644 index 0000000000..f6107c543f --- /dev/null +++ b/go-controller/pkg/crd/egressqos/v1/apis/clientset/versioned/typed/egressqos/v1/fake/fake_egressqos.go @@ -0,0 +1,142 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package fake + +import ( + "context" + + egressqosv1 "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeEgressQoSes implements EgressQoSInterface +type FakeEgressQoSes struct { + Fake *FakeK8sV1 + ns string +} + +var egressqosesResource = schema.GroupVersionResource{Group: "k8s.ovn.org", Version: "v1", Resource: "egressqoses"} + +var egressqosesKind = schema.GroupVersionKind{Group: "k8s.ovn.org", Version: "v1", Kind: "EgressQoS"} + +// Get takes name of the egressQoS, and returns the corresponding egressQoS object, and an error if there is any. +func (c *FakeEgressQoSes) Get(ctx context.Context, name string, options v1.GetOptions) (result *egressqosv1.EgressQoS, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(egressqosesResource, c.ns, name), &egressqosv1.EgressQoS{}) + + if obj == nil { + return nil, err + } + return obj.(*egressqosv1.EgressQoS), err +} + +// List takes label and field selectors, and returns the list of EgressQoSes that match those selectors. +func (c *FakeEgressQoSes) List(ctx context.Context, opts v1.ListOptions) (result *egressqosv1.EgressQoSList, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewListAction(egressqosesResource, egressqosesKind, c.ns, opts), &egressqosv1.EgressQoSList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &egressqosv1.EgressQoSList{ListMeta: obj.(*egressqosv1.EgressQoSList).ListMeta} + for _, item := range obj.(*egressqosv1.EgressQoSList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested egressQoSes. +func (c *FakeEgressQoSes) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(egressqosesResource, c.ns, opts)) + +} + +// Create takes the representation of a egressQoS and creates it. Returns the server's representation of the egressQoS, and an error, if there is any. +func (c *FakeEgressQoSes) Create(ctx context.Context, egressQoS *egressqosv1.EgressQoS, opts v1.CreateOptions) (result *egressqosv1.EgressQoS, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(egressqosesResource, c.ns, egressQoS), &egressqosv1.EgressQoS{}) + + if obj == nil { + return nil, err + } + return obj.(*egressqosv1.EgressQoS), err +} + +// Update takes the representation of a egressQoS and updates it. Returns the server's representation of the egressQoS, and an error, if there is any. +func (c *FakeEgressQoSes) Update(ctx context.Context, egressQoS *egressqosv1.EgressQoS, opts v1.UpdateOptions) (result *egressqosv1.EgressQoS, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(egressqosesResource, c.ns, egressQoS), &egressqosv1.EgressQoS{}) + + if obj == nil { + return nil, err + } + return obj.(*egressqosv1.EgressQoS), err +} + +// UpdateStatus was generated because the type contains a Status member. 
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeEgressQoSes) UpdateStatus(ctx context.Context, egressQoS *egressqosv1.EgressQoS, opts v1.UpdateOptions) (*egressqosv1.EgressQoS, error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateSubresourceAction(egressqosesResource, "status", c.ns, egressQoS), &egressqosv1.EgressQoS{}) + + if obj == nil { + return nil, err + } + return obj.(*egressqosv1.EgressQoS), err +} + +// Delete takes name of the egressQoS and deletes it. Returns an error if one occurs. +func (c *FakeEgressQoSes) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteAction(egressqosesResource, c.ns, name), &egressqosv1.EgressQoS{}) + + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeEgressQoSes) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(egressqosesResource, c.ns, listOpts) + + _, err := c.Fake.Invokes(action, &egressqosv1.EgressQoSList{}) + return err +} + +// Patch applies the patch and returns the patched egressQoS. +func (c *FakeEgressQoSes) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *egressqosv1.EgressQoS, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewPatchSubresourceAction(egressqosesResource, c.ns, name, pt, data, subresources...), &egressqosv1.EgressQoS{}) + + if obj == nil { + return nil, err + } + return obj.(*egressqosv1.EgressQoS), err +} diff --git a/go-controller/pkg/crd/egressqos/v1/apis/clientset/versioned/typed/egressqos/v1/fake/fake_egressqos_client.go b/go-controller/pkg/crd/egressqos/v1/apis/clientset/versioned/typed/egressqos/v1/fake/fake_egressqos_client.go new file mode 100644 index 0000000000..44a11afe35 --- /dev/null +++ b/go-controller/pkg/crd/egressqos/v1/apis/clientset/versioned/typed/egressqos/v1/fake/fake_egressqos_client.go @@ -0,0 +1,40 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + v1 "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1/apis/clientset/versioned/typed/egressqos/v1" + rest "k8s.io/client-go/rest" + testing "k8s.io/client-go/testing" +) + +type FakeK8sV1 struct { + *testing.Fake +} + +func (c *FakeK8sV1) EgressQoSes(namespace string) v1.EgressQoSInterface { + return &FakeEgressQoSes{c, namespace} +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. 
+func (c *FakeK8sV1) RESTClient() rest.Interface { + var ret *rest.RESTClient + return ret +} diff --git a/go-controller/pkg/crd/egressqos/v1/apis/clientset/versioned/typed/egressqos/v1/generated_expansion.go b/go-controller/pkg/crd/egressqos/v1/apis/clientset/versioned/typed/egressqos/v1/generated_expansion.go new file mode 100644 index 0000000000..6d5de7124c --- /dev/null +++ b/go-controller/pkg/crd/egressqos/v1/apis/clientset/versioned/typed/egressqos/v1/generated_expansion.go @@ -0,0 +1,21 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +type EgressQoSExpansion interface{} diff --git a/go-controller/pkg/crd/egressqos/v1/apis/informers/externalversions/egressqos/interface.go b/go-controller/pkg/crd/egressqos/v1/apis/informers/externalversions/egressqos/interface.go new file mode 100644 index 0000000000..47740b1ce8 --- /dev/null +++ b/go-controller/pkg/crd/egressqos/v1/apis/informers/externalversions/egressqos/interface.go @@ -0,0 +1,46 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package egressqos + +import ( + v1 "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1/apis/informers/externalversions/egressqos/v1" + internalinterfaces "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1/apis/informers/externalversions/internalinterfaces" +) + +// Interface provides access to each of this group's versions. +type Interface interface { + // V1 provides access to shared informers for resources in V1. + V1() v1.Interface +} + +type group struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// V1 returns a new v1.Interface. +func (g *group) V1() v1.Interface { + return v1.New(g.factory, g.namespace, g.tweakListOptions) +} diff --git a/go-controller/pkg/crd/egressqos/v1/apis/informers/externalversions/egressqos/v1/egressqos.go b/go-controller/pkg/crd/egressqos/v1/apis/informers/externalversions/egressqos/v1/egressqos.go new file mode 100644 index 0000000000..5f04d6eb93 --- /dev/null +++ b/go-controller/pkg/crd/egressqos/v1/apis/informers/externalversions/egressqos/v1/egressqos.go @@ -0,0 +1,90 @@ +/* +Copyright The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + "context" + time "time" + + egressqosv1 "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1" + versioned "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1/apis/clientset/versioned" + internalinterfaces "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1/apis/informers/externalversions/internalinterfaces" + v1 "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1/apis/listers/egressqos/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// EgressQoSInformer provides access to a shared informer and lister for +// EgressQoSes. +type EgressQoSInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.EgressQoSLister +} + +type egressQoSInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewEgressQoSInformer constructs a new informer for EgressQoS type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewEgressQoSInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredEgressQoSInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredEgressQoSInformer constructs a new informer for EgressQoS type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredEgressQoSInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.K8sV1().EgressQoSes(namespace).List(context.TODO(), options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.K8sV1().EgressQoSes(namespace).Watch(context.TODO(), options) + }, + }, + &egressqosv1.EgressQoS{}, + resyncPeriod, + indexers, + ) +} + +func (f *egressQoSInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredEgressQoSInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *egressQoSInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&egressqosv1.EgressQoS{}, f.defaultInformer) +} + +func (f *egressQoSInformer) Lister() v1.EgressQoSLister { + return v1.NewEgressQoSLister(f.Informer().GetIndexer()) +} diff --git a/go-controller/pkg/crd/egressqos/v1/apis/informers/externalversions/egressqos/v1/interface.go 
b/go-controller/pkg/crd/egressqos/v1/apis/informers/externalversions/egressqos/v1/interface.go new file mode 100644 index 0000000000..4e608f27f6 --- /dev/null +++ b/go-controller/pkg/crd/egressqos/v1/apis/informers/externalversions/egressqos/v1/interface.go @@ -0,0 +1,45 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + internalinterfaces "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1/apis/informers/externalversions/internalinterfaces" +) + +// Interface provides access to all the informers in this group version. +type Interface interface { + // EgressQoSes returns a EgressQoSInformer. + EgressQoSes() EgressQoSInformer +} + +type version struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// EgressQoSes returns a EgressQoSInformer. 
+func (v *version) EgressQoSes() EgressQoSInformer { + return &egressQoSInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} diff --git a/go-controller/pkg/crd/egressqos/v1/apis/informers/externalversions/factory.go b/go-controller/pkg/crd/egressqos/v1/apis/informers/externalversions/factory.go new file mode 100644 index 0000000000..7eaa9060f1 --- /dev/null +++ b/go-controller/pkg/crd/egressqos/v1/apis/informers/externalversions/factory.go @@ -0,0 +1,180 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package externalversions + +import ( + reflect "reflect" + sync "sync" + time "time" + + versioned "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1/apis/clientset/versioned" + egressqos "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1/apis/informers/externalversions/egressqos" + internalinterfaces "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1/apis/informers/externalversions/internalinterfaces" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + schema "k8s.io/apimachinery/pkg/runtime/schema" + cache "k8s.io/client-go/tools/cache" +) + +// SharedInformerOption defines the functional option type for SharedInformerFactory. 
+type SharedInformerOption func(*sharedInformerFactory) *sharedInformerFactory + +type sharedInformerFactory struct { + client versioned.Interface + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc + lock sync.Mutex + defaultResync time.Duration + customResync map[reflect.Type]time.Duration + + informers map[reflect.Type]cache.SharedIndexInformer + // startedInformers is used for tracking which informers have been started. + // This allows Start() to be called multiple times safely. + startedInformers map[reflect.Type]bool +} + +// WithCustomResyncConfig sets a custom resync period for the specified informer types. +func WithCustomResyncConfig(resyncConfig map[v1.Object]time.Duration) SharedInformerOption { + return func(factory *sharedInformerFactory) *sharedInformerFactory { + for k, v := range resyncConfig { + factory.customResync[reflect.TypeOf(k)] = v + } + return factory + } +} + +// WithTweakListOptions sets a custom filter on all listers of the configured SharedInformerFactory. +func WithTweakListOptions(tweakListOptions internalinterfaces.TweakListOptionsFunc) SharedInformerOption { + return func(factory *sharedInformerFactory) *sharedInformerFactory { + factory.tweakListOptions = tweakListOptions + return factory + } +} + +// WithNamespace limits the SharedInformerFactory to the specified namespace. +func WithNamespace(namespace string) SharedInformerOption { + return func(factory *sharedInformerFactory) *sharedInformerFactory { + factory.namespace = namespace + return factory + } +} + +// NewSharedInformerFactory constructs a new instance of sharedInformerFactory for all namespaces. +func NewSharedInformerFactory(client versioned.Interface, defaultResync time.Duration) SharedInformerFactory { + return NewSharedInformerFactoryWithOptions(client, defaultResync) +} + +// NewFilteredSharedInformerFactory constructs a new instance of sharedInformerFactory. 
+// Listers obtained via this SharedInformerFactory will be subject to the same filters +// as specified here. +// Deprecated: Please use NewSharedInformerFactoryWithOptions instead +func NewFilteredSharedInformerFactory(client versioned.Interface, defaultResync time.Duration, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) SharedInformerFactory { + return NewSharedInformerFactoryWithOptions(client, defaultResync, WithNamespace(namespace), WithTweakListOptions(tweakListOptions)) +} + +// NewSharedInformerFactoryWithOptions constructs a new instance of a SharedInformerFactory with additional options. +func NewSharedInformerFactoryWithOptions(client versioned.Interface, defaultResync time.Duration, options ...SharedInformerOption) SharedInformerFactory { + factory := &sharedInformerFactory{ + client: client, + namespace: v1.NamespaceAll, + defaultResync: defaultResync, + informers: make(map[reflect.Type]cache.SharedIndexInformer), + startedInformers: make(map[reflect.Type]bool), + customResync: make(map[reflect.Type]time.Duration), + } + + // Apply all options + for _, opt := range options { + factory = opt(factory) + } + + return factory +} + +// Start initializes all requested informers. +func (f *sharedInformerFactory) Start(stopCh <-chan struct{}) { + f.lock.Lock() + defer f.lock.Unlock() + + for informerType, informer := range f.informers { + if !f.startedInformers[informerType] { + go informer.Run(stopCh) + f.startedInformers[informerType] = true + } + } +} + +// WaitForCacheSync waits for all started informers' cache were synced. 
+func (f *sharedInformerFactory) WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool { + informers := func() map[reflect.Type]cache.SharedIndexInformer { + f.lock.Lock() + defer f.lock.Unlock() + + informers := map[reflect.Type]cache.SharedIndexInformer{} + for informerType, informer := range f.informers { + if f.startedInformers[informerType] { + informers[informerType] = informer + } + } + return informers + }() + + res := map[reflect.Type]bool{} + for informType, informer := range informers { + res[informType] = cache.WaitForCacheSync(stopCh, informer.HasSynced) + } + return res +} + +// InternalInformerFor returns the SharedIndexInformer for obj using an internal +// client. +func (f *sharedInformerFactory) InformerFor(obj runtime.Object, newFunc internalinterfaces.NewInformerFunc) cache.SharedIndexInformer { + f.lock.Lock() + defer f.lock.Unlock() + + informerType := reflect.TypeOf(obj) + informer, exists := f.informers[informerType] + if exists { + return informer + } + + resyncPeriod, exists := f.customResync[informerType] + if !exists { + resyncPeriod = f.defaultResync + } + + informer = newFunc(f.client, resyncPeriod) + f.informers[informerType] = informer + + return informer +} + +// SharedInformerFactory provides shared informers for resources in all known +// API group versions. 
+type SharedInformerFactory interface { + internalinterfaces.SharedInformerFactory + ForResource(resource schema.GroupVersionResource) (GenericInformer, error) + WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool + + K8s() egressqos.Interface +} + +func (f *sharedInformerFactory) K8s() egressqos.Interface { + return egressqos.New(f, f.namespace, f.tweakListOptions) +} diff --git a/go-controller/pkg/crd/egressqos/v1/apis/informers/externalversions/generic.go b/go-controller/pkg/crd/egressqos/v1/apis/informers/externalversions/generic.go new file mode 100644 index 0000000000..826e442a7a --- /dev/null +++ b/go-controller/pkg/crd/egressqos/v1/apis/informers/externalversions/generic.go @@ -0,0 +1,62 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package externalversions + +import ( + "fmt" + + v1 "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1" + schema "k8s.io/apimachinery/pkg/runtime/schema" + cache "k8s.io/client-go/tools/cache" +) + +// GenericInformer is type of SharedIndexInformer which will locate and delegate to other +// sharedInformers based on type +type GenericInformer interface { + Informer() cache.SharedIndexInformer + Lister() cache.GenericLister +} + +type genericInformer struct { + informer cache.SharedIndexInformer + resource schema.GroupResource +} + +// Informer returns the SharedIndexInformer. 
+func (f *genericInformer) Informer() cache.SharedIndexInformer { + return f.informer +} + +// Lister returns the GenericLister. +func (f *genericInformer) Lister() cache.GenericLister { + return cache.NewGenericLister(f.Informer().GetIndexer(), f.resource) +} + +// ForResource gives generic access to a shared informer of the matching type +// TODO extend this to unknown resources with a client pool +func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource) (GenericInformer, error) { + switch resource { + // Group=k8s.ovn.org, Version=v1 + case v1.SchemeGroupVersion.WithResource("egressqoses"): + return &genericInformer{resource: resource.GroupResource(), informer: f.K8s().V1().EgressQoSes().Informer()}, nil + + } + + return nil, fmt.Errorf("no informer found for %v", resource) +} diff --git a/go-controller/pkg/crd/egressqos/v1/apis/informers/externalversions/internalinterfaces/factory_interfaces.go b/go-controller/pkg/crd/egressqos/v1/apis/informers/externalversions/internalinterfaces/factory_interfaces.go new file mode 100644 index 0000000000..1a10112b04 --- /dev/null +++ b/go-controller/pkg/crd/egressqos/v1/apis/informers/externalversions/internalinterfaces/factory_interfaces.go @@ -0,0 +1,40 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. 
+ +package internalinterfaces + +import ( + time "time" + + versioned "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1/apis/clientset/versioned" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + cache "k8s.io/client-go/tools/cache" +) + +// NewInformerFunc takes versioned.Interface and time.Duration to return a SharedIndexInformer. +type NewInformerFunc func(versioned.Interface, time.Duration) cache.SharedIndexInformer + +// SharedInformerFactory a small interface to allow for adding an informer without an import cycle +type SharedInformerFactory interface { + Start(stopCh <-chan struct{}) + InformerFor(obj runtime.Object, newFunc NewInformerFunc) cache.SharedIndexInformer +} + +// TweakListOptionsFunc is a function that transforms a v1.ListOptions. +type TweakListOptionsFunc func(*v1.ListOptions) diff --git a/go-controller/pkg/crd/egressqos/v1/apis/listers/egressqos/v1/egressqos.go b/go-controller/pkg/crd/egressqos/v1/apis/listers/egressqos/v1/egressqos.go new file mode 100644 index 0000000000..d5ec539ea9 --- /dev/null +++ b/go-controller/pkg/crd/egressqos/v1/apis/listers/egressqos/v1/egressqos.go @@ -0,0 +1,99 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. 
+ +package v1 + +import ( + v1 "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// EgressQoSLister helps list EgressQoSes. +// All objects returned here must be treated as read-only. +type EgressQoSLister interface { + // List lists all EgressQoSes in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1.EgressQoS, err error) + // EgressQoSes returns an object that can list and get EgressQoSes. + EgressQoSes(namespace string) EgressQoSNamespaceLister + EgressQoSListerExpansion +} + +// egressQoSLister implements the EgressQoSLister interface. +type egressQoSLister struct { + indexer cache.Indexer +} + +// NewEgressQoSLister returns a new EgressQoSLister. +func NewEgressQoSLister(indexer cache.Indexer) EgressQoSLister { + return &egressQoSLister{indexer: indexer} +} + +// List lists all EgressQoSes in the indexer. +func (s *egressQoSLister) List(selector labels.Selector) (ret []*v1.EgressQoS, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.EgressQoS)) + }) + return ret, err +} + +// EgressQoSes returns an object that can list and get EgressQoSes. +func (s *egressQoSLister) EgressQoSes(namespace string) EgressQoSNamespaceLister { + return egressQoSNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// EgressQoSNamespaceLister helps list and get EgressQoSes. +// All objects returned here must be treated as read-only. +type EgressQoSNamespaceLister interface { + // List lists all EgressQoSes in the indexer for a given namespace. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1.EgressQoS, err error) + // Get retrieves the EgressQoS from the indexer for a given namespace and name. + // Objects returned here must be treated as read-only. 
+ Get(name string) (*v1.EgressQoS, error) + EgressQoSNamespaceListerExpansion +} + +// egressQoSNamespaceLister implements the EgressQoSNamespaceLister +// interface. +type egressQoSNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all EgressQoSes in the indexer for a given namespace. +func (s egressQoSNamespaceLister) List(selector labels.Selector) (ret []*v1.EgressQoS, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1.EgressQoS)) + }) + return ret, err +} + +// Get retrieves the EgressQoS from the indexer for a given namespace and name. +func (s egressQoSNamespaceLister) Get(name string) (*v1.EgressQoS, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("egressqos"), name) + } + return obj.(*v1.EgressQoS), nil +} diff --git a/go-controller/pkg/crd/egressqos/v1/apis/listers/egressqos/v1/expansion_generated.go b/go-controller/pkg/crd/egressqos/v1/apis/listers/egressqos/v1/expansion_generated.go new file mode 100644 index 0000000000..433562f362 --- /dev/null +++ b/go-controller/pkg/crd/egressqos/v1/apis/listers/egressqos/v1/expansion_generated.go @@ -0,0 +1,27 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. 
+ +package v1 + +// EgressQoSListerExpansion allows custom methods to be added to +// EgressQoSLister. +type EgressQoSListerExpansion interface{} + +// EgressQoSNamespaceListerExpansion allows custom methods to be added to +// EgressQoSNamespaceLister. +type EgressQoSNamespaceListerExpansion interface{} diff --git a/go-controller/pkg/crd/egressqos/v1/doc.go b/go-controller/pkg/crd/egressqos/v1/doc.go new file mode 100644 index 0000000000..5703f91c44 --- /dev/null +++ b/go-controller/pkg/crd/egressqos/v1/doc.go @@ -0,0 +1,4 @@ +// Package v1 contains API Schema definitions for the network v1 API group +// +k8s:deepcopy-gen=package +// +groupName=k8s.ovn.org +package v1 diff --git a/go-controller/pkg/crd/egressqos/v1/register.go b/go-controller/pkg/crd/egressqos/v1/register.go new file mode 100644 index 0000000000..b2cd988275 --- /dev/null +++ b/go-controller/pkg/crd/egressqos/v1/register.go @@ -0,0 +1,34 @@ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +var ( + GroupName = "k8s.ovn.org" + SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + AddToScheme = SchemeBuilder.AddToScheme +) + +// Kind takes an unqualified kind and returns back a Group qualified GroupKind +func Kind(kind string) schema.GroupKind { + return SchemeGroupVersion.WithKind(kind).GroupKind() +} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +// Adds the list of known types to api.Scheme. 
+func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &EgressQoS{}, + &EgressQoSList{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/go-controller/pkg/crd/egressqos/v1/types.go b/go-controller/pkg/crd/egressqos/v1/types.go new file mode 100644 index 0000000000..a90ab14ffc --- /dev/null +++ b/go-controller/pkg/crd/egressqos/v1/types.go @@ -0,0 +1,85 @@ +/* +Copyright 2022. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:resource:path=egressqoses +// +kubebuilder::singular=egressqos +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// EgressQoS is a CRD that allows the user to define a DSCP value +// for pods egress traffic on its namespace to specified CIDRs. +// Traffic from these pods will be checked against each EgressQoSRule in +// the namespace's EgressQoS, and if there is a match the traffic is marked +// with the relevant DSCP value. +type EgressQoS struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec EgressQoSSpec `json:"spec,omitempty"` + Status EgressQoSStatus `json:"status,omitempty"` +} + +// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! +// NOTE: json tags are required. 
Any new fields you add must have json tags for the fields to be serialized. + +// EgressQoSSpec defines the desired state of EgressQoS +type EgressQoSSpec struct { + // a collection of Egress QoS rule objects + Egress []EgressQoSRule `json:"egress"` +} + +type EgressQoSRule struct { + // DSCP marking value for matching pods' traffic. + // +kubebuilder:validation:Maximum:=63 + // +kubebuilder:validation:Minimum:=0 + DSCP int `json:"dscp"` + + // DstCIDR specifies the destination's CIDR. Only traffic heading + // to this CIDR will be marked with the DSCP value. + // This field is optional, and in case it is not set the rule is applied + // to all egress traffic regardless of the destination. + // +optional + DstCIDR *string `json:"dstCIDR,omitempty"` + + // PodSelector applies the QoS rule only to the pods in the namespace whose label + // matches this definition. This field is optional, and in case it is not set + // results in the rule being applied to all pods in the namespace. + // +optional + PodSelector metav1.LabelSelector `json:"podSelector,omitempty"` +} + +// EgressQoSStatus defines the observed state of EgressQoS +type EgressQoSStatus struct { + // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster + // Important: Run "make" to regenerate code after modifying this file +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:resource:path=egressqoses +// +kubebuilder::singular=egressqos +// EgressQoSList contains a list of EgressQoS +type EgressQoSList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []EgressQoS `json:"items"` +} diff --git a/go-controller/pkg/crd/egressqos/v1/zz_generated.deepcopy.go b/go-controller/pkg/crd/egressqos/v1/zz_generated.deepcopy.go new file mode 100644 index 0000000000..2411bb1fd1 --- /dev/null +++ b/go-controller/pkg/crd/egressqos/v1/zz_generated.deepcopy.go @@ -0,0 +1,148 @@ +//go:build !ignore_autogenerated +// +build 
!ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EgressQoS) DeepCopyInto(out *EgressQoS) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + out.Status = in.Status + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EgressQoS. +func (in *EgressQoS) DeepCopy() *EgressQoS { + if in == nil { + return nil + } + out := new(EgressQoS) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *EgressQoS) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EgressQoSList) DeepCopyInto(out *EgressQoSList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]EgressQoS, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EgressQoSList. +func (in *EgressQoSList) DeepCopy() *EgressQoSList { + if in == nil { + return nil + } + out := new(EgressQoSList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *EgressQoSList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EgressQoSRule) DeepCopyInto(out *EgressQoSRule) { + *out = *in + if in.DstCIDR != nil { + in, out := &in.DstCIDR, &out.DstCIDR + *out = new(string) + **out = **in + } + in.PodSelector.DeepCopyInto(&out.PodSelector) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EgressQoSRule. +func (in *EgressQoSRule) DeepCopy() *EgressQoSRule { + if in == nil { + return nil + } + out := new(EgressQoSRule) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EgressQoSSpec) DeepCopyInto(out *EgressQoSSpec) { + *out = *in + if in.Egress != nil { + in, out := &in.Egress, &out.Egress + *out = make([]EgressQoSRule, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EgressQoSSpec. 
+func (in *EgressQoSSpec) DeepCopy() *EgressQoSSpec { + if in == nil { + return nil + } + out := new(EgressQoSSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EgressQoSStatus) DeepCopyInto(out *EgressQoSStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EgressQoSStatus. +func (in *EgressQoSStatus) DeepCopy() *EgressQoSStatus { + if in == nil { + return nil + } + out := new(EgressQoSStatus) + in.DeepCopyInto(out) + return out +} diff --git a/go-controller/pkg/factory/factory.go b/go-controller/pkg/factory/factory.go index 45ea03b69d..9b53403746 100644 --- a/go-controller/pkg/factory/factory.go +++ b/go-controller/pkg/factory/factory.go @@ -7,11 +7,11 @@ import ( "time" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" - "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" - egressfirewallapi "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1" egressfirewallscheme "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/clientset/versioned/scheme" egressfirewallinformerfactory "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions" + egressfirewalllister "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/listers/egressfirewall/v1" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" egressipapi "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressip/v1" egressipscheme "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressip/v1/apis/clientset/versioned/scheme" @@ -22,6 +22,11 @@ import ( ocpcloudnetworkinformerfactory "github.com/openshift/client-go/cloudnetwork/informers/externalversions" ocpcloudnetworklister "github.com/openshift/client-go/cloudnetwork/listers/cloudnetwork/v1" + egressqosapi 
"github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1" + egressqosscheme "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1/apis/clientset/versioned/scheme" + egressqosinformerfactory "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1/apis/informers/externalversions" + egressqosinformer "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1/apis/informers/externalversions/egressqos/v1" + kapi "k8s.io/api/core/v1" knet "k8s.io/api/networking/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -43,11 +48,12 @@ type WatchFactory struct { // requirements with atomic accesses handlerCounter uint64 - iFactory informerfactory.SharedInformerFactory - eipFactory egressipinformerfactory.SharedInformerFactory - efFactory egressfirewallinformerfactory.SharedInformerFactory - cpipcFactory ocpcloudnetworkinformerfactory.SharedInformerFactory - informers map[reflect.Type]*informer + iFactory informerfactory.SharedInformerFactory + eipFactory egressipinformerfactory.SharedInformerFactory + efFactory egressfirewallinformerfactory.SharedInformerFactory + cpipcFactory ocpcloudnetworkinformerfactory.SharedInformerFactory + egressQoSFactory egressqosinformerfactory.SharedInformerFactory + informers map[reflect.Type]*informer stopChan chan struct{} } @@ -88,6 +94,7 @@ var ( EgressFirewallType reflect.Type = reflect.TypeOf(&egressfirewallapi.EgressFirewall{}) EgressIPType reflect.Type = reflect.TypeOf(&egressipapi.EgressIP{}) CloudPrivateIPConfigType reflect.Type = reflect.TypeOf(&ocpcloudnetworkapi.CloudPrivateIPConfig{}) + EgressQoSType reflect.Type = reflect.TypeOf(&egressqosapi.EgressQoS{}) PeerServiceType reflect.Type = reflect.TypeOf(&peerService{}) PeerNamespaceAndPodSelectorType reflect.Type = reflect.TypeOf(&peerNamespaceAndPodSelector{}) PeerPodForNamespaceAndPodSelectorType reflect.Type = reflect.TypeOf(&peerPodForNamespaceAndPodSelector{}) @@ -105,12 +112,13 @@ func NewMasterWatchFactory(ovnClientset 
*util.OVNClientset) (*WatchFactory, erro // the downside of making it tight (like 10 minutes) is needless spinning on all resources // However, AddEventHandlerWithResyncPeriod can specify a per handler resync period wf := &WatchFactory{ - iFactory: informerfactory.NewSharedInformerFactory(ovnClientset.KubeClient, resyncInterval), - eipFactory: egressipinformerfactory.NewSharedInformerFactory(ovnClientset.EgressIPClient, resyncInterval), - efFactory: egressfirewallinformerfactory.NewSharedInformerFactory(ovnClientset.EgressFirewallClient, resyncInterval), - cpipcFactory: ocpcloudnetworkinformerfactory.NewSharedInformerFactory(ovnClientset.CloudNetworkClient, resyncInterval), - informers: make(map[reflect.Type]*informer), - stopChan: make(chan struct{}), + iFactory: informerfactory.NewSharedInformerFactory(ovnClientset.KubeClient, resyncInterval), + eipFactory: egressipinformerfactory.NewSharedInformerFactory(ovnClientset.EgressIPClient, resyncInterval), + efFactory: egressfirewallinformerfactory.NewSharedInformerFactory(ovnClientset.EgressFirewallClient, resyncInterval), + cpipcFactory: ocpcloudnetworkinformerfactory.NewSharedInformerFactory(ovnClientset.CloudNetworkClient, resyncInterval), + egressQoSFactory: egressqosinformerfactory.NewSharedInformerFactory(ovnClientset.EgressQoSClient, resyncInterval), + informers: make(map[reflect.Type]*informer), + stopChan: make(chan struct{}), } if err := egressipapi.AddToScheme(egressipscheme.Scheme); err != nil { @@ -119,6 +127,9 @@ func NewMasterWatchFactory(ovnClientset *util.OVNClientset) (*WatchFactory, erro if err := egressfirewallapi.AddToScheme(egressfirewallscheme.Scheme); err != nil { return nil, err } + if err := egressqosapi.AddToScheme(egressqosscheme.Scheme); err != nil { + return nil, err + } // For Services and Endpoints, pre-populate the shared Informer with one that // has a label selector excluding headless services. 
@@ -189,6 +200,13 @@ func NewMasterWatchFactory(ovnClientset *util.OVNClientset) (*WatchFactory, erro return nil, err } } + if config.OVNKubernetesFeature.EnableEgressQoS { + wf.informers[EgressQoSType], err = newInformer(EgressQoSType, wf.egressQoSFactory.K8s().V1().EgressQoSes().Informer()) + if err != nil { + return nil, err + } + } + return wf, nil } @@ -224,6 +242,15 @@ func (wf *WatchFactory) Start() error { } } } + if config.OVNKubernetesFeature.EnableEgressQoS && wf.egressQoSFactory != nil { + wf.egressQoSFactory.Start(wf.stopChan) + for oType, synced := range wf.egressQoSFactory.WaitForCacheSync(wf.stopChan) { + if !synced { + return fmt.Errorf("error in syncing cache for %v informer", oType) + } + } + } + return nil } @@ -380,6 +407,12 @@ func (wf *WatchFactory) GetResourceHandlerFunc(objType reflect.Type) (AddHandler funcs cache.ResourceEventHandler, processExisting func([]interface{})) *Handler { return wf.AddFilteredPodHandler(namespace, sel, funcs, processExisting) }, nil + + case EgressFirewallType: + return func(namespace string, sel labels.Selector, + funcs cache.ResourceEventHandler, processExisting func([]interface{})) *Handler { + return wf.AddEgressFirewallHandler(funcs, processExisting) + }, nil } return nil, fmt.Errorf("cannot get ObjectMeta from type %v", objType) } @@ -499,6 +532,11 @@ func (wf *WatchFactory) RemoveEgressFirewallHandler(handler *Handler) { wf.removeHandler(EgressFirewallType, handler) } +// RemoveEgressQoSHandler removes an EgressQoS object event handler function +func (wf *WatchFactory) RemoveEgressQoSHandler(handler *Handler) { + wf.removeHandler(EgressQoSType, handler) +} + // AddEgressIPHandler adds a handler function that will be executed on EgressIP object changes func (wf *WatchFactory) AddEgressIPHandler(handlerFuncs cache.ResourceEventHandler, processExisting func([]interface{})) *Handler { return wf.addHandler(EgressIPType, "", nil, handlerFuncs, processExisting) @@ -655,10 +693,19 @@ func (wf *WatchFactory) 
GetNetworkPolicy(namespace, name string) (*knet.NetworkP return networkPolicyLister.NetworkPolicies(namespace).Get(name) } +func (wf *WatchFactory) GetEgressFirewall(namespace, name string) (*egressfirewallapi.EgressFirewall, error) { + egressFirewallLister := wf.informers[EgressFirewallType].lister.(egressfirewalllister.EgressFirewallLister) + return egressFirewallLister.EgressFirewalls(namespace).Get(name) +} + func (wf *WatchFactory) NodeInformer() cache.SharedIndexInformer { return wf.informers[NodeType].inf } +func (wf *WatchFactory) NodeCoreInformer() v1coreinformers.NodeInformer { + return wf.iFactory.Core().V1().Nodes() +} + // LocalPodInformer returns a shared Informer that may or may not only // return pods running on the local node. func (wf *WatchFactory) LocalPodInformer() cache.SharedIndexInformer { @@ -669,6 +716,10 @@ func (wf *WatchFactory) PodInformer() cache.SharedIndexInformer { return wf.informers[PodType].inf } +func (wf *WatchFactory) PodCoreInformer() v1coreinformers.PodInformer { + return wf.iFactory.Core().V1().Pods() +} + func (wf *WatchFactory) NamespaceInformer() cache.SharedIndexInformer { return wf.informers[NamespaceType].inf } @@ -677,6 +728,10 @@ func (wf *WatchFactory) ServiceInformer() cache.SharedIndexInformer { return wf.informers[ServiceType].inf } +func (wf *WatchFactory) EgressQoSInformer() egressqosinformer.EgressQoSInformer { + return wf.egressQoSFactory.K8s().V1().EgressQoSes() +} + // noHeadlessServiceSelector is a LabelSelector added to the watch for // Endpoints (and, eventually, EndpointSlices) that excludes endpoints // for headless services. 
diff --git a/go-controller/pkg/factory/factory_test.go b/go-controller/pkg/factory/factory_test.go index ebec52bd32..2137eebd0e 100644 --- a/go-controller/pkg/factory/factory_test.go +++ b/go-controller/pkg/factory/factory_test.go @@ -17,6 +17,7 @@ import ( "k8s.io/client-go/kubernetes/fake" core "k8s.io/client-go/testing" "k8s.io/client-go/tools/cache" + "k8s.io/utils/pointer" egressfirewall "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1" egressfirewallfake "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/clientset/versioned/fake" @@ -26,6 +27,9 @@ import ( egressip "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressip/v1" egressipfake "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressip/v1/apis/clientset/versioned/fake" + egressqos "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1" + egressqosfake "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1/apis/clientset/versioned/fake" + ocpcloudnetworkapi "github.com/openshift/api/cloudnetwork/v1" ocpconfigapi "github.com/openshift/api/config/v1" ocpcloudnetworkclientsetfake "github.com/openshift/client-go/cloudnetwork/clientset/versioned/fake" @@ -148,6 +152,20 @@ func newCloudPrivateIPConfig(name string) *ocpcloudnetworkapi.CloudPrivateIPConf } } +func newEgressQoS(name, namespace string) *egressqos.EgressQoS { + return &egressqos.EgressQoS{ + ObjectMeta: newObjectMeta(name, namespace), + Spec: egressqos.EgressQoSSpec{ + Egress: []egressqos.EgressQoSRule{ + { + DSCP: 50, + DstCIDR: pointer.String("1.2.3.4/32"), + }, + }, + }, + } +} + func objSetup(c *fake.Clientset, objType string, listFn func(core.Action) (bool, runtime.Object, error)) *watch.FakeWatcher { w := watch.NewFake() c.AddWatchReactor(objType, core.DefaultWatchReactor(w, nil)) @@ -176,6 +194,13 @@ func cloudPrivateIPConfigObjSetup(c *ocpcloudnetworkclientsetfake.Clientset, obj return w } +func egressQoSObjSetup(c 
*egressqosfake.Clientset, objType string, listFn func(core.Action) (bool, runtime.Object, error)) *watch.FakeWatcher { + w := watch.NewFake() + c.AddWatchReactor(objType, core.DefaultWatchReactor(w, nil)) + c.AddReactor("list", objType, listFn) + return w +} + type handlerCalls struct { added int32 updated int32 @@ -201,11 +226,13 @@ var _ = Describe("Watch Factory Operations", func() { egressIPFakeClient *egressipfake.Clientset egressFirewallFakeClient *egressfirewallfake.Clientset cloudNetworkFakeClient *ocpcloudnetworkclientsetfake.Clientset + egressQoSFakeClient *egressqosfake.Clientset podWatch, namespaceWatch, nodeWatch *watch.FakeWatcher policyWatch, endpointsWatch, serviceWatch *watch.FakeWatcher egressFirewallWatch *watch.FakeWatcher egressIPWatch *watch.FakeWatcher cloudPrivateIPConfigWatch *watch.FakeWatcher + egressQoSWatch *watch.FakeWatcher pods []*v1.Pod namespaces []*v1.Namespace nodes []*v1.Node @@ -216,6 +243,7 @@ var _ = Describe("Watch Factory Operations", func() { cloudPrivateIPConfigs []*ocpcloudnetworkapi.CloudPrivateIPConfig wf *WatchFactory egressFirewalls []*egressfirewall.EgressFirewall + egressQoSes []*egressqos.EgressQoS err error ) @@ -225,18 +253,21 @@ var _ = Describe("Watch Factory Operations", func() { config.PrepareTestConfig() config.OVNKubernetesFeature.EnableEgressIP = true config.OVNKubernetesFeature.EnableEgressFirewall = true + config.OVNKubernetesFeature.EnableEgressQoS = true config.Kubernetes.PlatformType = string(ocpconfigapi.AWSPlatformType) fakeClient = &fake.Clientset{} egressFirewallFakeClient = &egressfirewallfake.Clientset{} egressIPFakeClient = &egressipfake.Clientset{} cloudNetworkFakeClient = &ocpcloudnetworkclientsetfake.Clientset{} + egressQoSFakeClient = &egressqosfake.Clientset{} ovnClientset = &util.OVNClientset{ KubeClient: fakeClient, EgressIPClient: egressIPFakeClient, EgressFirewallClient: egressFirewallFakeClient, CloudNetworkClient: cloudNetworkFakeClient, + EgressQoSClient: egressQoSFakeClient, } 
pods = make([]*v1.Pod, 0) @@ -319,6 +350,15 @@ var _ = Describe("Watch Factory Operations", func() { } return true, obj, nil }) + + egressQoSes = make([]*egressqos.EgressQoS, 0) + egressQoSWatch = egressQoSObjSetup(egressQoSFakeClient, "egressqoses", func(core.Action) (bool, runtime.Object, error) { + obj := &egressqos.EgressQoSList{} + for _, p := range egressQoSes { + obj.Items = append(obj.Items, *p) + } + return true, obj, nil + }) }) AfterEach(func() { @@ -383,6 +423,10 @@ var _ = Describe("Watch Factory Operations", func() { cloudPrivateIPConfigs = append(cloudPrivateIPConfigs, newCloudPrivateIPConfig("192.168.176.25")) testExisting(CloudPrivateIPConfigType, "", nil) }) + It("is called for each existing egressQoS", func() { + egressQoSes = append(egressQoSes, newEgressQoS("myEgressQoS", "default")) + testExisting(EgressQoSType, "", nil) + }) It("is called for each existing pod that matches a given namespace and label", func() { pod := newPod("pod1", "default") @@ -469,6 +513,11 @@ var _ = Describe("Watch Factory Operations", func() { cloudPrivateIPConfigs = append(cloudPrivateIPConfigs, newCloudPrivateIPConfig("192.168.126.26")) testExisting(CloudPrivateIPConfigType) }) + It("calls ADD for each existing egressQoS", func() { + egressQoSes = append(egressQoSes, newEgressQoS("myEgressQoS", "default")) + egressQoSes = append(egressQoSes, newEgressQoS("myEgressQoS1", "default")) + testExisting(EgressQoSType) + }) }) Context("when EgressIP is disabled", func() { @@ -497,6 +546,19 @@ var _ = Describe("Watch Factory Operations", func() { testExisting(EgressFirewallType) }) }) + Context("when EgressQoS is disabled", func() { + testExisting := func(objType reflect.Type) { + wf, err = NewMasterWatchFactory(ovnClientset) + Expect(err).NotTo(HaveOccurred()) + err = wf.Start() + Expect(err).NotTo(HaveOccurred()) + Expect(wf.informers).NotTo(HaveKey(objType)) + } + It("does not contain EgressQoS informer", func() { + config.OVNKubernetesFeature.EnableEgressQoS = false + 
testExisting(EgressQoSType) + }) + }) addFilteredHandler := func(wf *WatchFactory, objType reflect.Type, namespace string, sel labels.Selector, funcs cache.ResourceEventHandlerFuncs) (*Handler, *handlerCalls) { calls := handlerCalls{} @@ -1181,6 +1243,41 @@ var _ = Describe("Watch Factory Operations", func() { wf.RemoveCloudPrivateIPConfigHandler(h) }) + It("responds to egressQoS add/update/delete events", func() { + wf, err = NewMasterWatchFactory(ovnClientset) + Expect(err).NotTo(HaveOccurred()) + err = wf.Start() + Expect(err).NotTo(HaveOccurred()) + + added := newEgressQoS("myEgressQoS", "default") + h, c := addHandler(wf, EgressQoSType, cache.ResourceEventHandlerFuncs{ + AddFunc: func(obj interface{}) { + egressQoS := obj.(*egressqos.EgressQoS) + Expect(reflect.DeepEqual(egressQoS, added)).To(BeTrue()) + }, + UpdateFunc: func(old, new interface{}) { + newEgressQoS := new.(*egressqos.EgressQoS) + Expect(reflect.DeepEqual(newEgressQoS, added)).To(BeTrue()) + Expect(newEgressQoS.Spec.Egress[0].DSCP).To(Equal(40)) + }, + DeleteFunc: func(obj interface{}) { + egressQoS := obj.(*egressqos.EgressQoS) + Expect(reflect.DeepEqual(egressQoS, added)).To(BeTrue()) + }, + }) + + egressQoSes = append(egressQoSes, added) + egressQoSWatch.Add(added) + Eventually(c.getAdded, 2).Should(Equal(1)) + added.Spec.Egress[0].DSCP = 40 + egressQoSWatch.Modify(added) + Eventually(c.getUpdated, 2).Should(Equal(1)) + egressQoSes = egressQoSes[:0] + egressQoSWatch.Delete(added) + Eventually(c.getDeleted, 2).Should(Equal(1)) + + wf.RemoveEgressQoSHandler(h) + }) It("stops processing events after the handler is removed", func() { wf, err = NewMasterWatchFactory(ovnClientset) Expect(err).NotTo(HaveOccurred()) diff --git a/go-controller/pkg/factory/handler.go b/go-controller/pkg/factory/handler.go index e84e38b450..b07a5ff8ef 100644 --- a/go-controller/pkg/factory/handler.go +++ b/go-controller/pkg/factory/handler.go @@ -11,6 +11,7 @@ import ( 
"github.com/ovn-org/ovn-kubernetes/go-controller/pkg/metrics" egressfirewalllister "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/listers/egressfirewall/v1" + egressqoslister "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1/apis/listers/egressqos/v1" cloudprivateipconfiglister "github.com/openshift/client-go/cloudnetwork/listers/cloudnetwork/v1" egressiplister "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressip/v1/apis/listers/egressip/v1" @@ -374,6 +375,8 @@ func newInformerLister(oType reflect.Type, sharedInformer cache.SharedIndexInfor return egressiplister.NewEgressIPLister(sharedInformer.GetIndexer()), nil case CloudPrivateIPConfigType: return cloudprivateipconfiglister.NewCloudPrivateIPConfigLister(sharedInformer.GetIndexer()), nil + case EgressQoSType: + return egressqoslister.NewEgressQoSLister(sharedInformer.GetIndexer()), nil } return nil, fmt.Errorf("cannot create lister from type %v", oType) diff --git a/go-controller/pkg/libovsdbops/model.go b/go-controller/pkg/libovsdbops/model.go index 1a493ba79b..a7c54fac1b 100644 --- a/go-controller/pkg/libovsdbops/model.go +++ b/go-controller/pkg/libovsdbops/model.go @@ -56,6 +56,8 @@ func getUUID(model model.Model) string { return t.UUID case *sbdb.SBGlobal: return t.UUID + case *nbdb.QoS: + return t.UUID default: panic(fmt.Sprintf("getUUID: unknown model %T", t)) } @@ -105,6 +107,8 @@ func setUUID(model model.Model, uuid string) { t.UUID = uuid case *sbdb.SBGlobal: t.UUID = uuid + case *nbdb.QoS: + t.UUID = uuid default: panic(fmt.Sprintf("setUUID: unknown model %T", t)) } @@ -209,6 +213,10 @@ func copyIndexes(model model.Model) model.Model { return &sbdb.SBGlobal{ UUID: t.UUID, } + case *nbdb.QoS: + return &nbdb.QoS{ + UUID: t.UUID, + } default: panic(fmt.Sprintf("copyIndexes: unknown model %T", t)) } @@ -256,6 +264,8 @@ func getListFromModel(model model.Model) interface{} { return &[]*sbdb.Chassis{} case *sbdb.MACBinding: return 
&[]*sbdb.MACBinding{} + case *nbdb.QoS: + return &[]nbdb.QoS{} default: panic(fmt.Sprintf("getModelList: unknown model %T", t)) } diff --git a/go-controller/pkg/libovsdbops/qos.go b/go-controller/pkg/libovsdbops/qos.go new file mode 100644 index 0000000000..2dded667d3 --- /dev/null +++ b/go-controller/pkg/libovsdbops/qos.go @@ -0,0 +1,107 @@ +package libovsdbops + +import ( + "context" + "strings" + + libovsdbclient "github.com/ovn-org/libovsdb/client" + libovsdb "github.com/ovn-org/libovsdb/ovsdb" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" +) + +type QoSPredicate func(*nbdb.QoS) bool + +// FindQoSesWithPredicate looks up QoSes from the cache based on a +// given predicate +func FindQoSesWithPredicate(nbClient libovsdbclient.Client, p QoSPredicate) ([]*nbdb.QoS, error) { + ctx, cancel := context.WithTimeout(context.Background(), types.OVSDBTimeout) + defer cancel() + found := []*nbdb.QoS{} + err := nbClient.WhereCache(p).List(ctx, &found) + return found, err +} + +// CreateOrUpdateQoSesOps returns the ops to create or update the provided QoSes. +func CreateOrUpdateQoSesOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation, qoses ...*nbdb.QoS) ([]libovsdb.Operation, error) { + opModels := make([]operationModel, 0, len(qoses)) + for i := range qoses { + // can't use i in the predicate, for loop replaces it in-memory + qos := qoses[i] + opModel := operationModel{ + Model: qos, + ModelPredicate: func(q *nbdb.QoS) bool { + return strings.Contains(q.Match, qos.Match) && q.Priority == qos.Priority + }, + OnModelUpdates: []interface{}{}, // update all fields + ErrNotFound: false, + BulkOp: false, + } + opModels = append(opModels, opModel) + } + + modelClient := newModelClient(nbClient) + return modelClient.CreateOrUpdateOps(ops, opModels...) 
+} + +// AddQoSesToLogicalSwitchOps returns the ops to add the provided QoSes to the switch +func AddQoSesToLogicalSwitchOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation, name string, qoses ...*nbdb.QoS) ([]libovsdb.Operation, error) { + sw := &nbdb.LogicalSwitch{ + Name: name, + QOSRules: make([]string, 0, len(qoses)), + } + for _, qos := range qoses { + sw.QOSRules = append(sw.QOSRules, qos.UUID) + } + + opModels := operationModel{ + Model: sw, + ModelPredicate: func(item *nbdb.LogicalSwitch) bool { return item.Name == sw.Name }, + OnModelMutations: []interface{}{&sw.QOSRules}, + ErrNotFound: true, + BulkOp: false, + } + + modelClient := newModelClient(nbClient) + return modelClient.CreateOrUpdateOps(ops, opModels) +} + +// DeleteQoSesOps returns the ops to delete the provided QoSes. +func DeleteQoSesOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation, qoses ...*nbdb.QoS) ([]libovsdb.Operation, error) { + opModels := make([]operationModel, 0, len(qoses)) + for i := range qoses { + // can't use i in the predicate, for loop replaces it in-memory + qos := qoses[i] + opModel := operationModel{ + Model: qos, + ErrNotFound: false, + BulkOp: false, + } + opModels = append(opModels, opModel) + } + + modelClient := newModelClient(nbClient) + return modelClient.DeleteOps(ops, opModels...) +} + +// RemoveQoSesFromLogicalSwitchOps returns the ops to remove the provided QoSes from the provided switch. 
+func RemoveQoSesFromLogicalSwitchOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation, name string, qoses ...*nbdb.QoS) ([]libovsdb.Operation, error) { + sw := &nbdb.LogicalSwitch{ + Name: name, + QOSRules: make([]string, 0, len(qoses)), + } + for _, qos := range qoses { + sw.QOSRules = append(sw.QOSRules, qos.UUID) + } + + opModels := operationModel{ + Model: sw, + ModelPredicate: func(item *nbdb.LogicalSwitch) bool { return item.Name == sw.Name }, + OnModelMutations: []interface{}{&sw.QOSRules}, + ErrNotFound: true, + BulkOp: false, + } + + modelClient := newModelClient(nbClient) + return modelClient.DeleteOps(ops, opModels) +} diff --git a/go-controller/pkg/metrics/metrics.go b/go-controller/pkg/metrics/metrics.go index efa816d089..a940d2cd42 100644 --- a/go-controller/pkg/metrics/metrics.go +++ b/go-controller/pkg/metrics/metrics.go @@ -2,6 +2,7 @@ package metrics import ( "context" + "crypto/tls" "fmt" "net/http" "net/http/pprof" @@ -368,8 +369,29 @@ func checkPodRunsOnGivenNode(clientset kubernetes.Interface, labels []string, k8 strings.Join(labels, ","), k8sNodeName) } -// StartMetricsServer runs the prometheus listener so that OVN K8s metrics can be collected -func StartMetricsServer(bindAddress string, enablePprof bool) { +// using the crypto/tls module's GetCertificate() callback function helps in picking up +// the latest certificate (due to cert rotation on cert expiry) +func listenAndServeTLS(addr, certFile, privKeyFile string, handler http.Handler) error { + tlsConfig := &tls.Config{ + GetCertificate: func(info *tls.ClientHelloInfo) (*tls.Certificate, error) { + cert, err := tls.LoadX509KeyPair(certFile, privKeyFile) + if err != nil { + return nil, fmt.Errorf("error generating x509 certs for metrics TLS endpoint: %v", err) + } + return &cert, nil + }, + } + server := &http.Server{ + Addr: addr, + Handler: handler, + TLSConfig: tlsConfig, + } + return server.ListenAndServeTLS("", "") +} + +// StartMetricsServer runs the prometheus listener 
so that OVN K8s metrics can be collected +// It puts the endpoint behind TLS if certFile and keyFile are defined. +func StartMetricsServer(bindAddress string, enablePprof bool, certFile string, keyFile string) { mux := http.NewServeMux() mux.Handle("/metrics", promhttp.Handler()) @@ -382,7 +404,12 @@ func StartMetricsServer(bindAddress string, enablePprof bool) { } go utilwait.Until(func() { - err := http.ListenAndServe(bindAddress, mux) + var err error + if certFile != "" && keyFile != "" { + err = listenAndServeTLS(bindAddress, certFile, keyFile, mux) + } else { + err = http.ListenAndServe(bindAddress, mux) + } if err != nil { utilruntime.HandleError(fmt.Errorf("starting metrics server failed: %v", err)) } @@ -392,14 +419,19 @@ func StartMetricsServer(bindAddress string, enablePprof bool) { var ovnRegistry = prometheus.NewRegistry() // StartOVNMetricsServer runs the prometheus listener so that OVN metrics can be collected -func StartOVNMetricsServer(bindAddress string) { +func StartOVNMetricsServer(bindAddress, certFile, keyFile string) { handler := promhttp.InstrumentMetricHandler(ovnRegistry, promhttp.HandlerFor(ovnRegistry, promhttp.HandlerOpts{})) mux := http.NewServeMux() mux.Handle("/metrics", handler) go utilwait.Until(func() { - err := http.ListenAndServe(bindAddress, mux) + var err error + if certFile != "" && keyFile != "" { + err = listenAndServeTLS(bindAddress, certFile, keyFile, mux) + } else { + err = http.ListenAndServe(bindAddress, mux) + } if err != nil { utilruntime.HandleError(fmt.Errorf("starting OVN metrics server failed: %v", err)) } diff --git a/go-controller/pkg/node/gateway_init_linux_test.go b/go-controller/pkg/node/gateway_init_linux_test.go index 16a300ab0c..ad60803aed 100644 --- a/go-controller/pkg/node/gateway_init_linux_test.go +++ b/go-controller/pkg/node/gateway_init_linux_test.go @@ -33,6 +33,7 @@ import ( egressfirewallfake "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/clientset/versioned/fake" 
egressipfake "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressip/v1/apis/clientset/versioned/fake" + egressqosfake "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1/apis/clientset/versioned/fake" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" @@ -169,9 +170,11 @@ func shareGatewayInterfaceTest(app *cli.App, testNS ns.NetNS, }) egressFirewallFakeClient := &egressfirewallfake.Clientset{} egressIPFakeClient := &egressipfake.Clientset{} + egressQoSFakeClient := &egressqosfake.Clientset{} fakeClient := &util.OVNClientset{ KubeClient: kubeFakeClient, EgressFirewallClient: egressFirewallFakeClient, + EgressQoSClient: egressQoSFakeClient, } stop := make(chan struct{}) @@ -451,9 +454,11 @@ func shareGatewayInterfaceDPUTest(app *cli.App, testNS ns.NetNS, }) egressFirewallFakeClient := &egressfirewallfake.Clientset{} egressIPFakeClient := &egressipfake.Clientset{} + egressQoSFakeClient := &egressqosfake.Clientset{} fakeClient := &util.OVNClientset{ KubeClient: kubeFakeClient, EgressFirewallClient: egressFirewallFakeClient, + EgressQoSClient: egressQoSFakeClient, } _, nodeNet, err := net.ParseCIDR(nodeSubnet) @@ -792,9 +797,11 @@ OFPT_GET_CONFIG_REPLY (xid=0x4): frags=normal miss_send_len=0`, ) egressFirewallFakeClient := &egressfirewallfake.Clientset{} egressIPFakeClient := &egressipfake.Clientset{} + egressQoSFakeClient := &egressqosfake.Clientset{} fakeClient := &util.OVNClientset{ KubeClient: kubeFakeClient, EgressFirewallClient: egressFirewallFakeClient, + EgressQoSClient: egressQoSFakeClient, } stop := make(chan struct{}) diff --git a/go-controller/pkg/ovn/egressfirewall.go b/go-controller/pkg/ovn/egressfirewall.go index 4ff8802d8e..23825d3b1b 100644 --- a/go-controller/pkg/ovn/egressfirewall.go +++ b/go-controller/pkg/ovn/egressfirewall.go @@ -25,7 +25,6 @@ import ( const ( egressFirewallAppliedCorrectly = "EgressFirewall Rules applied" egressFirewallAddError = "EgressFirewall Rules not correctly added" - 
egressFirewallUpdateError = "EgressFirewall Rules not correctly updated" ) type egressFirewall struct { @@ -96,8 +95,9 @@ func newEgressFirewallRule(rawEgressFirewallRule egressfirewallapi.EgressFirewal // For this it just deletes all the ACLs on the distributed join switch // NOTE: Utilize the fact that we know that all egress firewall related setup must have a priority: types.MinimumReservedEgressFirewallPriority <= priority <= types.EgressFirewallStartPriority -func (oc *Controller) syncEgressFirewall(egressFirewalls []interface{}) { +func (oc *Controller) syncEgressFirewall(egressFirewalls []interface{}) error { oc.syncWithRetry("syncEgressFirewall", func() error { return oc.syncEgressFirewallRetriable(egressFirewalls) }) + return nil } // This function implements the main body of work of what is described by syncEgressFirewall. @@ -189,7 +189,7 @@ func (oc *Controller) addEgressFirewall(egressFirewall *egressfirewallapi.Egress ef.Lock() defer ef.Unlock() // there should not be an item already in egressFirewall map for the given Namespace - if _, loaded := oc.egressFirewalls.LoadOrStore(egressFirewall.Namespace, ef); loaded { + if _, loaded := oc.egressFirewalls.Load(egressFirewall.Namespace); loaded { return fmt.Errorf("error attempting to add egressFirewall %s to namespace %s when it already has an egressFirewall", egressFirewall.Name, egressFirewall.Namespace) } @@ -223,27 +223,17 @@ func (oc *Controller) addEgressFirewall(egressFirewall *egressfirewallapi.Egress return fmt.Errorf("cannot Ensure that addressSet for namespace %s exists %v", egressFirewall.Namespace, err) } ipv4HashedAS, ipv6HashedAS := addressset.MakeAddressSetHashNames(egressFirewall.Namespace) - err = oc.addEgressFirewallRules(ef, ipv4HashedAS, ipv6HashedAS, types.EgressFirewallStartPriority) - if err != nil { + if err := oc.addEgressFirewallRules(ef, ipv4HashedAS, ipv6HashedAS, types.EgressFirewallStartPriority); err != nil { return err } - + 
oc.egressFirewalls.Store(egressFirewall.Namespace, ef) return nil } -func (oc *Controller) updateEgressFirewall(oldEgressFirewall, newEgressFirewall *egressfirewallapi.EgressFirewall) error { - updateErrors := oc.deleteEgressFirewall(oldEgressFirewall) - if updateErrors != nil { - return updateErrors - } - updateErrors = oc.addEgressFirewall(newEgressFirewall) - return updateErrors -} - func (oc *Controller) deleteEgressFirewall(egressFirewallObj *egressfirewallapi.EgressFirewall) error { klog.Infof("Deleting egress Firewall %s in namespace %s", egressFirewallObj.Name, egressFirewallObj.Namespace) deleteDNS := false - obj, loaded := oc.egressFirewalls.LoadAndDelete(egressFirewallObj.Namespace) + obj, loaded := oc.egressFirewalls.Load(egressFirewallObj.Namespace) if !loaded { return fmt.Errorf("there is no egressFirewall found in namespace %s", egressFirewallObj.Namespace) @@ -265,13 +255,19 @@ func (oc *Controller) deleteEgressFirewall(egressFirewallObj *egressfirewallapi. } } if deleteDNS { - oc.egressFirewallDNS.Delete(egressFirewallObj.Namespace) + if err := oc.egressFirewallDNS.Delete(egressFirewallObj.Namespace); err != nil { + return err + } } - return oc.deleteEgressFirewallRules(egressFirewallObj.Namespace) + if err := oc.deleteEgressFirewallRules(egressFirewallObj.Namespace); err != nil { + return err + } + oc.egressFirewalls.Delete(egressFirewallObj.Namespace) + return nil } -func (oc *Controller) updateEgressFirewallWithRetry(egressfirewall *egressfirewallapi.EgressFirewall) error { +func (oc *Controller) updateEgressFirewallStatusWithRetry(egressfirewall *egressfirewallapi.EgressFirewall) error { retryErr := retry.RetryOnConflict(retry.DefaultRetry, func() error { return oc.kube.UpdateEgressFirewall(egressfirewall) }) @@ -562,3 +558,7 @@ func getClusterSubnetsExclusion() string { } return exclusion } + +func getEgressFirewallNamespacedName(egressFirewall *egressfirewallapi.EgressFirewall) string { + return fmt.Sprintf("%v/%v", egressFirewall.Namespace, 
egressFirewall.Name) +} diff --git a/go-controller/pkg/ovn/egressfirewall_dns.go b/go-controller/pkg/ovn/egressfirewall_dns.go index 59ce8d92ce..a91ee27bc3 100644 --- a/go-controller/pkg/ovn/egressfirewall_dns.go +++ b/go-controller/pkg/ovn/egressfirewall_dns.go @@ -84,7 +84,7 @@ func (e *EgressDNS) Add(namespace, dnsName string) (addressset.AddressSet, error } -func (e *EgressDNS) Delete(namespace string) bool { +func (e *EgressDNS) Delete(namespace string) error { e.lock.Lock() var dnsNamesToDelete []string @@ -96,7 +96,7 @@ func (e *EgressDNS) Delete(namespace string) bool { // the dnsEntry appears in no other namespace, so delete the address_set err := dnsEntry.dnsAddressSet.Destroy() if err != nil { - klog.Errorf("Error deleting EgressFirewall AddressSet for dnsName: %s %v", dnsName, err) + return fmt.Errorf("error deleting EgressFirewall AddressSet for dnsName: %s %v", dnsName, err) } // the dnsEntry is no longer needed because nothing references it, so delete it delete(e.dnsEntries, dnsName) @@ -111,7 +111,7 @@ func (e *EgressDNS) Delete(namespace string) bool { // blocks only if Run() is busy updating its internal values) e.deleted <- name } - return len(e.dnsEntries) == 0 + return nil } func (e *EgressDNS) Update(dns string) (bool, error) { diff --git a/go-controller/pkg/ovn/egressfirewall_test.go b/go-controller/pkg/ovn/egressfirewall_test.go index 4940d88c39..3e76d69542 100644 --- a/go-controller/pkg/ovn/egressfirewall_test.go +++ b/go-controller/pkg/ovn/egressfirewall_test.go @@ -3,8 +3,8 @@ package ovn import ( "context" "fmt" - "net" + "time" "github.com/onsi/ginkgo" "github.com/onsi/gomega" @@ -692,9 +692,277 @@ var _ = ginkgo.Describe("OVN EgressFirewall Operations for local gateway mode", gomega.Expect(err).NotTo(gomega.HaveOccurred()) }) + ginkgo.It("correctly retries deleting an egressfirewall", func() { + app.Action = func(ctx *cli.Context) error { + const ( + node1Name string = "node1" + node2Name string = "node2" + ) - }) + nodeSwitch1 := 
&nbdb.LogicalSwitch{ + UUID: node1Name + "-UUID", + Name: node1Name, + } + nodeSwitch2 := &nbdb.LogicalSwitch{ + UUID: node2Name + "-UUID", + Name: node2Name, + } + + dbSetup := libovsdbtest.TestSetup{ + NBData: []libovsdbtest.TestData{ + nodeSwitch1, + nodeSwitch2, + clusterRouter, + }, + } + namespace1 := *newNamespace("namespace1") + egressFirewall := newEgressFirewallObject("default", namespace1.Name, []egressfirewallapi.EgressFirewallRule{ + { + Type: "Allow", + Ports: []egressfirewallapi.EgressFirewallPort{ + { + Protocol: "TCP", + Port: 100, + }, + }, + To: egressfirewallapi.EgressFirewallDestination{ + CIDRSelector: "1.2.3.5/23", + }, + }, + }) + + fakeOVN.startWithDBSetup(dbSetup, + &egressfirewallapi.EgressFirewallList{ + Items: []egressfirewallapi.EgressFirewall{ + *egressFirewall, + }, + }, + &v1.NodeList{ + Items: []v1.Node{ + { + Status: v1.NodeStatus{ + Phase: v1.NodeRunning, + }, + ObjectMeta: newObjectMeta(node1Name, ""), + }, + { + Status: v1.NodeStatus{ + Phase: v1.NodeRunning, + }, + ObjectMeta: newObjectMeta(node2Name, ""), + }, + }, + }) + + fakeOVN.controller.WatchEgressFirewall() + + ipv4ACL := libovsdbops.BuildACL( + "", + t.DirectionToLPort, + t.EgressFirewallStartPriority, + "(ip4.dst == 1.2.3.5/23) && ip4.src == $a10481622940199974102 && ((tcp && ( tcp.dst == 100 ))) && ip4.dst != 10.128.0.0/14", + nbdb.ACLActionAllow, + "", + "", + false, + map[string]string{"egressFirewall": "namespace1"}, + nil, + ) + ipv4ACL.UUID = "ipv4ACL-UUID" + + // new ACL will be added to the switches + nodeSwitch1.ACLs = []string{ipv4ACL.UUID} + nodeSwitch2.ACLs = []string{ipv4ACL.UUID} + + expectedDatabaseState := []libovsdb.TestData{ + ipv4ACL, + nodeSwitch1, + nodeSwitch2, + clusterRouter, + } + + gomega.Expect(fakeOVN.nbClient).To(libovsdbtest.HaveData(expectedDatabaseState)) + + ginkgo.By("Bringing down NBDB") + // inject transient problem, nbdb is down + fakeOVN.controller.nbClient.Close() + gomega.Eventually(func() bool { + return 
fakeOVN.controller.nbClient.Connected() + }).Should(gomega.BeFalse()) + + err := fakeOVN.fakeClient.EgressFirewallClient.K8sV1().EgressFirewalls(egressFirewall.Namespace).Delete(context.TODO(), egressFirewall.Name, *metav1.NewDeleteOptions(0)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + // sleep long enough for TransactWithRetry to fail, causing egress firewall Delete to fail + time.Sleep(t.OVSDBTimeout + time.Second) + // check to see if the retry cache has an entry for this egress firewall + key := getEgressFirewallNamespacedName(egressFirewall) + gomega.Eventually(func() *retryObjEntry { + return fakeOVN.controller.retryEgressFirewalls.getObjRetryEntry(key) + }).ShouldNot(gomega.BeNil()) + retryEntry := fakeOVN.controller.retryEgressFirewalls.getObjRetryEntry(key) + ginkgo.By("retry entry new obj should be nil") + gomega.Expect(retryEntry.newObj).To(gomega.BeNil()) + ginkgo.By("retry entry old obj should not be nil") + gomega.Expect(retryEntry.oldObj).NotTo(gomega.BeNil()) + + connCtx, cancel := context.WithTimeout(context.Background(), t.OVSDBTimeout) + defer cancel() + resetNBClient(connCtx, fakeOVN.controller.nbClient) + fakeOVN.controller.retryEgressFirewalls.requestRetryObjs() + + // ACL should be removed from switches after egfw is deleted + nodeSwitch1.ACLs = []string{} + nodeSwitch2.ACLs = []string{} + expectedDatabaseState = []libovsdb.TestData{ + nodeSwitch1, + nodeSwitch2, + clusterRouter, + } + + gomega.Eventually(fakeOVN.nbClient).Should(libovsdbtest.HaveData(expectedDatabaseState)) + // check the cache no longer has the entry + gomega.Eventually(func() *retryObjEntry { + return fakeOVN.controller.retryEgressFirewalls.getObjRetryEntry(key) + }).Should(gomega.BeNil()) + return nil + } + + err := app.Run([]string{app.Name}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }) + + ginkgo.It("correctly retries adding and updating an egressfirewall", func() { + app.Action = func(ctx *cli.Context) error { + const ( + node1Name string = "node1" + ) 
+ + InitialNodeSwitch := &nbdb.LogicalSwitch{ + UUID: node1Name + "-UUID", + Name: node1Name, + } + + dbSetup := libovsdbtest.TestSetup{ + NBData: []libovsdbtest.TestData{ + InitialNodeSwitch, + clusterRouter, + }, + } + + namespace1 := *newNamespace("namespace1") + egressFirewall := newEgressFirewallObject("default", namespace1.Name, []egressfirewallapi.EgressFirewallRule{ + { + Type: "Allow", + To: egressfirewallapi.EgressFirewallDestination{ + CIDRSelector: "1.2.3.4/23", + }, + }, + }) + egressFirewall1 := newEgressFirewallObject("default", namespace1.Name, []egressfirewallapi.EgressFirewallRule{ + { + Type: "Deny", + To: egressfirewallapi.EgressFirewallDestination{ + CIDRSelector: "1.2.3.4/23", + }, + }, + }) + + fakeOVN.startWithDBSetup(dbSetup, + &egressfirewallapi.EgressFirewallList{ + Items: []egressfirewallapi.EgressFirewall{ + *egressFirewall, + }, + }, + &v1.NamespaceList{ + Items: []v1.Namespace{ + namespace1, + }, + }, + &v1.NodeList{ + Items: []v1.Node{ + { + Status: v1.NodeStatus{ + Phase: v1.NodeRunning, + }, + ObjectMeta: newObjectMeta(node1Name, ""), + }, + }, + }) + + fakeOVN.controller.WatchNamespaces() + fakeOVN.controller.WatchEgressFirewall() + + ipv4ACL := libovsdbops.BuildACL( + "", + t.DirectionToLPort, + t.EgressFirewallStartPriority, + "(ip4.dst == 1.2.3.4/23) && ip4.src == $a10481622940199974102 && ip4.dst != 10.128.0.0/14", + nbdb.ACLActionAllow, + "", + "", + false, + map[string]string{"egressFirewall": "namespace1"}, + nil, + ) + ipv4ACL.UUID = "ipv4ACL-UUID" + + // new ACL will be added to the switch + finalNodeSwitch := &nbdb.LogicalSwitch{ + UUID: InitialNodeSwitch.UUID, + Name: InitialNodeSwitch.Name, + ACLs: []string{ipv4ACL.UUID}, + } + + // new ACL will be added to the switch + expectedDatabaseState := []libovsdb.TestData{ + ipv4ACL, + finalNodeSwitch, + clusterRouter, + } + + gomega.Expect(fakeOVN.nbClient).To(libovsdbtest.HaveData(expectedDatabaseState)) + ginkgo.By("Bringing down NBDB") + // inject transient problem, nbdb 
is down + fakeOVN.controller.nbClient.Close() + gomega.Eventually(func() bool { + return fakeOVN.controller.nbClient.Connected() + }).Should(gomega.BeFalse()) + + _, err := fakeOVN.fakeClient.EgressFirewallClient.K8sV1().EgressFirewalls(egressFirewall.Namespace).Get(context.TODO(), egressFirewall.Name, metav1.GetOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + _, err = fakeOVN.fakeClient.EgressFirewallClient.K8sV1().EgressFirewalls(egressFirewall1.Namespace).Update(context.TODO(), egressFirewall1, metav1.UpdateOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + // sleep long enough for TransactWithRetry to fail, causing egress firewall Add to fail + time.Sleep(t.OVSDBTimeout + time.Second) + // check to see if the retry cache has an entry for this egress firewall + key := getEgressFirewallNamespacedName(egressFirewall) + gomega.Eventually(func() *retryObjEntry { + return fakeOVN.controller.retryEgressFirewalls.getObjRetryEntry(key) + }).ShouldNot(gomega.BeNil()) + retryEntry := fakeOVN.controller.retryEgressFirewalls.getObjRetryEntry(key) + ginkgo.By("retry entry new obj should not be nil") + gomega.Expect(retryEntry.newObj).NotTo(gomega.BeNil()) + ginkgo.By("retry entry old obj should not be nil") + gomega.Expect(retryEntry.oldObj).NotTo(gomega.BeNil()) + connCtx, cancel := context.WithTimeout(context.Background(), t.OVSDBTimeout) + defer cancel() + resetNBClient(connCtx, fakeOVN.controller.nbClient) + fakeOVN.controller.retryEgressFirewalls.requestRetryObjs() + // check the cache no longer has the entry + gomega.Eventually(func() *retryObjEntry { + return fakeOVN.controller.retryEgressFirewalls.getObjRetryEntry(key) + }).Should(gomega.BeNil()) + ipv4ACL.Action = nbdb.ACLActionDrop + gomega.Eventually(fakeOVN.nbClient).Should(libovsdbtest.HaveData(expectedDatabaseState)) + return nil + } + + err := app.Run([]string{app.Name}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + }) + }) }) var _ = ginkgo.Describe("OVN EgressFirewall 
Operations for shared gateway mode", func() { @@ -1295,7 +1563,6 @@ var _ = ginkgo.Describe("OVN EgressFirewall Operations for shared gateway mode", gomega.Expect(err).NotTo(gomega.HaveOccurred()) }) - }) }) diff --git a/go-controller/pkg/ovn/egressqos.go b/go-controller/pkg/ovn/egressqos.go new file mode 100644 index 0000000000..c67b22f116 --- /dev/null +++ b/go-controller/pkg/ovn/egressqos.go @@ -0,0 +1,946 @@ +package ovn + +import ( + "fmt" + "net" + "strings" + "sync" + "time" + + "github.com/ovn-org/libovsdb/ovsdb" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" + egressqosapi "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1" + egressqosinformer "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1/apis/informers/externalversions/egressqos/v1" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdbops" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" + addressset "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/ovn/address_set" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" + "github.com/pkg/errors" + kapi "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/wait" + v1coreinformers "k8s.io/client-go/informers/core/v1" + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/util/workqueue" + "k8s.io/klog/v2" + utilnet "k8s.io/utils/net" +) + +const ( + maxEgressQoSRetries = 10 + defaultEgressQoSName = "default" + EgressQoSFlowStartPriority = 1000 + rulePriorityDelimeter = "-" +) + +type egressQoS struct { + sync.RWMutex + name string + namespace string + rules []*egressQoSRule + stale bool +} + +type egressQoSRule struct { + priority int + dscp int + destination string + addrSet addressset.AddressSet + pods *sync.Map // pods name -> 
ips in the addrSet + podSelector metav1.LabelSelector +} + +// shallow copies the EgressQoS object provided. +func (oc *Controller) cloneEgressQoS(raw *egressqosapi.EgressQoS) (*egressQoS, error) { + eq := &egressQoS{ + name: raw.Name, + namespace: raw.Namespace, + rules: make([]*egressQoSRule, 0), + } + + if len(raw.Spec.Egress) > EgressQoSFlowStartPriority { + return nil, fmt.Errorf("cannot create EgressQoS with %d rules - maximum is %d", len(raw.Spec.Egress), EgressQoSFlowStartPriority) + } + + addErrors := errors.New("") + for i, rule := range raw.Spec.Egress { + eqr, err := oc.cloneEgressQoSRule(rule, EgressQoSFlowStartPriority-i) + if err != nil { + dst := "any" + if rule.DstCIDR != nil { + dst = *rule.DstCIDR + } + addErrors = errors.Wrapf(addErrors, "error: cannot create egressqos Rule to destination %s for namespace %s - %v", + dst, eq.namespace, err) + continue + } + eq.rules = append(eq.rules, eqr) + } + + if addErrors.Error() == "" { + addErrors = nil + } + + return eq, addErrors +} + +// shallow copies the EgressQoSRule object provided. 
+func (oc *Controller) cloneEgressQoSRule(raw egressqosapi.EgressQoSRule, priority int) (*egressQoSRule, error) { + dst := "" + if raw.DstCIDR != nil { + _, _, err := net.ParseCIDR(*raw.DstCIDR) + if err != nil { + return nil, err + } + dst = *raw.DstCIDR + } + + _, err := metav1.LabelSelectorAsSelector(&raw.PodSelector) + if err != nil { + return nil, err + } + + eqr := &egressQoSRule{ + priority: priority, + dscp: raw.DSCP, + destination: dst, + podSelector: raw.PodSelector, + } + + return eqr, nil +} + +func (oc *Controller) createASForEgressQoSRule(podSelector metav1.LabelSelector, namespace string, priority int) (addressset.AddressSet, *sync.Map, error) { + var addrSet addressset.AddressSet + + selector, _ := metav1.LabelSelectorAsSelector(&podSelector) + if selector.Empty() { // empty selector means that the rule applies to all pods in the namespace + addrSet, err := oc.addressSetFactory.EnsureAddressSet(namespace) + if err != nil { + return nil, nil, fmt.Errorf("cannot ensure that addressSet for namespace %s exists %v", namespace, err) + } + return addrSet, nil, nil + } + + podsCache := sync.Map{} + + pods, err := oc.watchFactory.GetPodsBySelector(namespace, podSelector) + if err != nil { + return nil, nil, err + } + + addrSet, err = oc.addressSetFactory.EnsureAddressSet(fmt.Sprintf("%s%s%s%d", types.EgressQoSRulePrefix, namespace, rulePriorityDelimeter, priority)) + if err != nil { + return nil, nil, err + } + + podsIps := []net.IP{} + for _, pod := range pods { + // we don't handle HostNetworked or completed pods + if util.PodWantsNetwork(pod) && !util.PodCompleted(pod) { + podIPs, err := util.GetAllPodIPs(pod) + if err != nil { + return nil, nil, err + } + podsCache.Store(pod.Name, podIPs) + podsIps = append(podsIps, podIPs...) + } + } + err = addrSet.SetIPs(podsIps) + if err != nil { + return nil, nil, err + } + + return addrSet, &podsCache, nil +} + +// initEgressQoSController initializes the EgressQoS controller. 
+func (oc *Controller) initEgressQoSController( + eqInformer egressqosinformer.EgressQoSInformer, + podInformer v1coreinformers.PodInformer, + nodeInformer v1coreinformers.NodeInformer) { + klog.Info("Setting up event handlers for EgressQoS") + oc.egressQoSLister = eqInformer.Lister() + oc.egressQoSSynced = eqInformer.Informer().HasSynced + oc.egressQoSQueue = workqueue.NewNamedRateLimitingQueue( + workqueue.NewItemFastSlowRateLimiter(1*time.Second, 5*time.Second, 5), + "egressqos", + ) + eqInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ + AddFunc: oc.onEgressQoSAdd, + UpdateFunc: oc.onEgressQoSUpdate, + DeleteFunc: oc.onEgressQoSDelete, + }) + + oc.egressQoSPodLister = podInformer.Lister() + oc.egressQoSPodSynced = podInformer.Informer().HasSynced + oc.egressQoSPodQueue = workqueue.NewNamedRateLimitingQueue( + workqueue.NewItemFastSlowRateLimiter(1*time.Second, 5*time.Second, 5), + "egressqospods", + ) + podInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ + AddFunc: oc.onEgressQoSPodAdd, + UpdateFunc: oc.onEgressQoSPodUpdate, + DeleteFunc: oc.onEgressQoSPodDelete, + }) + + oc.egressQoSNodeLister = nodeInformer.Lister() + oc.egressQoSNodeSynced = nodeInformer.Informer().HasSynced + oc.egressQoSNodeQueue = workqueue.NewNamedRateLimitingQueue( + workqueue.NewItemFastSlowRateLimiter(1*time.Second, 5*time.Second, 5), + "egressqosnodes", + ) + nodeInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ + AddFunc: oc.onEgressQoSNodeAdd, // we only care about new logical switches being added + UpdateFunc: func(o, n interface{}) {}, + DeleteFunc: func(obj interface{}) {}, + }) +} + +func (oc *Controller) runEgressQoSController(threadiness int, stopCh <-chan struct{}) { + defer utilruntime.HandleCrash() + + klog.Infof("Starting EgressQoS Controller") + + if !cache.WaitForNamedCacheSync("egressqosnodes", stopCh, oc.egressQoSNodeSynced) { + utilruntime.HandleError(fmt.Errorf("timed out waiting for caches to sync")) + 
klog.Infof("Synchronization failed") + return + } + + if !cache.WaitForNamedCacheSync("egressqospods", stopCh, oc.egressQoSPodSynced) { + utilruntime.HandleError(fmt.Errorf("timed out waiting for caches to sync")) + klog.Infof("Synchronization failed") + return + } + + if !cache.WaitForNamedCacheSync("egressqos", stopCh, oc.egressQoSSynced) { + utilruntime.HandleError(fmt.Errorf("timed out waiting for caches to sync")) + klog.Infof("Synchronization failed") + return + } + + klog.Infof("Repairing EgressQoSes") + err := oc.repairEgressQoSes() + if err != nil { + klog.Errorf("Failed to delete stale EgressQoS entries: %v", err) + } + + wg := &sync.WaitGroup{} + for i := 0; i < threadiness; i++ { + wg.Add(1) + go func() { + defer wg.Done() + wait.Until(func() { + oc.runEgressQoSWorker(wg) + }, time.Second, stopCh) + }() + } + + for i := 0; i < threadiness; i++ { + wg.Add(1) + go func() { + defer wg.Done() + wait.Until(func() { + oc.runEgressQoSPodWorker(wg) + }, time.Second, stopCh) + }() + } + + for i := 0; i < threadiness; i++ { + wg.Add(1) + go func() { + defer wg.Done() + wait.Until(func() { + oc.runEgressQoSNodeWorker(wg) + }, time.Second, stopCh) + }() + } + + // wait until we're told to stop + <-stopCh + + klog.Infof("Shutting down EgressQoS controller") + oc.egressQoSQueue.ShutDown() + oc.egressQoSPodQueue.ShutDown() + oc.egressQoSNodeQueue.ShutDown() + + wg.Wait() +} + +// onEgressQoSAdd queues the EgressQoS for processing. +func (oc *Controller) onEgressQoSAdd(obj interface{}) { + key, err := cache.MetaNamespaceKeyFunc(obj) + if err != nil { + utilruntime.HandleError(fmt.Errorf("couldn't get key for object %+v: %v", obj, err)) + return + } + klog.V(4).Infof("Adding EgressQoS %s", key) + oc.egressQoSQueue.Add(key) +} + +// onEgressQoSUpdate queues the EgressQoS for processing. 
+func (oc *Controller) onEgressQoSUpdate(oldObj, newObj interface{}) { + oldEQ := oldObj.(*egressqosapi.EgressQoS) + newEQ := newObj.(*egressqosapi.EgressQoS) + + if oldEQ.ResourceVersion == newEQ.ResourceVersion || + !newEQ.GetDeletionTimestamp().IsZero() { + return + } + + key, err := cache.MetaNamespaceKeyFunc(newObj) + if err == nil { + oc.egressQoSQueue.Add(key) + } +} + +// onEgressQoSDelete queues the EgressQoS for processing. +func (oc *Controller) onEgressQoSDelete(obj interface{}) { + key, err := cache.MetaNamespaceKeyFunc(obj) + if err != nil { + utilruntime.HandleError(fmt.Errorf("couldn't get key for object %+v: %v", obj, err)) + return + } + klog.V(4).Infof("Deleting EgressQoS %s", key) + oc.egressQoSQueue.Add(key) +} + +func (oc *Controller) runEgressQoSWorker(wg *sync.WaitGroup) { + for oc.processNextEgressQoSWorkItem(wg) { + } +} + +func (oc *Controller) processNextEgressQoSWorkItem(wg *sync.WaitGroup) bool { + wg.Add(1) + defer wg.Done() + + key, quit := oc.egressQoSQueue.Get() + if quit { + return false + } + + defer oc.egressQoSQueue.Done(key) + + err := oc.syncEgressQoS(key.(string)) + if err == nil { + oc.egressQoSQueue.Forget(key) + return true + } + + utilruntime.HandleError(fmt.Errorf("%v failed with : %v", key, err)) + + if oc.egressQoSQueue.NumRequeues(key) < maxEgressQoSRetries { + oc.egressQoSQueue.AddRateLimited(key) + return true + } + + oc.egressQoSQueue.Forget(key) + return true +} + +// This takes care of syncing stale data which we might have in OVN if +// there's no ovnkube-master running for a while. +// It deletes all QoSes and Address Sets from OVN that belong to deleted EgressQoSes. 
+func (oc *Controller) repairEgressQoSes() error { + startTime := time.Now() + klog.V(4).Infof("Starting repairing loop for egressqos") + defer func() { + klog.V(4).Infof("Finished repairing loop for egressqos: %v", time.Since(startTime)) + }() + + existing, err := oc.egressQoSLister.List(labels.Everything()) + if err != nil { + return err + } + + nsWithQoS := map[string]bool{} + for _, q := range existing { + nsWithQoS[q.Namespace] = true + } + + p := func(q *nbdb.QoS) bool { + ns, ok := q.ExternalIDs["EgressQoS"] + if !ok { + return false + } + + return !nsWithQoS[ns] + } + existingQoSes, err := libovsdbops.FindQoSesWithPredicate(oc.nbClient, p) + if err != nil { + return err + } + + if len(existingQoSes) > 0 { + allOps := []ovsdb.Operation{} + + ops, err := libovsdbops.DeleteQoSesOps(oc.nbClient, nil, existingQoSes...) + if err != nil { + return err + } + allOps = append(allOps, ops...) + + logicalSwitches, err := oc.egressQoSSwitches() + if err != nil { + return err + } + + for _, sw := range logicalSwitches { + ops, err := libovsdbops.RemoveQoSesFromLogicalSwitchOps(oc.nbClient, nil, sw, existingQoSes...) + if err != nil { + return err + } + allOps = append(allOps, ops...) 
+ } + + if _, err := libovsdbops.TransactAndCheck(oc.nbClient, allOps); err != nil { + return fmt.Errorf("unable to remove stale qoses, err: %v", err) + } + } + + asPredicate := func(as *nbdb.AddressSet) bool { + if !strings.HasPrefix(as.ExternalIDs["name"], types.EgressQoSRulePrefix) { + return false + } + + // we extract the namespace from the id by removing the prefix and the priority suffix + // egress-qos-pods-my-namespace-123 -> my-namespace + ns := strings.TrimPrefix(as.ExternalIDs["name"], types.EgressQoSRulePrefix) + ns = ns[:strings.LastIndex(ns, rulePriorityDelimeter)] + return !nsWithQoS[ns] + } + if err := libovsdbops.DeleteAddressSetsWithPredicate(oc.nbClient, asPredicate); err != nil { + return fmt.Errorf("failed to remove stale egress qos address sets, err: %v", err) + } + + return nil +} + +func (oc *Controller) syncEgressQoS(key string) error { + startTime := time.Now() + namespace, name, err := cache.SplitMetaNamespaceKey(key) + if err != nil { + return err + } + klog.Infof("Processing sync for EgressQoS %s/%s", namespace, name) + + defer func() { + klog.V(4).Infof("Finished syncing EgressQoS %s on namespace %s : %v", name, namespace, time.Since(startTime)) + }() + + eq, err := oc.egressQoSLister.EgressQoSes(namespace).Get(name) + if err != nil && !apierrors.IsNotFound(err) { + return err + } + + if name != defaultEgressQoSName { + klog.Errorf("EgressQoS name %s is invalid, must be %s", name, defaultEgressQoSName) + return nil // Return nil to avoid requeues + } + + // TODO: we should reconcile better by cleaning and creating in one transaction. + // that should minimize the window of lost DSCP markings on packets. 
+ err = oc.cleanEgressQoSNS(namespace) + if err != nil { + return fmt.Errorf("unable to delete EgressQoS %s/%s, err: %v", namespace, name, err) + } + + if eq == nil { // it was deleted no need to process further + return nil + } + + klog.V(5).Infof("EgressQoS %s retrieved from lister: %v", eq.Name, eq) + + return oc.addEgressQoS(eq) +} + +func (oc *Controller) cleanEgressQoSNS(namespace string) error { + obj, loaded := oc.egressQoSCache.Load(namespace) + if !loaded { + // the namespace is clean + klog.V(4).Infof("EgressQoS for namespace %s not found in cache", namespace) + return nil + } + + eq := obj.(*egressQoS) + + eq.Lock() + defer eq.Unlock() + + p := func(q *nbdb.QoS) bool { + eqNs, ok := q.ExternalIDs["EgressQoS"] + if !ok { // the QoS is not managed by an EgressQoS + return false + } + return eqNs == eq.namespace + } + existingQoSes, err := libovsdbops.FindQoSesWithPredicate(oc.nbClient, p) + if err != nil { + return err + } + + if len(existingQoSes) > 0 { + allOps := []ovsdb.Operation{} + + ops, err := libovsdbops.DeleteQoSesOps(oc.nbClient, nil, existingQoSes...) + if err != nil { + return err + } + allOps = append(allOps, ops...) + + logicalSwitches, err := oc.egressQoSSwitches() + if err != nil { + return err + } + + for _, sw := range logicalSwitches { + ops, err := libovsdbops.RemoveQoSesFromLogicalSwitchOps(oc.nbClient, nil, sw, existingQoSes...) + if err != nil { + return err + } + allOps = append(allOps, ops...) + } + + if _, err := libovsdbops.TransactAndCheck(oc.nbClient, allOps); err != nil { + return fmt.Errorf("failed to delete qos, err: %s", err) + } + } + + asPredicate := func(as *nbdb.AddressSet) bool { + return strings.HasPrefix(as.ExternalIDs["name"], types.EgressQoSRulePrefix+eq.namespace) + } + if err := libovsdbops.DeleteAddressSetsWithPredicate(oc.nbClient, asPredicate); err != nil { + return fmt.Errorf("failed to remove egress qos address sets, err: %v", err) + } + + // we can delete the object from the cache now. 
+ // we also mark it as stale to prevent pod processing if RLock + // acquired after removal from cache. + oc.egressQoSCache.Delete(namespace) + eq.stale = true + + return nil +} + +func (oc *Controller) addEgressQoS(eqObj *egressqosapi.EgressQoS) error { + eq, err := oc.cloneEgressQoS(eqObj) + if err != nil { + return err + } + + eq.Lock() + defer eq.Unlock() + eq.stale = true // until we finish processing successfully + + // there should not be an item in the cache for the given namespace + // as we first attempt to delete before create. + if _, loaded := oc.egressQoSCache.LoadOrStore(eq.namespace, eq); loaded { + return fmt.Errorf("error attempting to add egressQoS %s to namespace %s when it already has an EgressQoS", + eq.name, eq.namespace) + } + + for _, rule := range eq.rules { + rule.addrSet, rule.pods, err = oc.createASForEgressQoSRule(rule.podSelector, eq.namespace, rule.priority) + if err != nil { + return err + } + } + + logicalSwitches, err := oc.egressQoSSwitches() + if err != nil { + return err + } + + allOps := []ovsdb.Operation{} + qoses := []*nbdb.QoS{} + for _, r := range eq.rules { + hashedIPv4, hashedIPv6 := r.addrSet.GetASHashNames() + match := generateEgressQoSMatch(r, hashedIPv4, hashedIPv6) + qos := &nbdb.QoS{ + Direction: nbdb.QoSDirectionToLport, + Match: match, + Priority: r.priority, + Action: map[string]int{nbdb.QoSActionDSCP: r.dscp}, + ExternalIDs: map[string]string{"EgressQoS": eq.namespace}, + } + qoses = append(qoses, qos) + } + + ops, err := libovsdbops.CreateOrUpdateQoSesOps(oc.nbClient, nil, qoses...) + if err != nil { + return err + } + allOps = append(allOps, ops...) + + for _, sw := range logicalSwitches { + ops, err := libovsdbops.AddQoSesToLogicalSwitchOps(oc.nbClient, nil, sw, qoses...) + if err != nil { + return err + } + allOps = append(allOps, ops...) 
+ } + + if _, err := libovsdbops.TransactAndCheck(oc.nbClient, allOps); err != nil { + return fmt.Errorf("failed to create qos, err: %s", err) + } + + eq.stale = false // we can mark it as "ready" now + return nil +} + +func generateEgressQoSMatch(eq *egressQoSRule, hashedAddressSetNameIPv4, hashedAddressSetNameIPv6 string) string { + var src string + var dst string + + switch { + case config.IPv4Mode && config.IPv6Mode: + src = fmt.Sprintf("(ip4.src == $%s || ip6.src == $%s)", hashedAddressSetNameIPv4, hashedAddressSetNameIPv6) + case config.IPv4Mode: + src = fmt.Sprintf("ip4.src == $%s", hashedAddressSetNameIPv4) + case config.IPv6Mode: + src = fmt.Sprintf("ip6.src == $%s", hashedAddressSetNameIPv6) + } + + dst = "ip4.dst == 0.0.0.0/0 || ip6.dst == ::/0" // if the dstCIDR field was not set we treat it as "any" destination + if eq.destination != "" { + dst = fmt.Sprintf("ip4.dst == %s", eq.destination) + if utilnet.IsIPv6CIDRString(eq.destination) { + dst = fmt.Sprintf("ip6.dst == %s", eq.destination) + } + } + + return fmt.Sprintf("(%s) && %s", dst, src) +} + +func (oc *Controller) egressQoSSwitches() ([]string, error) { + logicalSwitches := []string{} + + // Find all node switches + p := func(item *nbdb.LogicalSwitch) bool { + // Ignore external and Join switches(both legacy and current) + return !(strings.HasPrefix(item.Name, types.JoinSwitchPrefix) || item.Name == "join" || strings.HasPrefix(item.Name, types.ExternalSwitchPrefix)) + } + + nodeLocalSwitches, err := libovsdbops.FindLogicalSwitchesWithPredicate(oc.nbClient, p) + if err != nil { + return nil, fmt.Errorf("unable to fetch local switches for EgressQoS, err: %v", err) + } + + for _, nodeLocalSwitch := range nodeLocalSwitches { + logicalSwitches = append(logicalSwitches, nodeLocalSwitch.Name) + } + + return logicalSwitches, nil +} + +type mapOp int + +const ( + mapInsert mapOp = iota + mapDelete +) + +type mapAndOp struct { + m *sync.Map + op mapOp +} + +func (oc *Controller) syncEgressQoSPod(key 
string) error { + startTime := time.Now() + namespace, name, err := cache.SplitMetaNamespaceKey(key) + if err != nil { + return err + } + + klog.Infof("Processing sync for EgressQoS pod %s/%s", namespace, name) + + defer func() { + klog.V(4).Infof("Finished syncing EgressQoS pod %s on namespace %s : %v", name, namespace, time.Since(startTime)) + }() + + obj, loaded := oc.egressQoSCache.Load(namespace) + if !loaded { // no EgressQoS in the namespace + return nil + } + + eq := obj.(*egressQoS) + eq.RLock() // allow multiple pods to sync + defer eq.RUnlock() + if eq.stale { // was deleted or not created properly + return nil + } + + pod, err := oc.egressQoSPodLister.Pods(namespace).Get(name) + if err != nil && !apierrors.IsNotFound(err) { + return err + } + + allOps := []ovsdb.Operation{} + + // on delete/complete we remove the pod from the relevant address sets + if pod == nil || util.PodCompleted(pod) { + podsCaches := []*sync.Map{} + for _, rule := range eq.rules { + obj, loaded := rule.pods.Load(name) + if !loaded { + continue + } + ips := obj.([]net.IP) + ops, err := rule.addrSet.DeleteIPsReturnOps(ips) + if err != nil { + return err + } + podsCaches = append(podsCaches, rule.pods) + allOps = append(allOps, ops...) 
+ } + _, err = libovsdbops.TransactAndCheck(oc.nbClient, allOps) + if err != nil { + return err + } + + for _, pc := range podsCaches { + pc.Delete(name) + } + + return nil + } + + klog.V(5).Infof("Pod %s retrieved from lister: %v", pod.Name, pod) + + if !util.PodWantsNetwork(pod) { // we don't handle HostNetworked pods + return nil + } + + podIPs, err := util.GetAllPodIPs(pod) + if errors.Is(err, util.ErrNoPodIPFound) { + return nil // reprocess it when it is updated with an IP + } + if err != nil { + return err + } + + podLabels := labels.Set(pod.Labels) + podMapOps := []mapAndOp{} + for _, r := range eq.rules { + selector, _ := metav1.LabelSelectorAsSelector(&r.podSelector) + if selector.Empty() { // rule applies to all pods in the namespace, no need to modify address set + continue + } + + _, loaded := r.pods.Load(pod.Name) + if selector.Matches(podLabels) && !loaded { + ops, err := r.addrSet.AddIPsReturnOps(podIPs) + if err != nil { + return err + } + allOps = append(allOps, ops...) + podMapOps = append(podMapOps, mapAndOp{r.pods, mapInsert}) + } else if !selector.Matches(podLabels) && loaded { + ops, err := r.addrSet.DeleteIPsReturnOps(podIPs) + if err != nil { + return err + } + allOps = append(allOps, ops...) + podMapOps = append(podMapOps, mapAndOp{r.pods, mapDelete}) + } + } + + _, err = libovsdbops.TransactAndCheck(oc.nbClient, allOps) + if err != nil { + return err + } + + for _, mapOp := range podMapOps { + switch mapOp.op { + case mapInsert: + mapOp.m.Store(pod.Name, podIPs) + case mapDelete: + mapOp.m.Delete(pod.Name) + } + } + + return nil +} + +// onEgressQoSPodAdd queues the pod for processing. 
+func (oc *Controller) onEgressQoSPodAdd(obj interface{}) { + key, err := cache.MetaNamespaceKeyFunc(obj) + if err != nil { + utilruntime.HandleError(fmt.Errorf("couldn't get key for object %+v: %v", obj, err)) + return + } + klog.V(4).Infof("Adding EgressQoS pod %s", key) + oc.egressQoSPodQueue.Add(key) +} + +// onEgressQoSPodUpdate queues the pod for processing. +func (oc *Controller) onEgressQoSPodUpdate(oldObj, newObj interface{}) { + oldPod := oldObj.(*kapi.Pod) + newPod := newObj.(*kapi.Pod) + + if oldPod.ResourceVersion == newPod.ResourceVersion || + !newPod.GetDeletionTimestamp().IsZero() { + return + } + + oldPodLabels := labels.Set(oldPod.Labels) + newPodLabels := labels.Set(newPod.Labels) + oldPodIPs, _ := util.GetAllPodIPs(oldPod) + newPodIPs, _ := util.GetAllPodIPs(newPod) + if labels.Equals(oldPodLabels, newPodLabels) && + len(oldPodIPs) == len(newPodIPs) { + return + } + + key, err := cache.MetaNamespaceKeyFunc(newObj) + if err != nil { + utilruntime.HandleError(fmt.Errorf("couldn't get key for object %+v: %v", newObj, err)) + return + } + + oc.egressQoSPodQueue.Add(key) +} + +func (oc *Controller) onEgressQoSPodDelete(obj interface{}) { + key, err := cache.MetaNamespaceKeyFunc(obj) + if err != nil { + utilruntime.HandleError(fmt.Errorf("couldn't get key for object %+v: %v", obj, err)) + return + } + klog.V(4).Infof("Deleting EgressQoS Pod %s", key) + oc.egressQoSPodQueue.Add(key) +} + +func (oc *Controller) runEgressQoSPodWorker(wg *sync.WaitGroup) { + for oc.processNextEgressQoSPodWorkItem(wg) { + } +} + +func (oc *Controller) processNextEgressQoSPodWorkItem(wg *sync.WaitGroup) bool { + wg.Add(1) + defer wg.Done() + key, quit := oc.egressQoSPodQueue.Get() + if quit { + return false + } + defer oc.egressQoSPodQueue.Done(key) + + err := oc.syncEgressQoSPod(key.(string)) + if err == nil { + oc.egressQoSPodQueue.Forget(key) + return true + } + + utilruntime.HandleError(fmt.Errorf("%v failed with : %v", key, err)) + + if 
oc.egressQoSPodQueue.NumRequeues(key) < maxEgressQoSRetries {
+		oc.egressQoSPodQueue.AddRateLimited(key)
+		return true
+	}
+
+	oc.egressQoSPodQueue.Forget(key)
+	return true
+}
+
+// onEgressQoSNodeAdd queues the node for processing.
+func (oc *Controller) onEgressQoSNodeAdd(obj interface{}) {
+	key, err := cache.MetaNamespaceKeyFunc(obj)
+	if err != nil {
+		utilruntime.HandleError(fmt.Errorf("couldn't get key for object %+v: %v", obj, err))
+		return
+	}
+	klog.V(4).Infof("Adding EgressQoS node %s", key)
+	oc.egressQoSNodeQueue.Add(key)
+}
+
+func (oc *Controller) runEgressQoSNodeWorker(wg *sync.WaitGroup) {
+	for oc.processNextEgressQoSNodeWorkItem(wg) {
+	}
+}
+
+func (oc *Controller) processNextEgressQoSNodeWorkItem(wg *sync.WaitGroup) bool {
+	wg.Add(1)
+	defer wg.Done()
+	key, quit := oc.egressQoSNodeQueue.Get()
+	if quit {
+		return false
+	}
+	defer oc.egressQoSNodeQueue.Done(key)
+
+	err := oc.syncEgressQoSNode(key.(string))
+	if err == nil {
+		oc.egressQoSNodeQueue.Forget(key)
+		return true
+	}
+
+	utilruntime.HandleError(fmt.Errorf("%v failed with : %v", key, err))
+
+	if oc.egressQoSNodeQueue.NumRequeues(key) < maxEgressQoSRetries {
+		oc.egressQoSNodeQueue.AddRateLimited(key)
+		return true
+	}
+
+	oc.egressQoSNodeQueue.Forget(key)
+	return true
+}
+
+func (oc *Controller) syncEgressQoSNode(key string) error {
+	startTime := time.Now()
+	_, name, err := cache.SplitMetaNamespaceKey(key)
+	if err != nil {
+		return err
+	}
+	klog.Infof("Processing sync for EgressQoS node %s", name)
+
+	defer func() {
+		klog.V(4).Infof("Finished syncing EgressQoS node %s : %v", name, time.Since(startTime))
+	}()
+
+	n, err := oc.egressQoSNodeLister.Get(name)
+	if err != nil && !apierrors.IsNotFound(err) {
+		return err
+	}
+
+	if n == nil { // we don't process node deletions, its logical switch will be deleted.
+ return nil + } + + klog.V(5).Infof("EgressQoS %s node retrieved from lister: %v", n.Name, n) + + nodeSw := &nbdb.LogicalSwitch{ + Name: n.Name, + } + nodeSw, err = libovsdbops.GetLogicalSwitch(oc.nbClient, nodeSw) + if err != nil { + return err + } + + p := func(q *nbdb.QoS) bool { + _, ok := q.ExternalIDs["EgressQoS"] + return ok + } + existingQoSes, err := libovsdbops.FindQoSesWithPredicate(oc.nbClient, p) + if err != nil { + return err + } + + if len(existingQoSes) == 0 { + return nil + } + + ops, err := libovsdbops.AddQoSesToLogicalSwitchOps(oc.nbClient, nil, nodeSw.Name, existingQoSes...) + if err != nil { + return err + } + + if _, err := libovsdbops.TransactAndCheck(oc.nbClient, ops); err != nil { + return fmt.Errorf("unable to add existing qoses to new node, err: %v", err) + } + + return nil +} diff --git a/go-controller/pkg/ovn/egressqos_test.go b/go-controller/pkg/ovn/egressqos_test.go new file mode 100644 index 0000000000..d220e8979e --- /dev/null +++ b/go-controller/pkg/ovn/egressqos_test.go @@ -0,0 +1,575 @@ +package ovn + +import ( + "context" + "fmt" + "net" + + "github.com/onsi/ginkgo" + ginkgotable "github.com/onsi/ginkgo/extensions/table" + "github.com/onsi/gomega" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" + egressqosapi "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdbops" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" + libovsdbtest "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/testing/libovsdb" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" + + "github.com/urfave/cli/v2" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/klog/v2" + "k8s.io/utils/pointer" +) + +func newEgressQoSObject(name, namespace string, egressRules []egressqosapi.EgressQoSRule) *egressqosapi.EgressQoS { + return &egressqosapi.EgressQoS{ + 
ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + Spec: egressqosapi.EgressQoSSpec{ + Egress: egressRules, + }, + } +} + +var _ = ginkgo.Describe("OVN EgressQoS Operations", func() { + var ( + app *cli.App + fakeOVN *FakeOVN + ) + const ( + node1Name string = "node1" + node2Name string = "node2" + ) + + ginkgo.BeforeEach(func() { + // Restore global default values before each testcase + config.PrepareTestConfig() + config.OVNKubernetesFeature.EnableEgressQoS = true + + app = cli.NewApp() + app.Name = "test" + app.Flags = config.Flags + + fakeOVN = NewFakeOVN() + }) + + ginkgo.AfterEach(func() { + fakeOVN.shutdown() + }) + + ginkgotable.DescribeTable("reconciles existing and non-existing egressqoses without PodSelectors", + func(ipv4Mode, ipv6Mode bool, dst1, dst2, match1, match2 string) { + app.Action = func(ctx *cli.Context) error { + config.IPv4Mode = ipv4Mode + config.IPv6Mode = ipv6Mode + namespaceT := *newNamespace("namespace1") + + staleQoS := &nbdb.QoS{ + Direction: nbdb.QoSDirectionToLport, + Match: "some-match", + Priority: EgressQoSFlowStartPriority, + Action: map[string]int{nbdb.QoSActionDSCP: 50}, + ExternalIDs: map[string]string{"EgressQoS": "staleNS"}, + UUID: "staleQoS-UUID", + } + + staleAddrSet := &nbdb.AddressSet{ + Name: "egress-qos-pods-staleNS", + ExternalIDs: map[string]string{"name": "egress-qos-pods-staleNS-1000"}, + UUID: "staleAS-UUID", + Addresses: []string{"1.2.3.4"}, + } + + node1Switch := &nbdb.LogicalSwitch{ + UUID: "node1-UUID", + Name: node1Name, + QOSRules: []string{staleQoS.UUID}, + } + + node2Switch := &nbdb.LogicalSwitch{ + UUID: "node2-UUID", + Name: node2Name, + } + + joinSwitch := &nbdb.LogicalSwitch{ + UUID: "join-UUID", + Name: types.OVNJoinSwitch, + QOSRules: []string{staleQoS.UUID}, + } + + dbSetup := libovsdbtest.TestSetup{ + NBData: []libovsdbtest.TestData{ + staleQoS, + staleAddrSet, + node1Switch, + node2Switch, + joinSwitch, + }, + } + + fakeOVN.startWithDBSetup(dbSetup, + &v1.NamespaceList{ 
+ Items: []v1.Namespace{ + namespaceT, + }, + }, + ) + + // Create one EgressQoS + eq := newEgressQoSObject("default", namespaceT.Name, []egressqosapi.EgressQoSRule{ + { + DstCIDR: &dst1, + DSCP: 50, + }, + { + DstCIDR: &dst2, + DSCP: 60, + }, + }) + eq.ResourceVersion = "1" + _, err := fakeOVN.fakeClient.EgressQoSClient.K8sV1().EgressQoSes(namespaceT.Name).Create(context.TODO(), eq, metav1.CreateOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + fakeOVN.InitAndRunEgressQoSController() + + qos1 := &nbdb.QoS{ + Direction: nbdb.QoSDirectionToLport, + Match: match1, + Priority: EgressQoSFlowStartPriority, + Action: map[string]int{nbdb.QoSActionDSCP: 50}, + ExternalIDs: map[string]string{"EgressQoS": namespaceT.Name}, + UUID: "qos1-UUID", + } + qos2 := &nbdb.QoS{ + Direction: nbdb.QoSDirectionToLport, + Match: match2, + Priority: EgressQoSFlowStartPriority - 1, + Action: map[string]int{nbdb.QoSActionDSCP: 60}, + ExternalIDs: map[string]string{"EgressQoS": namespaceT.Name}, + UUID: "qos2-UUID", + } + node1Switch.QOSRules = []string{qos1.UUID, qos2.UUID} + node2Switch.QOSRules = []string{qos1.UUID, qos2.UUID} + expectedDatabaseState := []libovsdbtest.TestData{ + qos1, + qos2, + node1Switch, + node2Switch, + joinSwitch, + } + + gomega.Eventually(fakeOVN.nbClient).Should(libovsdbtest.HaveDataIgnoringUUIDs(expectedDatabaseState)) + + // Update the EgressQoS + eq.Spec.Egress = []egressqosapi.EgressQoSRule{ + { + DstCIDR: &dst1, + DSCP: 40, + }, + } + eq.ResourceVersion = "2" + _, err = fakeOVN.fakeClient.EgressQoSClient.K8sV1().EgressQoSes(namespaceT.Name).Update(context.TODO(), eq, metav1.UpdateOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + qos3 := &nbdb.QoS{ + Direction: nbdb.QoSDirectionToLport, + Match: match1, + Priority: EgressQoSFlowStartPriority, + Action: map[string]int{nbdb.QoSActionDSCP: 40}, + ExternalIDs: map[string]string{"EgressQoS": namespaceT.Name}, + UUID: "qos3-UUID", + } + node1Switch.QOSRules = []string{qos3.UUID} + 
node2Switch.QOSRules = []string{qos3.UUID} + expectedDatabaseState = []libovsdbtest.TestData{ + qos3, + node1Switch, + node2Switch, + joinSwitch, + } + + gomega.Eventually(fakeOVN.nbClient).Should(libovsdbtest.HaveDataIgnoringUUIDs(expectedDatabaseState)) + + // Delete the EgressQoS + err = fakeOVN.fakeClient.EgressQoSClient.K8sV1().EgressQoSes(namespaceT.Name).Delete(context.TODO(), eq.Name, metav1.DeleteOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + node1Switch.QOSRules = []string{} + node2Switch.QOSRules = []string{} + expectedDatabaseState = []libovsdbtest.TestData{ + node1Switch, + node2Switch, + joinSwitch, + } + + gomega.Eventually(fakeOVN.nbClient).Should(libovsdbtest.HaveDataIgnoringUUIDs(expectedDatabaseState)) + + return nil + } + + err := app.Run([]string{app.Name}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }, + ginkgotable.Entry("ipv4", true, false, "1.2.3.4/32", "5.6.7.8/32", + "(ip4.dst == 1.2.3.4/32) && ip4.src == $a10481622940199974102", + "(ip4.dst == 5.6.7.8/32) && ip4.src == $a10481622940199974102"), + ginkgotable.Entry("ipv6", false, true, "2001:0db8:85a3:0000:0000:8a2e:0370:7334/128", "2001:0db8:85a3:0000:0000:8a2e:0370:7335/128", + "(ip6.dst == 2001:0db8:85a3:0000:0000:8a2e:0370:7334/128) && ip6.src == $a10481620741176717680", + "(ip6.dst == 2001:0db8:85a3:0000:0000:8a2e:0370:7335/128) && ip6.src == $a10481620741176717680"), + ginkgotable.Entry("dual", true, true, "1.2.3.4/32", "2001:0db8:85a3:0000:0000:8a2e:0370:7335/128", + "(ip4.dst == 1.2.3.4/32) && (ip4.src == $a10481622940199974102 || ip6.src == $a10481620741176717680)", + "(ip6.dst == 2001:0db8:85a3:0000:0000:8a2e:0370:7335/128) && (ip4.src == $a10481622940199974102 || ip6.src == $a10481620741176717680)"), + ) + + ginkgotable.DescribeTable("reconciles existing and non-existing egressqoses with PodSelectors", + func(ipv4Mode, ipv6Mode bool, dst1, dst2, match1, match2 string) { + app.Action = func(ctx *cli.Context) error { + config.IPv4Mode = ipv4Mode + 
config.IPv6Mode = ipv6Mode + namespaceT := *newNamespace("namespace1") + + staleQoS := &nbdb.QoS{ + Direction: nbdb.QoSDirectionToLport, + Match: "some-match", + Priority: EgressQoSFlowStartPriority, + Action: map[string]int{nbdb.QoSActionDSCP: 50}, + ExternalIDs: map[string]string{"EgressQoS": "staleNS"}, + UUID: "staleQoS-UUID", + } + + staleAddrSet := &nbdb.AddressSet{ + Name: "egress-qos-pods-staleNS", + ExternalIDs: map[string]string{"name": "egress-qos-pods-staleNS-1000"}, + UUID: "staleAS-UUID", + Addresses: []string{"1.2.3.4"}, + } + + podT := newPodWithLabels( + namespaceT.Name, + "myPod", + node1Name, + "10.128.1.3", + map[string]string{"app": "nice"}, + ) + + node1Switch := &nbdb.LogicalSwitch{ + UUID: node1Name, + Name: node1Name, + QOSRules: []string{staleQoS.UUID}, + } + + node2Switch := &nbdb.LogicalSwitch{ + UUID: node2Name, + Name: node2Name, + QOSRules: []string{}, + } + + joinSwitch := &nbdb.LogicalSwitch{ + UUID: "join-UUID", + Name: types.OVNJoinSwitch, + QOSRules: []string{staleQoS.UUID}, + } + + dbSetup := libovsdbtest.TestSetup{ + NBData: []libovsdbtest.TestData{ + staleQoS, + staleAddrSet, + node1Switch, + node2Switch, + joinSwitch, + }, + } + + fakeOVN.startWithDBSetup(dbSetup, + &v1.NamespaceList{ + Items: []v1.Namespace{ + namespaceT, + }, + }, + &v1.PodList{ + Items: []v1.Pod{ + *podT, + }, + }, + ) + + i, n, _ := net.ParseCIDR("10.128.1.3" + "/23") + n.IP = i + fakeOVN.controller.logicalPortCache.add("", util.GetLogicalPortName(podT.Namespace, podT.Name), "", nil, []*net.IPNet{n}) + + // Create one EgressQoS + eq := newEgressQoSObject("default", namespaceT.Name, []egressqosapi.EgressQoSRule{ + { + DstCIDR: &dst1, + DSCP: 50, + PodSelector: metav1.LabelSelector{ + MatchLabels: map[string]string{ + "app": "nice", + }, + }, + }, + { + DstCIDR: &dst2, + DSCP: 60, + }, + }) + eq.ResourceVersion = "1" + _, err := fakeOVN.fakeClient.EgressQoSClient.K8sV1().EgressQoSes(namespaceT.Name).Create(context.TODO(), eq, metav1.CreateOptions{}) + 
gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + fakeOVN.InitAndRunEgressQoSController() + + qos1 := &nbdb.QoS{ + Direction: nbdb.QoSDirectionToLport, + Match: match1, + Priority: EgressQoSFlowStartPriority, + Action: map[string]int{nbdb.QoSActionDSCP: 50}, + ExternalIDs: map[string]string{"EgressQoS": namespaceT.Name}, + UUID: "qos1-UUID", + } + qos2 := &nbdb.QoS{ + Direction: nbdb.QoSDirectionToLport, + Match: match2, + Priority: EgressQoSFlowStartPriority - 1, + Action: map[string]int{nbdb.QoSActionDSCP: 60}, + ExternalIDs: map[string]string{"EgressQoS": namespaceT.Name}, + UUID: "qos2-UUID", + } + node1Switch.QOSRules = []string{qos1.UUID, qos2.UUID} + node2Switch.QOSRules = []string{qos1.UUID, qos2.UUID} + expectedDatabaseState := []libovsdbtest.TestData{ + qos1, + qos2, + node1Switch, + node2Switch, + joinSwitch, + } + + gomega.Eventually(fakeOVN.nbClient).Should(libovsdbtest.HaveDataIgnoringUUIDs(expectedDatabaseState)) + + // Update the EgressQoS + eq.Spec.Egress = []egressqosapi.EgressQoSRule{ + { + DstCIDR: &dst1, + DSCP: 40, + PodSelector: metav1.LabelSelector{ + MatchLabels: map[string]string{ + "app": "nice", + }, + }, + }, + } + eq.ResourceVersion = "2" + _, err = fakeOVN.fakeClient.EgressQoSClient.K8sV1().EgressQoSes(namespaceT.Name).Update(context.TODO(), eq, metav1.UpdateOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + qos3 := &nbdb.QoS{ + Direction: nbdb.QoSDirectionToLport, + Match: match1, + Priority: EgressQoSFlowStartPriority, + Action: map[string]int{nbdb.QoSActionDSCP: 40}, + ExternalIDs: map[string]string{"EgressQoS": namespaceT.Name}, + UUID: "qos3-UUID", + } + node1Switch.QOSRules = []string{qos3.UUID} + node2Switch.QOSRules = []string{qos3.UUID} + expectedDatabaseState = []libovsdbtest.TestData{ + qos3, + node1Switch, + node2Switch, + joinSwitch, + } + + gomega.Eventually(fakeOVN.nbClient).Should(libovsdbtest.HaveDataIgnoringUUIDs(expectedDatabaseState)) + + // Delete the EgressQoS + err = 
fakeOVN.fakeClient.EgressQoSClient.K8sV1().EgressQoSes(namespaceT.Name).Delete(context.TODO(), eq.Name, metav1.DeleteOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + node1Switch.QOSRules = []string{} + node2Switch.QOSRules = []string{} + expectedDatabaseState = []libovsdbtest.TestData{ + node1Switch, + node2Switch, + joinSwitch, + } + + gomega.Eventually(fakeOVN.nbClient).Should(libovsdbtest.HaveDataIgnoringUUIDs(expectedDatabaseState)) + + return nil + } + + err := app.Run([]string{app.Name}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }, + ginkgotable.Entry("ipv4", true, false, "1.2.3.4/32", "5.6.7.8/32", + "(ip4.dst == 1.2.3.4/32) && ip4.src == $a8797969223947225899", + "(ip4.dst == 5.6.7.8/32) && ip4.src == $a10481622940199974102"), + ginkgotable.Entry("ipv6", false, true, "2001:0db8:85a3:0000:0000:8a2e:0370:7334/128", "2001:0db8:85a3:0000:0000:8a2e:0370:7335/128", + "(ip6.dst == 2001:0db8:85a3:0000:0000:8a2e:0370:7334/128) && ip6.src == $a8797971422970482321", + "(ip6.dst == 2001:0db8:85a3:0000:0000:8a2e:0370:7335/128) && ip6.src == $a10481620741176717680"), + ginkgotable.Entry("dual", true, true, "1.2.3.4/32", "2001:0db8:85a3:0000:0000:8a2e:0370:7335/128", + "(ip4.dst == 1.2.3.4/32) && (ip4.src == $a8797969223947225899 || ip6.src == $a8797971422970482321)", + "(ip6.dst == 2001:0db8:85a3:0000:0000:8a2e:0370:7335/128) && (ip4.src == $a10481622940199974102 || ip6.src == $a10481620741176717680)"), + ) + + ginkgo.It("should respond to node events correctly", func() { + app.Action = func(ctx *cli.Context) error { + namespaceT := *newNamespace("namespace1") + + node1Switch := &nbdb.LogicalSwitch{ + UUID: "node1-UUID", + Name: node1Name, + } + + node2Switch := &nbdb.LogicalSwitch{ + UUID: "node2-UUID", + Name: node2Name, + } + + joinSwitch := &nbdb.LogicalSwitch{ + UUID: "join-UUID", + Name: types.OVNJoinSwitch, + } + + dbSetup := libovsdbtest.TestSetup{ + NBData: []libovsdbtest.TestData{ + node1Switch, + node2Switch, + joinSwitch, + }, + } + + 
fakeOVN.startWithDBSetup(dbSetup, + &v1.NamespaceList{ + Items: []v1.Namespace{ + namespaceT, + }, + }, + ) + + // Create one EgressQoS + eq := newEgressQoSObject("default", namespaceT.Name, []egressqosapi.EgressQoSRule{ + { + DstCIDR: pointer.String("1.2.3.4/32"), + DSCP: 50, + }, + { + DstCIDR: pointer.String("5.6.7.8/32"), + DSCP: 60, + }, + }) + _, err := fakeOVN.fakeClient.EgressQoSClient.K8sV1().EgressQoSes(namespaceT.Name).Create(context.TODO(), eq, metav1.CreateOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + fakeOVN.InitAndRunEgressQoSController() + + qos1 := &nbdb.QoS{ + Direction: nbdb.QoSDirectionToLport, + Match: "(ip4.dst == 1.2.3.4/32) && ip4.src == $a10481622940199974102", + Priority: EgressQoSFlowStartPriority, + Action: map[string]int{nbdb.QoSActionDSCP: 50}, + ExternalIDs: map[string]string{"EgressQoS": namespaceT.Name}, + UUID: "qos1-UUID", + } + qos2 := &nbdb.QoS{ + Direction: nbdb.QoSDirectionToLport, + Match: "(ip4.dst == 5.6.7.8/32) && ip4.src == $a10481622940199974102", + Priority: EgressQoSFlowStartPriority - 1, + Action: map[string]int{nbdb.QoSActionDSCP: 60}, + ExternalIDs: map[string]string{"EgressQoS": namespaceT.Name}, + UUID: "qos2-UUID", + } + node1Switch.QOSRules = append(node1Switch.QOSRules, qos1.UUID, qos2.UUID) + node2Switch.QOSRules = append(node2Switch.QOSRules, qos1.UUID, qos2.UUID) + expectedDatabaseState := []libovsdbtest.TestData{ + qos1, + qos2, + node1Switch, + node2Switch, + joinSwitch, + } + + gomega.Eventually(fakeOVN.nbClient).Should(libovsdbtest.HaveDataIgnoringUUIDs(expectedDatabaseState)) + + node3Switch, err := createNodeAndLS(fakeOVN, "node3") + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + node3Switch.QOSRules = []string{qos1.UUID, qos2.UUID} + expectedDatabaseState = []libovsdbtest.TestData{ + qos1, + qos2, + node1Switch, + node2Switch, + node3Switch, + joinSwitch, + } + + gomega.Eventually(fakeOVN.nbClient, 3).Should(libovsdbtest.HaveDataIgnoringUUIDs(expectedDatabaseState)) + + // 
Delete the EgressQoS + err = fakeOVN.fakeClient.EgressQoSClient.K8sV1().EgressQoSes(namespaceT.Name).Delete(context.TODO(), eq.Name, metav1.DeleteOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + node1Switch.QOSRules = []string{} + node2Switch.QOSRules = []string{} + node3Switch.QOSRules = []string{} + expectedDatabaseState = []libovsdbtest.TestData{ + node1Switch, + node2Switch, + node3Switch, + joinSwitch, + } + + gomega.Eventually(fakeOVN.nbClient).Should(libovsdbtest.HaveDataIgnoringUUIDs(expectedDatabaseState)) + + return nil + } + + err := app.Run([]string{app.Name}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }) +}) + +func (o *FakeOVN) InitAndRunEgressQoSController() { + klog.Warningf("#### [%p] INIT EgressQoS", o) + o.controller.initEgressQoSController(o.watcher.EgressQoSInformer(), o.watcher.PodCoreInformer(), o.watcher.NodeCoreInformer()) + o.egressQoSWg.Add(1) + go func() { + defer o.egressQoSWg.Done() + o.controller.runEgressQoSController(1, o.stopChan) + }() +} + +func createNodeAndLS(fakeOVN *FakeOVN, name string) (*nbdb.LogicalSwitch, error) { + node := v1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + }, + Status: v1.NodeStatus{ + Conditions: []v1.NodeCondition{ + { + Type: v1.NodeReady, + Status: v1.ConditionTrue, + }, + }, + }, + } + _, err := fakeOVN.fakeClient.KubeClient.CoreV1().Nodes().Create(context.TODO(), &node, metav1.CreateOptions{}) + if err != nil { + return nil, err + } + + logicalSwitch := &nbdb.LogicalSwitch{ + UUID: name + "-UUID", + Name: name, + } + + if err := libovsdbops.CreateOrUpdateLogicalSwitch(fakeOVN.nbClient, logicalSwitch); err != nil { + return nil, fmt.Errorf("failed to create logical switch %s, error: %v", name, err) + + } + + return logicalSwitch, nil +} diff --git a/go-controller/pkg/ovn/master.go b/go-controller/pkg/ovn/master.go index ccaad81b5d..d50b31e135 100644 --- a/go-controller/pkg/ovn/master.go +++ b/go-controller/pkg/ovn/master.go @@ -1314,6 +1314,7 @@ func (oc *Controller) 
addUpdateNodeEvent(node *kapi.Node, nSyncs *nodeSyncs) err if err != nil { return fmt.Errorf("nodeAdd: error adding noHost subnet for node %s: %w", node.Name, err) } + oc.clearInitialNodeNetworkUnavailableCondition(node) return nil } diff --git a/go-controller/pkg/ovn/master_test.go b/go-controller/pkg/ovn/master_test.go index 55a8f61ede..4392039f22 100644 --- a/go-controller/pkg/ovn/master_test.go +++ b/go-controller/pkg/ovn/master_test.go @@ -15,6 +15,7 @@ import ( "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" egressfirewallfake "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/clientset/versioned/fake" egressipfake "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressip/v1/apis/clientset/versioned/fake" + egressqosfake "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1/apis/clientset/versioned/fake" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/factory" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/kube" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" @@ -884,10 +885,12 @@ var _ = ginkgo.Describe("Gateway Init Operations", func() { }) egressFirewallFakeClient := &egressfirewallfake.Clientset{} egressIPFakeClient := &egressipfake.Clientset{} + egressQoSFakeClient := &egressqosfake.Clientset{} fakeClient := &util.OVNClientset{ KubeClient: kubeFakeClient, EgressIPClient: egressIPFakeClient, EgressFirewallClient: egressFirewallFakeClient, + EgressQoSClient: egressQoSFakeClient, } _, err := config.InitConfig(ctx, nil, nil) @@ -1083,10 +1086,12 @@ var _ = ginkgo.Describe("Gateway Init Operations", func() { }) egressFirewallFakeClient := &egressfirewallfake.Clientset{} egressIPFakeClient := &egressipfake.Clientset{} + egressQoSFakeClient := &egressqosfake.Clientset{} fakeClient := &util.OVNClientset{ KubeClient: kubeFakeClient, EgressIPClient: egressIPFakeClient, EgressFirewallClient: egressFirewallFakeClient, + EgressQoSClient: egressQoSFakeClient, } _, err := 
config.InitConfig(ctx, nil, nil) @@ -1860,10 +1865,12 @@ func TestController_allocateNodeSubnets(t *testing.T) { kubeFakeClient := fake.NewSimpleClientset() egressFirewallFakeClient := &egressfirewallfake.Clientset{} egressIPFakeClient := &egressipfake.Clientset{} + egressQoSFakeClient := &egressqosfake.Clientset{} fakeClient := &util.OVNClientset{ KubeClient: kubeFakeClient, EgressIPClient: egressIPFakeClient, EgressFirewallClient: egressFirewallFakeClient, + EgressQoSClient: egressQoSFakeClient, } f, err := factory.NewMasterWatchFactory(fakeClient) if err != nil { diff --git a/go-controller/pkg/ovn/obj_retry.go b/go-controller/pkg/ovn/obj_retry.go index 51568ed5d7..2a2fec63a0 100644 --- a/go-controller/pkg/ovn/obj_retry.go +++ b/go-controller/pkg/ovn/obj_retry.go @@ -18,7 +18,9 @@ import ( "k8s.io/klog/v2" + egressfirewall "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1" factory "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/factory" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/metrics" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" ) @@ -242,22 +244,22 @@ func areResourcesEqual(objType reflect.Type, obj1, obj2 interface{}) (bool, erro case factory.PolicyType: np1, ok := obj1.(*knet.NetworkPolicy) if !ok { - return false, fmt.Errorf("could not cast obj1 of type interface{} to *knet.NetworkPolicy") + return false, fmt.Errorf("could not cast obj1 of type %T to *knet.NetworkPolicy", obj1) } np2, ok := obj2.(*knet.NetworkPolicy) if !ok { - return false, fmt.Errorf("could not cast obj2 of type interface{} to *knet.NetworkPolicy") + return false, fmt.Errorf("could not cast obj2 of type %T to *knet.NetworkPolicy", obj2) } return reflect.DeepEqual(np1, np2), nil case factory.NodeType: node1, ok := obj1.(*kapi.Node) if !ok { - return false, fmt.Errorf("could not cast obj1 of type interface{} to *kapi.Node") + return false, fmt.Errorf("could not cast obj1 of type %T to *kapi.Node", obj1) } node2, ok := 
obj2.(*kapi.Node) if !ok { - return false, fmt.Errorf("could not cast obj2 of type interface{} to *kapi.Node") + return false, fmt.Errorf("could not cast obj2 of type %T to *kapi.Node", obj2) } // when shouldUpdate is false, the hostsubnet is not assigned by ovn-kubernetes @@ -270,11 +272,11 @@ func areResourcesEqual(objType reflect.Type, obj1, obj2 interface{}) (bool, erro case factory.PeerServiceType: service1, ok := obj1.(*kapi.Service) if !ok { - return false, fmt.Errorf("could not cast obj1 of type interface{} to *kapi.Service") + return false, fmt.Errorf("could not cast obj1 of type %T to *kapi.Service", obj1) } service2, ok := obj2.(*kapi.Service) if !ok { - return false, fmt.Errorf("could not cast obj2 of type interface{} to *kapi.Service") + return false, fmt.Errorf("could not cast obj2 of type %T to *kapi.Service", obj2) } areEqual := reflect.DeepEqual(service1.Spec.ExternalIPs, service2.Spec.ExternalIPs) && reflect.DeepEqual(service1.Spec.ClusterIP, service2.Spec.ClusterIP) && @@ -296,6 +298,17 @@ func areResourcesEqual(objType reflect.Type, obj1, obj2 interface{}) (bool, erro // For these types there is no update code, so pretend old and new // objs are always equivalent and stop processing the update event. 
return true, nil + + case factory.EgressFirewallType: + newEgressFirewall, ok := obj1.(*egressfirewall.EgressFirewall) + if !ok { + return false, fmt.Errorf("could not cast obj1 of type %T to *egressfirewall.EgressFirewall", obj1) + } + oldEgressFirewall, ok := obj2.(*egressfirewall.EgressFirewall) + if !ok { + return false, fmt.Errorf("could not cast obj2 of type %T to *egressfirewall.EgressFirewall", obj2) + } + return reflect.DeepEqual(oldEgressFirewall.Spec, newEgressFirewall.Spec), nil } return false, fmt.Errorf("no object comparison for type %v", objType) @@ -309,21 +322,21 @@ func getResourceKey(objType reflect.Type, obj interface{}) (string, error) { case factory.PolicyType: np, ok := obj.(*knet.NetworkPolicy) if !ok { - return "", fmt.Errorf("could not cast interface{} object to *knet.NetworkPolicy") + return "", fmt.Errorf("could not cast %T object to *knet.NetworkPolicy", obj) } return getPolicyNamespacedName(np), nil case factory.NodeType: node, ok := obj.(*kapi.Node) if !ok { - return "", fmt.Errorf("could not cast interface{} object to *kapi.Node") + return "", fmt.Errorf("could not cast %T object to *kapi.Node", obj) } return node.Name, nil case factory.PeerServiceType: service, ok := obj.(*kapi.Service) if !ok { - return "", fmt.Errorf("could not cast interface{} object to *kapi.Service") + return "", fmt.Errorf("could not cast %T object to *kapi.Service", obj) } return getNamespacedName(service.Namespace, service.Name), nil @@ -333,7 +346,7 @@ func getResourceKey(objType reflect.Type, obj interface{}) (string, error) { factory.LocalPodSelectorType: pod, ok := obj.(*kapi.Pod) if !ok { - return "", fmt.Errorf("could not cast interface{} object to *kapi.Pod") + return "", fmt.Errorf("could not cast %T object to *kapi.Pod", obj) } return getNamespacedName(pod.Namespace, pod.Name), nil @@ -341,9 +354,16 @@ func getResourceKey(objType reflect.Type, obj interface{}) (string, error) { factory.PeerNamespaceSelectorType: namespace, ok := 
obj.(*kapi.Namespace) if !ok { - return "", fmt.Errorf("could not cast interface{} object to *kapi.Namespace") + return "", fmt.Errorf("could not cast %T object to *kapi.Namespace", obj) } return namespace.Name, nil + + case factory.EgressFirewallType: + egressFirewall, ok := obj.(*egressfirewall.EgressFirewall) + if !ok { + return "", fmt.Errorf("could not cast %T object to *egressfirewall.EgressFirewall", obj) + } + return getEgressFirewallNamespacedName(egressFirewall), nil } return "", fmt.Errorf("object type %v not supported", objType) @@ -409,6 +429,10 @@ func (oc *Controller) getResourceFromInformerCache(objType reflect.Type, key str factory.PeerNamespaceSelectorType: obj, err = oc.watchFactory.GetNamespace(key) + case factory.EgressFirewallType: + namespace, name := splitNamespacedName(key) + obj, err = oc.watchFactory.GetEgressFirewall(namespace, name) + default: err = fmt.Errorf("object type %v not supported, cannot retrieve it from informers cache", objType) @@ -436,7 +460,8 @@ func (oc *Controller) recordDeleteEvent(objType reflect.Type, obj interface{}) { } } -// Given an object and its type, recordErrorEvent records an error event on this object. Only used for pods now. +// Given an object and its type, recordErrorEvent records an error event on this object. +// Only used for pods now. 
func (oc *Controller) recordErrorEvent(objType reflect.Type, obj interface{}, err error) { switch objType { case factory.PodType: @@ -468,14 +493,14 @@ func (oc *Controller) addResource(objectsToRetry *retryObjs, obj interface{}, fr case factory.PodType: pod, ok := obj.(*kapi.Pod) if !ok { - return fmt.Errorf("could not cast interface{} object to *knet.Pod") + return fmt.Errorf("could not cast %T object to *knet.Pod", obj) } return oc.ensurePod(nil, pod, true) case factory.PolicyType: np, ok := obj.(*knet.NetworkPolicy) if !ok { - return fmt.Errorf("could not cast interface{} object to *knet.NetworkPolicy") + return fmt.Errorf("could not cast %T object to *knet.NetworkPolicy", obj) } if err = oc.addNetworkPolicy(np); err != nil { klog.Infof("Network Policy retry delete failed for %s/%s, will try again later: %v", @@ -486,7 +511,7 @@ func (oc *Controller) addResource(objectsToRetry *retryObjs, obj interface{}, fr case factory.NodeType: node, ok := obj.(*kapi.Node) if !ok { - return fmt.Errorf("could not cast interface{} object to *kapi.Node") + return fmt.Errorf("could not cast %T object to *kapi.Node", obj) } var nodeParams *nodeSyncs if fromRetryLoop { @@ -512,7 +537,7 @@ func (oc *Controller) addResource(objectsToRetry *retryObjs, obj interface{}, fr case factory.PeerServiceType: service, ok := obj.(*kapi.Service) if !ok { - return fmt.Errorf("could not cast peer service of type interface{} to *kapi.Service") + return fmt.Errorf("could not cast peer service of type %T to *kapi.Service", obj) } extraParameters := objectsToRetry.extraParameters.(*NetworkPolicyExtraParameters) return oc.handlePeerServiceAdd(extraParameters.gp, service) @@ -572,7 +597,21 @@ func (oc *Controller) addResource(objectsToRetry *retryObjs, obj interface{}, fr extraParameters.portGroupIngressDenyName, extraParameters.portGroupEgressDenyName, obj) - + case factory.EgressFirewallType: + var err error + egressFirewall := obj.(*egressfirewall.EgressFirewall).DeepCopy() + if err = 
oc.addEgressFirewall(egressFirewall); err != nil { + egressFirewall.Status.Status = egressFirewallAddError + err = fmt.Errorf("failed to create egress firewall %s, error: %v", getEgressFirewallNamespacedName(egressFirewall), err) + } else { + egressFirewall.Status.Status = egressFirewallAppliedCorrectly + metrics.UpdateEgressFirewallRuleCount(float64(len(egressFirewall.Spec.Egress))) + metrics.IncrementEgressFirewallCount() + } + if err := oc.updateEgressFirewallStatusWithRetry(egressFirewall); err != nil { + klog.Errorf("Failed to update egress firewall status %s, error: %v", getEgressFirewallNamespacedName(egressFirewall), err) + } + return err default: return fmt.Errorf("no add function for object type %v", objectsToRetry.oType) } @@ -596,11 +635,11 @@ func (oc *Controller) updateResource(objectsToRetry *retryObjs, oldObj, newObj i case factory.NodeType: newNode, ok := newObj.(*kapi.Node) if !ok { - return fmt.Errorf("could not cast newObj of type interface{} to *kapi.Node") + return fmt.Errorf("could not cast newObj of type %T to *kapi.Node", newObj) } oldNode, ok := oldObj.(*kapi.Node) if !ok { - return fmt.Errorf("could not cast oldObj of type interface{} to *kapi.Node") + return fmt.Errorf("could not cast oldObj of type %T to *kapi.Node", oldObj) } // determine what actually changed in this update _, nodeSync := oc.addNodeFailed.Load(newNode.Name) @@ -653,7 +692,7 @@ func (oc *Controller) deleteResource(objectsToRetry *retryObjs, obj, cachedObj i var cachedNP *networkPolicy knp, ok := obj.(*knet.NetworkPolicy) if !ok { - return fmt.Errorf("could not cast obj of type interface{} to *knet.NetworkPolicy") + return fmt.Errorf("could not cast obj of type %T to *knet.NetworkPolicy", obj) } if cachedObj != nil { if cachedNP, ok = cachedObj.(*networkPolicy); !ok { @@ -665,14 +704,14 @@ func (oc *Controller) deleteResource(objectsToRetry *retryObjs, obj, cachedObj i case factory.NodeType: node, ok := obj.(*kapi.Node) if !ok { - return fmt.Errorf("could not cast obj 
of type interface{} to *knet.Node") + return fmt.Errorf("could not cast obj of type %T to *knet.Node", obj) } return oc.deleteNodeEvent(node) case factory.PeerServiceType: service, ok := obj.(*kapi.Service) if !ok { - return fmt.Errorf("could not cast peer service of type interface{} to *kapi.Service") + return fmt.Errorf("could not cast peer service of type %T to *kapi.Service", obj) } extraParameters := objectsToRetry.extraParameters.(*NetworkPolicyExtraParameters) return oc.handlePeerServiceDelete(extraParameters.gp, service) @@ -728,6 +767,14 @@ func (oc *Controller) deleteResource(objectsToRetry *retryObjs, obj, cachedObj i extraParameters.portGroupEgressDenyName, obj) + case factory.EgressFirewallType: + egressFirewall := obj.(*egressfirewall.EgressFirewall) + if err := oc.deleteEgressFirewall(egressFirewall); err != nil { + return fmt.Errorf("failed to delete egress firewall %s, error: %v", getEgressFirewallNamespacedName(egressFirewall), err) + } + metrics.UpdateEgressFirewallRuleCount(float64(-len(egressFirewall.Spec.Egress))) + metrics.DecrementEgressFirewallCount() + return nil default: return fmt.Errorf("object type %v not supported", objectsToRetry.oType) } @@ -897,6 +944,10 @@ func (oc *Controller) getSyncResourcesFunc(r *retryObjs) (func([]interface{}), e name = "LocalPodSelectorType" syncRetriableFunc = r.syncFunc + case factory.EgressFirewallType: + name = "syncEgressFirewall" + syncRetriableFunc = oc.syncEgressFirewall + default: return nil, fmt.Errorf("no sync function for object type %v", r.oType) } @@ -1154,7 +1205,6 @@ func (oc *Controller) WatchResource(objectsToRetry *retryObjs) *factory.Handler return } } - objectsToRetry.deleteRetryObj(newKey, true) }, diff --git a/go-controller/pkg/ovn/ovn.go b/go-controller/pkg/ovn/ovn.go index bccb3767ef..ea6cd7055d 100644 --- a/go-controller/pkg/ovn/ovn.go +++ b/go-controller/pkg/ovn/ovn.go @@ -22,16 +22,16 @@ import ( addressset "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/ovn/address_set" 
svccontroller "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/ovn/controller/services" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/ovn/controller/unidling" + corev1listers "k8s.io/client-go/listers/core/v1" lsm "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/ovn/logical_switch_manager" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/ovn/subnetallocator" ovntypes "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" - egressfirewall "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1" - utilnet "k8s.io/utils/net" + egressqoslisters "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1/apis/listers/egressqos/v1" kapi "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" @@ -44,6 +44,8 @@ import ( "k8s.io/client-go/tools/cache" "k8s.io/client-go/tools/record" ref "k8s.io/client-go/tools/reference" + "k8s.io/client-go/util/workqueue" + "k8s.io/klog/v2" ) @@ -99,11 +101,10 @@ type namespaceInfo struct { // Controller structure is the object which holds the controls for starting // and reacting upon the watched resources (e.g. 
pods, endpoints) type Controller struct { - client clientset.Interface - kube kube.Interface - watchFactory *factory.WatchFactory - egressFirewallHandler *factory.Handler - stopChan <-chan struct{} + client clientset.Interface + kube kube.Interface + watchFactory *factory.WatchFactory + stopChan <-chan struct{} // FIXME DUAL-STACK - Make IP Allocators more dual-stack friendly masterSubnetAllocator *subnetallocator.SubnetAllocator @@ -135,6 +136,20 @@ type Controller struct { // egressFirewalls is a map of namespaces and the egressFirewall attached to it egressFirewalls sync.Map + // EgressQoS + egressQoSLister egressqoslisters.EgressQoSLister + egressQoSSynced cache.InformerSynced + egressQoSQueue workqueue.RateLimitingInterface + egressQoSCache sync.Map + + egressQoSPodLister corev1listers.PodLister + egressQoSPodSynced cache.InformerSynced + egressQoSPodQueue workqueue.RateLimitingInterface + + egressQoSNodeLister corev1listers.NodeLister + egressQoSNodeSynced cache.InformerSynced + egressQoSNodeQueue workqueue.RateLimitingInterface + // An address set factory that creates address sets addressSetFactory addressset.AddressSetFactory @@ -194,6 +209,9 @@ type Controller struct { // Objects for network policies that need to be retried retryNetworkPolicies *retryObjs + // Objects for egress firewall that need to be retried + retryEgressFirewalls *retryObjs + // Objects for nodes that need to be retried retryNodes *retryObjs // Node-specific syncMap used by node event handler @@ -283,6 +301,7 @@ func NewOvnController(ovnClient *util.OVNClientset, wf *factory.WatchFactory, st retryPods: NewRetryObjs(factory.PodType, "", nil, nil, nil), retryNetworkPolicies: NewRetryObjs(factory.PolicyType, "", nil, nil, nil), retryNodes: NewRetryObjs(factory.NodeType, "", nil, nil, nil), + retryEgressFirewalls: NewRetryObjs(factory.EgressFirewallType, "", nil, nil, nil), recorder: recorder, nbClient: libovsdbOvnNBClient, sbClient: libovsdbOvnSBClient, @@ -356,8 +375,20 @@ func (oc 
*Controller) Run(ctx context.Context, wg *sync.WaitGroup) error { return err } oc.egressFirewallDNS.Run(egressFirewallDNSDefaultDuration) - oc.egressFirewallHandler = oc.WatchEgressFirewall() + oc.WatchEgressFirewall() + + } + if config.OVNKubernetesFeature.EnableEgressQoS { + oc.initEgressQoSController( + oc.watchFactory.EgressQoSInformer(), + oc.watchFactory.PodCoreInformer(), + oc.watchFactory.NodeCoreInformer()) + wg.Add(1) + go func() { + defer wg.Done() + oc.runEgressQoSController(1, oc.stopChan) + }() } klog.Infof("Completing all the Watchers took %v", time.Since(start)) @@ -505,56 +536,8 @@ func (oc *Controller) WatchNetworkPolicy() { // WatchEgressFirewall starts the watching of egressfirewall resource and calls // back the appropriate handler logic -func (oc *Controller) WatchEgressFirewall() *factory.Handler { - return oc.watchFactory.AddEgressFirewallHandler(cache.ResourceEventHandlerFuncs{ - AddFunc: func(obj interface{}) { - egressFirewall := obj.(*egressfirewall.EgressFirewall).DeepCopy() - addErrors := oc.addEgressFirewall(egressFirewall) - if addErrors != nil { - klog.Error(addErrors) - egressFirewall.Status.Status = egressFirewallAddError - } else { - egressFirewall.Status.Status = egressFirewallAppliedCorrectly - } - - err := oc.updateEgressFirewallWithRetry(egressFirewall) - if err != nil { - klog.Error(err) - } - metrics.UpdateEgressFirewallRuleCount(float64(len(egressFirewall.Spec.Egress))) - metrics.IncrementEgressFirewallCount() - }, - UpdateFunc: func(old, newer interface{}) { - newEgressFirewall := newer.(*egressfirewall.EgressFirewall).DeepCopy() - oldEgressFirewall := old.(*egressfirewall.EgressFirewall) - if !reflect.DeepEqual(oldEgressFirewall.Spec, newEgressFirewall.Spec) { - errList := oc.updateEgressFirewall(oldEgressFirewall, newEgressFirewall) - if errList != nil { - newEgressFirewall.Status.Status = egressFirewallUpdateError - klog.Error(errList) - } else { - newEgressFirewall.Status.Status = egressFirewallAppliedCorrectly - } - 
- err := oc.updateEgressFirewallWithRetry(newEgressFirewall) - if err != nil { - klog.Error(err) - } - metrics.UpdateEgressFirewallRuleCount(float64(len(newEgressFirewall.Spec.Egress) - len(oldEgressFirewall.Spec.Egress))) - } - }, - DeleteFunc: func(obj interface{}) { - egressFirewall := obj.(*egressfirewall.EgressFirewall) - deleteErrors := oc.deleteEgressFirewall(egressFirewall) - if deleteErrors != nil { - klog.Error(deleteErrors) - return - } - - metrics.UpdateEgressFirewallRuleCount(float64(-len(egressFirewall.Spec.Egress))) - metrics.DecrementEgressFirewallCount() - }, - }, oc.syncEgressFirewall) +func (oc *Controller) WatchEgressFirewall() { + oc.WatchResource(oc.retryEgressFirewalls) } // WatchEgressNodes starts the watching of egress assignable nodes and calls diff --git a/go-controller/pkg/ovn/ovn_test.go b/go-controller/pkg/ovn/ovn_test.go index d0e916e4bf..fe135e4daa 100644 --- a/go-controller/pkg/ovn/ovn_test.go +++ b/go-controller/pkg/ovn/ovn_test.go @@ -2,12 +2,16 @@ package ovn import ( "context" + "sync" + "github.com/onsi/gomega" libovsdbclient "github.com/ovn-org/libovsdb/client" egressfirewall "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1" egressfirewallfake "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/clientset/versioned/fake" egressip "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressip/v1" egressipfake "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressip/v1/apis/clientset/versioned/fake" + egressqos "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1" + egressqosfake "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1/apis/clientset/versioned/fake" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/factory" addressset "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/ovn/address_set" ovntest "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/testing" @@ -43,12 +47,14 @@ type FakeOVN struct { sbClient 
libovsdbclient.Client dbSetup libovsdbtest.TestSetup nbsbCleanup *libovsdbtest.Cleanup + egressQoSWg *sync.WaitGroup } func NewFakeOVN() *FakeOVN { return &FakeOVN{ asf: addressset.NewFakeAddressSetFactory(), fakeRecorder: record.NewFakeRecorder(10), + egressQoSWg: &sync.WaitGroup{}, } } @@ -59,12 +65,15 @@ func (o *FakeOVN) start(objects ...runtime.Object) { egressIPObjects := []runtime.Object{} egressFirewallObjects := []runtime.Object{} + egressQoSObjects := []runtime.Object{} v1Objects := []runtime.Object{} for _, object := range objects { if _, isEgressIPObject := object.(*egressip.EgressIPList); isEgressIPObject { egressIPObjects = append(egressIPObjects, object) } else if _, isEgressFirewallObject := object.(*egressfirewall.EgressFirewallList); isEgressFirewallObject { egressFirewallObjects = append(egressFirewallObjects, object) + } else if _, isEgressQoSObject := object.(*egressqos.EgressQoSList); isEgressQoSObject { + egressQoSObjects = append(egressQoSObjects, object) } else { v1Objects = append(v1Objects, object) } @@ -73,6 +82,7 @@ func (o *FakeOVN) start(objects ...runtime.Object) { KubeClient: fake.NewSimpleClientset(v1Objects...), EgressIPClient: egressipfake.NewSimpleClientset(egressIPObjects...), EgressFirewallClient: egressfirewallfake.NewSimpleClientset(egressFirewallObjects...), + EgressQoSClient: egressqosfake.NewSimpleClientset(egressQoSObjects...), } o.init() } @@ -85,6 +95,7 @@ func (o *FakeOVN) startWithDBSetup(dbSetup libovsdbtest.TestSetup, objects ...ru func (o *FakeOVN) shutdown() { o.watcher.Shutdown() close(o.stopChan) + o.egressQoSWg.Wait() o.nbsbCleanup.Cleanup() } diff --git a/go-controller/pkg/types/const.go b/go-controller/pkg/types/const.go index 8b78f4bfe2..b62d5d4f1c 100644 --- a/go-controller/pkg/types/const.go +++ b/go-controller/pkg/types/const.go @@ -111,6 +111,7 @@ const ( // OVN-K8S Address Sets Names HybridRoutePolicyPrefix = "hybrid-route-pods-" + EgressQoSRulePrefix = "egress-qos-pods-" // OVN-K8S Topology Versions 
OvnSingleJoinSwitchTopoVersion = 1 diff --git a/go-controller/pkg/util/kube.go b/go-controller/pkg/util/kube.go index 9ad9cab2c4..ffee00bcf4 100644 --- a/go-controller/pkg/util/kube.go +++ b/go-controller/pkg/util/kube.go @@ -25,6 +25,7 @@ import ( egressfirewallclientset "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/clientset/versioned" egressipclientset "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressip/v1/apis/clientset/versioned" + egressqosclientset "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1/apis/clientset/versioned" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" ocpcloudnetworkclientset "github.com/openshift/client-go/cloudnetwork/clientset/versioned" @@ -38,6 +39,7 @@ type OVNClientset struct { EgressIPClient egressipclientset.Interface EgressFirewallClient egressfirewallclientset.Interface CloudNetworkClient ocpcloudnetworkclientset.Interface + EgressQoSClient egressqosclientset.Interface } func adjustCommit() string { @@ -136,11 +138,17 @@ func NewOVNClientset(conf *config.KubernetesConfig) (*OVNClientset, error) { if err != nil { return nil, err } + egressqosClientset, err := egressqosclientset.NewForConfig(kconfig) + if err != nil { + return nil, err + } + return &OVNClientset{ KubeClient: kclientset, EgressIPClient: egressIPClientset, EgressFirewallClient: egressFirewallClientset, CloudNetworkClient: cloudNetworkClientset, + EgressQoSClient: egressqosClientset, }, nil }