diff --git a/Makefile b/Makefile
index 2196e02fe..3957d7644 100644
--- a/Makefile
+++ b/Makefile
@@ -18,11 +18,10 @@ TUNED_COMMIT:=682c47c0a9eb5596c2d396b6d0dae4e297414c50
TUNED_DIR:=daemon
# API-related variables
-API_TYPES_DIR:=./pkg/apis/tuned/v1
-API_TYPES:=$(wildcard $(API_TYPES_DIR)/*_types.go)
+API_TYPES_DIR:=pkg/apis
+API_TYPES:=$(shell find $(API_TYPES_DIR) -name \*_types.go)
API_ZZ_GENERATED:=zz_generated.deepcopy
-API_TYPES_GENERATED:=$(API_TYPES_DIR)/$(API_ZZ_GENERATED).go
-API_GO_HEADER_FILE:=pkg/apis/header.go.txt
+API_GO_HEADER_FILE:=$(API_TYPES_DIR)/header.go.txt
# Container image-related variables
IMAGE_BUILD_CMD=podman build --no-cache
@@ -33,6 +32,10 @@ ORG=openshift
TAG=$(shell git rev-parse --abbrev-ref HEAD)
IMAGE=$(REGISTRY)/$(ORG)/origin-cluster-node-tuning-operator:$(TAG)
+# PAO variables
+CLUSTER ?= "ci"
+PAO_CRD_APIS :=$(addprefix ./$(API_TYPES_DIR)/pao/,v2 v1 v1alpha1)
+
all: build
# Do not put any includes above the "all" target. We want the default target to build
@@ -59,25 +62,25 @@ $(BINDATA): $(GOBINDATA_BIN) $(ASSETS)
pkg/generated: $(API_TYPES)
$(GO) run k8s.io/code-generator/cmd/deepcopy-gen \
- --input-dirs $(PACKAGE)/pkg/apis/tuned/v1 \
+ --input-dirs $(PACKAGE)/$(API_TYPES_DIR)/tuned/v1,$(PACKAGE)/$(API_TYPES_DIR)/pao/v1alpha1,$(PACKAGE)/$(API_TYPES_DIR)/pao/v1,$(PACKAGE)/$(API_TYPES_DIR)/pao/v2 \
-O $(API_ZZ_GENERATED) \
--go-header-file $(API_GO_HEADER_FILE) \
- --bounding-dirs $(PACKAGE)/pkg/apis \
+ --bounding-dirs $(PACKAGE)/$(API_TYPES_DIR) \
--output-base tmp
$(GO) run k8s.io/code-generator/cmd/client-gen \
--clientset-name versioned \
--input-base '' \
- --input $(PACKAGE)/pkg/apis/tuned/v1 \
+ --input $(PACKAGE)/$(API_TYPES_DIR)/tuned/v1 \
--go-header-file $(API_GO_HEADER_FILE) \
--output-package $(PACKAGE)/pkg/generated/clientset \
--output-base tmp
$(GO) run k8s.io/code-generator/cmd/lister-gen \
- --input-dirs $(PACKAGE)/pkg/apis/tuned/v1 \
+ --input-dirs $(PACKAGE)/$(API_TYPES_DIR)/tuned/v1 \
--go-header-file $(API_GO_HEADER_FILE) \
--output-package $(PACKAGE)/pkg/generated/listers \
--output-base tmp
$(GO) run k8s.io/code-generator/cmd/informer-gen \
- --input-dirs $(PACKAGE)/pkg/apis/tuned/v1 \
+ --input-dirs $(PACKAGE)/$(API_TYPES_DIR)/tuned/v1 \
--versioned-clientset-package $(PACKAGE)/pkg/generated/clientset/versioned \
--listers-package $(PACKAGE)/pkg/generated/listers \
--go-header-file $(API_GO_HEADER_FILE) \
@@ -86,7 +89,6 @@ pkg/generated: $(API_TYPES)
tar c tmp | tar x --strip-components=4
touch $@
-
$(GOBINDATA_BIN):
$(GO) build -o $(GOBINDATA_BIN) ./vendor/github.com/kevinburke/go-bindata/go-bindata
@@ -130,7 +132,8 @@ local-image-push:
# $2 - apis
# $3 - manifests
# $4 - output
-$(call add-crd-gen,tuned,$(API_TYPES_DIR),./manifests,./manifests)
+$(call add-crd-gen,tuned,./$(API_TYPES_DIR)/tuned/v1,./manifests,./manifests)
+$(call add-crd-gen,pao,$(PAO_CRD_APIS),./manifests,./manifests)
# This will include additional actions on the update and verify targets to ensure that profile patches are applied
# to manifest files
@@ -141,3 +144,29 @@ $(call add-crd-gen,tuned,$(API_TYPES_DIR),./manifests,./manifests)
$(call add-profile-manifests,manifests,./profile-patches,./manifests)
.PHONY: all build deepcopy crd-schema-gen test-e2e verify verify-gofmt clean local-image local-image-push
+
+# PAO
+
+.PHONY: cluster-deploy-pao
+cluster-deploy-pao:
+ @echo "Deploying PAO artifacts"
+ CLUSTER=$(CLUSTER) hack/deploy.sh
+
+.PHONY: cluster-label-worker-cnf
+cluster-label-worker-cnf:
+ @echo "Adding worker-cnf label to worker nodes"
+ hack/label-worker-cnf.sh
+
+.PHONY: pao-functests
+pao-functests: cluster-label-worker-cnf pao-functests-only
+
+.PHONY: pao-functests-only
+pao-functests-only:
+ @echo "Cluster Version"
+ hack/show-cluster-version.sh
+ hack/run-functests.sh
+
+.PHONY: cluster-clean-pao
+cluster-clean-pao:
+ @echo "Cleaning up performance addons artifacts"
+ hack/clean-deploy.sh
diff --git a/assets/pao/assets.go b/assets/pao/assets.go
new file mode 100644
index 000000000..d4367291b
--- /dev/null
+++ b/assets/pao/assets.go
@@ -0,0 +1,19 @@
+package assets
+
+import (
+ "embed"
+)
+
+var (
+ // Configs contains all files placed under the configs directory
+ //go:embed configs
+ Configs embed.FS
+
+ // Scripts contains all files placed under the scripts directory
+ //go:embed scripts
+ Scripts embed.FS
+
+ // Tuned contains all files placed under the tuned directory
+ //go:embed tuned
+ Tuned embed.FS
+)
diff --git a/assets/pao/configs/99-low-latency-hooks.json b/assets/pao/configs/99-low-latency-hooks.json
new file mode 100644
index 000000000..cbf6085c6
--- /dev/null
+++ b/assets/pao/configs/99-low-latency-hooks.json
@@ -0,0 +1,11 @@
+{
+ "version": "1.0.0",
+ "hook": {
+ "path": "/usr/local/bin/low-latency-hooks.sh",
+ "args": ["low-latency-hooks.sh", "{{.RPSMask}}"]
+ },
+ "when": {
+ "always": true
+ },
+ "stages": ["prestart"]
+}
diff --git a/assets/pao/configs/99-netdev-rps.rules b/assets/pao/configs/99-netdev-rps.rules
new file mode 100644
index 000000000..4e6d346af
--- /dev/null
+++ b/assets/pao/configs/99-netdev-rps.rules
@@ -0,0 +1 @@
+SUBSYSTEM=="net", ACTION=="add", ENV{DEVPATH}!="/devices/virtual/net/veth*", TAG+="systemd", ENV{SYSTEMD_WANTS}="update-rps@%k.service"
diff --git a/assets/pao/configs/99-runtimes.conf b/assets/pao/configs/99-runtimes.conf
new file mode 100644
index 000000000..c44a5fff0
--- /dev/null
+++ b/assets/pao/configs/99-runtimes.conf
@@ -0,0 +1,20 @@
+{{if .ReservedCpus}}
+[crio.runtime]
+infra_ctr_cpuset = "{{.ReservedCpus}}"
+{{end}}
+
+# We should copy paste the default runtime because this snippet will override the whole runtimes section
+[crio.runtime.runtimes.runc]
+runtime_path = ""
+runtime_type = "oci"
+runtime_root = "/run/runc"
+
+# The CRI-O will check the allowed_annotations under the runtime handler and apply high-performance hooks when one of
+# the high-performance annotations is present under it.
+# We should provide the runtime_path because we need to inform CRI-O that we want to re-use the runc binary and we
+# do not have a high-performance binary under the $PATH that would point to it.
+[crio.runtime.runtimes.high-performance]
+runtime_path = "/bin/runc"
+runtime_type = "oci"
+runtime_root = "/run/runc"
+allowed_annotations = ["cpu-load-balancing.crio.io", "cpu-quota.crio.io", "irq-load-balancing.crio.io"]
diff --git a/assets/pao/scripts/hugepages-allocation.sh b/assets/pao/scripts/hugepages-allocation.sh
new file mode 100755
index 000000000..dd6a25a40
--- /dev/null
+++ b/assets/pao/scripts/hugepages-allocation.sh
@@ -0,0 +1,26 @@
+#!/usr/bin/env bash
+
+set -euo pipefail
+
+nodes_path="/sys/devices/system/node"
+hugepages_file="${nodes_path}/node${NUMA_NODE}/hugepages/hugepages-${HUGEPAGES_SIZE}kB/nr_hugepages"
+
+if [ ! -f "${hugepages_file}" ]; then
+ echo "ERROR: ${hugepages_file} does not exist"
+ exit 1
+fi
+
+timeout=60
+sample=1
+current_time=0
+while [ "$(cat "${hugepages_file}")" -ne "${HUGEPAGES_COUNT}" ]; do
+ echo "${HUGEPAGES_COUNT}" >"${hugepages_file}"
+
+ current_time=$((current_time + sample))
+ if [ $current_time -gt $timeout ]; then
+ echo "ERROR: ${hugepages_file} does not have the expected number of hugepages ${HUGEPAGES_COUNT}"
+ exit 1
+ fi
+
+ sleep $sample
+done
diff --git a/assets/pao/scripts/low-latency-hooks.sh b/assets/pao/scripts/low-latency-hooks.sh
new file mode 100644
index 000000000..aff54afd1
--- /dev/null
+++ b/assets/pao/scripts/low-latency-hooks.sh
@@ -0,0 +1,35 @@
+#!/usr/bin/env bash
+
+mask="${1}"
+[ -n "${mask}" ] || { logger "${0}: The rps-mask parameter is missing" ; exit 0; }
+
+pid=$(jq '.pid' /dev/stdin 2>&1)
+[[ $? -eq 0 && -n "${pid}" ]] || { logger "${0}: Failed to extract the pid: ${pid}"; exit 0; }
+
+ns=$(ip netns identify "${pid}" 2>&1)
+[[ $? -eq 0 && -n "${ns}" ]] || { logger "${0} Failed to identify the namespace: ${ns}"; exit 0; }
+
+# Updates the container veth RPS mask on the node
+netns_link_indexes=$(ip netns exec "${ns}" ip -j link | jq ".[] | select(.link_index != null) | .link_index")
+for link_index in ${netns_link_indexes}; do
+ container_veth=$(ip -j link | jq ".[] | select(.ifindex == ${link_index}) | .ifname" | tr -d '"')
+ echo ${mask} > /sys/devices/virtual/net/${container_veth}/queues/rx-0/rps_cpus
+done
+
+# Updates the RPS mask for the interface inside of the container network namespace
+mode=$(ip netns exec "${ns}" [ -w /sys ] && echo "rw" || echo "ro" 2>&1)
+[ $? -eq 0 ] || { logger "${0} Failed to determine if the /sys is writable: ${mode}"; exit 0; }
+
+if [ "${mode}" = "ro" ]; then
+ res=$(ip netns exec "${ns}" mount -o remount,rw /sys 2>&1)
+ [ $? -eq 0 ] || { logger "${0}: Failed to remount /sys as rw: ${res}"; exit 0; }
+fi
+
+# /sys/class/net can't be used recursively to find the rps_cpus file, use /sys/devices instead
+res=$(ip netns exec "${ns}" find /sys/devices -type f -name rps_cpus -exec sh -c "echo ${mask} | cat > {}" \; 2>&1)
+[[ $? -eq 0 && -z "${res}" ]] || logger "${0}: Failed to apply the RPS mask: ${res}"
+
+if [ "${mode}" = "ro" ]; then
+ ip netns exec "${ns}" mount -o remount,ro /sys
+ [ $? -eq 0 ] || exit 1 # Error out so the pod will not start with a writable /sys
+fi
diff --git a/assets/pao/scripts/set-rps-mask.sh b/assets/pao/scripts/set-rps-mask.sh
new file mode 100644
index 000000000..e6bb827a3
--- /dev/null
+++ b/assets/pao/scripts/set-rps-mask.sh
@@ -0,0 +1,36 @@
+#!/usr/bin/env bash
+
+dev=$1
+[ -n "${dev}" ] || { echo "The device argument is missing" >&2 ; exit 1; }
+
+mask=$2
+[ -n "${mask}" ] || { echo "The mask argument is missing" >&2 ; exit 1; }
+
+dev_dir="/sys/class/net/${dev}"
+
+function find_dev_dir {
+ systemd_devs=$(systemctl list-units -t device | grep sys-subsystem-net-devices | cut -d' ' -f1)
+
+ for systemd_dev in ${systemd_devs}; do
+ dev_sysfs=$(systemctl show "${systemd_dev}" -p SysFSPath --value)
+
+ dev_orig_name="${dev_sysfs##*/}"
+ if [ "${dev_orig_name}" = "${dev}" ]; then
+ dev_name="${systemd_dev##*-}"
+ dev_name="${dev_name%%.device}"
+ if [ "${dev_name}" = "${dev}" ]; then # disregard the original device unit
+ continue
+ fi
+
+ echo "${dev} device was renamed to $dev_name"
+ dev_dir="/sys/class/net/${dev_name}"
+ break
+ fi
+ done
+}
+
+[ -d "${dev_dir}" ] || find_dev_dir # the net device was renamed, find the new name
+[ -d "${dev_dir}" ] || { sleep 5; find_dev_dir; } # search failed, wait a little and try again
+[ -d "${dev_dir}" ] || { echo "${dev_dir}" directory not found >&2 ; exit 0; } # the interface disappeared, not an error
+
+find "${dev_dir}"/queues -type f -name rps_cpus -exec sh -c "echo ${mask} | cat > {}" \;
\ No newline at end of file
diff --git a/assets/pao/tuned/openshift-node-performance b/assets/pao/tuned/openshift-node-performance
new file mode 100644
index 000000000..331f08af7
--- /dev/null
+++ b/assets/pao/tuned/openshift-node-performance
@@ -0,0 +1,132 @@
+[main]
+summary=Openshift node optimized for deterministic performance at the cost of increased power consumption, focused on low latency network performance. Based on Tuned 2.11 and Cluster node tuning (oc 4.5)
+include=openshift-node,cpu-partitioning
+
+# Inheritance of base profiles legend:
+# cpu-partitioning -> network-latency -> latency-performance
+# https://github.com/redhat-performance/tuned/blob/master/profiles/latency-performance/tuned.conf
+# https://github.com/redhat-performance/tuned/blob/master/profiles/network-latency/tuned.conf
+# https://github.com/redhat-performance/tuned/blob/master/profiles/cpu-partitioning/tuned.conf
+
+# All values are mapped with a comment where a parent profile contains them.
+# Different values will override the original values in parent profiles.
+
+[variables]
+#> isolated_cores take a list of ranges; e.g. isolated_cores=2,4-7
+{{if .IsolatedCpus}}
+isolated_cores={{.IsolatedCpus}}
+{{end}}
+
+not_isolated_cores_expanded=${f:cpulist_invert:${isolated_cores_expanded}}
+
+[cpu]
+#> latency-performance
+#> (override)
+force_latency=cstate.id:1|3
+governor=performance
+energy_perf_bias=performance
+min_perf_pct=100
+
+[service]
+service.stalld=start,enable
+
+[vm]
+#> network-latency
+transparent_hugepages=never
+
+{{if not .GloballyDisableIrqLoadBalancing}}
+[irqbalance]
+#> Override the value set by cpu-partitioning with an empty one
+banned_cpus=""
+{{end}}
+
+[scheduler]
+runtime=0
+group.ksoftirqd=0:f:11:*:ksoftirqd.*
+group.rcuc=0:f:11:*:rcuc.*
+sched_rt_runtime_us=-1
+sched_min_granularity_ns=10000000
+sched_migration_cost_ns=5000000
+numa_balancing=0
+{{if not .GloballyDisableIrqLoadBalancing}}
+default_irq_smp_affinity = ignore
+{{end}}
+
+[sysctl]
+#> cpu-partitioning #realtime
+kernel.hung_task_timeout_secs = 600
+#> cpu-partitioning #realtime
+kernel.nmi_watchdog = 0
+#> realtime
+kernel.sched_rt_runtime_us = -1
+# cpu-partitioning and realtime for RHEL disable it (= 0)
+# OCP is too dynamic when partitioning and needs to evacuate
+#> scheduled timers when starting a guaranteed workload (= 1)
+kernel.timer_migration = 1
+#> network-latency
+kernel.numa_balancing=0
+net.core.busy_read=50
+net.core.busy_poll=50
+net.ipv4.tcp_fastopen=3
+#> cpu-partitioning #realtime
+vm.stat_interval = 10
+
+# ktune sysctl settings for rhel6 servers, maximizing i/o throughput
+#
+# Minimal preemption granularity for CPU-bound tasks:
+# (default: 1 msec# (1 + ilog(ncpus)), units: nanoseconds)
+#> latency-performance
+kernel.sched_min_granularity_ns=10000000
+
+# If a workload mostly uses anonymous memory and it hits this limit, the entire
+# working set is buffered for I/O, and any more write buffering would require
+# swapping, so it's time to throttle writes until I/O can catch up. Workloads
+# that mostly use file mappings may be able to use even higher values.
+#
+# The generator of dirty data starts writeback at this percentage (system default
+# is 20%)
+#> latency-performance
+vm.dirty_ratio=10
+
+# Start background writeback (via writeback threads) at this percentage (system
+# default is 10%)
+#> latency-performance
+vm.dirty_background_ratio=3
+
+# The swappiness parameter controls the tendency of the kernel to move
+# processes out of physical memory and onto the swap disk.
+# 0 tells the kernel to avoid swapping processes out of physical memory
+# for as long as possible
+# 100 tells the kernel to aggressively swap processes out of physical memory
+# and move them to swap cache
+#> latency-performance
+vm.swappiness=10
+
+# The total time the scheduler will consider a migrated process
+# "cache hot" and thus less likely to be re-migrated
+# (system default is 500000, i.e. 0.5 ms)
+#> latency-performance
+kernel.sched_migration_cost_ns=5000000
+
+[selinux]
+#> Custom (atomic host)
+avc_cache_threshold=8192
+
+{{if .NetDevices}}
+{{.NetDevices}}
+{{end}}
+
+[bootloader]
+# set empty values to disable RHEL initrd setting in cpu-partitioning
+initrd_remove_dir=
+initrd_dst_img=
+initrd_add_dir=
+# overrides cpu-partitioning cmdline
+cmdline_cpu_part=+nohz=on rcu_nocbs=${isolated_cores} tuned.non_isolcpus=${not_isolated_cpumask} intel_pstate=disable nosoftlockup
+{{if .StaticIsolation}}
+cmdline_realtime=+tsc=nowatchdog intel_iommu=on iommu=pt isolcpus=domain,managed_irq,${isolated_cores} systemd.cpu_affinity=${not_isolated_cores_expanded}
+{{else}}
+cmdline_realtime=+tsc=nowatchdog intel_iommu=on iommu=pt isolcpus=managed_irq,${isolated_cores} systemd.cpu_affinity=${not_isolated_cores_expanded}
+{{end}}
+cmdline_hugepages=+{{if .DefaultHugepagesSize}} default_hugepagesz={{.DefaultHugepagesSize}} {{end}} {{if .Hugepages}} {{.Hugepages}} {{end}}
+cmdline_additionalArg=+{{if .AdditionalArgs}} {{.AdditionalArgs}} {{end}}
diff --git a/cmd/cluster-node-tuning-operator/main.go b/cmd/cluster-node-tuning-operator/main.go
index 5e4e98a5e..cca9f316f 100644
--- a/cmd/cluster-node-tuning-operator/main.go
+++ b/cmd/cluster-node-tuning-operator/main.go
@@ -7,13 +7,20 @@ import (
"runtime"
apiconfigv1 "github.com/openshift/api/config/v1"
+ performancev1 "github.com/openshift/cluster-node-tuning-operator/pkg/apis/pao/v1"
+ performancev1alpha1 "github.com/openshift/cluster-node-tuning-operator/pkg/apis/pao/v1alpha1"
+ performancev2 "github.com/openshift/cluster-node-tuning-operator/pkg/apis/pao/v2"
+ paocontroller "github.com/openshift/cluster-node-tuning-operator/pkg/pao/controller"
mcov1 "github.com/openshift/machine-config-operator/pkg/apis/machineconfiguration.openshift.io/v1"
+ olmv1 "github.com/operator-framework/api/pkg/operators/v1"
+ olmv1alpha1 "github.com/operator-framework/api/pkg/operators/v1alpha1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
apiruntime "k8s.io/apimachinery/pkg/runtime"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
-
"k8s.io/klog/v2"
+ ctrl "sigs.k8s.io/controller-runtime"
+ "sigs.k8s.io/controller-runtime/pkg/cache"
tunedv1 "github.com/openshift/cluster-node-tuning-operator/pkg/apis/tuned/v1"
"github.com/openshift/cluster-node-tuning-operator/pkg/config"
@@ -23,14 +30,15 @@ import (
"github.com/openshift/cluster-node-tuning-operator/pkg/tuned"
"github.com/openshift/cluster-node-tuning-operator/pkg/util"
"github.com/openshift/cluster-node-tuning-operator/version"
- ctrl "sigs.k8s.io/controller-runtime"
- "sigs.k8s.io/controller-runtime/pkg/cache"
)
const (
operandFilename = "openshift-tuned"
operatorFilename = "cluster-node-tuning-operator"
- metricsHost = "0.0.0.0"
+ webhookPort = 4343
+ webhookCertDir = "/apiserver.local.config/certificates"
+ webhookCertName = "apiserver.crt"
+ webhookKeyName = "apiserver.key"
)
var (
@@ -42,6 +50,11 @@ func init() {
utilruntime.Must(tunedv1.AddToScheme(scheme))
utilruntime.Must(mcov1.AddToScheme(scheme))
utilruntime.Must(apiconfigv1.Install(scheme))
+ utilruntime.Must(performancev1alpha1.AddToScheme(scheme))
+ utilruntime.Must(performancev1.AddToScheme(scheme))
+ utilruntime.Must(performancev2.AddToScheme(scheme))
+ utilruntime.Must(olmv1alpha1.AddToScheme(scheme))
+ utilruntime.Must(olmv1.AddToScheme(scheme))
}
func printVersion() {
@@ -100,12 +113,41 @@ func main() {
controller, err := operator.NewController()
if err != nil {
- klog.Fatal(err)
+ klog.Fatalf("failed to create new controller: %v", err)
+ }
+
+ if err := mgr.Add(controller); err != nil {
+ klog.Fatalf("failed to add new controller to the manager: %v", err)
+ }
+
+ if err := mgr.Add(metrics.Server{}); err != nil {
+ klog.Fatalf("unable to add metrics server as runnable under the manager: %v", err)
}
- mgr.Add(controller)
- mgr.Add(metrics.Server{})
metrics.RegisterVersion(version.Version)
+ if err = (&paocontroller.PerformanceProfileReconciler{
+ Client: mgr.GetClient(),
+ Scheme: mgr.GetScheme(),
+ Recorder: mgr.GetEventRecorderFor("performance-profile-controller"),
+ }).SetupWithManager(mgr); err != nil {
+ klog.Exitf("unable to create PerformanceProfile controller: %v", err)
+ }
+
+ // Configure webhook server.
+ webHookServer := mgr.GetWebhookServer()
+ webHookServer.Port = webhookPort
+ webHookServer.CertDir = webhookCertDir
+ webHookServer.CertName = webhookCertName
+ webHookServer.KeyName = webhookKeyName
+
+ if err = (&performancev1.PerformanceProfile{}).SetupWebhookWithManager(mgr); err != nil {
+ klog.Exitf("unable to create PerformanceProfile v1 webhook: %v", err)
+ }
+
+ if err = (&performancev2.PerformanceProfile{}).SetupWebhookWithManager(mgr); err != nil {
+ klog.Exitf("unable to create PerformanceProfile v2 webhook: %v", err)
+ }
+
if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil {
klog.Exitf("manager exited with non-zero code: %v", err)
}
diff --git a/cmd/performance-profile-creator/README.md b/cmd/performance-profile-creator/README.md
new file mode 100644
index 000000000..874bd7f4e
--- /dev/null
+++ b/cmd/performance-profile-creator/README.md
@@ -0,0 +1,99 @@
+# Performance Profile Creator (PPC)
+A tool to automate the process of creating Performance Profile using the user supplied profile parameters.
+
+## Software Components
+1. A CLI tool part of the Performance Addon Operator image
+
+## Flow
+1. PPC consumes a must-gather output.
+1. PPC output is a bunch of YAML data (PAO profile + NTO tuned part).
+
+## Things to note before running Performance Profile Creator
+1. Performance Profile Creator is present as an entrypoint (in /usr/local/bin/performance-profile-creator) in the Performance Addon Operator image.
+1. It is assumed that we have a must-gather directory available where we run the tool.
+ 1. Option 1: Run must-gather tool like below and use its output dir when you run PPC.
+ ```bash
+ oc adm must-gather --image=quay.io/openshift-kni/performance-addon-operator-must-gather:4.9-snapshot --dest-dir=
+ ```
+ 1. Option 2: Use an existing must-gather tarball decompressed to a directory.
+
+## Building Performance Profile Creator binary and image
+Developers can build the Performance Profile Creator images from the source tree using make targets.
+ 1. Setup Environment variables
+ ```bash
+ export REGISTRY_NAMESPACE=
+ export IMAGE_TAG= #defaults to "latest"
+ export IMAGE_BUILD_CMD=podman
+ ```
+1. To build from Performance Profile Creator source:
+ ```bash
+ make create-performance-profile
+ ```
+1. To build the Performance addon Operator image from source:
+ ```bash
+ make operator-container
+ ```
+Alternatively, you can pull the latest master upstream image. In the following examples, TAG has the format major.minor-snapshot. For example, the TAG for OpenShift 4.11 will be 4.11-snapshot:
+
+```bash
+podman pull quay.io/openshift-kni/performance-addon-operator:4.11-snapshot
+```
+
+## Running Performance Profile Creator
+Depending on how the must-gather directory was set up, the operator can now run the Performance Profile Creator tool with the required parameters.
+
+PPC Tool help output:
+```bash
+$ podman run --entrypoint performance-profile-creator quay.io/openshift-kni/performance-addon-operator:4.11-snapshot -h
+A tool that automates creation of Performance Profiles
+
+Usage:
+ performance-profile-creator [flags]
+
+Flags:
+ --disable-ht Disable Hyperthreading
+ -h, --help help for performance-profile-creator
+ --info string Show cluster information; requires --must-gather-dir-path, ignore the other arguments. [Valid values: log, json] (default "log")
+ --mcp-name string MCP name corresponding to the target machines (required)
+ --must-gather-dir-path string Must gather directory path (default "must-gather")
+ --power-consumption-mode string The power consumption mode. [Valid values: default, low-latency, ultra-low-latency] (default "default")
+ --profile-name string Name of the performance profile to be created (default "performance")
+ --reserved-cpu-count int Number of reserved CPUs (required)
+ --rt-kernel Enable Real Time Kernel (required)
+ --split-reserved-cpus-across-numa Split the Reserved CPUs across NUMA nodes
+ --topology-manager-policy string Kubelet Topology Manager Policy of the performance profile to be created. [Valid values: single-numa-node, best-effort, restricted] (default "restricted")
+ --user-level-networking Run with User level Networking(DPDK) enabled
+```
+
+1. Option 1: Example of using must-gather output dir (obtained after running must gather manually) along with required arguments
+ ```bash
+ podman run --entrypoint performance-profile-creator -v /path/to/must-gather-output:/must-gather:z \
+ quay.io/openshift-kni/performance-addon-operator:4.11-snapshot --must-gather-dir-path /must-gather \
+ --reserved-cpu-count 20 --mcp-name worker-cnf --rt-kernel false > performance-profile.yaml
+ ```
+1. Option 2: Example of using an existing must-gather tarball which is decompressed to a directory along with required arguments
+ ```bash
+ podman run --entrypoint performance-profile-creator -v /path/to/decompressed-tarball:/must-gather:z \
+ quay.io/openshift-kni/performance-addon-operator:4.11-snapshot --must-gather-dir-path /must-gather \
+ --reserved-cpu-count 20 --mcp-name worker-cnf --rt-kernel false > performance-profile.yaml
+ ```
+
+## Running Performance Profile Creator using Wrapper script
+
+1. Example of how the following wrapper script can be used to create a performance profile:
+ ```bash
+ ./hack/run-perf-profile-creator.sh -t must-gather.tar.gz -- --mcp-name=worker-cnf --reserved-cpu-count=20 \
+ --rt-kernel=false --split-reserved-cpus-across-numa=true --topology-manager-policy=restricted \
+ --power-consumption-mode=low-latency > performance-profile.yaml
+ ```
+
+## Discovery mode
+
+To learn about the key details of the cluster you want to create a profile for, you may use the `discovery` (aka `info`) mode:
+```bash
+ ./hack/run-perf-profile-creator.sh -t must-gather.tar.gz -- --info=log
+
+```
+
+The `info` option requires a value which drives the output format. Please refer to the online help of the performance-profile-creator
+tool to learn about the supported formats.
diff --git a/cmd/performance-profile-creator/cmd/root.go b/cmd/performance-profile-creator/cmd/root.go
new file mode 100644
index 000000000..f67fcabb7
--- /dev/null
+++ b/cmd/performance-profile-creator/cmd/root.go
@@ -0,0 +1,573 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * Copyright 2021 Red Hat, Inc.
+ */
+
+package cmd
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+ "os"
+ "sort"
+ "strconv"
+ "strings"
+
+ "github.com/openshift/cluster-node-tuning-operator/pkg/pao/profilecreator"
+ machineconfigv1 "github.com/openshift/machine-config-operator/pkg/apis/machineconfiguration.openshift.io/v1"
+ log "github.com/sirupsen/logrus"
+ "sigs.k8s.io/yaml"
+
+ "github.com/spf13/cobra"
+
+ performancev2 "github.com/openshift/cluster-node-tuning-operator/pkg/apis/pao/v2"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+ kubeletconfig "k8s.io/kubelet/config/v1beta1"
+ "k8s.io/utils/pointer"
+)
+
+const (
+ infoModeJSON = "json"
+ infoModeLog = "log"
+)
+
+var (
+ validTMPolicyValues = []string{kubeletconfig.SingleNumaNodeTopologyManagerPolicy, kubeletconfig.BestEffortTopologyManagerPolicy, kubeletconfig.RestrictedTopologyManagerPolicy}
+ validInfoModes = []string{infoModeLog, infoModeJSON}
+)
+
+// ProfileData collects and stores all the data needed for profile creation
+type ProfileData struct {
+ isolatedCPUs, reservedCPUs string
+ nodeSelector *metav1.LabelSelector
+ mcpSelector map[string]string
+ performanceProfileName string
+ topologyPoilcy string
+ rtKernel bool
+ additionalKernelArgs []string
+ userLevelNetworking *bool
+ disableHT bool
+}
+
+// ClusterData collects the cluster wide information, each mcp points to a list of ghw node handlers
+type ClusterData map[*machineconfigv1.MachineConfigPool][]*profilecreator.GHWHandler
+
+func init() {
+ log.SetOutput(os.Stderr)
+ log.SetFormatter(&log.TextFormatter{
+ DisableTimestamp: true,
+ })
+}
+
+// NewRootCommand returns entrypoint command to interact with all other commands
+func NewRootCommand() *cobra.Command {
+ pcArgs := &ProfileCreatorArgs{
+ UserLevelNetworking: pointer.BoolPtr(false),
+ }
+
+ var requiredFlags []string = []string{
+ "reserved-cpu-count",
+ "mcp-name",
+ "rt-kernel",
+ "must-gather-dir-path",
+ }
+
+ root := &cobra.Command{
+ Use: "performance-profile-creator",
+ Short: "A tool that automates creation of Performance Profiles",
+ RunE: func(cmd *cobra.Command, args []string) error {
+ if cmd.Flag("info").Changed {
+ infoMode := cmd.Flag("info").Value.String()
+ if err := validateFlag("info", infoMode, validInfoModes); err != nil {
+ return err
+ }
+
+ missingRequiredFlags := checkRequiredFlags(cmd, "must-gather-dir-path")
+ if len(missingRequiredFlags) > 0 {
+ return fmt.Errorf("missing required flags: %s", strings.Join(argNameToFlag(missingRequiredFlags), ", "))
+ }
+
+ mustGatherDirPath := cmd.Flag("must-gather-dir-path").Value.String()
+ cluster, err := getClusterData(mustGatherDirPath)
+ if err != nil {
+ return fmt.Errorf("failed to parse the cluster data: %v", err)
+ }
+
+ clusterInfo := makeClusterInfoFromClusterData(cluster)
+ if infoMode == infoModeJSON {
+ showClusterInfoJSON(clusterInfo)
+ } else {
+ showClusterInfoLog(clusterInfo)
+ }
+ return nil
+ }
+
+ missingRequiredFlags := checkRequiredFlags(cmd, requiredFlags...)
+ if len(missingRequiredFlags) > 0 {
+ return fmt.Errorf("missing required flags: %s", strings.Join(argNameToFlag(missingRequiredFlags), ", "))
+ }
+
+ mustGatherDirPath := cmd.Flag("must-gather-dir-path").Value.String()
+ cluster, err := getClusterData(mustGatherDirPath)
+ if err != nil {
+ return fmt.Errorf("failed to parse the cluster data: %v", err)
+ }
+
+ profileCreatorArgsFromFlags, err := getDataFromFlags(cmd)
+ if err != nil {
+ return fmt.Errorf("failed to obtain data from flags %v", err)
+ }
+ profileData, err := getProfileData(profileCreatorArgsFromFlags, cluster)
+ if err != nil {
+ return err
+ }
+ createProfile(*profileData)
+ return nil
+ },
+ }
+
+ root.PersistentFlags().IntVar(&pcArgs.ReservedCPUCount, "reserved-cpu-count", 0, "Number of reserved CPUs (required)")
+ root.PersistentFlags().BoolVar(&pcArgs.SplitReservedCPUsAcrossNUMA, "split-reserved-cpus-across-numa", false, "Split the Reserved CPUs across NUMA nodes")
+ root.PersistentFlags().StringVar(&pcArgs.MCPName, "mcp-name", "", "MCP name corresponding to the target machines (required)")
+ root.PersistentFlags().BoolVar(&pcArgs.DisableHT, "disable-ht", false, "Disable Hyperthreading")
+ root.PersistentFlags().BoolVar(&pcArgs.RTKernel, "rt-kernel", false, "Enable Real Time Kernel (required)")
+ root.PersistentFlags().BoolVar(pcArgs.UserLevelNetworking, "user-level-networking", false, "Run with User level Networking(DPDK) enabled")
+ root.PersistentFlags().StringVar(&pcArgs.PowerConsumptionMode, "power-consumption-mode", profilecreator.ValidPowerConsumptionModes[0], fmt.Sprintf("The power consumption mode. [Valid values: %s]", strings.Join(profilecreator.ValidPowerConsumptionModes, ", ")))
+ root.PersistentFlags().StringVar(&pcArgs.MustGatherDirPath, "must-gather-dir-path", "must-gather", "Must gather directory path")
+ root.PersistentFlags().StringVar(&pcArgs.ProfileName, "profile-name", "performance", "Name of the performance profile to be created")
+ root.PersistentFlags().StringVar(&pcArgs.TMPolicy, "topology-manager-policy", kubeletconfig.RestrictedTopologyManagerPolicy, fmt.Sprintf("Kubelet Topology Manager Policy of the performance profile to be created. [Valid values: %s, %s, %s]", kubeletconfig.SingleNumaNodeTopologyManagerPolicy, kubeletconfig.BestEffortTopologyManagerPolicy, kubeletconfig.RestrictedTopologyManagerPolicy))
+ root.PersistentFlags().StringVar(&pcArgs.Info, "info", infoModeLog, fmt.Sprintf("Show cluster information; requires --must-gather-dir-path, ignore the other arguments. [Valid values: %s]", strings.Join(validInfoModes, ", ")))
+
+ return root
+}
+
+func checkRequiredFlags(cmd *cobra.Command, argNames ...string) []string {
+ missing := []string{}
+ for _, argName := range argNames {
+ if !cmd.Flag(argName).Changed {
+ missing = append(missing, argName)
+ }
+ }
+ return missing
+}
+
+func argNameToFlag(argNames []string) []string {
+ var flagNames []string
+ for _, argName := range argNames {
+ flagNames = append(flagNames, fmt.Sprintf("--%s", argName))
+ }
+ return flagNames
+}
+
+func getClusterData(mustGatherDirPath string) (ClusterData, error) {
+ cluster := make(ClusterData)
+ info, err := os.Stat(mustGatherDirPath)
+ if os.IsNotExist(err) {
+ return nil, fmt.Errorf("the must-gather path '%s' is not valid", mustGatherDirPath)
+ }
+ if err != nil {
+ return nil, fmt.Errorf("can't access the must-gather path: %v", err)
+ }
+ if !info.IsDir() {
+ return nil, fmt.Errorf("the must-gather path '%s' is not a directory", mustGatherDirPath)
+ }
+
+ mcps, err := profilecreator.GetMCPList(mustGatherDirPath)
+ if err != nil {
+ return nil, fmt.Errorf("failed to get the MCP list under %s: %v", mustGatherDirPath, err)
+ }
+
+ nodes, err := profilecreator.GetNodeList(mustGatherDirPath)
+ if err != nil {
+ return nil, fmt.Errorf("failed to load the cluster nodes: %v", err)
+ }
+
+ for _, mcp := range mcps {
+ matchedNodes, err := profilecreator.GetNodesForPool(mcp, mcps, nodes)
+ if err != nil {
+ return nil, fmt.Errorf("failed to find MCP %s's nodes: %v", mcp.Name, err)
+ }
+ handlers := make([]*profilecreator.GHWHandler, len(matchedNodes))
+ for i, node := range matchedNodes {
+ handle, err := profilecreator.NewGHWHandler(mustGatherDirPath, node)
+ if err != nil {
+				return nil, fmt.Errorf("failed to load node's %s's GHW snapshot : %v", node.GetName(), err)
+ }
+ handlers[i] = handle
+ }
+ cluster[mcp] = handlers
+ }
+
+ return cluster, nil
+}
+
+// NUMACellInfo describe a NUMA cell on a node
+type NUMACellInfo struct {
+ ID int `json:"id"`
+ CoreList []int `json:"cores"`
+}
+
+// NodeInfo describe a Node in a MCP
+type NodeInfo struct {
+ Name string `json:"name"`
+ HTEnabled bool `json:"smt_enabled"`
+ CPUsCount int `json:"cpus_count"`
+ NUMACells []NUMACellInfo `json:"numa_cells"`
+}
+
+// MCPInfo describe a MCP in a cluster
+type MCPInfo struct {
+ Name string `json:"name"`
+ Nodes []NodeInfo `json:"nodes"`
+}
+
+// ClusterInfo describe a cluster
+type ClusterInfo []MCPInfo
+
+// Sort ensures all sequences in the ClusterInfo are sorted, to make comparisons easier.
+func (cInfo ClusterInfo) Sort() ClusterInfo {
+ for _, mcpInfo := range cInfo {
+ for _, nodeInfo := range mcpInfo.Nodes {
+ for _, numaCell := range nodeInfo.NUMACells {
+ sort.Ints(numaCell.CoreList)
+ }
+ sort.Slice(nodeInfo.NUMACells, func(i, j int) bool { return nodeInfo.NUMACells[i].ID < nodeInfo.NUMACells[j].ID })
+ }
+ }
+ sort.Slice(cInfo, func(i, j int) bool { return cInfo[i].Name < cInfo[j].Name })
+ return cInfo
+}
+
+func makeClusterInfoFromClusterData(cluster ClusterData) ClusterInfo {
+ var cInfo ClusterInfo
+ for mcp, nodeHandlers := range cluster {
+ mInfo := MCPInfo{
+ Name: mcp.Name,
+ }
+ for _, handle := range nodeHandlers {
+ topology, err := handle.SortedTopology()
+ if err != nil {
+ log.Infof("%s(Topology discovery error: %v)", handle.Node.GetName(), err)
+ continue
+ }
+
+ htEnabled, err := handle.IsHyperthreadingEnabled()
+ if err != nil {
+ log.Infof("%s(HT discovery error: %v)", handle.Node.GetName(), err)
+ }
+
+ nInfo := NodeInfo{
+ Name: handle.Node.GetName(),
+ HTEnabled: htEnabled,
+ }
+
+ for id, node := range topology.Nodes {
+ var coreList []int
+ for _, core := range node.Cores {
+ coreList = append(coreList, core.LogicalProcessors...)
+ }
+ nInfo.CPUsCount += len(coreList)
+ nInfo.NUMACells = append(nInfo.NUMACells, NUMACellInfo{
+ ID: id,
+ CoreList: coreList,
+ })
+ }
+ mInfo.Nodes = append(mInfo.Nodes, nInfo)
+ }
+ cInfo = append(cInfo, mInfo)
+ }
+ return cInfo.Sort()
+}
+
+func showClusterInfoJSON(cInfo ClusterInfo) {
+ json.NewEncoder(os.Stdout).Encode(cInfo)
+}
+
+func showClusterInfoLog(cInfo ClusterInfo) {
+ log.Infof("Cluster info:")
+ for _, mcpInfo := range cInfo {
+ log.Infof("MCP '%s' nodes:", mcpInfo.Name)
+ for _, nInfo := range mcpInfo.Nodes {
+ log.Infof("Node: %s (NUMA cells: %d, HT: %v)", nInfo.Name, len(nInfo.NUMACells), nInfo.HTEnabled)
+ for _, cInfo := range nInfo.NUMACells {
+ log.Infof("NUMA cell %d : %v", cInfo.ID, cInfo.CoreList)
+ }
+ log.Infof("CPU(s): %d", nInfo.CPUsCount)
+ }
+ log.Infof("---")
+ }
+}
+func getDataFromFlags(cmd *cobra.Command) (ProfileCreatorArgs, error) {
+ creatorArgs := ProfileCreatorArgs{}
+ mustGatherDirPath := cmd.Flag("must-gather-dir-path").Value.String()
+ mcpName := cmd.Flag("mcp-name").Value.String()
+ reservedCPUCount, err := strconv.Atoi(cmd.Flag("reserved-cpu-count").Value.String())
+ if err != nil {
+ return creatorArgs, fmt.Errorf("failed to parse reserved-cpu-count flag: %v", err)
+ }
+ splitReservedCPUsAcrossNUMA, err := strconv.ParseBool(cmd.Flag("split-reserved-cpus-across-numa").Value.String())
+ if err != nil {
+ return creatorArgs, fmt.Errorf("failed to parse split-reserved-cpus-across-numa flag: %v", err)
+ }
+ profileName := cmd.Flag("profile-name").Value.String()
+ tmPolicy := cmd.Flag("topology-manager-policy").Value.String()
+	// NOTE(review): removed a stale `if err != nil` check here — err still held
+	// the (already handled) result of the split-reserved-cpus-across-numa parse,
+	// so the topology-manager-policy "parse failure" branch was dead code.
+ err = validateFlag("topology-manager-policy", tmPolicy, validTMPolicyValues)
+ if err != nil {
+ return creatorArgs, fmt.Errorf("invalid value for topology-manager-policy flag specified: %v", err)
+ }
+ if tmPolicy == kubeletconfig.SingleNumaNodeTopologyManagerPolicy && splitReservedCPUsAcrossNUMA {
+ return creatorArgs, fmt.Errorf("not appropriate to split reserved CPUs in case of topology-manager-policy: %v", tmPolicy)
+ }
+ powerConsumptionMode := cmd.Flag("power-consumption-mode").Value.String()
+	// NOTE(review): removed a stale `if err != nil` check here — reading a flag
+	// via cmd.Flag().Value.String() cannot fail, so the previous
+	// power-consumption-mode "parse failure" branch was dead code.
+ err = validateFlag("power-consumption-mode", powerConsumptionMode, profilecreator.ValidPowerConsumptionModes)
+ if err != nil {
+ return creatorArgs, fmt.Errorf("invalid value for power-consumption-mode flag specified: %v", err)
+ }
+
+ rtKernelEnabled, err := strconv.ParseBool(cmd.Flag("rt-kernel").Value.String())
+ if err != nil {
+ return creatorArgs, fmt.Errorf("failed to parse rt-kernel flag: %v", err)
+ }
+
+ htDisabled, err := strconv.ParseBool(cmd.Flag("disable-ht").Value.String())
+ if err != nil {
+ return creatorArgs, fmt.Errorf("failed to parse disable-ht flag: %v", err)
+ }
+ creatorArgs = ProfileCreatorArgs{
+ MustGatherDirPath: mustGatherDirPath,
+ ProfileName: profileName,
+ ReservedCPUCount: reservedCPUCount,
+ SplitReservedCPUsAcrossNUMA: splitReservedCPUsAcrossNUMA,
+ MCPName: mcpName,
+ TMPolicy: tmPolicy,
+ RTKernel: rtKernelEnabled,
+ PowerConsumptionMode: powerConsumptionMode,
+ DisableHT: htDisabled,
+ }
+
+ if cmd.Flag("user-level-networking").Changed {
+ userLevelNetworkingEnabled, err := strconv.ParseBool(cmd.Flag("user-level-networking").Value.String())
+ if err != nil {
+ return creatorArgs, fmt.Errorf("failed to parse user-level-networking flag: %v", err)
+ }
+ creatorArgs.UserLevelNetworking = &userLevelNetworkingEnabled
+ }
+
+ return creatorArgs, nil
+}
+
+func getProfileData(args ProfileCreatorArgs, cluster ClusterData) (*ProfileData, error) {
+ mcps := make([]*machineconfigv1.MachineConfigPool, len(cluster))
+ mcpNames := make([]string, len(cluster))
+ var mcp *machineconfigv1.MachineConfigPool
+
+ i := 0
+ for m := range cluster {
+ mcps[i] = m
+ mcpNames[i] = m.Name
+ if m.Name == args.MCPName {
+ mcp = m
+ }
+ i++
+ }
+
+ if mcp == nil {
+ return nil, fmt.Errorf("'%s' MCP does not exist, valid values are %v", args.MCPName, mcpNames)
+ }
+
+ mcpSelector, err := profilecreator.GetMCPSelector(mcp, mcps)
+ if err != nil {
+ return nil, fmt.Errorf("failed to compute the MCP selector: %v", err)
+ }
+
+ if len(cluster[mcp]) == 0 {
+ return nil, fmt.Errorf("no schedulable nodes are associated with '%s' MCP", args.MCPName)
+ }
+
+ var matchedNodeNames []string
+ for _, nodeHandler := range cluster[mcp] {
+ matchedNodeNames = append(matchedNodeNames, nodeHandler.Node.GetName())
+ }
+	log.Infof("Nodes targeted by %s MCP are: %v", args.MCPName, matchedNodeNames)
+ err = profilecreator.EnsureNodesHaveTheSameHardware(cluster[mcp])
+ if err != nil {
+ return nil, fmt.Errorf("targeted nodes differ: %v", err)
+ }
+
+ // We make sure that the matched Nodes are the same
+ // Assumption here is moving forward matchedNodes[0] is representative of how all the nodes are
+ // same from hardware topology point of view
+
+ nodeHandle := cluster[mcp][0]
+ reservedCPUs, isolatedCPUs, err := nodeHandle.GetReservedAndIsolatedCPUs(args.ReservedCPUCount, args.SplitReservedCPUsAcrossNUMA, args.DisableHT)
+ if err != nil {
+ return nil, fmt.Errorf("failed to compute the reserved and isolated CPUs: %v", err)
+ }
+ log.Infof("%d reserved CPUs allocated: %v ", reservedCPUs.Size(), reservedCPUs.String())
+ log.Infof("%d isolated CPUs allocated: %v", isolatedCPUs.Size(), isolatedCPUs.String())
+ kernelArgs := profilecreator.GetAdditionalKernelArgs(args.PowerConsumptionMode, args.DisableHT)
+ profileData := &ProfileData{
+ reservedCPUs: reservedCPUs.String(),
+ isolatedCPUs: isolatedCPUs.String(),
+ nodeSelector: mcp.Spec.NodeSelector,
+ mcpSelector: mcpSelector,
+ performanceProfileName: args.ProfileName,
+ topologyPoilcy: args.TMPolicy,
+ rtKernel: args.RTKernel,
+ additionalKernelArgs: kernelArgs,
+ userLevelNetworking: args.UserLevelNetworking,
+ }
+ return profileData, nil
+}
+
+func validateFlag(name, value string, validValues []string) error {
+ if isStringInSlice(value, validValues) {
+ return nil
+ }
+ return fmt.Errorf("flag %q: Value '%s' is invalid. Valid values "+
+ "come from the set %v", name, value, validValues)
+}
+
+func isStringInSlice(value string, candidates []string) bool {
+ for _, candidate := range candidates {
+ if strings.EqualFold(candidate, value) {
+ return true
+ }
+ }
+ return false
+}
+
+// ProfileCreatorArgs represents the arguments passed to the ProfileCreator
+type ProfileCreatorArgs struct {
+ PowerConsumptionMode string `json:"power-consumption-mode"`
+ MustGatherDirPath string `json:"must-gather-dir-path"`
+ ProfileName string `json:"profile-name"`
+ ReservedCPUCount int `json:"reserved-cpu-count"`
+ SplitReservedCPUsAcrossNUMA bool `json:"split-reserved-cpus-across-numa"`
+ DisableHT bool `json:"disable-ht"`
+ RTKernel bool `json:"rt-kernel"`
+ UserLevelNetworking *bool `json:"user-level-networking,omitempty"`
+ MCPName string `json:"mcp-name"`
+ TMPolicy string `json:"topology-manager-policy"`
+ Info string `json:"info"`
+}
+
+func createProfile(profileData ProfileData) {
+ reserved := performancev2.CPUSet(profileData.reservedCPUs)
+ isolated := performancev2.CPUSet(profileData.isolatedCPUs)
+ // TODO: Get the name from MCP if not specified in the command line arguments
+ profile := &performancev2.PerformanceProfile{
+ TypeMeta: metav1.TypeMeta{
+ Kind: "PerformanceProfile",
+ APIVersion: performancev2.GroupVersion.String(),
+ },
+ ObjectMeta: metav1.ObjectMeta{
+ Name: profileData.performanceProfileName,
+ },
+ Spec: performancev2.PerformanceProfileSpec{
+ CPU: &performancev2.CPU{
+ Isolated: &isolated,
+ Reserved: &reserved,
+ },
+ MachineConfigPoolSelector: profileData.mcpSelector,
+ NodeSelector: profileData.nodeSelector.MatchLabels,
+ RealTimeKernel: &performancev2.RealTimeKernel{
+ Enabled: &profileData.rtKernel,
+ },
+ AdditionalKernelArgs: profileData.additionalKernelArgs,
+ NUMA: &performancev2.NUMA{
+ TopologyPolicy: &profileData.topologyPoilcy,
+ },
+ },
+ }
+
+ if profileData.userLevelNetworking != nil {
+ profile.Spec.Net = &performancev2.Net{
+ UserLevelNetworking: profileData.userLevelNetworking,
+ }
+ }
+
+ // write CSV to out dir
+ writer := strings.Builder{}
+	_ = MarshallObject(&profile, &writer) // best-effort: strings.Builder writes cannot fail
+
+ fmt.Printf("%s", writer.String())
+}
+
+// MarshallObject marshals an object, usually a CSV, into YAML
+func MarshallObject(obj interface{}, writer io.Writer) error {
+ jsonBytes, err := json.Marshal(obj)
+ if err != nil {
+ return err
+ }
+
+ var r unstructured.Unstructured
+ if err := json.Unmarshal(jsonBytes, &r.Object); err != nil {
+ return err
+ }
+
+ // remove status and metadata.creationTimestamp
+ unstructured.RemoveNestedField(r.Object, "metadata", "creationTimestamp")
+ unstructured.RemoveNestedField(r.Object, "template", "metadata", "creationTimestamp")
+ unstructured.RemoveNestedField(r.Object, "spec", "template", "metadata", "creationTimestamp")
+ unstructured.RemoveNestedField(r.Object, "status")
+
+ deployments, exists, err := unstructured.NestedSlice(r.Object, "spec", "install", "spec", "deployments")
+ if exists {
+ for _, obj := range deployments {
+ deployment := obj.(map[string]interface{})
+ unstructured.RemoveNestedField(deployment, "metadata", "creationTimestamp")
+ unstructured.RemoveNestedField(deployment, "spec", "template", "metadata", "creationTimestamp")
+ unstructured.RemoveNestedField(deployment, "status")
+ }
+ unstructured.SetNestedSlice(r.Object, deployments, "spec", "install", "spec", "deployments")
+ }
+
+ jsonBytes, err = json.Marshal(r.Object)
+ if err != nil {
+ return err
+ }
+
+ yamlBytes, err := yaml.JSONToYAML(jsonBytes)
+ if err != nil {
+ return err
+ }
+
+ // fix double quoted strings by removing unneeded single quotes...
+ s := string(yamlBytes)
+ s = strings.Replace(s, " '\"", " \"", -1)
+ s = strings.Replace(s, "\"'\n", "\"\n", -1)
+
+ yamlBytes = []byte(s)
+
+ _, err = writer.Write([]byte("---\n"))
+ if err != nil {
+ return err
+ }
+
+ _, err = writer.Write(yamlBytes)
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
diff --git a/cmd/performance-profile-creator/main.go b/cmd/performance-profile-creator/main.go
new file mode 100644
index 000000000..3ba7cdc4d
--- /dev/null
+++ b/cmd/performance-profile-creator/main.go
@@ -0,0 +1,30 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * Copyright 2021 Red Hat, Inc.
+ */
+
+package main
+
+import (
+ "os"
+
+ "github.com/openshift/cluster-node-tuning-operator/cmd/performance-profile-creator/cmd"
+)
+
+func main() {
+ root := cmd.NewRootCommand()
+ if err := root.Execute(); err != nil {
+ os.Exit(1)
+ }
+}
diff --git a/examples/pao/crd/bases/performance.openshift.io_performanceprofiles.yaml b/examples/pao/crd/bases/performance.openshift.io_performanceprofiles.yaml
new file mode 100644
index 000000000..1028c6daf
--- /dev/null
+++ b/examples/pao/crd/bases/performance.openshift.io_performanceprofiles.yaml
@@ -0,0 +1,679 @@
+
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: (devel)
+ creationTimestamp: null
+ name: performanceprofiles.performance.openshift.io
+spec:
+ group: performance.openshift.io
+ names:
+ kind: PerformanceProfile
+ listKind: PerformanceProfileList
+ plural: performanceprofiles
+ singular: performanceprofile
+ scope: Cluster
+ versions:
+ - deprecated: true
+ deprecationWarning: v1 is deprecated and should be removed in next three releases,
+ use v2 instead
+ name: v1
+ schema:
+ openAPIV3Schema:
+ description: PerformanceProfile is the Schema for the performanceprofiles
+ API
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: PerformanceProfileSpec defines the desired state of PerformanceProfile.
+ properties:
+ additionalKernelArgs:
+ description: Addional kernel arguments.
+ items:
+ type: string
+ type: array
+ cpu:
+ description: CPU defines a set of CPU related parameters.
+ properties:
+ balanceIsolated:
+ description: BalanceIsolated toggles whether or not the Isolated
+ CPU set is eligible for load balancing work loads. When this
+ option is set to "false", the Isolated CPU set will be static,
+ meaning workloads have to explicitly assign each thread to a
+ specific cpu in order to work across multiple CPUs. Setting
+ this to "true" allows workloads to be balanced across CPUs.
+ Setting this to "false" offers the most predictable performance
+ for guaranteed workloads, but it offloads the complexity of
+ cpu load balancing to the application. Defaults to "true"
+ type: boolean
+ isolated:
+ description: 'Isolated defines a set of CPUs that will be used
+ to give to application threads the most execution time possible,
+ which means removing as many extraneous tasks off a CPU as possible.
+ It is important to notice the CPU manager can choose any CPU
+ to run the workload except the reserved CPUs. In order to guarantee
+ that your workload will run on the isolated CPU: 1. The union
+ of reserved CPUs and isolated CPUs should include all online
+ CPUs 2. The isolated CPUs field should be the complementary
+ to reserved CPUs field'
+ type: string
+ reserved:
+ description: Reserved defines a set of CPUs that will not be used
+ for any container workloads initiated by kubelet.
+ type: string
+ required:
+ - isolated
+ type: object
+ globallyDisableIrqLoadBalancing:
+ description: GloballyDisableIrqLoadBalancing toggles whether IRQ load
+ balancing will be disabled for the Isolated CPU set. When the option
+ is set to "true" it disables IRQs load balancing for the Isolated
+ CPU set. Setting the option to "false" allows the IRQs to be balanced
+ across all CPUs, however the IRQs load balancing can be disabled
+ per pod CPUs when using irq-load-balancing.crio.io/cpu-quota.crio.io
+ annotations. Defaults to "false"
+ type: boolean
+ hugepages:
+ description: HugePages defines a set of huge pages related parameters.
+ It is possible to set huge pages with multiple size values at the
+ same time. For example, hugepages can be set with 1G and 2M, both
+ values will be set on the node by the performance-addon-operator.
+ It is important to notice that setting hugepages default size to
+ 1G will remove all 2M related folders from the node and it will
+ be impossible to configure 2M hugepages under the node.
+ properties:
+ defaultHugepagesSize:
+ description: DefaultHugePagesSize defines huge pages default size
+ under kernel boot parameters.
+ type: string
+ pages:
+ description: Pages defines huge pages that we want to allocate
+ at boot time.
+ items:
+ description: HugePage defines the number of allocated huge pages
+ of the specific size.
+ properties:
+ count:
+ description: Count defines amount of huge pages, maps to
+ the 'hugepages' kernel boot parameter.
+ format: int32
+ type: integer
+ node:
+ description: Node defines the NUMA node where hugepages
+ will be allocated, if not specified, pages will be allocated
+ equally between NUMA nodes
+ format: int32
+ type: integer
+ size:
+ description: Size defines huge page size, maps to the 'hugepagesz'
+ kernel boot parameter.
+ type: string
+ type: object
+ type: array
+ type: object
+ machineConfigLabel:
+ additionalProperties:
+ type: string
+ description: MachineConfigLabel defines the label to add to the MachineConfigs
+ the operator creates. It has to be used in the MachineConfigSelector
+ of the MachineConfigPool which targets this performance profile.
+ Defaults to "machineconfiguration.openshift.io/role="
+ type: object
+ machineConfigPoolSelector:
+ additionalProperties:
+ type: string
+ description: MachineConfigPoolSelector defines the MachineConfigPool
+ label to use in the MachineConfigPoolSelector of resources like
+ KubeletConfigs created by the operator. Defaults to "machineconfiguration.openshift.io/role="
+ type: object
+ net:
+ description: Net defines a set of network related features
+ properties:
+ devices:
+ description: Devices contains a list of network device representations
+ that will be set with a netqueue count equal to CPU.Reserved
+ . If no devices are specified then the default is all devices.
+ items:
+ description: 'Device defines a way to represent a network device
+ in several options: device name, vendor ID, model ID, PCI
+ path and MAC address'
+ properties:
+ deviceID:
+ description: Network device ID (model) represnted as a 16
+ bit hexmadecimal number.
+ type: string
+ interfaceName:
+ description: Network device name to be matched. It uses
+ a syntax of shell-style wildcards which are either positive
+ or negative.
+ type: string
+ vendorID:
+ description: Network device vendor ID represnted as a 16
+ bit Hexmadecimal number.
+ type: string
+ type: object
+ type: array
+ userLevelNetworking:
+ description: UserLevelNetworking when enabled - sets either all
+ or specified network devices queue size to the amount of reserved
+ CPUs. Defaults to "false".
+ type: boolean
+ type: object
+ nodeSelector:
+ additionalProperties:
+ type: string
+ description: 'NodeSelector defines the Node label to use in the NodeSelectors
+ of resources like Tuned created by the operator. It most likely
+ should, but does not have to match the node label in the NodeSelector
+ of the MachineConfigPool which targets this performance profile.
+ In the case when machineConfigLabels or machineConfigPoolSelector
+ are not set, we are expecting a certain NodeSelector format /:
+ "" in order to be able to calculate the default values for the former
+ mentioned fields.'
+ type: object
+ numa:
+ description: NUMA defines options related to topology aware affinities
+ properties:
+ topologyPolicy:
+ description: Name of the policy applied when TopologyManager is
+ enabled Operator defaults to "best-effort"
+ type: string
+ type: object
+ realTimeKernel:
+ description: RealTimeKernel defines a set of real time kernel related
+ parameters. RT kernel won't be installed when not set.
+ properties:
+ enabled:
+ description: Enabled defines if the real time kernel packages
+ should be installed. Defaults to "false"
+ type: boolean
+ type: object
+ required:
+ - cpu
+ - nodeSelector
+ type: object
+ status:
+ description: PerformanceProfileStatus defines the observed state of PerformanceProfile.
+ properties:
+ conditions:
+ description: Conditions represents the latest available observations
+ of current state.
+ items:
+ description: Condition represents the state of the operator's reconciliation
+ functionality.
+ properties:
+ lastHeartbeatTime:
+ format: date-time
+ type: string
+ lastTransitionTime:
+ format: date-time
+ type: string
+ message:
+ type: string
+ reason:
+ type: string
+ status:
+ type: string
+ type:
+ description: ConditionType is the state of the operator's reconciliation
+ functionality.
+ type: string
+ required:
+ - status
+ - type
+ type: object
+ type: array
+ runtimeClass:
+ description: RuntimeClass contains the name of the RuntimeClass resource
+ created by the operator.
+ type: string
+ tuned:
+ description: Tuned points to the Tuned custom resource object that
+ contains the tuning values generated by this operator.
+ type: string
+ type: object
+ type: object
+ served: true
+ storage: false
+ subresources:
+ status: {}
+ - deprecated: true
+ deprecationWarning: v1alpha1 is deprecated and should be removed in the next release,
+ use v2 instead
+ name: v1alpha1
+ schema:
+ openAPIV3Schema:
+ description: PerformanceProfile is the Schema for the performanceprofiles
+ API
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: PerformanceProfileSpec defines the desired state of PerformanceProfile.
+ properties:
+ additionalKernelArgs:
+ description: Addional kernel arguments.
+ items:
+ type: string
+ type: array
+ cpu:
+ description: CPU defines a set of CPU related parameters.
+ properties:
+ balanceIsolated:
+ description: BalanceIsolated toggles whether or not the Isolated
+ CPU set is eligible for load balancing work loads. When this
+ option is set to "false", the Isolated CPU set will be static,
+ meaning workloads have to explicitly assign each thread to a
+ specific cpu in order to work across multiple CPUs. Setting
+ this to "true" allows workloads to be balanced across CPUs.
+ Setting this to "false" offers the most predictable performance
+ for guaranteed workloads, but it offloads the complexity of
+ cpu load balancing to the application. Defaults to "true"
+ type: boolean
+ isolated:
+ description: 'Isolated defines a set of CPUs that will be used
+ to give to application threads the most execution time possible,
+ which means removing as many extraneous tasks off a CPU as possible.
+ It is important to notice the CPU manager can choose any CPU
+ to run the workload except the reserved CPUs. In order to guarantee
+ that your workload will run on the isolated CPU: 1. The union
+ of reserved CPUs and isolated CPUs should include all online
+ CPUs 2. The isolated CPUs field should be the complementary
+ to reserved CPUs field'
+ type: string
+ reserved:
+ description: Reserved defines a set of CPUs that will not be used
+ for any container workloads initiated by kubelet.
+ type: string
+ type: object
+ hugepages:
+ description: HugePages defines a set of huge pages related parameters.
+ It is possible to set huge pages with multiple size values at the
+ same time. For example, hugepages can be set with 1G and 2M, both
+ values will be set on the node by the performance-addon-operator.
+ It is important to notice that setting hugepages default size to
+ 1G will remove all 2M related folders from the node and it will
+ be impossible to configure 2M hugepages under the node.
+ properties:
+ defaultHugepagesSize:
+ description: DefaultHugePagesSize defines huge pages default size
+ under kernel boot parameters.
+ type: string
+ pages:
+ description: Pages defines huge pages that we want to allocate
+ at boot time.
+ items:
+ description: HugePage defines the number of allocated huge pages
+ of the specific size.
+ properties:
+ count:
+ description: Count defines amount of huge pages, maps to
+ the 'hugepages' kernel boot parameter.
+ format: int32
+ type: integer
+ node:
+ description: Node defines the NUMA node where hugepages
+ will be allocated, if not specified, pages will be allocated
+ equally between NUMA nodes
+ format: int32
+ type: integer
+ size:
+ description: Size defines huge page size, maps to the 'hugepagesz'
+ kernel boot parameter.
+ type: string
+ type: object
+ type: array
+ type: object
+ machineConfigLabel:
+ additionalProperties:
+ type: string
+ description: MachineConfigLabel defines the label to add to the MachineConfigs
+ the operator creates. It has to be used in the MachineConfigSelector
+ of the MachineConfigPool which targets this performance profile.
+ Defaults to "machineconfiguration.openshift.io/role="
+ type: object
+ machineConfigPoolSelector:
+ additionalProperties:
+ type: string
+ description: MachineConfigPoolSelector defines the MachineConfigPool
+ label to use in the MachineConfigPoolSelector of resources like
+ KubeletConfigs created by the operator. Defaults to "machineconfiguration.openshift.io/role="
+ type: object
+ nodeSelector:
+ additionalProperties:
+ type: string
+ description: NodeSelector defines the Node label to use in the NodeSelectors
+ of resources like Tuned created by the operator. It most likely
+ should, but does not have to match the node label in the NodeSelector
+ of the MachineConfigPool which targets this performance profile.
+ type: object
+ numa:
+ description: NUMA defines options related to topology aware affinities
+ properties:
+ topologyPolicy:
+ description: Name of the policy applied when TopologyManager is
+ enabled Operator defaults to "best-effort"
+ type: string
+ type: object
+ realTimeKernel:
+ description: RealTimeKernel defines a set of real time kernel related
+ parameters. RT kernel won't be installed when not set.
+ properties:
+ enabled:
+ description: Enabled defines if the real time kernel packages
+ should be installed. Defaults to "false"
+ type: boolean
+ type: object
+ type: object
+ status:
+ description: PerformanceProfileStatus defines the observed state of PerformanceProfile.
+ properties:
+ conditions:
+ description: Conditions represents the latest available observations
+ of current state.
+ items:
+ description: Condition represents the state of the operator's reconciliation
+ functionality.
+ properties:
+ lastHeartbeatTime:
+ format: date-time
+ type: string
+ lastTransitionTime:
+ format: date-time
+ type: string
+ message:
+ type: string
+ reason:
+ type: string
+ status:
+ type: string
+ type:
+ description: ConditionType is the state of the operator's reconciliation
+ functionality.
+ type: string
+ required:
+ - status
+ - type
+ type: object
+ type: array
+ runtimeClass:
+ description: RuntimeClass contains the name of the RuntimeClass resource
+ created by the operator.
+ type: string
+ tuned:
+ description: Tuned points to the Tuned custom resource object that
+ contains the tuning values generated by this operator.
+ type: string
+ type: object
+ type: object
+ served: true
+ storage: false
+ subresources:
+ status: {}
+ - name: v2
+ schema:
+ openAPIV3Schema:
+ description: PerformanceProfile is the Schema for the performanceprofiles
+ API
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: PerformanceProfileSpec defines the desired state of PerformanceProfile.
+ properties:
+ additionalKernelArgs:
+ description: Addional kernel arguments.
+ items:
+ type: string
+ type: array
+ cpu:
+ description: CPU defines a set of CPU related parameters.
+ properties:
+ balanceIsolated:
+ description: BalanceIsolated toggles whether or not the Isolated
+ CPU set is eligible for load balancing work loads. When this
+ option is set to "false", the Isolated CPU set will be static,
+ meaning workloads have to explicitly assign each thread to a
+ specific cpu in order to work across multiple CPUs. Setting
+ this to "true" allows workloads to be balanced across CPUs.
+ Setting this to "false" offers the most predictable performance
+ for guaranteed workloads, but it offloads the complexity of
+ cpu load balancing to the application. Defaults to "true"
+ type: boolean
+ isolated:
+ description: 'Isolated defines a set of CPUs that will be used
+ to give to application threads the most execution time possible,
+ which means removing as many extraneous tasks off a CPU as possible.
+ It is important to notice the CPU manager can choose any CPU
+ to run the workload except the reserved CPUs. In order to guarantee
+ that your workload will run on the isolated CPU: 1. The union
+ of reserved CPUs and isolated CPUs should include all online
+ CPUs 2. The isolated CPUs field should be the complementary
+ to reserved CPUs field'
+ type: string
+ reserved:
+ description: Reserved defines a set of CPUs that will not be used
+ for any container workloads initiated by kubelet.
+ type: string
+ required:
+ - isolated
+ - reserved
+ type: object
+ globallyDisableIrqLoadBalancing:
+ description: GloballyDisableIrqLoadBalancing toggles whether IRQ load
+ balancing will be disabled for the Isolated CPU set. When the option
+ is set to "true" it disables IRQs load balancing for the Isolated
+ CPU set. Setting the option to "false" allows the IRQs to be balanced
+ across all CPUs, however the IRQs load balancing can be disabled
+ per pod CPUs when using irq-load-balancing.crio.io/cpu-quota.crio.io
+ annotations. Defaults to "false"
+ type: boolean
+ hugepages:
+ description: HugePages defines a set of huge pages related parameters.
+ It is possible to set huge pages with multiple size values at the
+ same time. For example, hugepages can be set with 1G and 2M, both
+ values will be set on the node by the performance-addon-operator.
+ It is important to notice that setting hugepages default size to
+ 1G will remove all 2M related folders from the node and it will
+ be impossible to configure 2M hugepages under the node.
+ properties:
+ defaultHugepagesSize:
+ description: DefaultHugePagesSize defines huge pages default size
+ under kernel boot parameters.
+ type: string
+ pages:
+ description: Pages defines huge pages that we want to allocate
+ at boot time.
+ items:
+ description: HugePage defines the number of allocated huge pages
+ of the specific size.
+ properties:
+ count:
+ description: Count defines amount of huge pages, maps to
+ the 'hugepages' kernel boot parameter.
+ format: int32
+ type: integer
+ node:
+ description: Node defines the NUMA node where hugepages
+ will be allocated, if not specified, pages will be allocated
+ equally between NUMA nodes
+ format: int32
+ type: integer
+ size:
+ description: Size defines huge page size, maps to the 'hugepagesz'
+ kernel boot parameter.
+ type: string
+ type: object
+ type: array
+ type: object
+ machineConfigLabel:
+ additionalProperties:
+ type: string
+ description: MachineConfigLabel defines the label to add to the MachineConfigs
+ the operator creates. It has to be used in the MachineConfigSelector
+ of the MachineConfigPool which targets this performance profile.
+ Defaults to "machineconfiguration.openshift.io/role="
+ type: object
+ machineConfigPoolSelector:
+ additionalProperties:
+ type: string
+ description: MachineConfigPoolSelector defines the MachineConfigPool
+ label to use in the MachineConfigPoolSelector of resources like
+ KubeletConfigs created by the operator. Defaults to "machineconfiguration.openshift.io/role="
+ type: object
+ net:
+ description: Net defines a set of network related features
+ properties:
+ devices:
+ description: Devices contains a list of network device representations
+ that will be set with a netqueue count equal to CPU.Reserved
+ . If no devices are specified then the default is all devices.
+ items:
+ description: 'Device defines a way to represent a network device
+ in several options: device name, vendor ID, model ID, PCI
+ path and MAC address'
+ properties:
+ deviceID:
+                    description: Network device ID (model) represented as a
+                      16 bit hexadecimal number.
+ type: string
+ interfaceName:
+ description: Network device name to be matched. It uses
+ a syntax of shell-style wildcards which are either positive
+ or negative.
+ type: string
+ vendorID:
+                    description: Network device vendor ID represented as a
+                      16 bit hexadecimal number.
+ type: string
+ type: object
+ type: array
+ userLevelNetworking:
+ description: UserLevelNetworking when enabled - sets either all
+ or specified network devices queue size to the amount of reserved
+ CPUs. Defaults to "false".
+ type: boolean
+ type: object
+ nodeSelector:
+ additionalProperties:
+ type: string
+ description: 'NodeSelector defines the Node label to use in the NodeSelectors
+ of resources like Tuned created by the operator. It most likely
+ should, but does not have to match the node label in the NodeSelector
+ of the MachineConfigPool which targets this performance profile.
+            In the case when machineConfigLabels or machineConfigPoolSelector
+            are not set, we are expecting a certain NodeSelector format <domain>/<role>:
+            "" in order to be able to calculate the default values for the former
+            mentioned fields.'
+ type: object
+ numa:
+ description: NUMA defines options related to topology aware affinities
+ properties:
+ topologyPolicy:
+ description: Name of the policy applied when TopologyManager is
+ enabled Operator defaults to "best-effort"
+ type: string
+ type: object
+ realTimeKernel:
+ description: RealTimeKernel defines a set of real time kernel related
+ parameters. RT kernel won't be installed when not set.
+ properties:
+ enabled:
+ description: Enabled defines if the real time kernel packages
+ should be installed. Defaults to "false"
+ type: boolean
+ type: object
+ required:
+ - cpu
+ - nodeSelector
+ type: object
+ status:
+ description: PerformanceProfileStatus defines the observed state of PerformanceProfile.
+ properties:
+ conditions:
+ description: Conditions represents the latest available observations
+ of current state.
+ items:
+ description: Condition represents the state of the operator's reconciliation
+ functionality.
+ properties:
+ lastHeartbeatTime:
+ format: date-time
+ type: string
+ lastTransitionTime:
+ format: date-time
+ type: string
+ message:
+ type: string
+ reason:
+ type: string
+ status:
+ type: string
+ type:
+ description: ConditionType is the state of the operator's reconciliation
+ functionality.
+ type: string
+ required:
+ - status
+ - type
+ type: object
+ type: array
+ runtimeClass:
+ description: RuntimeClass contains the name of the RuntimeClass resource
+ created by the operator.
+ type: string
+ tuned:
+ description: Tuned points to the Tuned custom resource object that
+ contains the tuning values generated by this operator.
+ type: string
+ type: object
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
+status:
+ acceptedNames:
+ kind: ""
+ plural: ""
+ conditions: []
+ storedVersions: []
diff --git a/examples/pao/crd/kustomization.yaml b/examples/pao/crd/kustomization.yaml
new file mode 100644
index 000000000..b651e6b26
--- /dev/null
+++ b/examples/pao/crd/kustomization.yaml
@@ -0,0 +1,6 @@
+# This kustomization.yaml is not intended to be run by itself,
+# since it depends on service name and namespace that are out of this kustomize package.
+# It should be run by config/default
+resources:
+- bases/performance.openshift.io_performanceprofiles.yaml
+# +kubebuilder:scaffold:crdkustomizeresource
diff --git a/examples/pao/default/kustomization.yaml b/examples/pao/default/kustomization.yaml
new file mode 100644
index 000000000..918debf45
--- /dev/null
+++ b/examples/pao/default/kustomization.yaml
@@ -0,0 +1,5 @@
+bases:
+- ../crd
+- ../rbac
+- ../samples
+- ../manager
diff --git a/examples/pao/rbac/kustomization.yaml b/examples/pao/rbac/kustomization.yaml
new file mode 100644
index 000000000..b228b96da
--- /dev/null
+++ b/examples/pao/rbac/kustomization.yaml
@@ -0,0 +1,3 @@
+resources:
+- role.yaml
+- role_binding.yaml
diff --git a/examples/pao/rbac/role.yaml b/examples/pao/rbac/role.yaml
new file mode 100644
index 000000000..285346831
--- /dev/null
+++ b/examples/pao/rbac/role.yaml
@@ -0,0 +1,100 @@
+
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ creationTimestamp: null
+ name: performance-operator
+rules:
+- apiGroups:
+ - ""
+ resources:
+ - events
+ verbs:
+ - '*'
+- apiGroups:
+ - ""
+ resources:
+ - nodes
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - config.openshift.io
+ resources:
+ - infrastructures
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - machineconfiguration.openshift.io
+ resources:
+ - kubeletconfigs
+ - machineconfigpools
+ - machineconfigs
+ verbs:
+ - '*'
+- apiGroups:
+ - node.k8s.io
+ resources:
+ - runtimeclasses
+ verbs:
+ - '*'
+- apiGroups:
+ - performance.openshift.io
+ resources:
+ - performanceprofiles
+ - performanceprofiles/finalizers
+ - performanceprofiles/status
+ verbs:
+ - '*'
+- apiGroups:
+ - tuned.openshift.io
+ resources:
+ - profiles
+ - tuneds
+ verbs:
+ - '*'
+
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+ creationTimestamp: null
+ name: performance-operator
+ namespace: openshift-cluster-node-tuning-operator
+rules:
+- apiGroups:
+ - apps
+ resourceNames:
+ - performance-operator
+ resources:
+ - deployments/finalizers
+ verbs:
+ - update
+- apiGroups:
+ - coordination.k8s.io
+ resources:
+ - leases
+ verbs:
+ - create
+ - get
+ - list
+ - update
+- apiGroups:
+ - ""
+ resources:
+ - configmaps
+ - pods
+ - services
+ - services/finalizers
+ verbs:
+ - '*'
+- apiGroups:
+ - monitoring.coreos.com
+ resources:
+ - servicemonitors
+ verbs:
+ - '*'
diff --git a/examples/pao/rbac/role_binding.yaml b/examples/pao/rbac/role_binding.yaml
new file mode 100644
index 000000000..dc745682e
--- /dev/null
+++ b/examples/pao/rbac/role_binding.yaml
@@ -0,0 +1,24 @@
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: performance-operator
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: performance-operator
+subjects:
+- kind: ServiceAccount
+ name: performance-operator
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+ name: performance-operator
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: Role
+ name: performance-operator
+subjects:
+ - kind: ServiceAccount
+ name: performance-operator
+
diff --git a/examples/pao/samples/kustomization.yaml b/examples/pao/samples/kustomization.yaml
new file mode 100644
index 000000000..39a50a614
--- /dev/null
+++ b/examples/pao/samples/kustomization.yaml
@@ -0,0 +1,6 @@
+## Append samples you want in your CSV to this file as resources ##
+resources:
+- performance_v2_performanceprofile.yaml
+- performance_v1_performanceprofile.yaml
+- performance_v1alpha1_performanceprofile.yaml
+# +kubebuilder:scaffold:manifestskustomizesamples
diff --git a/examples/pao/samples/performance_v1_performanceprofile.yaml b/examples/pao/samples/performance_v1_performanceprofile.yaml
new file mode 100644
index 000000000..4db8eb883
--- /dev/null
+++ b/examples/pao/samples/performance_v1_performanceprofile.yaml
@@ -0,0 +1,25 @@
+apiVersion: performance.openshift.io/v1
+kind: PerformanceProfile
+metadata:
+ name: example-performanceprofile
+spec:
+ additionalKernelArgs:
+ - "nmi_watchdog=0"
+ - "audit=0"
+ - "mce=off"
+ - "processor.max_cstate=1"
+ - "idle=poll"
+ - "intel_idle.max_cstate=0"
+ cpu:
+ isolated: "2-3"
+ reserved: "0-1"
+ hugepages:
+ defaultHugepagesSize: "1G"
+ pages:
+ - size: "1G"
+ count: 2
+ node: 0
+ realTimeKernel:
+ enabled: true
+ nodeSelector:
+ node-role.kubernetes.io/performance: ""
diff --git a/examples/pao/samples/performance_v1alpha1_performanceprofile.yaml b/examples/pao/samples/performance_v1alpha1_performanceprofile.yaml
new file mode 100644
index 000000000..9aa978876
--- /dev/null
+++ b/examples/pao/samples/performance_v1alpha1_performanceprofile.yaml
@@ -0,0 +1,25 @@
+apiVersion: performance.openshift.io/v1alpha1
+kind: PerformanceProfile
+metadata:
+ name: example-performanceprofile
+spec:
+ additionalKernelArgs:
+ - "nmi_watchdog=0"
+ - "audit=0"
+ - "mce=off"
+ - "processor.max_cstate=1"
+ - "idle=poll"
+ - "intel_idle.max_cstate=0"
+ cpu:
+ isolated: "2-3"
+ reserved: "0-1"
+ hugepages:
+ defaultHugepagesSize: "1G"
+ pages:
+ - size: "1G"
+ count: 2
+ node: 0
+ realTimeKernel:
+ enabled: true
+ nodeSelector:
+ node-role.kubernetes.io/performance: ""
diff --git a/examples/pao/samples/performance_v2_performanceprofile.yaml b/examples/pao/samples/performance_v2_performanceprofile.yaml
new file mode 100644
index 000000000..7a95b6b2a
--- /dev/null
+++ b/examples/pao/samples/performance_v2_performanceprofile.yaml
@@ -0,0 +1,26 @@
+apiVersion: performance.openshift.io/v2
+kind: PerformanceProfile
+metadata:
+ name: example-performanceprofile
+spec:
+ additionalKernelArgs:
+ - "nmi_watchdog=0"
+ - "audit=0"
+ - "mce=off"
+ - "processor.max_cstate=1"
+ - "idle=poll"
+ - "intel_idle.max_cstate=0"
+ cpu:
+ isolated: "2-3"
+ reserved: "0-1"
+ hugepages:
+ defaultHugepagesSize: "1G"
+ pages:
+ - size: "1G"
+ count: 2
+ node: 0
+ realTimeKernel:
+ enabled: true
+ nodeSelector:
+ # we can not leave an empty string "" because it interpreted as null value
+ node-role.kubernetes.io/performance: "test"
diff --git a/go.mod b/go.mod
index bb6955cdb..dc99392ee 100644
--- a/go.mod
+++ b/go.mod
@@ -3,76 +3,106 @@ module github.com/openshift/cluster-node-tuning-operator
go 1.17
require (
- github.com/coreos/ignition/v2 v2.7.0
+ github.com/RHsyseng/operator-utils v0.0.0-20200213165520-1a022eb07a43
+ github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f
+ github.com/coreos/ignition v0.35.0
+ github.com/coreos/ignition/v2 v2.9.0
+ github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32
+ github.com/google/go-cmp v0.5.6
+ github.com/jaypipes/ghw v0.8.1-0.20210605191321-eb162add542b
github.com/kevinburke/go-bindata v3.16.0+incompatible
github.com/onsi/ginkgo v1.16.5
github.com/onsi/gomega v1.17.0
- github.com/openshift/api v0.0.0-20211209135129-c58d9f695577
+ github.com/openshift/api v0.0.0-20220110171111-997c316db5e1
github.com/openshift/build-machinery-go v0.0.0-20211213093930-7e33a7eb4ce3
github.com/openshift/client-go v0.0.0-20211209144617-7385dd6338e3
- github.com/openshift/library-go v0.0.0-20211209153216-ed9bc958bd8a
+ github.com/openshift/custom-resource-status v0.0.0-20200602122900-c002fd1547ca
+ github.com/openshift/library-go v0.0.0-20211220195323-eca2c467c492
github.com/openshift/machine-config-operator v0.0.1-0.20210514234214-c415ce6aed25
+ github.com/operator-framework/api v0.10.7
+ github.com/operator-framework/operator-lifecycle-manager v3.11.0+incompatible
github.com/pkg/errors v0.9.1
- github.com/prometheus/client_golang v1.11.0
+ github.com/prometheus/client_golang v1.11.1
+ github.com/sirupsen/logrus v1.8.1
+ github.com/spf13/cobra v1.2.1
+ github.com/spf13/pflag v1.0.5
gopkg.in/fsnotify.v1 v1.4.7
gopkg.in/ini.v1 v1.62.0
- k8s.io/api v0.23.0
- k8s.io/apimachinery v0.23.0
- k8s.io/client-go v0.23.0
- k8s.io/code-generator v0.23.0
+ k8s.io/api v0.23.3
+ k8s.io/apiextensions-apiserver v0.23.3
+ k8s.io/apimachinery v0.23.3
+ k8s.io/client-go v0.23.3
+ k8s.io/code-generator v0.23.3
k8s.io/klog v1.0.0
k8s.io/klog/v2 v2.30.0
- k8s.io/utils v0.0.0-20210930125809-cb0fa318a74b
+ k8s.io/kubelet v0.23.3
+ k8s.io/kubernetes v0.23.3
+ k8s.io/utils v0.0.0-20211116205334-6203023598ed
+ kubevirt.io/qe-tools v0.1.6
sigs.k8s.io/controller-runtime v0.11.0
- sigs.k8s.io/controller-tools v0.4.0
+ sigs.k8s.io/controller-tools v0.6.2
+ sigs.k8s.io/yaml v1.3.0
)
require (
github.com/PuerkitoBio/purell v1.1.1 // indirect
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect
- github.com/ajeddeloh/go-json v0.0.0-20170920214419-6a2fe990e083 // indirect
+ github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d // indirect
+ github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef // indirect
github.com/beorn7/perks v1.0.1 // indirect
+ github.com/blang/semver/v4 v4.0.0 // indirect
github.com/cespare/xxhash/v2 v2.1.1 // indirect
+ github.com/coreos/go-json v0.0.0-20170920214419-6a2fe990e083 // indirect
github.com/coreos/go-semver v0.3.0 // indirect
github.com/coreos/go-systemd/v22 v22.3.2 // indirect
- github.com/coreos/vcontext v0.0.0-20191017033345-260217907eb5 // indirect
+ github.com/coreos/vcontext v0.0.0-20201120045928-b0e13dab675c // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/emicklei/go-restful v2.10.0+incompatible // indirect
github.com/evanphx/json-patch v4.12.0+incompatible // indirect
github.com/fatih/color v1.12.0 // indirect
github.com/fsnotify/fsnotify v1.5.1 // indirect
- github.com/ghodss/yaml v1.0.0 // indirect
github.com/go-logr/logr v1.2.0 // indirect
+ github.com/go-ole/go-ole v1.2.4 // indirect
+ github.com/go-openapi/analysis v0.21.2 // indirect
+ github.com/go-openapi/errors v0.19.9 // indirect
github.com/go-openapi/jsonpointer v0.19.5 // indirect
- github.com/go-openapi/jsonreference v0.19.5 // indirect
- github.com/go-openapi/swag v0.19.14 // indirect
+ github.com/go-openapi/jsonreference v0.19.6 // indirect
+ github.com/go-openapi/loads v0.21.1 // indirect
+ github.com/go-openapi/spec v0.20.4 // indirect
+ github.com/go-openapi/strfmt v0.21.2 // indirect
+ github.com/go-openapi/swag v0.21.1 // indirect
+ github.com/go-openapi/validate v0.21.0 // indirect
+ github.com/go-stack/stack v1.8.0 // indirect
github.com/gobuffalo/flect v0.2.3 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/protobuf v1.5.2 // indirect
- github.com/google/go-cmp v0.5.6 // indirect
- github.com/google/gofuzz v1.1.0 // indirect
- github.com/google/uuid v1.1.2 // indirect
+ github.com/google/gofuzz v1.2.0 // indirect
+ github.com/google/uuid v1.2.0 // indirect
github.com/googleapis/gnostic v0.5.5 // indirect
github.com/imdario/mergo v0.3.12 // indirect
github.com/inconshreveable/mousetrap v1.0.0 // indirect
+ github.com/jaypipes/pcidb v0.6.0 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/mailru/easyjson v0.7.6 // indirect
github.com/mattn/go-colorable v0.1.8 // indirect
github.com/mattn/go-isatty v0.0.12 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect
+ github.com/mitchellh/go-homedir v1.1.0 // indirect
+ github.com/mitchellh/mapstructure v1.4.1 // indirect
+ github.com/moby/spdystream v0.2.0 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/nxadm/tail v1.4.8 // indirect
+ github.com/oklog/ulid v1.3.1 // indirect
github.com/prometheus/client_model v0.2.0 // indirect
github.com/prometheus/common v0.28.0 // indirect
github.com/prometheus/procfs v0.6.0 // indirect
- github.com/spf13/cobra v1.2.1 // indirect
- github.com/spf13/pflag v1.0.5 // indirect
github.com/vincent-petithory/dataurl v0.0.0-20191104211930-d1553a71de50 // indirect
+ go.mongodb.org/mongo-driver v1.7.5 // indirect
golang.org/x/mod v0.4.2 // indirect
- golang.org/x/net v0.0.0-20210825183410-e898025ed96a // indirect
+ golang.org/x/net v0.0.0-20211209124913-491a49abca63 // indirect
golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f // indirect
golang.org/x/sys v0.0.0-20211029165221-6e7872819dc8 // indirect
golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b // indirect
@@ -87,42 +117,52 @@ require (
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect
- k8s.io/apiextensions-apiserver v0.23.0 // indirect
- k8s.io/component-base v0.23.0 // indirect
+ howett.net/plist v0.0.0-20181124034731-591f970eefbb // indirect
+ k8s.io/component-base v0.23.3 // indirect
k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c // indirect
k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65 // indirect
sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6 // indirect
- sigs.k8s.io/structured-merge-diff/v4 v4.2.0 // indirect
- sigs.k8s.io/yaml v1.3.0 // indirect
+ sigs.k8s.io/structured-merge-diff/v4 v4.2.1 // indirect
)
-// Pinned to kubernetes-1.23.0
+// Pinned to kubernetes-1.23.3
replace (
- k8s.io/api => k8s.io/api v0.23.0
- k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.23.0
- k8s.io/apimachinery => k8s.io/apimachinery v0.23.0
- k8s.io/apiserver => k8s.io/apiserver v0.23.0
- k8s.io/cli-runtime => k8s.io/cli-runtime v0.23.0
- k8s.io/client-go => k8s.io/client-go v0.23.0
- k8s.io/cloud-provider => k8s.io/cloud-provider v0.23.0
- k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.23.0
- k8s.io/code-generator => k8s.io/code-generator v0.23.0
- k8s.io/component-base => k8s.io/component-base v0.23.0
- k8s.io/component-helpers => k8s.io/component-helpers v0.23.0
- k8s.io/controller-manager => k8s.io/controller-manager v0.23.0
- k8s.io/cri-api => k8s.io/cri-api v0.23.0
- k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.23.0
- k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.23.0
- k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.23.0
- k8s.io/kube-proxy => k8s.io/kube-proxy v0.23.0
- k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.23.0
- k8s.io/kubectl => k8s.io/kubectl v0.23.0
- k8s.io/kubelet => k8s.io/kubelet v0.23.0
- k8s.io/kubernetes => k8s.io/kubernetes v1.23.0
- k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.23.0
- k8s.io/metrics => k8s.io/metrics v0.23.0
- k8s.io/mount-utils => k8s.io/mount-utils v0.23.0
- k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.23.0
- sigs.k8s.io/controller-runtime => sigs.k8s.io/controller-runtime v0.11.0
+ k8s.io/api => k8s.io/api v0.23.3
+ k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.23.3
+ k8s.io/apimachinery => k8s.io/apimachinery v0.23.3
+ k8s.io/apiserver => k8s.io/apiserver v0.23.3
+ k8s.io/cli-runtime => k8s.io/cli-runtime v0.23.3
+ k8s.io/client-go => k8s.io/client-go v0.23.3
+ k8s.io/cloud-provider => k8s.io/cloud-provider v0.23.3
+ k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.23.3
+ k8s.io/code-generator => k8s.io/code-generator v0.23.3
+ k8s.io/component-base => k8s.io/component-base v0.23.3
+ k8s.io/component-helpers => k8s.io/component-helpers v0.23.3
+ k8s.io/controller-manager => k8s.io/controller-manager v0.23.3
+ k8s.io/cri-api => k8s.io/cri-api v0.23.3
+ k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.23.3
+ k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.23.3
+ k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.23.3
+ k8s.io/kube-proxy => k8s.io/kube-proxy v0.23.3
+ k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.23.3
+ k8s.io/kubectl => k8s.io/kubectl v0.23.3
+ k8s.io/kubelet => k8s.io/kubelet v0.23.3
+ k8s.io/kubernetes => k8s.io/kubernetes v1.23.3
+ k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.23.3
+ k8s.io/metrics => k8s.io/metrics v0.23.3
+ k8s.io/mount-utils => k8s.io/mount-utils v0.23.3
+ k8s.io/pod-security-admission => k8s.io/pod-security-admission v0.23.3
+ k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.23.3
+ sigs.k8s.io/controller-runtime => sigs.k8s.io/controller-runtime v0.11.1
sigs.k8s.io/controller-tools => sigs.k8s.io/controller-tools v0.7.0
)
+
+// Other PAO pinned deps
+replace (
+ github.com/Azure/go-autorest => github.com/Azure/go-autorest v14.2.0+incompatible
+ github.com/coreos/prometheus-operator => github.com/coreos/prometheus-operator v0.40.0
+ github.com/mtrmac/gpgme => github.com/mtrmac/gpgme v0.1.1
+ github.com/openshift/machine-config-operator => github.com/openshift/machine-config-operator v0.0.1-0.20220203091316-d3010b34d344 // release-4.11
+)
+
+replace vbom.ml/util => github.com/fvbommel/util v0.0.0-20180919145318-efcd4e0f9787
diff --git a/go.sum b/go.sum
index 29e49be22..b0f58c153 100644
--- a/go.sum
+++ b/go.sum
@@ -1,3 +1,5 @@
+bazil.org/fuse v0.0.0-20160811212531-371fbbdaa898/go.mod h1:Xbm+BRKSBEpa4q4hTSxohYNQpsxXPbPry4JJWOB3LB8=
+bitbucket.org/bertimus9/systemstat v0.0.0-20180207000608-0eeff89b0690/go.mod h1:Ulb78X89vxKYgdL24HMTiXYHlyHEvruOj1ZPlqeNEZM=
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
@@ -39,7 +41,8 @@ cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RX
cloud.google.com/go/storage v1.9.0/go.mod h1:m+/etGaqZbylxaNT876QGXqEHp4PR2Rq5GMqICWb9bU=
cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
-github.com/14rcole/gopopulate v0.0.0-20180821133914-b175b219e774/go.mod h1:6/0dYRLLXyJjbkIPeeGyoJ/eKOSI0eU6eTlCBYibgd0=
+dmitri.shuralyov.com/gpu/mtl v0.0.0-20201218220906-28db891af037/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
+github.com/Azure/azure-sdk-for-go v55.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
github.com/Azure/go-ansiterm v0.0.0-20210608223527-2377c96fe795/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
@@ -48,32 +51,31 @@ github.com/Azure/go-autorest/autorest v0.11.18/go.mod h1:dSiJPy22c3u0OtOKDNttNgq
github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M=
github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74=
github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
+github.com/Azure/go-autorest/autorest/to v0.4.0/go.mod h1:fE8iZBn7LQR7zH/9XU2NcPR4o9jEImooCeWJcYV/zLE=
+github.com/Azure/go-autorest/autorest/validation v0.1.0/go.mod h1:Ha3z/SqBeaalWQvokg3NZAlQTalVMtOIAs1aGK7G6u8=
github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=
github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
-github.com/InVisionApp/go-health v2.1.0+incompatible/go.mod h1:/+Gv1o8JUsrjC6pi6MN6/CgKJo4OqZ6x77XAnImrzhg=
+github.com/GoogleCloudPlatform/k8s-cloud-provider v1.16.1-0.20210702024009-ea6160c1d0e3/go.mod h1:8XasY4ymP2V/tn2OOV9ZadmiTE1FIB/h3W+yNlPttKw=
+github.com/JeffAshton/win_pdh v0.0.0-20161109143554-76bb4ee9f0ab/go.mod h1:3VYc5hodBMJ5+l/7J4xAyMeuM2PNuepvHlGs8yilUCA=
github.com/MakeNowJust/heredoc v0.0.0-20170808103936-bb23615498cd/go.mod h1:64YHyfSL2R96J44Nlwm39UHepQbyR5q10x7iYa1ks2E=
-github.com/Masterminds/goutils v1.1.0/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU=
-github.com/Masterminds/semver v1.4.2/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y=
-github.com/Masterminds/sprig v2.20.0+incompatible/go.mod h1:y6hNFY5UBTIWBxnzTeuNhlNS5hqE0NB0E6fgfo2Br3o=
-github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA=
-github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw=
-github.com/Microsoft/hcsshim v0.8.9/go.mod h1:5692vkUqntj1idxauYlpoINNKeqCiG6Sg38RRsjT5y8=
+github.com/Microsoft/go-winio v0.4.15/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw=
+github.com/Microsoft/go-winio v0.4.17/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
+github.com/Microsoft/hcsshim v0.8.22/go.mod h1:91uVCVzvX2QD16sMCenoxxXo6L1wJnLMX2PSufFMtF0=
github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c=
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
-github.com/OpenPeeDeeP/depguard v1.0.0/go.mod h1:7/4sitnI9YlQgTLLk734QlzXT8DuHVnAyztLplQjk+o=
-github.com/OpenPeeDeeP/depguard v1.0.1/go.mod h1:xsIw86fROiiwelg+jB2uM9PiKihMMmUx/1V+TNhjQvM=
github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI=
github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M=
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
-github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg=
-github.com/VividCortex/ewma v1.1.1/go.mod h1:2Tkkvm3sRDVXaiyucHiACn4cqf7DpdyLvmxzcbUokwA=
-github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d/go.mod h1:asat636LX7Bqt5lYEZ27JNDcqxfjdBQuJ/MM4CN/Lzo=
+github.com/RHsyseng/operator-utils v0.0.0-20200213165520-1a022eb07a43 h1:Sb81vKYD+uXItFlAbCtOd2I13V4B1xaURhCEQb4Lzqs=
+github.com/RHsyseng/operator-utils v0.0.0-20200213165520-1a022eb07a43/go.mod h1:E+hCtYz+9UsXfAGnRjX2LGuaa5gSGNKHCVTmGZR79vY=
+github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d h1:G0m3OIz70MZUWq3EgK3CesDbo8upS2Vm9/P3FtgI+Jk=
+github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg=
github.com/ajeddeloh/go-json v0.0.0-20170920214419-6a2fe990e083 h1:uwcvnXW76Y0rHM+qs7y8iHknWUWXYFNlD6FEVhc47TU=
-github.com/ajeddeloh/go-json v0.0.0-20170920214419-6a2fe990e083/go.mod h1:otnto4/Icqn88WCcM4bhIJNSgsh9VLBuspyyCfvof9c=
+github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
@@ -81,15 +83,17 @@ github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRF
github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
github.com/antlr/antlr4/runtime/Go/antlr v0.0.0-20210826220005-b48c857c3a0e/go.mod h1:F7bn7fEU90QkQ3tnmaTx3LTKLEDqnwWODIYppRQ5hnY=
-github.com/apparentlymart/go-cidr v1.0.0/go.mod h1:EBcsNrHc3zQeuaeCeCtQruQm+n9/YjEn/vI25Lg7Gwc=
github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
-github.com/ashcrow/osrelease v0.0.0-20180626175927-9b292693c55c/go.mod h1:BRljTyotlu+6N+Qlu5MhjxpdmccCnp9lDvZjNNV8qr4=
-github.com/aws/aws-sdk-go v1.19.11/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
+github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef h1:46PFijGLmAjMPwCCCo7Jf0W6f9slllCkkv7vyc1yOSg=
+github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw=
+github.com/auth0/go-jwt-middleware v1.0.1/go.mod h1:YSeUX3z6+TF2H+7padiEqNJ73Zy9vXW72U//IgN0BIM=
github.com/aws/aws-sdk-go v1.30.28/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0=
+github.com/aws/aws-sdk-go v1.35.24/go.mod h1:tlPOdRjfxPBpNIwqDj61rmsnA85v9jc0Ps9+muhnW+k=
+github.com/aws/aws-sdk-go v1.38.49/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM=
github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
@@ -97,79 +101,78 @@ github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+Ce
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
+github.com/bits-and-blooms/bitset v1.2.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA=
github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84=
github.com/bketelsen/crypt v0.0.4/go.mod h1:aI6NrJ0pMGgvZKL1iVgXLnfIFJtfV+bKCoqOes/6LfM=
+github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
+github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM=
+github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ=
+github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
-github.com/certifi/gocertifi v0.0.0-20180905225744-ee1a9a0726d2/go.mod h1:GJKEexRPVJrBSOjoqN5VNOIKJ5Q3RViH6eu3puDRwx4=
github.com/certifi/gocertifi v0.0.0-20191021191039-0944d244cd40/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA=
github.com/certifi/gocertifi v0.0.0-20200922220541-2c3bb06c6054/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA=
-github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/chai2010/gettext-go v0.0.0-20160711120539-c6fed771bfd5/go.mod h1:/iP1qXHoty45bqomnu2LM+VVyAEdWN+vtSHGlQgyxbw=
+github.com/checkpoint-restore/go-criu/v5 v5.0.0/go.mod h1:cfwC0EG7HMUenopBsUf9d89JlCLQIfgVcNsNN0t6T2M=
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
-github.com/clarketm/json v1.14.1/go.mod h1:ynr2LRfb0fQU34l07csRNBTcivjySLLiY1YzQqKVfdo=
+github.com/cilium/ebpf v0.4.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs=
+github.com/cilium/ebpf v0.6.2/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
+github.com/clusterhq/flocker-go v0.0.0-20160920122132-2b8b7259d313/go.mod h1:P1wt9Z3DP8O6W3rvwCt0REIlshg1InHImaLW0t3ObY0=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
-github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
github.com/cockroachdb/datadriven v0.0.0-20200714090401-bf6692d28da5/go.mod h1:h6jFvWxBdQXxjopDMZyH2UVceIRfR84bdzbkoKrsWNo=
github.com/cockroachdb/errors v1.2.4/go.mod h1:rQD95gz6FARkaKkQXUksEje/d9a6wBJoCr5oaCLELYA=
github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f/go.mod h1:i/u985jwjWRlyHXQbwatDASoW0RMlZ/3i9yJHE2xLkI=
-github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f/go.mod h1:OApqhQ4XNSNC13gXIwDjhOQxjWa/NxkwZXJ1EvqT0ko=
-github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw=
-github.com/containerd/containerd v1.2.10/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
-github.com/containerd/containerd v1.3.2/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
-github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
-github.com/containerd/continuity v0.0.0-20190827140505-75bee3e2ccb6/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
-github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI=
-github.com/containerd/go-runc v0.0.0-20180907222934-5a6d9f37cfa3/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0=
-github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
-github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc=
-github.com/containers/image v3.0.2+incompatible/go.mod h1:8Vtij257IWSanUQKe1tAeNOm2sRVkSqQTVQ1IlwI3+M=
-github.com/containers/image/v5 v5.5.1/go.mod h1:4PyNYR0nwlGq/ybVJD9hWlhmIsNra4Q8uOQX2s6E2uM=
-github.com/containers/libtrust v0.0.0-20190913040956-14b96171aa3b/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY=
-github.com/containers/ocicrypt v1.0.2/go.mod h1:nsOhbP19flrX6rE7ieGFvBlr7modwmNjsqWarIUce4M=
-github.com/containers/storage v1.20.2/go.mod h1:oOB9Ie8OVPojvoaKWEGSEtHbXUAs+tSyr7RO7ZGteMc=
+github.com/container-storage-interface/spec v1.5.0/go.mod h1:8K96oQNkJ7pFcC2R9Z1ynGGBB1I93kcS6PGg3SsOk8s=
+github.com/containerd/cgroups v1.0.1/go.mod h1:0SJrPIenamHDcZhEcJMNBB85rHcUsw4f25ZfBiPYRkU=
+github.com/containerd/console v1.0.1/go.mod h1:XUsP6YE/mKtz6bxc+I8UiKKTP04qjQL4qcS3XoQ5xkw=
+github.com/containerd/console v1.0.2/go.mod h1:ytZPjGgY2oeTkAONYafi2kSj0aYggsf8acV1PGKCbzQ=
+github.com/containerd/containerd v1.4.9/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
+github.com/containerd/containerd v1.4.11/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
+github.com/containerd/continuity v0.1.0/go.mod h1:ICJu0PwR54nI0yPEnJ6jcS+J7CZAUXrLh8lPo2knzsM=
+github.com/containerd/fifo v1.0.0/go.mod h1:ocF/ME1SX5b1AOlWi9r677YJmCPSwwWnQ9O123vzpE4=
+github.com/containerd/go-runc v1.0.0/go.mod h1:cNU0ZbCgCQVZK4lgG3P+9tn9/PaJNmoDXPpoJhDR+Ok=
+github.com/containerd/ttrpc v1.0.2/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y=
+github.com/containerd/typeurl v1.0.2/go.mod h1:9trJWW2sRlGub4wZJRTW83VtbOLS6hwcDZXTn6oPz9s=
+github.com/containernetworking/cni v0.8.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
+github.com/coredns/caddy v1.1.0/go.mod h1:A6ntJQlAWuQfFlsd9hvigKbo2WS0VUs2l1e2F+BawD4=
+github.com/coredns/corefile-migration v1.0.14/go.mod h1:XnhgULOEouimnzgn0t4WPuFDN2/PJQcTxdWKC5eXNGE=
github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
-github.com/coreos/fcct v0.5.0/go.mod h1:cbE+j77YSQwFB2fozWVB3qsI2Pi3YiVEbDz/b6Yywdo=
-github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk=
+github.com/coreos/go-json v0.0.0-20170920214419-6a2fe990e083 h1:iLYct0QOZLUuTbFBf+PDiKvpG1xPicwkcgnKaGCeTgc=
+github.com/coreos/go-json v0.0.0-20170920214419-6a2fe990e083/go.mod h1:FmxyHfvrCFfCsXRylD4QQRlQmvzl+DG6iTHyEEykPfU=
github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc=
github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM=
github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
-github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
-github.com/coreos/go-systemd v0.0.0-20181031085051-9002847aa142/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f h1:JOrtw2xFKzlg+cbHpyrpLDmnN1HqhBfnX7WDiW7eG2c=
github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk=
+github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk=
github.com/coreos/go-systemd/v22 v22.3.2 h1:D9/bQk5vlXQFZ6Kwuu6zaiXJ9oTPe68++AzAJc1DzSI=
github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
-github.com/coreos/ign-converter v0.0.0-20201123214124-8dac862888aa/go.mod h1:pqAsDWa5YDi10Va/aqQI0bwOs9hXqoE2xwb5vnFys5s=
github.com/coreos/ignition v0.35.0 h1:UFodoYq1mOPrbEjtxIsZbThcDyQwAI1owczRDqWmKkQ=
github.com/coreos/ignition v0.35.0/go.mod h1:WJQapxzEn9DE0ryxsGvm8QnBajm/XsS/PkrDqSpz+bA=
-github.com/coreos/ignition/v2 v2.1.1/go.mod h1:RqmqU64zxarUJa3l4cHtbhcSwfQLpUhv0WVziZwoXvE=
-github.com/coreos/ignition/v2 v2.7.0 h1:JCKxJllVtnk1lQY1uisxrtFSHG5L2NI1LRzc8wBEk84=
-github.com/coreos/ignition/v2 v2.7.0/go.mod h1:3CjaRpg51hmJzPjarbzB0RvSZbLkNOczxKJobTl6nOY=
-github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
+github.com/coreos/ignition/v2 v2.9.0 h1:Zl5N08OyqlECB8BrBlMDp3Jf1ShwVTtREPcUq/YO034=
+github.com/coreos/ignition/v2 v2.9.0/go.mod h1:A5lFFzA2/zvZQPVEvI1lR5WPLWRb7KZ7Q1QOeUMtcAc=
github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
-github.com/coreos/vcontext v0.0.0-20190529201340-22b159166068/go.mod h1:E+6hug9bFSe0KZ2ZAzr8M9F5JlArJjv5D1JS7KSkPKE=
-github.com/coreos/vcontext v0.0.0-20191017033345-260217907eb5 h1:DjoHHi6+9J7DGYPvBdmszKZLY+ucx2bnA77jf8KIk9M=
-github.com/coreos/vcontext v0.0.0-20191017033345-260217907eb5/go.mod h1:E+6hug9bFSe0KZ2ZAzr8M9F5JlArJjv5D1JS7KSkPKE=
-github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
+github.com/coreos/vcontext v0.0.0-20201120045928-b0e13dab675c h1:jA28WeORitsxGFVWhyWB06sAG2HbLHPQuHwDydhU2CQ=
+github.com/coreos/vcontext v0.0.0-20201120045928-b0e13dab675c/go.mod h1:z4pMVvaUrxs98RROlIYdAQCKhEicjnTirOaVyDRH5h8=
+github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
-github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
+github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4=
github.com/dave/dst v0.26.2/go.mod h1:UMDJuIRPfyUCC78eFuB+SV/WI8oDeyFDvM/JR6NI3IU=
github.com/dave/gopackages v0.0.0-20170318123100-46e7023ec56e/go.mod h1:i00+b/gKdIDIxuLDFob7ustLAVqhsZRk2qVZrArELGQ=
github.com/dave/jennifer v1.2.0/go.mod h1:fIb+770HOpJ2fmN9EPPKOqm1vMGhB+TwXKMZhrIygKg=
@@ -181,23 +184,15 @@ github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs
github.com/daviddengcn/go-colortext v0.0.0-20160507010035-511bcaf42ccd/go.mod h1:dv4zxwHi5C/8AeI+4gX4dCWOIvNi7I6JCSX0HvlKPgE=
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
-github.com/docker/distribution v0.0.0-20180920194744-16128bbac47f/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
+github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E=
github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
-github.com/docker/docker v1.4.2-0.20191219165747-a9416c67da9f/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
-github.com/docker/docker-credential-helpers v0.6.3/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y=
-github.com/docker/go-connections v0.3.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
+github.com/docker/docker v20.10.7+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
-github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw=
github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
-github.com/docker/libnetwork v0.0.0-20190731215715-7f13a5c99f4b/go.mod h1:93m0aTqz6z+g32wla4l4WxTrdtvBRmVzYRkYvasA5Z8=
-github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE=
github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
-github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
-github.com/elazarl/goproxy v0.0.0-20190911111923-ecfe977594f1/go.mod h1:Ro8st/ElPeALwNFlcTpWmkr6IoMFfkjXAvTHpevnDsM=
-github.com/elazarl/goproxy/ext v0.0.0-20190711103511-473e67f1d7d2/go.mod h1:gNh8nYJoAm43RfaxurUnxr+N1PwuFV3ZMl/efxlIlY8=
-github.com/elazarl/goproxy/ext v0.0.0-20190911111923-ecfe977594f1/go.mod h1:gNh8nYJoAm43RfaxurUnxr+N1PwuFV3ZMl/efxlIlY8=
+github.com/elazarl/goproxy v0.0.0-20190911111923-ecfe977594f1 h1:yY9rWGoXv1U5pl4gxqlULARMQD7x0QG85lqEXTWysik=
github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
github.com/emicklei/go-restful v2.10.0+incompatible h1:l6Soi8WCOOVAeCo4W98iBFC6Og7/X8bpRt51oNLZ2C8=
@@ -210,34 +205,34 @@ github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.m
github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
+github.com/euank/go-kmsg-parser v2.0.0+incompatible/go.mod h1:MhmAMZ8V4CYH4ybgdRwPr2TU5ThnS43puaKEMpja1uw=
github.com/evanphx/json-patch v0.5.2/go.mod h1:ZWS5hhDbVDyob71nXKNL0+PWn6ToqBHMikGIFbs31qQ=
-github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/evanphx/json-patch v4.11.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84=
github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d/go.mod h1:ZZMPRZwes7CROmyNKgQzC3XPs6L/G2EJLHddWejkmf4=
github.com/fatih/camelcase v1.0.0/go.mod h1:yN2Sb0lFhZJUdVvtELVWefmrXpuZESvPmqwoZc+/fpc=
-github.com/fatih/color v1.6.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
+github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU=
github.com/fatih/color v1.12.0 h1:mRhaKNwANqRgUBGKmnI5ZxEk7QXmjQeCcuYFMX2bfcc=
github.com/fatih/color v1.12.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM=
github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
+github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc=
+github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k=
github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
github.com/form3tech-oss/jwt-go v3.2.3+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
+github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
github.com/fsnotify/fsnotify v1.5.1 h1:mZcQUHVQUQWoPXXtuf9yuEXKudkV2sx1E06UadKWpgI=
github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5Ai1i3InKU=
-github.com/fsouza/go-dockerclient v0.0.0-20171004212419-da3951ba2e9e/go.mod h1:KpcjM623fQYE9MZiTGzKhjfxXAV9wbyX2C1cyRHfhl0=
-github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa/go.mod h1:KnogPXtdwXqoenmZCw6S+25EAm2MkxbG0deNDu4cbSA=
github.com/fvbommel/sortorder v1.0.1/go.mod h1:uk88iVf1ovNn1iLfgUVU2F9o5eO30ui720w+kxuqRs0=
github.com/getkin/kin-openapi v0.76.0/go.mod h1:660oXbgy5JFMKreazJaQTw7o+X00qeSyhcnluiMv+Xg=
-github.com/getsentry/raven-go v0.0.0-20190513200303-c977f96e1095/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ=
github.com/getsentry/raven-go v0.2.0/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ=
-github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
-github.com/go-bindata/go-bindata v3.1.2+incompatible/go.mod h1:xK8Dsgwmeed+BBsSy2XTopBn/8uK2HWuGSnA11C3Joo=
-github.com/go-critic/go-critic v0.3.5-0.20190526074819-1df300866540/go.mod h1:+sE8vrLDS2M0pZkBk0wy6+nLdKexVDrl/jBqQOTDThA=
+github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32 h1:Mn26/9ZMNWSw9C9ERFA1PUxfmGpolnw2v0bKOREu5ew=
+github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32/go.mod h1:GIjDIg/heH5DOkXY3YJ/wNhfHsQHoXGjl8G8amsYQ1I=
+github.com/go-bindata/go-bindata/v3 v3.1.3/go.mod h1:1/zrpXsLD8YDIbhZRqXzm1Ghc7NhEvIN9+Z6R5/xH4I=
github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q=
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
@@ -245,65 +240,95 @@ github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
-github.com/go-lintpack/lintpack v0.5.2/go.mod h1:NwZuYi2nUHho8XEIZ6SIxihrnPoqBTDqfpXvXAN0sXM=
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
-github.com/go-logr/logr v0.4.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
github.com/go-logr/logr v1.2.0 h1:QK40JKJyMdUDz+h+xvCsru/bJhvG0UxvePV0ufL/AcE=
github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/zapr v1.2.0 h1:n4JnPI1T3Qq1SFEi/F8rwLrZERp2bso19PJZDB9dayk=
github.com/go-logr/zapr v1.2.0/go.mod h1:Qa4Bsj2Vb+FAVeAKsLD8RLQ+YRJB8YDmOAKxaBQf7Ro=
-github.com/go-ole/go-ole v1.2.1/go.mod h1:7FAglXiTm7HKlQRDeOQ6ZNUHidzCWXuZWq/1dTyBNF8=
+github.com/go-ole/go-ole v1.2.4 h1:nNBDSCOigTSiarFpYE9J/KtEA1IOW4CNeqT9TQDqCxI=
+github.com/go-ole/go-ole v1.2.4/go.mod h1:XCwSNxSkXRo4vlyPy93sltvi/qJq0jqQhjqQNIwKuxM=
+github.com/go-openapi/analysis v0.21.2 h1:hXFrOYFHUAMQdu6zwAiKKJHJQ8kqZs1ux/ru1P1wLJU=
+github.com/go-openapi/analysis v0.21.2/go.mod h1:HZwRk4RRisyG8vx2Oe6aqeSQcoxRp47Xkp3+K6q+LdY=
+github.com/go-openapi/errors v0.19.8/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M=
+github.com/go-openapi/errors v0.19.9 h1:9SnKdGhiPZHF3ttwFMiCBEb8jQ4IDdrK+5+a0oTygA4=
+github.com/go-openapi/errors v0.19.9/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M=
github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY=
github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8=
-github.com/go-openapi/jsonreference v0.19.5 h1:1WJP/wi4OjB4iV8KVbH73rQaoialJrqv8gitZLxGLtM=
github.com/go-openapi/jsonreference v0.19.5/go.mod h1:RdybgQwPxbL4UEjuAruzK1x3nE69AqPYEJeo/TWfEeg=
+github.com/go-openapi/jsonreference v0.19.6 h1:UBIxjkht+AWIgYzCDSv2GN+E/togfwXUJFRTWhl2Jjs=
+github.com/go-openapi/jsonreference v0.19.6/go.mod h1:diGHMEHg2IqXZGKxqyvWdfWU/aim5Dprw5bqpKkTvns=
+github.com/go-openapi/loads v0.21.1 h1:Wb3nVZpdEzDTcly8S4HMkey6fjARRzb7iEaySimlDW0=
+github.com/go-openapi/loads v0.21.1/go.mod h1:/DtAMXXneXFjbQMGEtbamCZb+4x7eGwkvZCvBmwUG+g=
+github.com/go-openapi/spec v0.20.4 h1:O8hJrt0UMnhHcluhIdUgCLRWyM2x7QkBXRvOs7m+O1M=
+github.com/go-openapi/spec v0.20.4/go.mod h1:faYFR1CvsJZ0mNsmsphTMSoRrNV3TEDoAM7FOEWeq8I=
+github.com/go-openapi/strfmt v0.21.0/go.mod h1:ZRQ409bWMj+SOgXofQAGTIo2Ebu72Gs+WaRADcS5iNg=
+github.com/go-openapi/strfmt v0.21.1/go.mod h1:I/XVKeLc5+MM5oPNN7P6urMOpuLXEcNrCX/rPGuWb0k=
+github.com/go-openapi/strfmt v0.21.2 h1:5NDNgadiX1Vhemth/TH4gCGopWSTdDjxl60H3B7f+os=
+github.com/go-openapi/strfmt v0.21.2/go.mod h1:I/XVKeLc5+MM5oPNN7P6urMOpuLXEcNrCX/rPGuWb0k=
github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
-github.com/go-openapi/swag v0.19.14 h1:gm3vOOXfiuw5i9p5N9xJvfjvuofpyvLA9Wr6QfK5Fng=
github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
+github.com/go-openapi/swag v0.19.15/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
+github.com/go-openapi/swag v0.21.1 h1:wm0rhTb5z7qpJRHBdPOMuY4QjVUMbF6/kwoYeRAOrKU=
+github.com/go-openapi/swag v0.21.1/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
+github.com/go-openapi/validate v0.21.0 h1:+Wqk39yKOhfpLqNLEC0/eViCkzM5FVXVqrvt526+wcI=
+github.com/go-openapi/validate v0.21.0/go.mod h1:rjnrwK57VJ7A8xqfpAOEKRH8yQSGUriMu5/zuPSQ1hg=
+github.com/go-ozzo/ozzo-validation v3.5.0+incompatible/go.mod h1:gsEKFIVnabGBt6mXmxK0MoFy+cZoTJY6mu5Ll3LVLBU=
+github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8=
+github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA=
github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
+github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
-github.com/go-toolsmith/astcast v1.0.0/go.mod h1:mt2OdQTeAQcY4DQgPSArJjHCcOwlX+Wl/kwN+LbLGQ4=
-github.com/go-toolsmith/astcopy v1.0.0/go.mod h1:vrgyG+5Bxrnz4MZWPF+pI4R8h3qKRjjyvV/DSez4WVQ=
-github.com/go-toolsmith/astequal v0.0.0-20180903214952-dcb477bfacd6/go.mod h1:H+xSiq0+LtiDC11+h1G32h7Of5O3CYFJ99GVbS5lDKY=
-github.com/go-toolsmith/astequal v1.0.0/go.mod h1:H+xSiq0+LtiDC11+h1G32h7Of5O3CYFJ99GVbS5lDKY=
-github.com/go-toolsmith/astfmt v0.0.0-20180903215011-8f8ee99c3086/go.mod h1:mP93XdblcopXwlyN4X4uodxXQhldPGZbcEJIimQHrkg=
-github.com/go-toolsmith/astfmt v1.0.0/go.mod h1:cnWmsOAuq4jJY6Ct5YWlVLmcmLMn1JUPuQIHCY7CJDw=
-github.com/go-toolsmith/astinfo v0.0.0-20180906194353-9809ff7efb21/go.mod h1:dDStQCHtmZpYOmjRP/8gHHnCCch3Zz3oEgCdZVdtweU=
-github.com/go-toolsmith/astp v0.0.0-20180903215135-0af7e3c24f30/go.mod h1:SV2ur98SGypH1UjcPpCatrV5hPazG6+IfNHbkDXBRrk=
-github.com/go-toolsmith/astp v1.0.0/go.mod h1:RSyrtpVlfTFGDYRbrjyWP1pYu//tSFcvdYrA8meBmLI=
-github.com/go-toolsmith/pkgload v0.0.0-20181119091011-e9e65178eee8/go.mod h1:WoMrjiy4zvdS+Bg6z9jZH82QXwkcgCBX6nOfnmdaHks=
-github.com/go-toolsmith/pkgload v1.0.0/go.mod h1:5eFArkbO80v7Z0kdngIxsRXRMTaX4Ilcwuh3clNrQJc=
-github.com/go-toolsmith/strparse v1.0.0/go.mod h1:YI2nUKP9YGZnL/L1/DLFBfixrcjslWct4wyljWhSRy8=
-github.com/go-toolsmith/typep v1.0.0/go.mod h1:JSQCQMUPdRlMZFswiq3TGpNp1GMktqkR2Ns5AIQkATU=
+github.com/gobuffalo/attrs v0.0.0-20190224210810-a9411de4debd/go.mod h1:4duuawTqi2wkkpB4ePgWMaai6/Kc6WEz83bhFwpHzj0=
+github.com/gobuffalo/depgen v0.0.0-20190329151759-d478694a28d3/go.mod h1:3STtPUQYuzV0gBVOY3vy6CfMm/ljR4pABfrTeHNLHUY=
+github.com/gobuffalo/depgen v0.1.0/go.mod h1:+ifsuy7fhi15RWncXQQKjWS9JPkdah5sZvtHc2RXGlg=
+github.com/gobuffalo/envy v1.6.15/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI=
+github.com/gobuffalo/envy v1.7.0/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI=
+github.com/gobuffalo/flect v0.1.0/go.mod h1:d2ehjJqGOH/Kjqcoz+F7jHTBbmDb38yXA598Hb50EGs=
+github.com/gobuffalo/flect v0.1.1/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI=
+github.com/gobuffalo/flect v0.1.3/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI=
github.com/gobuffalo/flect v0.2.3 h1:f/ZukRnSNA/DUpSNDadko7Qc0PhGvsew35p/2tu+CRY=
github.com/gobuffalo/flect v0.2.3/go.mod h1:vmkQwuZYhN5Pc4ljYQZzP+1sq+NEkK+lh20jmEmX3jc=
-github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8=
-github.com/godbus/dbus v0.0.0-20181025153459-66d97aec3384/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw=
-github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4=
+github.com/gobuffalo/genny v0.0.0-20190329151137-27723ad26ef9/go.mod h1:rWs4Z12d1Zbf19rlsn0nurr75KqhYp52EAGGxTbBhNk=
+github.com/gobuffalo/genny v0.0.0-20190403191548-3ca520ef0d9e/go.mod h1:80lIj3kVJWwOrXWWMRzzdhW3DsrdjILVil/SFKBzF28=
+github.com/gobuffalo/genny v0.1.0/go.mod h1:XidbUqzak3lHdS//TPu2OgiFB+51Ur5f7CSnXZ/JDvo=
+github.com/gobuffalo/genny v0.1.1/go.mod h1:5TExbEyY48pfunL4QSXxlDOmdsD44RRq4mVZ0Ex28Xk=
+github.com/gobuffalo/gitgen v0.0.0-20190315122116-cc086187d211/go.mod h1:vEHJk/E9DmhejeLeNt7UVvlSGv3ziL+djtTr3yyzcOw=
+github.com/gobuffalo/gogen v0.0.0-20190315121717-8f38393713f5/go.mod h1:V9QVDIxsgKNZs6L2IYiGR8datgMhB577vzTDqypH360=
+github.com/gobuffalo/gogen v0.1.0/go.mod h1:8NTelM5qd8RZ15VjQTFkAW6qOMx5wBbW4dSCS3BY8gg=
+github.com/gobuffalo/gogen v0.1.1/go.mod h1:y8iBtmHmGc4qa3urIyo1shvOD8JftTtfcKi+71xfDNE=
+github.com/gobuffalo/logger v0.0.0-20190315122211-86e12af44bc2/go.mod h1:QdxcLw541hSGtBnhUc4gaNIXRjiDppFGaDqzbrBd3v8=
+github.com/gobuffalo/mapi v1.0.1/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc=
+github.com/gobuffalo/mapi v1.0.2/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc=
+github.com/gobuffalo/packd v0.0.0-20190315124812-a385830c7fc0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWeG2RIxq4=
+github.com/gobuffalo/packd v0.1.0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWeG2RIxq4=
+github.com/gobuffalo/packr/v2 v2.0.9/go.mod h1:emmyGweYTm6Kdper+iywB6YK5YzuKchGtJQZ0Odn4pQ=
+github.com/gobuffalo/packr/v2 v2.2.0/go.mod h1:CaAwI0GPIAv+5wKLtv8Afwl+Cm78K/I/VCm/3ptBN+0=
+github.com/gobuffalo/syncx v0.0.0-20190224160051-33c29581e754/go.mod h1:HhnNqWY95UYwwW3uSASeV7vtgYkT2t16hJgV3AEPUpw=
+github.com/goccy/go-yaml v1.8.1/go.mod h1:wS4gNoLalDSJxo/SpngzPQ2BN4uuZVLCmbM4S3vd4+Y=
github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
+github.com/gofrs/uuid v4.0.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
+github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4=
-github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
-github.com/golang/mock v1.0.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
@@ -330,33 +355,12 @@ github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaS
github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM=
github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
-github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2/go.mod h1:k9Qvh+8juN+UKMCS/3jFtGICgW8O96FVaZsaxdzDkR4=
-github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a/go.mod h1:ryS0uhF+x9jgbj/N71xsEqODy9BN81/GonCZiOzirOk=
-github.com/golangci/errcheck v0.0.0-20181223084120-ef45e06d44b6/go.mod h1:DbHgvLiFKX1Sh2T1w8Q/h4NAI8MHIpzCdnBUDTXU3I0=
-github.com/golangci/go-misc v0.0.0-20180628070357-927a3d87b613/go.mod h1:SyvUF2NxV+sN8upjjeVYr5W7tyxaT1JVtvhKhOn2ii8=
-github.com/golangci/go-tools v0.0.0-20190318055746-e32c54105b7c/go.mod h1:unzUULGw35sjyOYjUt0jMTXqHlZPpPc6e+xfO4cd6mM=
-github.com/golangci/goconst v0.0.0-20180610141641-041c5f2b40f3/go.mod h1:JXrF4TWy4tXYn62/9x8Wm/K/dm06p8tCKwFRDPZG/1o=
-github.com/golangci/gocyclo v0.0.0-20180528134321-2becd97e67ee/go.mod h1:ozx7R9SIwqmqf5pRP90DhR2Oay2UIjGuKheCBCNwAYU=
-github.com/golangci/gofmt v0.0.0-20181222123516-0b8337e80d98/go.mod h1:9qCChq59u/eW8im404Q2WWTrnBUQKjpNYKMbU4M7EFU=
-github.com/golangci/golangci-lint v1.18.0/go.mod h1:kaqo8l0OZKYPtjNmG4z4HrWLgcYNIJ9B9q3LWri9uLg=
-github.com/golangci/gosec v0.0.0-20190211064107-66fb7fc33547/go.mod h1:0qUabqiIQgfmlAmulqxyiGkkyF6/tOGSnY2cnPVwrzU=
-github.com/golangci/ineffassign v0.0.0-20190609212857-42439a7714cc/go.mod h1:e5tpTHCfVze+7EpLEozzMB3eafxo2KT5veNg1k6byQU=
-github.com/golangci/lint-1 v0.0.0-20190420132249-ee948d087217/go.mod h1:66R6K6P6VWk9I95jvqGxkqJxVWGFy9XlDwLwVz1RCFg=
-github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca/go.mod h1:tvlJhZqDe4LMs4ZHD0oMUlt9G2LWuDGoisJTBzLMV9o=
-github.com/golangci/misspell v0.0.0-20180809174111-950f5d19e770/go.mod h1:dEbvlSfYbMQDtrpRMQU675gSDLDNa8sCPPChZ7PhiVA=
-github.com/golangci/prealloc v0.0.0-20180630174525-215b22d4de21/go.mod h1:tf5+bzsHdTM0bsB7+8mt0GUMvjCgwLpTapNZHU8AajI=
-github.com/golangci/revgrep v0.0.0-20180526074752-d9c87f5ffaf0/go.mod h1:qOQCunEYvmd/TLamH+7LlVccLvUH5kZNhbCgTHoBbp4=
-github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4/go.mod h1:Izgrg8RkN3rCIMLGE9CyYmU9pY2Jer6DgANEnZ/L/cQ=
+github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golangplus/testing v0.0.0-20180327235837-af21d9c3145e/go.mod h1:0AA//k/eakGydO4jKRoRL2j92ZKSzTgj9tclaCrvXHk=
-github.com/gonum/blas v0.0.0-20181208220705-f22b278b28ac/go.mod h1:P32wAyui1PQ58Oce/KYkOqQv8cVw1zAapXOl+dRFGbc=
-github.com/gonum/floats v0.0.0-20181209220543-c233463c7e82/go.mod h1:PxC8OnwL11+aosOB5+iEPoV3picfs8tUpkVd0pDo+Kg=
-github.com/gonum/graph v0.0.0-20170401004347-50b27dea7ebb/go.mod h1:ye018NnX1zrbOLqwBvs2HqyyTouQgnL8C+qzYk1snPY=
-github.com/gonum/internal v0.0.0-20181124074243-f884aa714029/go.mod h1:Pu4dmpkhSyOzRwuXkOgAvijx4o+4YMUJJo9OvPYMkks=
-github.com/gonum/lapack v0.0.0-20181123203213-e4cdc5a0bff9/go.mod h1:XA3DeT6rxh2EAE789SSiSJNqxPaC0aE9J8NTOI0Jo/A=
-github.com/gonum/matrix v0.0.0-20181209220409-c518dec07be9/go.mod h1:0EXg4mc1CNP0HCqCz+K4ts155PXIlUywf0wqN+GfPZw=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA=
+github.com/google/cadvisor v0.43.0/go.mod h1:+RdMSbc3FVr5NYCD2dOEJy/LI0jYJ/0xJXkzWXEyiFQ=
github.com/google/cel-go v0.9.0/go.mod h1:U7ayypeSkw23szu4GaQTPJGx66c20mx8JklMSxrmI1w=
github.com/google/cel-spec v0.6.0/go.mod h1:Nwjgxy5CbjlPrtCWjeDjUyKMl8w41YBYGjsyDdqk0xA=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
@@ -373,8 +377,9 @@ github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ=
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
-github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g=
github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
+github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
+github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
@@ -393,34 +398,28 @@ github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLe
github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ=
-github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/google/uuid v1.1.2 h1:EVhdT+1Kseyi1/pUmXKaFxYsDNy9RQYkMWRH68J/W7Y=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.2.0 h1:qJYtXnJRWmpe7m/3XlyhrsLrEURqHRM2kxzoxXqyUDs=
+github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
-github.com/googleapis/gnostic v0.2.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
-github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg=
github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2cUuW7uA/OeU=
github.com/googleapis/gnostic v0.5.5 h1:9fHAtK0uDfpveeqqo1hkEZJcFvYXAiCN3UutL8F9xHw=
github.com/googleapis/gnostic v0.5.5/go.mod h1:7+EbHbldMins07ALC74bsA81Ovc97DwqyJO1AENw9kA=
-github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8=
+github.com/gophercloud/gophercloud v0.1.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
-github.com/gorilla/mux v0.0.0-20191024121256-f395758b854c/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
+github.com/gopherjs/gopherjs v0.0.0-20200217142428-fce0ec30dd00 h1:l5lAOZEym3oK3SQ2HBHWsJUfbNBiTXJDeW2QDxw9AQ0=
+github.com/gopherjs/gopherjs v0.0.0-20200217142428-fce0ec30dd00/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/gorilla/mux v1.7.4/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
-github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
-github.com/gostaticanalysis/analysisutil v0.0.0-20190318220348-4088753ea4d3/go.mod h1:eEOZF4jCKGi+aprrirO9e7WKB3beBRtWgqGunKl6pKE=
-github.com/gostaticanalysis/analysisutil v0.0.3/go.mod h1:eEOZF4jCKGi+aprrirO9e7WKB3beBRtWgqGunKl6pKE=
github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
-github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y=
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
-github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q=
github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
@@ -437,34 +436,37 @@ github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/b
github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
-github.com/hashicorp/golang-lru v0.5.3/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
-github.com/hashicorp/hcl v0.0.0-20180404174102-ef8a98b0bbce/go.mod h1:oZtUIOe8dh44I2q6ScRibXws4Ajl+d+nod3AaR9vL5w=
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ=
github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I=
github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
+github.com/heketi/heketi v10.3.0+incompatible/go.mod h1:bB9ly3RchcQqsQ9CpyaQwvva7RS5ytVoSoholZQON6o=
+github.com/heketi/tests v0.0.0-20151005000721-f3775cbcefd6/go.mod h1:xGMAM8JLi7UkZt1i4FQeQy0R2T8GLUwQhOP5M1gBhy4=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
-github.com/huandu/xstrings v1.2.0/go.mod h1:DvyZB1rfVYsBIigL8HwpZgxHwXozlTgGqn63UyNX5k4=
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
-github.com/imdario/mergo v0.3.7/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
-github.com/imdario/mergo v0.3.9/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU=
github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
+github.com/ishidawataru/sctp v0.0.0-20190723014705-7c296d48a2b5/go.mod h1:DM4VvS+hD/kDi1U1QsX2fnZowwBhqD0Dk3bRPKF/Oc8=
+github.com/jaypipes/ghw v0.8.1-0.20210605191321-eb162add542b h1:gqEethdcv2egL3XtvkHh47m4nj09q4XO/VTioGKDLDI=
+github.com/jaypipes/ghw v0.8.1-0.20210605191321-eb162add542b/go.mod h1:+gR9bjm3W/HnFi90liF+Fj9GpCe/Dsibl9Im8KmC7c4=
+github.com/jaypipes/pcidb v0.6.0 h1:VIM7GKVaW4qba30cvB67xSCgJPTzkG8Kzw/cbs5PHWU=
+github.com/jaypipes/pcidb v0.6.0/go.mod h1:L2RGk04sfRhp5wvHO0gfRAMoLY/F3PKv/nwJeVoho0o=
github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
-github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
github.com/jmespath/go-jmespath v0.3.0/go.mod h1:9QtRXoHjLGCJ5IBSaohpXITPlowMeeYCZ7fLUTSywik=
+github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
+github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
+github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg=
github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
github.com/jonboulle/clockwork v0.2.2/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8=
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
-github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
@@ -476,36 +478,35 @@ github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
+github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes=
+github.com/karrick/godirwalk v1.8.0/go.mod h1:H5KPZjojv4lE+QYImBI8xVtrBRgYrIVsaRPx4tDPEn4=
+github.com/karrick/godirwalk v1.10.3/go.mod h1:RoGL9dQei4vP9ilrpETWE8CLOZ1kiN0LhBygSwrAsHA=
+github.com/karrick/godirwalk v1.16.1/go.mod h1:j4mkqPuvaLI8mp1DroR3P6ad7cyYd4c1qeJ3RV7ULlk=
github.com/kevinburke/go-bindata v3.16.0+incompatible h1:TFzFZop2KxGhqNwsyjgmIh5JOrpG940MZlm5gNbxr8g=
github.com/kevinburke/go-bindata v3.16.0+incompatible/go.mod h1:/pEEZ72flUW2p0yi30bslSp9YqD9pysLxunQDdb2CPM=
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
-github.com/kisielk/gotool v0.0.0-20161130080628-0de1eaf82fa3/go.mod h1:jxZFDH7ILpTPQTk+E2s+z4CUas9lVNjIuKR4c5/zKgM=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
-github.com/klauspost/compress v1.4.0/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
-github.com/klauspost/compress v1.4.1/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
-github.com/klauspost/compress v1.10.7/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
-github.com/klauspost/compress v1.10.8/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
-github.com/klauspost/cpuid v0.0.0-20180405133222-e7e905edc00e/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
-github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
-github.com/klauspost/pgzip v1.2.4/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
+github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
+github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
-github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
-github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
+github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
+github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII=
+github.com/libopenstorage/openstorage v1.0.0/go.mod h1:Sp1sIObHjat1BeXhfMqLZ14wnOzEhNx2YQedreMcUyc=
github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE=
github.com/lithammer/dedent v1.1.0/go.mod h1:jrXYCQtgg0nJiN+StA2KgR7w6CiQNv9Fd/Z9BP0jIOc=
-github.com/logrusorgru/aurora v0.0.0-20181002194514-a7b3b318ed4e/go.mod h1:7rIyQOR62GCctdiQpZ/zOJlFyk6y+94wXzv6RNZgaR4=
-github.com/magiconair/properties v1.7.6/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
+github.com/lpabon/godbc v0.1.1/go.mod h1:Jo9QV0cf3U6jZABgiJ2skINAXb9j8m51r07g4KI92ZA=
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60=
@@ -514,36 +515,45 @@ github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN
github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs=
github.com/mailru/easyjson v0.7.6 h1:8yTIVnZgCoiM1TgqoeTl+LfU5Jg6/xL3QhGQnimLYnA=
github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
+github.com/markbates/oncer v0.0.0-20181203154359-bf2de49a0be2/go.mod h1:Ld9puTsIW75CHf65OeIOkyKbteujpZVXDpWK6YGZbxE=
+github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0=
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
+github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
+github.com/mattn/go-colorable v0.1.7/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
github.com/mattn/go-colorable v0.1.8 h1:c1ghPdyEDarC70ftn0y+A/Ee++9zz8ljHG1b13eJ0s8=
github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
-github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
+github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
+github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84=
+github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE=
github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY=
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
-github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
github.com/mattn/go-runewidth v0.0.7/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
-github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
-github.com/mattn/go-shellwords v1.0.10/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y=
-github.com/mattn/goveralls v0.0.2/go.mod h1:8d1ZMHsd7fW6IRPKQh46F2WRpyib5/X4FOpevwGNQEw=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI=
github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
-github.com/mistifyio/go-zfs v2.1.1+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4=
+github.com/mikefarah/yq/v3 v3.0.0-20201202084205-8846255d1c37/go.mod h1:dYWq+UWoFCDY1TndvFUQuhBbIYmZpjreC8adEAx93zE=
+github.com/mindprince/gonvml v0.0.0-20190828220739-9ebdce4bb989/go.mod h1:2eu9pRWp8mo84xCg6KswZ+USQHjwgRhNp06sozOdsTY=
+github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4=
github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
+github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
-github.com/mitchellh/go-ps v0.0.0-20170309133038-4fdf99ab2936/go.mod h1:r1VsdOzOPt1ZSrGZWFoNhsAedKnEd6r9Np1+5blZCWk=
github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo=
github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg=
github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY=
github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
-github.com/mitchellh/mapstructure v0.0.0-20180220230111-00c29f56e238/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
+github.com/mitchellh/mapstructure v1.3.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
+github.com/mitchellh/mapstructure v1.4.1 h1:CpVNEelQCZBooIPDn+AR3NpivK/TIKU8bDxdASFVQag=
github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
+github.com/moby/ipvs v1.0.1/go.mod h1:2pngiyseZbIKXNv7hsKj3O9UEz30c53MT9005gt2hxQ=
+github.com/moby/spdystream v0.2.0 h1:cjW1zVyyoiM0T7b6UoySUFqzXMoqRckQtXwGPiBhOM8=
github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c=
+github.com/moby/sys/mountinfo v0.4.1/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A=
+github.com/moby/term v0.0.0-20201216013528-df9cb8a40635/go.mod h1:FBS0z0QWA44HXygs7VXDUOGoN/1TV3RuWkLO04am3wc=
github.com/moby/term v0.0.0-20210610120745-9d4ed1856297/go.mod h1:vgPCkQMyxTZ7IDy8SXRufE172gr8+K/JE/7hHFxHW3A=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
@@ -552,83 +562,68 @@ github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lN
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
+github.com/mohae/deepcopy v0.0.0-20170603005431-491d3605edfb/go.mod h1:TaXosZuwdSHYgviHp1DAtfrULt5eUgsSMsZf+YrPgl8=
github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00/go.mod h1:Pm3mSP3c5uWn86xMLZ5Sa7JB9GsEZySvHYXCTK4E9q4=
+github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc=
github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
-github.com/mozilla/tls-observatory v0.0.0-20180409132520-8791a200eb40/go.mod h1:SrKMQvPiws7F7iqYp8/TX+IhxCYhzr6N/1yb8cwHsGk=
-github.com/mozilla/tls-observatory v0.0.0-20190404164649-a3c1b6cfecfd/go.mod h1:SrKMQvPiws7F7iqYp8/TX+IhxCYhzr6N/1yb8cwHsGk=
-github.com/mtrmac/gpgme v0.1.2/go.mod h1:GYYHnGSuS7HK3zVS2n3y73y0okK/BeKzwnn5jgiVFNI=
+github.com/mrunalp/fileutils v0.5.0/go.mod h1:M1WthSahJixYnrXQl/DFQuteStB1weuxD2QJNHXfbSQ=
github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
+github.com/mvdan/xurls v1.1.0/go.mod h1:tQlNn3BED8bE/15hnSL2HLkDeLWpNPAwtw7wkEq44oU=
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
-github.com/nbutton23/zxcvbn-go v0.0.0-20160627004424-a22cb81b2ecd/go.mod h1:o96djdrsSGy3AWPyBgZMAGfxZNfgntdJG+11KU4QvbU=
-github.com/nbutton23/zxcvbn-go v0.0.0-20171102151520-eafdab6b0663/go.mod h1:o96djdrsSGy3AWPyBgZMAGfxZNfgntdJG+11KU4QvbU=
-github.com/nbutton23/zxcvbn-go v0.0.0-20180912185939-ae427f1e4c1d/go.mod h1:o96djdrsSGy3AWPyBgZMAGfxZNfgntdJG+11KU4QvbU=
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs=
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE=
github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU=
+github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4=
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
-github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo=
github.com/olekukonko/tablewriter v0.0.4/go.mod h1:zq6QwlOf5SlnkVbMSr5EoBv3636FWnp+qbPhuoO21uA=
github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
-github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
-github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0=
github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU=
github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
-github.com/onsi/gomega v1.4.2/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
-github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
+github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
github.com/onsi/gomega v1.14.0/go.mod h1:cIuvLEne0aoVhAgh/O6ac0Op8WWw9H6eYCriF+tEHG0=
github.com/onsi/gomega v1.17.0 h1:9Luw4uT5HTjHTN8+aNcSThgH1vdXnmdJ8xIfZ4wyTRE=
github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY=
-github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
-github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
-github.com/opencontainers/image-spec v1.0.2-0.20190823105129-775207bd45b6/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
-github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
-github.com/opencontainers/runc v0.0.0-20191031171055-b133feaeeb2e/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
-github.com/opencontainers/runc v1.0.0-rc90/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
-github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
-github.com/opencontainers/selinux v1.5.2/go.mod h1:yTcKuYAh6R95iDpefGLQaPaRwJFwyzAJufJyiTt7s0g=
-github.com/openshift/api v0.0.0-20200326160804-ecb9283fe820/go.mod h1:RKMJ5CBnljLfnej+BJ/xnOWc3kZDvJUaIAEq2oKSPtE=
-github.com/openshift/api v0.0.0-20201214114959-164a2fb63b5f/go.mod h1:aqU5Cq+kqKKPbDMqxo9FojgDeSpNJI7iuskjXjtojDg=
-github.com/openshift/api v0.0.0-20210105115604-44119421ec6b/go.mod h1:aqU5Cq+kqKKPbDMqxo9FojgDeSpNJI7iuskjXjtojDg=
-github.com/openshift/api v0.0.0-20210409143810-a99ffa1cac67/go.mod h1:dZ4kytOo3svxJHNYd0J55hwe/6IQG5gAUHUE0F3Jkio=
-github.com/openshift/api v0.0.0-20211209135129-c58d9f695577 h1:NUe82M8wMYXbd5s+WBAJ2QAZZivs+nhZ3zYgZFwKfqw=
+github.com/opencontainers/runc v1.0.2/go.mod h1:aTaHFFwQXuA71CiyxOdFFIorAoemI04suvGRQFzWTD0=
+github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
+github.com/opencontainers/runtime-spec v1.0.3-0.20200929063507-e6143ca7d51d/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
+github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
+github.com/opencontainers/selinux v1.8.2/go.mod h1:MUIHuUEvKB1wtJjQdOyYRgOnLD2xAPP8dBsCoU0KuF8=
github.com/openshift/api v0.0.0-20211209135129-c58d9f695577/go.mod h1:DoslCwtqUpr3d/gsbq4ZlkaMEdYqKxuypsDjorcHhME=
-github.com/openshift/build-machinery-go v0.0.0-20200211121458-5e3d6e570160/go.mod h1:1CkcsT3aVebzRBzVTSbiKSkJMsC/CASqxesfqEMfJEc=
-github.com/openshift/build-machinery-go v0.0.0-20200917070002-f171684f77ab/go.mod h1:b1BuldmJlbA/xYtdZvKi+7j5YGB44qJUJDZ9zwiNCfE=
-github.com/openshift/build-machinery-go v0.0.0-20210209125900-0da259a2c359/go.mod h1:b1BuldmJlbA/xYtdZvKi+7j5YGB44qJUJDZ9zwiNCfE=
+github.com/openshift/api v0.0.0-20220110171111-997c316db5e1 h1:gvAPP+X17EZwlyim5d/KCmNng6zp+4fRxul0X2Z068A=
+github.com/openshift/api v0.0.0-20220110171111-997c316db5e1/go.mod h1:F/eU6jgr6Q2VhMu1mSpMmygxAELd7+BUxs3NHZ25jV4=
github.com/openshift/build-machinery-go v0.0.0-20210712174854-1bb7fd1518d3/go.mod h1:b1BuldmJlbA/xYtdZvKi+7j5YGB44qJUJDZ9zwiNCfE=
-github.com/openshift/build-machinery-go v0.0.0-20210806203541-4ea9b6da3a37/go.mod h1:b1BuldmJlbA/xYtdZvKi+7j5YGB44qJUJDZ9zwiNCfE=
github.com/openshift/build-machinery-go v0.0.0-20211213093930-7e33a7eb4ce3 h1:65oBhJYHzYK5VL0gF1eiYY37lLzyLZ47b9y5Kib1nf8=
github.com/openshift/build-machinery-go v0.0.0-20211213093930-7e33a7eb4ce3/go.mod h1:b1BuldmJlbA/xYtdZvKi+7j5YGB44qJUJDZ9zwiNCfE=
-github.com/openshift/client-go v0.0.0-20201214125552-e615e336eb49/go.mod h1:9/jG4I6sh+5QublJpZZ4Zs/P4/QCXMsQQ/K/058bSB8=
-github.com/openshift/client-go v0.0.0-20210112165513-ebc401615f47/go.mod h1:u7NRAjtYVAKokiI9LouzTv4mhds8P4S1TwdVAfbjKSk=
github.com/openshift/client-go v0.0.0-20211209144617-7385dd6338e3 h1:SG1aqwleU6bGD0X4mhkTNupjVnByMYYuW4XbnCPavQU=
github.com/openshift/client-go v0.0.0-20211209144617-7385dd6338e3/go.mod h1:cwhyki5lqBmrT0m8Im+9I7PGFaraOzcYPtEz93RcsGY=
-github.com/openshift/library-go v0.0.0-20191003152030-97c62d8a2901/go.mod h1:NBttNjZpWwup/nthuLbPAPSYC8Qyo+BBK5bCtFoyYjo=
-github.com/openshift/library-go v0.0.0-20210205203934-9eb0d970f2f4/go.mod h1:udseDnqxn5ON8i+NBjDp00fBTK0JRu1/6Y6tf6EivDE=
-github.com/openshift/library-go v0.0.0-20211209153216-ed9bc958bd8a h1:MoAaYFrzB5QlYzO7phyjx/JBxghUrLitwb69RaulRAs=
-github.com/openshift/library-go v0.0.0-20211209153216-ed9bc958bd8a/go.mod h1:M/Gi/GUUrMdSS07nrYtTiK43J6/VUAyk/+IfN4ZqUY4=
-github.com/openshift/machine-config-operator v0.0.1-0.20210514234214-c415ce6aed25 h1:3EekrCI6YH2UOCPBB9pOxqv7FW50oaIaRxKLpH83YKI=
-github.com/openshift/machine-config-operator v0.0.1-0.20210514234214-c415ce6aed25/go.mod h1:LC0tawtxYlQ94QiIMOZ68Q+B3xEO8Vq3FIn+srfm4mE=
-github.com/openshift/runtime-utils v0.0.0-20200415173359-c45d4ff3f912/go.mod h1:0OXNy7VoqFexkxKqyQbHJLPwn1MFp1/CxRJAgKHM+/o=
+github.com/openshift/custom-resource-status v0.0.0-20200602122900-c002fd1547ca h1:F1MEnOMwSrTA0YAkO0he9ip9w0JhYzI/iCB2mXmaSPg=
+github.com/openshift/custom-resource-status v0.0.0-20200602122900-c002fd1547ca/go.mod h1:GDjWl0tX6FNIj82vIxeudWeSx2Ff6nDZ8uJn0ohUFvo=
+github.com/openshift/library-go v0.0.0-20211220195323-eca2c467c492 h1:oj/rSQqVWVj6YJUydZwLz2frrJreiyI4oa9g/YPgMsM=
+github.com/openshift/library-go v0.0.0-20211220195323-eca2c467c492/go.mod h1:4UQ9snU1vg53fyTpHQw3vLPiAxI8ub5xrc+y8KPQQFs=
+github.com/openshift/machine-config-operator v0.0.1-0.20220203091316-d3010b34d344 h1:HfLsauJWwM1LZtPPmG9LExqphdddycUvaYtsfyYFn3o=
+github.com/openshift/machine-config-operator v0.0.1-0.20220203091316-d3010b34d344/go.mod h1:FZ6GifJP0KAKiPE3kvsxdJgkoAMXbSnVbS4to1d+4QA=
github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
-github.com/ostreedev/ostree-go v0.0.0-20190702140239-759a8c1ac913/go.mod h1:J6OG6YJVEWopen4avK3VNQSnALmmjvniMmni/YFYAwc=
+github.com/operator-framework/api v0.10.7 h1:GlZJ6m+0WSVdSsSjTbhKKAvHXamWJXhwXHUhVwL8LBE=
+github.com/operator-framework/api v0.10.7/go.mod h1:PtQSNSuVrhSC6YE6JJJZv3nnZJc32osKX8FmFUZK05U=
+github.com/operator-framework/operator-lifecycle-manager v3.11.0+incompatible h1:Po8C8RVLRWq7pNQ5pKonM9CXpC/osoBWbmsuf+HJnSI=
+github.com/operator-framework/operator-lifecycle-manager v3.11.0+incompatible/go.mod h1:Ma5ZXd4S1vmMyewWlF7aO8CZiokR7Sd8dhSfkGkNU4U=
github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
-github.com/pelletier/go-toml v1.1.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
+github.com/pelletier/go-toml v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAvS1LBMMhTE=
github.com/pelletier/go-toml v1.9.3/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
github.com/pin/tftp v2.1.0+incompatible/go.mod h1:xVpZOMCXTy+A5QMjEVN0Glwa1sUvaJhFXbr/aAxuxGY=
@@ -636,21 +631,19 @@ github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
-github.com/pkg/profile v1.3.0/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA=
github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA=
-github.com/pquerna/ffjson v0.0.0-20181028064349-e517b90714f7/go.mod h1:YARuvh7BUWHNhzDq2OM5tzR2RiCcN2D7sapiKyCel/M=
-github.com/pquerna/ffjson v0.0.0-20190813045741-dac163c6c0a9/go.mod h1:YARuvh7BUWHNhzDq2OM5tzR2RiCcN2D7sapiKyCel/M=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
-github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g=
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
github.com/prometheus/client_golang v1.11.0 h1:HNkLOAEQMIDv/K+04rukrLx6ch7msSRwf3/SASFAGtQ=
github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
+github.com/prometheus/client_golang v1.11.1 h1:+4eQaD7vAZ6DsfsxB15hbE0odUjGI5ARs9yskGu1v4s=
+github.com/prometheus/client_golang v1.11.1/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
@@ -659,85 +652,74 @@ github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6T
github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
-github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc=
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
github.com/prometheus/common v0.28.0 h1:vGVfV9KrDTvWt5boZO0I19g2E3CsWfpPPKZM9dt3mEw=
github.com/prometheus/common v0.28.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
-github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
+github.com/prometheus/procfs v0.0.0-20190522114515-bc1a522cf7b1/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
-github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ=
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
github.com/prometheus/procfs v0.6.0 h1:mxy4L2jP6qMonqmq+aTtOx1ifVWUgG/TAmntgbh3xv4=
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
-github.com/quasilyte/go-consistent v0.0.0-20190521200055-c6f3937de18c/go.mod h1:5STLWrekHfjyYwxBRVRXNOSewLJ3PWfDJd1VyTS21fI=
-github.com/robfig/cron v1.2.0/go.mod h1:JGuDeoQd7Z6yL4zQhZ3OPEVHB7fL6Ka6skscFHfmt2k=
+github.com/quobyte/api v0.1.8/go.mod h1:jL7lIHrmqQ7yh05OJ+eEEdHr0u/kmT1Ff9iHd+4H6VI=
+github.com/remyoudompheng/bigfft v0.0.0-20170806203942-52369c62f446/go.mod h1:uYEyJGbgTkfkS4+E/PavXkNJcbFIpEtjt2B0KDQ5+9M=
+github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro=
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
-github.com/rogpeppe/go-charset v0.0.0-20180617210344-2471d30d28b4/go.mod h1:qgYeAmZ5ZIpBWTGllZSQnw97Dj+woV0toclVaRGI8pc=
github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
+github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
+github.com/rubiojr/go-vhd v0.0.0-20200706105327-02e210299021/go.mod h1:DM5xW0nvfNNm2uytzsvhI3OnX8uzaRAg8UX/CnDqbto=
github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
-github.com/ryanuber/go-glob v0.0.0-20170128012129-256dc444b735/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc=
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
-github.com/securego/gosec v0.0.0-20191002120514-e680875ea14d/go.mod h1:w5+eXa0mYznDkHaMCXA4XYffjlH+cy1oyKbfzJXa2Do=
+github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo=
github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=
-github.com/shirou/gopsutil v0.0.0-20180427012116-c95755e4bcd7/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA=
-github.com/shirou/w32 v0.0.0-20160930032740-bb4de0191aa4/go.mod h1:qsXQc7+bwAM3Q1u/4XEfrquwF8Lw7D7y5cD8CuHnfIc=
-github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk=
-github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ=
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
-github.com/sirupsen/logrus v1.0.5/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
+github.com/sirupsen/logrus v1.4.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
+github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE=
github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
-github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
+github.com/smartystreets/assertions v1.1.0 h1:MkTeG1DMwsrdH7QtLXy5W+fUxWq+vmb6cLmyJ7aRtF0=
+github.com/smartystreets/assertions v1.1.0/go.mod h1:tcbTF8ujkAEcZ8TElKY+i30BzYlVhC/LOxJk7iOWnoo=
github.com/smartystreets/goconvey v0.0.0-20190222223459-a17d461953aa/go.mod h1:2RVY1rIf+2J2o/IM9+vPq9RzmHDSseB7FoXiSNIUsoU=
github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s=
github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0=
-github.com/sourcegraph/go-diff v0.5.1/go.mod h1:j2dHj3m8aZgQO8lMTcTnBcXkRRRqi34cd2MNlA9u1mE=
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
-github.com/spf13/afero v1.1.0/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I=
-github.com/spf13/cast v1.2.0/go.mod h1:r2rcYCSwa1IExKTDiTfzaxqT2FNHs8hODu4LnUfgKEg=
github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
-github.com/spf13/cobra v0.0.2/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
-github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU=
github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE=
-github.com/spf13/cobra v1.1.1/go.mod h1:WnodtKOvamDL/PwE2M4iKs8aMDBZ5Q5klgD3qfVJQMI=
github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo=
github.com/spf13/cobra v1.2.1 h1:+KmjbUw1hriSNMF55oPrkZcb27aECyrj8V2ytv7kWDw=
github.com/spf13/cobra v1.2.1/go.mod h1:ExllRjgxM/piMAM+3tAZvg8fsklGAf3tPfi+i8t68Nk=
-github.com/spf13/jwalterweatherman v0.0.0-20180109140146-7c0cea34c8ec/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo=
github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
-github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
+github.com/spf13/pflag v1.0.2/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
-github.com/spf13/viper v1.0.2/go.mod h1:A8kyI5cUJhb8N+3pkfONlcEcZbueH6nhAm0Fq7SrnBM=
-github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s=
github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE=
github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg=
github.com/spf13/viper v1.8.1/go.mod h1:o0Pch8wJ9BVSWGQMbra6iw0oQ5oktSIBaujf1rJH9Ns=
github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8=
+github.com/storageos/go-api v2.2.0+incompatible/go.mod h1:ZrLn+e0ZuF3Y65PNF6dIwbJPZqfmtCXxFm9ckv0agOY=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
@@ -745,46 +727,35 @@ github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXf
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
-github.com/stretchr/testify v1.6.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
-github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
-github.com/tchap/go-patricia v2.3.0+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I=
-github.com/timakin/bodyclose v0.0.0-20190721030226-87058b9bfcec/go.mod h1:Qimiffbc6q9tBWlVV6x0P9sat/ao1xEkREYPPj9hphk=
-github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
+github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
+github.com/tidwall/pretty v1.0.0 h1:HsD+QiTn7sK6flMKIvNmpqz1qrpP3Ps6jOKIKMooyg4=
+github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc=
-github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
-github.com/ulikunitz/xz v0.5.7/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
-github.com/ultraware/funlen v0.0.1/go.mod h1:Dp4UiAus7Wdb9KUZsYWZEWiRzGuM2kXM1lPbfaF6xhA=
-github.com/ultraware/funlen v0.0.2/go.mod h1:Dp4UiAus7Wdb9KUZsYWZEWiRzGuM2kXM1lPbfaF6xhA=
-github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
-github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
-github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
-github.com/valyala/fasthttp v1.2.0/go.mod h1:4vX61m6KN+xDduDNwXrhIAVZaZaZiQ1luJk8LWSxF3s=
-github.com/valyala/quicktemplate v1.1.1/go.mod h1:EH+4AkTd43SvgIbQHYu59/cJyxDoOVRUAfrukLPuGJ4=
-github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a/go.mod h1:v3UYOV9WzVtRmSR+PDvWpU/qWl4Wa5LApYYX4ZtKbio=
-github.com/vbatts/tar-split v0.11.1/go.mod h1:LEuURwDEiWjRjwu46yU3KVGuUdVv/dcnpcEPSzR8z6g=
-github.com/vbauerster/mpb/v5 v5.2.2/go.mod h1:W5Fvgw4dm3/0NhqzV8j6EacfuTe5SvnzBRwiXxDR9ww=
+github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
+github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
+github.com/urfave/negroni v1.0.0/go.mod h1:Meg73S6kFm/4PpbYdq35yYWoCZ9mS/YSx+lKnmiohz4=
github.com/vincent-petithory/dataurl v0.0.0-20160330182126-9a301d65acbb/go.mod h1:FHafX5vmDzyP+1CQATJn7WFKc9CvnvxyvZy6I1MrG/U=
github.com/vincent-petithory/dataurl v0.0.0-20191104211930-d1553a71de50 h1:uxE3GYdXIOfhMv3unJKETJEhw78gvzuQqRX/rVirc2A=
github.com/vincent-petithory/dataurl v0.0.0-20191104211930-d1553a71de50/go.mod h1:FHafX5vmDzyP+1CQATJn7WFKc9CvnvxyvZy6I1MrG/U=
-github.com/vishvananda/netlink v1.0.0/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk=
+github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE=
github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU=
+github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0=
+github.com/vmware/govmomi v0.20.3/go.mod h1:URlwyTFZX72RmxtxuaFL2Uj3fD1JTvZdx59bHWk6aFU=
github.com/vmware/vmw-guestinfo v0.0.0-20170707015358-25eff159a728/go.mod h1:x9oS4Wk2s2u4tS29nEaDLdzvuHdB19CvSGJjPgkZJNk=
github.com/vmware/vmw-ovflib v0.0.0-20170608004843-1f217b9dc714/go.mod h1:jiPk45kn7klhByRvUq5i2vo1RtHKBHj+iWGFpxbXuuI=
-github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
-github.com/xeipuuv/gojsonpointer v0.0.0-20190809123943-df4f5c81cb3b/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
-github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
-github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ=
-github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y=
+github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI=
+github.com/xdg-go/scram v1.0.2/go.mod h1:1WAq6h33pAW+iRreB34OORO2Nf7qel3VV3fjBj+hCSs=
+github.com/xdg-go/stringprep v1.0.2/go.mod h1:8F9zXuvzgwmyT5DUm4GUfZGDdT3W+LCvS6+da4O5kxM=
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
-github.com/xlab/handysort v0.0.0-20150421192137-fb3537ed64a1/go.mod h1:QcJo0QPSfTONNIgpN5RA8prR7fF8nkF6cTWTcNerRO8=
github.com/xlab/treeprint v0.0.0-20181112141820-a009c3971eca/go.mod h1:ce1O1j6UtZfjr22oyGxGLbauSBp2YVXpARAosm7dHBg=
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
+github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA=
github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
@@ -792,10 +763,7 @@ github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9dec
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
github.com/yuin/goldmark v1.4.0/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
-go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
-go.etcd.io/bbolt v1.3.4/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ=
go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4=
-go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489/go.mod h1:yVHk9ub3CSBatqGNg7GRmsnfLWtoW60w4eDYfh7vHDg=
go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs=
go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g=
go.etcd.io/etcd/client/v2 v2.305.0/go.mod h1:h9puh54ZTgAKtEbut2oe9P4L/oqKCVB6xsXlzd7alYQ=
@@ -803,6 +771,9 @@ go.etcd.io/etcd/client/v3 v3.5.0/go.mod h1:AIKXXVX/DQXtfTEqBryiLTUXwON+GuvO6Z7lL
go.etcd.io/etcd/pkg/v3 v3.5.0/go.mod h1:UzJGatBQ1lXChBkQF0AuAtkRQMYnHubxAEYIrC3MSsE=
go.etcd.io/etcd/raft/v3 v3.5.0/go.mod h1:UFOHSIvO/nKwd4lhkwabrTD3cqW5yVyYYf/KlD00Szc=
go.etcd.io/etcd/server/v3 v3.5.0/go.mod h1:3Ah5ruV+M+7RZr0+Y/5mNLwC+eQlni+mQmOVdCRJoS4=
+go.mongodb.org/mongo-driver v1.7.3/go.mod h1:NqaYOwnXWr5Pm7AOpO5QFxKJ503nbMse/R79oO62zWg=
+go.mongodb.org/mongo-driver v1.7.5 h1:ny3p0reEpgsR2cfA5cjgwFZg3Cv/ofFh/8jbhGtz9VI=
+go.mongodb.org/mongo-driver v1.7.5/go.mod h1:VXEWRZ6URJIkUq2SCAyapmhH0ZLRBP+FT4xhp5Zvxng=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
@@ -823,7 +794,6 @@ go.opentelemetry.io/otel/sdk/metric v0.20.0/go.mod h1:knxiS8Xd4E/N+ZqKmUPf3gTTZ4
go.opentelemetry.io/otel/trace v0.20.0/go.mod h1:6GjCW8zgDjwGHGa6GkyeB8+/5vjT16gUEi0Nf1iBdgw=
go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
go.starlark.net v0.0.0-20200306205701-8dd3e2ee1dd5/go.mod h1:nmDLcffg48OtT/PSW0Hg7FvpRQsQh5OSqIylirxKC7o=
-go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw=
go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
@@ -839,26 +809,29 @@ go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo=
go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI=
go.uber.org/zap v1.19.1 h1:ue41HOKd1vGURxrmeKIgELGb3jPW9DMUDGtsinblHwI=
go.uber.org/zap v1.19.1/go.mod h1:j3DNczoxDZroyBnOT1L/Q79cfUMGZxlv/9dzN7SM1rI=
-go4.org v0.0.0-20200104003542-c7e774b10ea0/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE=
+go4.org v0.0.0-20200104003542-c7e774b10ea0 h1:M6XsnQeLwG+rHQ+/rrGh3puBI3WZEy9TBWmf2H+enQA=
golang.org/x/arch v0.0.0-20180920145803-b19384d3c130/go.mod h1:cYlCBUl1MsqxdiKgmc4uh7TxZfWSFLOGSRR090WDxt8=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
-golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
-golang.org/x/crypto v0.0.0-20190313024323-a1f597ede03a/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20190422162423-af44ce270edf/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE=
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20200423211502-4bdfaf469ed5/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
+golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/exp v0.0.0-20190312203227-4b39c73a6495/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
+golang.org/x/exp v0.0.0-20190731235908-ec7cb31e5a56/go.mod h1:JhuoJpWY28nO4Vef9tZUw9qufEGTyX1+7lmHxV5q5G4=
golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
@@ -866,6 +839,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0
golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
+golang.org/x/exp v0.0.0-20210220032938-85be41e4509f/go.mod h1:I6l2HNBLBZEcrOoCpyKLdY2lHoRZ8lI4x60KMCQDft4=
+golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
@@ -882,28 +857,28 @@ golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPI
golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
+golang.org/x/mobile v0.0.0-20201217150744-e6ae53a27f4f/go.mod h1:skQtrUTUwhdJvXM/2KKJzY8pDgNr9I/FOMqDVRPBUS4=
golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
+golang.org/x/mod v0.1.1-0.20191209134235-331c550502dd/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.3.1-0.20200828183125-ce943fd02449/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.2 h1:Gz96sIWK3OalVv/I/qNygP42zyoKp3xptRVCWRFEBvo=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/net v0.0.0-20170915142106-8351a756f30f/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20180911220305-26e67e76b6c3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20190228165749-92fc7df08ae7/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
@@ -914,10 +889,7 @@ golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLL
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
@@ -939,16 +911,19 @@ golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwY
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
-golang.org/x/net v0.0.0-20210224082022-3d97a244fca7/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc=
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
+golang.org/x/net v0.0.0-20210421230115-4e50805a0758/go.mod h1:72T/g9IO56b78aLF+1Kcs5dz7/ng1VjMUvfKvpfy+jM=
golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk=
+golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
-golang.org/x/net v0.0.0-20210825183410-e898025ed96a h1:bRuuGXV8wwSdGTB+CtJf+FjgO1APK1CoO39T4BN/XBw=
golang.org/x/net v0.0.0-20210825183410-e898025ed96a/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20211209124913-491a49abca63 h1:iocB37TsdFuN6IBRZ+ry36wrkoV51/tl5vOWqkcPGvY=
+golang.org/x/net v0.0.0-20211209124913-491a49abca63/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -961,6 +936,7 @@ golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ
golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210402161424-2e8d93401602/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210427180440-81ed05c6b58c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f h1:Qmd2pbz05z7z6lm0DrgQVVPuBm92jqujBKMHMOlOQEw=
golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
@@ -968,6 +944,7 @@ golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190412183630-56d357773e84/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -975,7 +952,6 @@ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sys v0.0.0-20171026204733-164713f0dfce/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180903190138-2b024373dcd9/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -984,27 +960,30 @@ golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5h
golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190419153524-e8e3143a4f4a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190514135907-3a4b5fb9f71f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190531175056-4c3a928424d2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191002063906-3421d5a6bb1c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191010194322-b09406accb47/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191110163157-d32e6e3b99c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -1012,10 +991,11 @@ golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200120151820-655fe14d7479/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200217220822-9197077df867/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -1031,9 +1011,10 @@ golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200831180312-196b9ba8737a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200916030750-2334cc1a136f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20201112073958-5cba982894dd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -1044,9 +1025,13 @@ golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210420072515-93ed5bcd2bfe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210503080704-8803ae5d1324/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
@@ -1060,7 +1045,6 @@ golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9sn
golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b h1:9zKuko04nR4gjZ4+DNjHqRlAJqbJETHwiNKDqTfOjfE=
golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
-golang.org/x/text v0.0.0-20170915090833-1cbadb444a80/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
@@ -1070,42 +1054,36 @@ golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
-golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac h1:7zkz7BUtwNFFqcowJ+RIgu2MaV/MapERkDIy+mwPyjs=
golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/tools v0.0.0-20170915040203-e531a2a1c15f/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20181117154741-2ddaf7f79a09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20190110163146-51295c7ec13a/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20190121143147-24cd39ecf745/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190311215038-5c2858a9cfe5/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190322203728-c1a832b0ad89/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190329151228-23e29df326fe/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190416151739-9c9e1878f421/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190420181800-aa740d480789/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
-golang.org/x/tools v0.0.0-20190521203540-521d6ed310dd/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+golang.org/x/tools v0.0.0-20190531172133-b3315ee88b7d/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20190909030654-5b82db07426d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20190930201159-7c411dea38b0/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
@@ -1116,7 +1094,7 @@ golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtn
golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200115044656-831fdb1e1868/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200117012304-6edc0a871e69/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
@@ -1136,7 +1114,6 @@ golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roY
golang.org/x/tools v0.0.0-20200601175630-2caf76543d99/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200606014950-c42cb6316fb6/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200610160956-3e83d1e96d0e/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
-golang.org/x/tools v0.0.0-20200616133436-c1934b75d054/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
@@ -1161,6 +1138,12 @@ golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1N
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
gomodules.xyz/jsonpatch/v2 v2.2.0 h1:4pT439QV83L+G9FkcCriY6EkpcK6r6bK+A5FBUMI7qY=
gomodules.xyz/jsonpatch/v2 v2.2.0/go.mod h1:WXp+iVDkoLQqPudfQ9GBlwB2eZ5DKOnjQZCYdOS8GPY=
+gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo=
+gonum.org/v1/gonum v0.0.0-20190331200053-3d26580ed485/go.mod h1:2ltnJ7xHfj0zHS40VVPYEAAMTa3ZGguvHGBSJeRWqE0=
+gonum.org/v1/gonum v0.6.2/go.mod h1:9mxDZsDKxgMAuccQkewq682L+0eCu4dCN2yonUJTCLU=
+gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw=
+gonum.org/v1/netlib v0.0.0-20190331212654-76723241ea4e/go.mod h1:kS+toOQn6AQKjmKJ7gzohV1XkqsFehRA2FbsbkopSuQ=
+gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc=
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
@@ -1184,6 +1167,7 @@ google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjR
google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU=
google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94=
google.golang.org/api v0.44.0/go.mod h1:EBOGZqzyhtvMDoxwS97ctnh0zUmYY6CxqXsc1AvkYD8=
+google.golang.org/api v0.46.0/go.mod h1:ceL4oozhkAiTID8XMmJBsIxID/9wMXJVVFXPg4ylg3I=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
@@ -1205,6 +1189,7 @@ google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvx
google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20200117163144-32f20d992d24/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA=
google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
@@ -1238,6 +1223,7 @@ google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6D
google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A=
+google.golang.org/genproto v0.0.0-20210429181445-86c259c2b4ab/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A=
google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
@@ -1245,8 +1231,6 @@ google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiq
google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
-google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
-google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA=
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
@@ -1279,32 +1263,31 @@ google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp0
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.27.1 h1:SnqbnDw1V7RiZcXPx5MEeqPv2s79L9i7BJUlG/+RurQ=
google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
-gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
-gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d/go.mod h1:cuepJuh7vyXfUyUwEgHQXw849cJrilpS5NeIjOWESAw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU=
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
-gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo=
+gopkg.in/gcfg.v1 v1.2.0/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o=
+gopkg.in/go-playground/assert.v1 v1.2.1/go.mod h1:9RXL0bg/zibRAgZUYszZSwO/z8Y/a8bDuhia5mkpMnE=
+gopkg.in/go-playground/validator.v9 v9.30.0/go.mod h1:+c9/zcJMFNgbLvly1L1V+PpxWdVbfP1avr/N00E2vyQ=
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/ini.v1 v1.62.0 h1:duBzk771uxoUuOlyRLkHsygud9+5lrlGjdFBb4mSKDU=
gopkg.in/ini.v1 v1.62.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
-gopkg.in/ldap.v2 v2.5.1/go.mod h1:oI0cpe/D7HRtBQl8aTg+ZmzFUAvu4lsv3eLXMLGFxWk=
gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k=
+gopkg.in/op/go-logging.v1 v1.0.0-20160211212156-b2cb9fa56473/go.mod h1:N1eN2tsCx0Ydtgjl4cqmbRCsY4/+z4cYDeqwZTk6zog=
gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
-gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
gopkg.in/src-d/go-billy.v4 v4.3.0/go.mod h1:tm33zBoOwxjYHZIE+OV8bxTWFMJLrconzFMd38aARFk=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
+gopkg.in/warnings.v0 v0.1.1/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI=
gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
@@ -1316,12 +1299,11 @@ gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20190502103701-55513cacd4ae/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-gopkg.in/yaml.v3 v3.0.0-20191010095647-fc94e3f71652/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.0-20200605160147-a5ece683394c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo=
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk=
gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
@@ -1331,21 +1313,28 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
-k8s.io/api v0.23.0 h1:WrL1gb73VSC8obi8cuYETJGXEoFNEh3LU0Pt+Sokgro=
-k8s.io/api v0.23.0/go.mod h1:8wmDdLBHBNxtOIytwLstXt5E9PddnZb0GaMcqsvDBpg=
-k8s.io/apiextensions-apiserver v0.23.0 h1:uii8BYmHYiT2ZTAJxmvc3X8UhNYMxl2A0z0Xq3Pm+WY=
-k8s.io/apiextensions-apiserver v0.23.0/go.mod h1:xIFAEEDlAZgpVBl/1VSjGDmLoXAWRG40+GsWhKhAxY4=
-k8s.io/apimachinery v0.23.0 h1:mIfWRMjBuMdolAWJ3Fd+aPTMv3X9z+waiARMpvvb0HQ=
-k8s.io/apimachinery v0.23.0/go.mod h1:fFCTTBKvKcwTPFzjlcxp91uPFZr+JA0FubU4fLzzFYc=
-k8s.io/apiserver v0.23.0/go.mod h1:Cec35u/9zAepDPPFyT+UMrgqOCjgJ5qtfVJDxjZYmt4=
-k8s.io/cli-runtime v0.23.0/go.mod h1:B5N3YH0KP1iKr6gEuJ/RRmGjO0mJQ/f/JrsmEiPQAlU=
-k8s.io/client-go v0.23.0 h1:vcsOqyPq7XV3QmQRCBH/t9BICJM9Q1M18qahjv+rebY=
-k8s.io/client-go v0.23.0/go.mod h1:hrDnpnK1mSr65lHHcUuIZIXDgEbzc7/683c6hyG4jTA=
-k8s.io/code-generator v0.23.0 h1:lhyd2KJVCEmpjaCpuoooGs+e3xhPwpYvupnNRidO0Ds=
-k8s.io/code-generator v0.23.0/go.mod h1:vQvOhDXhuzqiVfM/YHp+dmg10WDZCchJVObc9MvowsE=
-k8s.io/component-base v0.23.0 h1:UAnyzjvVZ2ZR1lF35YwtNY6VMN94WtOnArcXBu34es8=
-k8s.io/component-base v0.23.0/go.mod h1:DHH5uiFvLC1edCpvcTDV++NKULdYYU6pR9Tt3HIKMKI=
-k8s.io/component-helpers v0.23.0/go.mod h1:liXMh6FZS4qamKtMJQ7uLHnFe3tlC86RX5mJEk/aerg=
+howett.net/plist v0.0.0-20181124034731-591f970eefbb h1:jhnBjNi9UFpfpl8YZhA9CrOqpnJdvzuiHsl/dnxl11M=
+howett.net/plist v0.0.0-20181124034731-591f970eefbb/go.mod h1:vMygbs4qMhSZSc4lCUl2OEE+rDiIIJAIdR4m7MiMcm0=
+k8s.io/api v0.23.3 h1:KNrME8KHGr12Ozjf8ytOewKzZh6hl/hHUZeHddT3a38=
+k8s.io/api v0.23.3/go.mod h1:w258XdGyvCmnBj/vGzQMj6kzdufJZVUwEM1U2fRJwSQ=
+k8s.io/apiextensions-apiserver v0.23.3 h1:JvPJA7hSEAqMRteveq4aj9semilAZYcJv+9HHFWfUdM=
+k8s.io/apiextensions-apiserver v0.23.3/go.mod h1:/ZpRXdgKZA6DvIVPEmXDCZJN53YIQEUDF+hrpIQJL38=
+k8s.io/apimachinery v0.23.3 h1:7IW6jxNzrXTsP0c8yXz2E5Yx/WTzVPTsHIx/2Vm0cIk=
+k8s.io/apimachinery v0.23.3/go.mod h1:BEuFMMBaIbcOqVIJqNZJXGFTP4W6AycEpb5+m/97hrM=
+k8s.io/apiserver v0.23.3/go.mod h1:3HhsTmC+Pn+Jctw+Ow0LHA4dQ4oXrQ4XJDzrVDG64T4=
+k8s.io/cli-runtime v0.23.3/go.mod h1:yA00O5pDqnjkBh8fkuugBbfIfjB1nOpz+aYLotbnOfc=
+k8s.io/client-go v0.23.3 h1:23QYUmCQ/W6hW78xIwm3XqZrrKZM+LWDqW2zfo+szJs=
+k8s.io/client-go v0.23.3/go.mod h1:47oMd+YvAOqZM7pcQ6neJtBiFH7alOyfunYN48VsmwE=
+k8s.io/cloud-provider v0.23.3/go.mod h1:Ik+pKlpPOp0Zs906xyOpT3g2xB9A8VGNdejMTZS6EeA=
+k8s.io/cluster-bootstrap v0.23.3/go.mod h1:NwUIksUHKNOKIHg/AfLH4NxqylbfEVXUh9EX2NxHZII=
+k8s.io/code-generator v0.23.3 h1:NSAKIkvkL8OaWr5DrF9CXGBJjhMp3itplT/6fwHQcAY=
+k8s.io/code-generator v0.23.3/go.mod h1:S0Q1JVA+kSzTI1oUvbKAxZY/DYbA/ZUb4Uknog12ETk=
+k8s.io/component-base v0.23.3 h1:q+epprVdylgecijVGVdf4MbizEL2feW4ssd7cdo6LVY=
+k8s.io/component-base v0.23.3/go.mod h1:1Smc4C60rWG7d3HjSYpIwEbySQ3YWg0uzH5a2AtaTLg=
+k8s.io/component-helpers v0.23.3/go.mod h1:SH+W/WPTaTenbWyDEeY7iytAQiMh45aqKxkvlqQ57cg=
+k8s.io/controller-manager v0.23.3/go.mod h1:E0ss6ogA93sZ+AuibQSa7H4xWIiICTYFjowkjellVeU=
+k8s.io/cri-api v0.23.3/go.mod h1:REJE3PSU0h/LOV1APBrupxrEJqnoxZC8KWzkBUHwrK4=
+k8s.io/csi-translation-lib v0.23.3/go.mod h1:8J7hpeqMoCJWofd1lCs4vZrEshdbVYrqurFeB6GZ/+E=
k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c h1:GohjlNKauSai7gN4wsJkeZ3WAJx4Sh+oT/b5IYn5suA=
k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
@@ -1354,46 +1343,57 @@ k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I=
k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE=
k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
-k8s.io/klog/v2 v2.8.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec=
k8s.io/klog/v2 v2.30.0 h1:bUO6drIvCIsvZ/XFgfxoGFQU/a4Qkh0iAlvUR7vlHJw=
k8s.io/klog/v2 v2.30.0/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
-k8s.io/kube-aggregator v0.23.0/go.mod h1:b1vpoaTWKZjCzvbe1KXFw3vPbISrghJsg7/RI8oZUME=
+k8s.io/kube-aggregator v0.23.3/go.mod h1:pt5QJ3QaIdhZzNlUvN5wndbM0LNT4BvhszGkzy2QdFo=
+k8s.io/kube-controller-manager v0.23.3/go.mod h1:e8m5dhjei67DlLZA/QTvenxiGyonG9UhgHtU1LMslJE=
k8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw=
k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65 h1:E3J9oCLlaobFUqsjG9DfKbP2BmgwBL2p7pn0A3dG9W4=
k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65/go.mod h1:sX9MT8g7NVZM5lVL/j8QyCCJe8YSMW30QvGZWaCIDIk=
-k8s.io/kubectl v0.23.0/go.mod h1:TfcGEs3u4dkmoC2eku1GYymdGaMtPMcaLLFrX/RB2kI=
-k8s.io/kubelet v0.23.0/go.mod h1:A4DxfIt5Ka+rz54HAFhs1bgiFjJT6lcaAYUcACZl1/k=
-k8s.io/metrics v0.23.0/go.mod h1:NDiZTwppEtAuKJ1Rxt3S4dhyRzdp6yUcJf0vo023dPo=
+k8s.io/kube-proxy v0.23.3/go.mod h1:XdvwqJkR9r0ddUAX4ruA4V22Kws3qzKvgL3rIq584Ko=
+k8s.io/kube-scheduler v0.23.3/go.mod h1:/thFQoAMv9/olDOEYVSQbUohmkJJyIPUmpVu0UealSM=
+k8s.io/kubectl v0.23.3/go.mod h1:VBeeXNgLhSabu4/k0O7Q0YujgnA3+CLTUE0RcmF73yY=
+k8s.io/kubelet v0.23.3 h1:jYed8HoT0H2zXzf5Av+Ml8z5erN39uJfKh/yplYMgkg=
+k8s.io/kubelet v0.23.3/go.mod h1:RZxGSCsiwoWJ9z6mVla+jhiLfCFIKC16yAS38D7GQSE=
+k8s.io/kubernetes v1.23.3 h1:weuFJOkRP7+057uvhNUYbVTVCog/klquhbtKRD+UHUo=
+k8s.io/kubernetes v1.23.3/go.mod h1:C0AB/I7M4Nu6d1ELyGdC8qrrHEc6J5l8CHUashza1Io=
+k8s.io/legacy-cloud-providers v0.23.3/go.mod h1:s9vv59dUv4SU+HAm9C/YDdyw2OY9qmFYmcGEwr/ecDc=
+k8s.io/metrics v0.23.3/go.mod h1:Ut8TvkbsO4oMVeUzaTArvPrcw9QRFLs2XNzUlORjdYE=
+k8s.io/mount-utils v0.23.3/go.mod h1:OTN3LQPiOGMfx/SmVlsnySwsAmh4gYrDYLchlMHtf98=
+k8s.io/pod-security-admission v0.23.3/go.mod h1:vULEGUgsujyrKBz3RRRZnvrJJt115gu0GICArDmgzqo=
+k8s.io/sample-apiserver v0.23.3/go.mod h1:5yDZRMfFvp7/2BOXBwk0AFNsD00iyuXeEsWZSoLFeGw=
+k8s.io/system-validators v1.6.0/go.mod h1:bPldcLgkIUK22ALflnsXk8pvkTEndYdNuaHH6gRrl0Q=
k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
-k8s.io/utils v0.0.0-20210930125809-cb0fa318a74b h1:wxEMGetGMur3J1xuGLQY7GEQYg9bZxKn3tKo5k/eYcs=
k8s.io/utils v0.0.0-20210930125809-cb0fa318a74b/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
-mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed/go.mod h1:Xkxe497xwlCKkIaQYRfC7CSLworTXY9RMqwhhCm+8Nc=
-mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b/go.mod h1:2odslEg/xrtNQqCYg2/jCoyKnw3vv5biOc3JnIcYfL4=
-mvdan.cc/unparam v0.0.0-20190209190245-fbb59629db34/go.mod h1:H6SUd1XjIs+qQCyskXg5OFSrilMRUkD8ePJpHKDPaeY=
+k8s.io/utils v0.0.0-20211116205334-6203023598ed h1:ck1fRPWPJWsMd8ZRFsWc6mh/zHp5fZ/shhbrgPUxDAE=
+k8s.io/utils v0.0.0-20211116205334-6203023598ed/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
+kubevirt.io/qe-tools v0.1.6 h1:S6z9CATmgV2/z9CWetij++Rhu7l/Z4ObZqerLdNMo0Y=
+kubevirt.io/qe-tools v0.1.6/go.mod h1:PJyH/YXC4W0AmxfheDmXWMbLNsMSboVGXKpMAwfKzVE=
+modernc.org/cc v1.0.0/go.mod h1:1Sk4//wdnYJiUIxnW8ddKpaOJCF37yAdqYnkxUpaYxw=
+modernc.org/golex v1.0.0/go.mod h1:b/QX9oBD/LhixY6NDh+IdGv17hgB+51fET1i2kPSmvk=
+modernc.org/mathutil v1.0.0/go.mod h1:wU0vUrJsVWBZ4P6e7xtFJEhFSNsfRLJ8H458uRjg03k=
+modernc.org/strutil v1.0.0/go.mod h1:lstksw84oURvj9y3tn8lGvRxyRC1S2+g5uuIzNfIOBs=
+modernc.org/xc v1.0.0/go.mod h1:mRNCo0bvLjGhHO9WsyuKVU4q0ceiDDDoEeWDJHrNx8I=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
+rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4=
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
-sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.25/go.mod h1:Mlj9PNLmG9bZ6BHFwFKDo5afkpWyUISkb9Me0GnK66I=
-sigs.k8s.io/controller-runtime v0.11.0 h1:DqO+c8mywcZLFJWILq4iktoECTyn30Bkj0CwgqMpZWQ=
-sigs.k8s.io/controller-runtime v0.11.0/go.mod h1:KKwLiTooNGu+JmLZGn9Sl3Gjmfj66eMbCQznLP5zcqA=
+sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.27/go.mod h1:tq2nT0Kx7W+/f2JVE+zxYtUhdjuELJkVpNz+x/QN5R4=
+sigs.k8s.io/controller-runtime v0.11.1 h1:7YIHT2QnHJArj/dk9aUkYhfqfK5cIxPOX5gPECfdZLU=
+sigs.k8s.io/controller-runtime v0.11.1/go.mod h1:KKwLiTooNGu+JmLZGn9Sl3Gjmfj66eMbCQznLP5zcqA=
sigs.k8s.io/controller-tools v0.7.0 h1:iZIz1vEcavyEfxjcTLs1WH/MPf4vhPCtTKhoHqV8/G0=
sigs.k8s.io/controller-tools v0.7.0/go.mod h1:bpBAo0VcSDDLuWt47evLhMLPxRPxMDInTEH/YbdeMK0=
sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6 h1:fD1pz4yfdADVNfFmcP2aBEtudwUQ1AlLnRBALr33v3s=
sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6/go.mod h1:p4QtZmO4uMYipTQNzagwnNoseA6OxSUutVw05NhYDRs=
-sigs.k8s.io/kube-storage-version-migrator v0.0.3/go.mod h1:mXfSLkx9xbJHQsgNDDUZK/iQTs2tMbx/hsJlWe6Fthw=
-sigs.k8s.io/kube-storage-version-migrator v0.0.4/go.mod h1:mXfSLkx9xbJHQsgNDDUZK/iQTs2tMbx/hsJlWe6Fthw=
sigs.k8s.io/kustomize/api v0.10.1/go.mod h1:2FigT1QN6xKdcnGS2Ppp1uIWrtWN28Ms8A3OZUZhwr8=
sigs.k8s.io/kustomize/cmd/config v0.10.2/go.mod h1:K2aW7nXJ0AaT+VA/eO0/dzFLxmpFcTzudmAgDwPY1HQ=
sigs.k8s.io/kustomize/kustomize/v4 v4.4.1/go.mod h1:qOKJMMz2mBP+vcS7vK+mNz4HBLjaQSWRY22EF6Tb7Io=
sigs.k8s.io/kustomize/kyaml v0.13.0/go.mod h1:FTJxEZ86ScK184NpGSAQcfEqee0nul8oLCK30D47m4E=
sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
-sigs.k8s.io/structured-merge-diff/v4 v4.1.2/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4=
-sigs.k8s.io/structured-merge-diff/v4 v4.2.0 h1:kDvPBbnPk+qYmkHmSo8vKGp438IASWofnbbUKDE/bv0=
sigs.k8s.io/structured-merge-diff/v4 v4.2.0/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4=
-sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
+sigs.k8s.io/structured-merge-diff/v4 v4.2.1 h1:bKCqE9GvQ5tiVHn5rfn1r+yao3aLQEaLzkkmAkf+A6Y=
+sigs.k8s.io/structured-merge-diff/v4 v4.2.1/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4=
sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo=
sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8=
-sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4/go.mod h1:ketZ/q3QxT9HOBeFhu6RdvsftgpsbFHBF5Cas6cDKZ0=
-vbom.ml/util v0.0.0-20180919145318-efcd4e0f9787/go.mod h1:so/NYdZXCz+E3ZpW0uAoCj6uzU2+8OWDFv/HxUSs7kI=
diff --git a/hack/boilerplate.go.txt b/hack/boilerplate.go.txt
new file mode 100644
index 000000000..767efde98
--- /dev/null
+++ b/hack/boilerplate.go.txt
@@ -0,0 +1,15 @@
+/*
+
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
\ No newline at end of file
diff --git a/hack/build-latency-test-bin.sh b/hack/build-latency-test-bin.sh
new file mode 100755
index 000000000..25da6f2e8
--- /dev/null
+++ b/hack/build-latency-test-bin.sh
@@ -0,0 +1,10 @@
+#!/bin/bash
+
+set -eu
+
+if ! which go &>/dev/null; then
+ echo "No go command available"
+ exit 1
+fi
+
+go test -v -c -o build/_output/bin/latency-e2e.test ./functests/4_latency
diff --git a/hack/build-test-bin.sh b/hack/build-test-bin.sh
new file mode 100755
index 000000000..4b9eb8023
--- /dev/null
+++ b/hack/build-test-bin.sh
@@ -0,0 +1,18 @@
+#!/bin/bash
+
+set -e
+
+if ! which go; then
+ echo "No go command available"
+ exit 1
+fi
+
+GOPATH="${GOPATH:-$HOME/go}"
+export PATH=$PATH:$GOPATH/bin
+
+if ! which ginkgo; then
+ echo "Downloading ginkgo tool"
+ go install github.com/onsi/ginkgo/ginkgo
+fi
+
+ginkgo build ./functests/*
diff --git a/hack/clean-deploy.sh b/hack/clean-deploy.sh
new file mode 100755
index 000000000..d6b7da84a
--- /dev/null
+++ b/hack/clean-deploy.sh
@@ -0,0 +1,61 @@
+#!/bin/bash
+
+# expect oc to be in PATH by default
+OC_TOOL="${OC_TOOL:-oc}"
+
+profiles=$(${OC_TOOL} get performanceprofile -o name)
+for profileName in $profiles
+do
+ nodeSelector="$(${OC_TOOL} get $profileName -o=jsonpath='{.spec.nodeSelector}' | awk -F'[/"]' '{print $3}')"
+
+ if [[ $nodeSelector != "worker" ]]; then
+ mcps+=($(${OC_TOOL} get mcp -l machineconfiguration.openshift.io/role=$nodeSelector -o name | awk -F "/" '{print $2}'))
+ nodes=$(${OC_TOOL} get nodes --selector="node-role.kubernetes.io/${nodeSelector}" -o name)
+ for node in $nodes
+ do
+ echo "[INFO]: Unlabeling node $node"
+ ${OC_TOOL} label $node node-role.kubernetes.io/${nodeSelector}-
+ done
+ fi
+done
+
+# Give MCO some time to notice change
+sleep 10
+
+# Wait for worker MCP being updated
+success=0
+iterations=0
+sleep_time=10
+max_iterations=180 # results in 30 minute timeout
+until [[ $success -eq 1 ]] || [[ $iterations -eq $max_iterations ]]
+do
+ echo "[INFO] Checking if MCP is updated"
+ if ! ${OC_TOOL} wait mcp/worker --for condition=Updated --timeout 1s
+ then
+ iterations=$((iterations + 1))
+ iterations_left=$((max_iterations - iterations))
+ echo "[INFO] MCP not updated yet. $iterations_left retries left."
+ sleep $sleep_time
+ continue
+ fi
+
+ success=1
+
+done
+
+if [[ $success -eq 0 ]]; then
+ echo "[ERROR] MCP update failed, going on nonetheless."
+fi
+
+# Delete CRs: this will undeploy all the MCs etc. (once it is implemented)
+echo "[INFO] Deleting PerformanceProfile and giving the operator some time to undeploy everything"
+$OC_TOOL delete performanceprofile --all
+sleep 30
+
+# Delete worker-cnf MCP
+for mcp in "${mcps[@]}"
+do
+ echo "[INFO] Deleting MCP $mcp"
+ $OC_TOOL delete mcp $mcp
+done
+
diff --git a/hack/deploy.sh b/hack/deploy.sh
new file mode 100755
index 000000000..052da0465
--- /dev/null
+++ b/hack/deploy.sh
@@ -0,0 +1,53 @@
+#!/bin/bash
+
+set -e
+
+# expect oc to be in PATH by default
+OC_TOOL="${OC_TOOL:-oc}"
+
+# Deploy features
+success=0
+iterations=0
+sleep_time=10
+max_iterations=30 # results in 5 minute timeout
+feature_dir=test/e2e/pao/cluster-setup/${CLUSTER}-cluster/performance/
+
+until [[ $success -eq 1 ]] || [[ $iterations -eq $max_iterations ]]
+do
+
+ echo "[INFO] Deploying performance profile."
+ set +e
+
+ # be verbose on last iteration only
+ if [[ $iterations -eq $((max_iterations - 1)) ]] || [[ -n "${VERBOSE}" ]]; then
+ ${OC_TOOL} kustomize $feature_dir | envsubst | ${OC_TOOL} apply -f -
+ else
+ ${OC_TOOL} kustomize $feature_dir | envsubst | ${OC_TOOL} apply -f - &> /dev/null
+ fi
+
+ # shellcheck disable=SC2181
+ if [[ $? != 0 ]];then
+
+ iterations=$((iterations + 1))
+ iterations_left=$((max_iterations - iterations))
+ if [[ $iterations_left != 0 ]]; then
+ echo "[WARN] Deployment did not fully succeed yet, retrying in $sleep_time sec, $iterations_left retries left"
+ sleep $sleep_time
+ else
+ echo "[WARN] At least one deployment failed, giving up"
+ fi
+
+ else
+ # All features deployed successfully
+ success=1
+ fi
+ set -e
+
+done
+
+if [[ $success -eq 0 ]]; then
+ echo "[ERROR] Deployment failed, giving up."
+ exit 1
+fi
+
+echo "[INFO] Deployment successful."
diff --git a/hack/docs-generate.sh b/hack/docs-generate.sh
new file mode 100755
index 000000000..2ee3143ce
--- /dev/null
+++ b/hack/docs-generate.sh
@@ -0,0 +1,13 @@
+#!/bin/bash
+
+set -e
+
+export GOROOT=$(go env GOROOT)
+
+export PERF_PROFILE_TYPES=api/v2/performanceprofile_types.go
+export PERF_PROFILE_DOC=docs/performance_profile.md
+
+# using the generated CSV, create the real CSV by injecting all the right data into it
+build/_output/bin/docs-generator -- $PERF_PROFILE_TYPES > $PERF_PROFILE_DOC
+
+echo "API docs updated"
diff --git a/hack/label-worker-cnf.sh b/hack/label-worker-cnf.sh
new file mode 100755
index 000000000..4ea755c11
--- /dev/null
+++ b/hack/label-worker-cnf.sh
@@ -0,0 +1,13 @@
+#!/bin/bash
+
+set -e
+
+# expect oc to be in PATH by default
+OC_TOOL="${OC_TOOL:-oc}"
+
+# Label 1 worker node
+echo "[INFO]: Labeling 1 worker node with worker-cnf"
+node=$(${OC_TOOL} get nodes --selector='node-role.kubernetes.io/worker' \
+ --selector='!node-role.kubernetes.io/master' -o name | head -1)
+
+${OC_TOOL} label --overwrite $node node-role.kubernetes.io/worker-cnf=""
diff --git a/hack/lint.sh b/hack/lint.sh
new file mode 100755
index 000000000..d76dbe34a
--- /dev/null
+++ b/hack/lint.sh
@@ -0,0 +1,17 @@
+#!/bin/bash
+
+which golint
+if [ $? -ne 0 ]; then
+ echo "Downloading golint tool"
+ go get -u golang.org/x/lint/golint
+fi
+
+RETVAL=0
+GENERATED_FILES="zz_generated.*.go"
+for file in $(find . -path ./vendor -prune -o -type f -name '*.go' -print | grep -E -v "$GENERATED_FILES" | grep -E -v "functests"); do
+ golint -set_exit_status "$file"
+ if [[ $? -ne 0 ]]; then
+ RETVAL=1
+ fi
+done
+exit $RETVAL
diff --git a/hack/release-note.sh b/hack/release-note.sh
new file mode 100755
index 000000000..c59c69a17
--- /dev/null
+++ b/hack/release-note.sh
@@ -0,0 +1,40 @@
+#!/bin/bash
+
+# This is generating a release note, which will be used for github releases
+
+RELEASE_NOTE_FILE="build/_output/release-note.md"
+
+# Current tag
+RELREF=${RELREF:-$(git describe --abbrev=0 --tags)}
+
+# Previous tag
+PREREF=${PREREF:-$(git describe --abbrev=0 --tags $RELREF^)}
+
+RELSPANREF=$PREREF..$RELREF
+
+GHRELURL="https://github.com/openshift/cluster-node-tuning-operator/releases/tag/"
+RELURL="$GHRELURL$RELREF"
+
+CHANGES_COUNT=$(git log --oneline $RELSPANREF | wc -l)
+CHANGES_BY_COUNT=$(git shortlog -sne $RELSPANREF | wc -l)
+STATS=$(git diff --shortstat $RELSPANREF)
+
+cat <<EOF > "${RELEASE_NOTE_FILE}"
+## Performance Addon Operator
+
+This is release "${RELREF}" of the performance addon operator, which follows "${PREREF}".
+This release consists of ${CHANGES_COUNT} changes by ${CHANGES_BY_COUNT} contributors:
+${STATS}
+
+The primary release artifact of the performance addon operator is the git tree.
+The source code and selected build artifacts are available for download at:
+${RELURL}
+
+Pre-built containers are published on quay.io and can be viewed at:
+https://quay.io/organization/openshift-kni
+
+### Notable changes
+
+*TODO*
+
+EOF
\ No newline at end of file
diff --git a/hack/run-functests.sh b/hack/run-functests.sh
new file mode 100755
index 000000000..dd6c36766
--- /dev/null
+++ b/hack/run-functests.sh
@@ -0,0 +1,32 @@
+#!/bin/bash
+
+GINKGO_SUITS=${GINKGO_SUITS:-"test/e2e/pao/functests"}
+LATENCY_TEST_RUN=${LATENCY_TEST_RUN:-"false"}
+
+which ginkgo
+if [ $? -ne 0 ]; then
+ echo "Downloading ginkgo tool"
+ # drop -mod=vendor flags, otherwise the installation will fail
+    # because the package cannot be installed under the vendor directory
+ GOFLAGS='' go install github.com/onsi/ginkgo/ginkgo@v1.16.5
+fi
+
+NO_COLOR=""
+if ! which tput &> /dev/null 2>&1 || [[ $(tput -T$TERM colors) -lt 8 ]]; then
+ echo "Terminal does not seem to support colored output, disabling it"
+ NO_COLOR="-noColor"
+fi
+
+# run the latency tests under the OpenShift CI, just to verify that the image works
+if [ -n "${IMAGE_FORMAT}" ]; then
+ LATENCY_TEST_RUN="true"
+fi
+
+
+echo "Running Functional Tests: ${GINKGO_SUITS}"
+# -v: print out the text and location for each spec before running it and flush output to stdout in realtime
+# -r: run suites recursively
+# --failFast: ginkgo will stop the suite right after the first spec failure
+# --flakeAttempts: rerun the test if it fails
+# -requireSuite: fail if tests are not executed because of missing suite
+GOFLAGS=-mod=vendor ginkgo $NO_COLOR --v -r --failFast --flakeAttempts=2 -requireSuite ${GINKGO_SUITS} -- -junitDir /tmp/artifacts
diff --git a/hack/run-latency-testing.sh b/hack/run-latency-testing.sh
new file mode 100755
index 000000000..4cc95403f
--- /dev/null
+++ b/hack/run-latency-testing.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+
+GINKGO_SUITS=${GINKGO_SUITS:-functests/5_latency_testing}
+
+which ginkgo
+if [ $? -ne 0 ]; then
+ echo "Downloading ginkgo tool"
+ go install github.com/onsi/ginkgo/ginkgo
+fi
+
+NO_COLOR=""
+if ! which tput &> /dev/null 2>&1 || [[ $(tput -T$TERM colors) -lt 8 ]]; then
+ echo "Terminal does not seem to support colored output, disabling it"
+ NO_COLOR="-noColor"
+fi
+
+
+echo "Running Functional Tests: ${GINKGO_SUITS}"
+# -v: print out the text and location for each spec before running it and flush output to stdout in realtime
+# -r: run suites recursively
+# --failFast: ginkgo will stop the suite right after the first spec failure
+# --flakeAttempts: rerun the test if it fails
+# -requireSuite: fail if tests are not executed because of missing suite
+GOFLAGS=-mod=vendor ginkgo $NO_COLOR --v -r -requireSuite ${GINKGO_SUITS} -- -junitDir /tmp/artifacts
+
diff --git a/hack/run-perf-profile-creator-functests.sh b/hack/run-perf-profile-creator-functests.sh
new file mode 100755
index 000000000..cc4a67130
--- /dev/null
+++ b/hack/run-perf-profile-creator-functests.sh
@@ -0,0 +1,26 @@
+#!/bin/bash
+
+GINKGO_SUITS=${GINKGO_SUITS:-"test/e2e/pao/functests-performance-profile-creator"}
+
+which ginkgo
+if [ $? -ne 0 ]; then
+ echo "Downloading ginkgo tool"
+ # drop -mod=vendor flags, otherwise the installation will fail
+    # because the package cannot be installed under the vendor directory
+ GOFLAGS='' go install github.com/onsi/ginkgo/ginkgo@v1.16.5
+fi
+
+NO_COLOR=""
+if ! which tput &> /dev/null 2>&1 || [[ $(tput -T$TERM colors) -lt 8 ]]; then
+ echo "Terminal does not seem to support colored output, disabling it"
+ NO_COLOR="-noColor"
+fi
+
+
+echo "Running Functional Tests: ${GINKGO_SUITS}"
+# -v: print out the text and location for each spec before running it and flush output to stdout in realtime
+# -r: run suites recursively
+# --failFast: ginkgo will stop the suite right after the first spec failure
+# --flakeAttempts: rerun the test if it fails
+# -requireSuite: fail if tests are not executed because of missing suite
+GOFLAGS=-mod=vendor ginkgo $NO_COLOR --v -r --failFast --flakeAttempts=2 -requireSuite ${GINKGO_SUITS} -- -junitDir /tmp/artifacts
diff --git a/hack/run-perf-profile-creator.sh b/hack/run-perf-profile-creator.sh
new file mode 100755
index 000000000..b50e13f37
--- /dev/null
+++ b/hack/run-perf-profile-creator.sh
@@ -0,0 +1,81 @@
+#!/bin/bash
+
+readonly CONTAINER_RUNTIME=${CONTAINER_RUNTIME:-podman}
+readonly CURRENT_SCRIPT=$(basename "$0")
+readonly CMD="${CONTAINER_RUNTIME} run --entrypoint performance-profile-creator"
+readonly IMG_EXISTS_CMD="${CONTAINER_RUNTIME} image exists"
+readonly IMG_PULL_CMD="${CONTAINER_RUNTIME} image pull"
+readonly MUST_GATHER_VOL="/must-gather"
+
+PAO_IMG="quay.io/openshift-kni/performance-addon-operator:4.11-snapshot"
+MG_TARBALL=""
+DATA_DIR=""
+
+usage() {
+ print "Wrapper usage:"
+ print " ${CURRENT_SCRIPT} [-h] [-p image][-t path] -- [performance-profile-creator flags]"
+ print ""
+ print "Options:"
+ print " -h help for ${CURRENT_SCRIPT}"
+ print " -p Performance Addon Operator image"
+ print " -t path to a must-gather tarball"
+
+ ${IMG_EXISTS_CMD} "${PAO_IMG}" && ${CMD} "${PAO_IMG}" -h
+}
+
+function cleanup {
+ [ -d "${DATA_DIR}" ] && rm -rf "${DATA_DIR}"
+}
+trap cleanup EXIT
+
+exit_error() {
+ print "error: $*"
+ usage
+ exit 1
+}
+
+print() {
+ echo "$*" >&2
+}
+
+check_requirements() {
+ ${IMG_EXISTS_CMD} "${PAO_IMG}" || ${IMG_PULL_CMD} "${PAO_IMG}" || \
+ exit_error "Performance Addon Operator image not found"
+
+ [ -n "${MG_TARBALL}" ] || exit_error "Must-gather tarball file path is mandatory"
+ [ -f "${MG_TARBALL}" ] || exit_error "Must-gather tarball file not found"
+
+ DATA_DIR=$(mktemp -d -t "${CURRENT_SCRIPT}XXXX") || exit_error "Cannot create the data directory"
+ tar -zxf "${MG_TARBALL}" --directory "${DATA_DIR}" || exit_error "Cannot decompress the must-gather tarball"
+ chmod a+rx "${DATA_DIR}"
+
+ return 0
+}
+
+main() {
+ while getopts ':hp:t:' OPT; do
+ case "${OPT}" in
+ h)
+ usage
+ exit 0
+ ;;
+ p)
+ PAO_IMG="${OPTARG}"
+ ;;
+ t)
+ MG_TARBALL="${OPTARG}"
+ ;;
+ ?)
+ exit_error "invalid argument: ${OPTARG}"
+ ;;
+ esac
+ done
+ shift $((OPTIND - 1))
+
+ check_requirements || exit 1
+
+ ${CMD} -v "${DATA_DIR}:${MUST_GATHER_VOL}:z" "${PAO_IMG}" "$@" --must-gather-dir-path "${MUST_GATHER_VOL}"
+ echo "" 1>&2
+}
+
+main "$@"
diff --git a/hack/run-render-command-functests.sh b/hack/run-render-command-functests.sh
new file mode 100755
index 000000000..ca1aa93cf
--- /dev/null
+++ b/hack/run-render-command-functests.sh
@@ -0,0 +1,26 @@
+#!/bin/bash
+
+GINKGO_SUITS=${GINKGO_SUITS:-"test/e2e/pao/functests-render-command"}
+
+which ginkgo
+if [ $? -ne 0 ]; then
+ echo "Downloading ginkgo tool"
+ # drop -mod=vendor flags, otherwise the installation will fail
+    # because the package cannot be installed under the vendor directory
+ GOFLAGS='' go install github.com/onsi/ginkgo/ginkgo@v1.16.5
+fi
+
+NO_COLOR=""
+if ! which tput &> /dev/null 2>&1 || [[ $(tput -T$TERM colors) -lt 8 ]]; then
+ echo "Terminal does not seem to support colored output, disabling it"
+ NO_COLOR="-noColor"
+fi
+
+
+echo "Running Functional Tests: ${GINKGO_SUITS}"
+# -v: print out the text and location for each spec before running it and flush output to stdout in realtime
+# -r: run suites recursively
+# --failFast: ginkgo will stop the suite right after the first spec failure
+# --flakeAttempts: rerun the test if it fails
+# -requireSuite: fail if tests are not executed because of missing suite
+GOFLAGS=-mod=vendor ginkgo $NO_COLOR --v -r --failFast --flakeAttempts=2 -requireSuite ${GINKGO_SUITS} -- -junitDir /tmp/artifacts
diff --git a/hack/show-cluster-version.sh b/hack/show-cluster-version.sh
new file mode 100755
index 000000000..c5718b7e3
--- /dev/null
+++ b/hack/show-cluster-version.sh
@@ -0,0 +1,9 @@
+#!/bin/bash
+
+# expect oc to be in PATH by default
+OC_TOOL="${OC_TOOL:-oc}"
+
+echo "Cluster version"
+${OC_TOOL} version || :
+${OC_TOOL} get nodes -o custom-columns=VERSION:.status.nodeInfo.kubeletVersion || :
+${OC_TOOL} get clusterversion || :
diff --git a/hack/unittests.sh b/hack/unittests.sh
new file mode 100755
index 000000000..84442c296
--- /dev/null
+++ b/hack/unittests.sh
@@ -0,0 +1,31 @@
+#!/usr/bin/env bash
+
+set -e
+
+OUTDIR="build/_output/coverage"
+mkdir -p "$OUTDIR"
+
+COVER_FILE="${OUTDIR}/cover.out"
+FUNC_FILE="${OUTDIR}/coverage.txt"
+HTML_FILE="${OUTDIR}/coverage.html"
+
+echo "running unittests with coverage"
+GOFLAGS=-mod=vendor go test -race -covermode=atomic -coverprofile="${COVER_FILE}" -v ./pkg/... ./controllers/... ./api/...
+
+if [[ -n "${DRONE}" ]]; then
+
+ # Uploading coverage report to coveralls.io
+ go get github.com/mattn/goveralls
+
+ # we should update the vendor/modules.txt once we got a new package
+ go mod vendor
+ $(go env GOPATH)/bin/goveralls -coverprofile="$COVER_FILE" -service=drone.io
+
+else
+
+ echo "creating coverage reports"
+ go tool cover -func="${COVER_FILE}" > "${FUNC_FILE}"
+ go tool cover -html="${COVER_FILE}" -o "${HTML_FILE}"
+ echo "find coverage reports at ${OUTDIR}"
+
+fi
diff --git a/hack/verify-generated.sh b/hack/verify-generated.sh
new file mode 100755
index 000000000..434e4e192
--- /dev/null
+++ b/hack/verify-generated.sh
@@ -0,0 +1,7 @@
+#!/bin/bash
+
+if [[ -n "$(git status --porcelain .)" ]]; then
+ echo "uncommitted generated files. run 'make generate' and commit results."
+ echo "$(git status --porcelain .)"
+ exit 1
+fi
diff --git a/hack/wait-for-mcp.sh b/hack/wait-for-mcp.sh
new file mode 100755
index 000000000..2013ef274
--- /dev/null
+++ b/hack/wait-for-mcp.sh
@@ -0,0 +1,65 @@
+#!/bin/bash
+
+set -e
+
+# expect oc to be in PATH by default
+OC_TOOL="${OC_TOOL:-oc}"
+
+success=0
+iterations=0
+sleep_time=10
+max_iterations=180 # results in 30 minute timeout
+
+# Let's give the operator some time to do its work before we unpause the MCP (see below)
+echo "[INFO] Waiting a bit for letting the operator do its work"
+sleep 30
+
+until [[ $success -eq 1 ]] || [[ $iterations -eq $max_iterations ]]
+do
+
+ echo "[INFO] Unpausing MCPs"
+ set +e
+ mcps=$(${OC_TOOL} get mcp --no-headers -o custom-columns=":metadata.name")
+ for mcp in $mcps
+ do
+ ${OC_TOOL} patch mcp "${mcp}" -p '{"spec":{"paused":false}}' --type=merge &> /dev/null
+ done
+ set -e
+
+ echo "[INFO] Checking if MCP picked up the performance MC"
+ # MC with new generated name
+ mc_new="$(${OC_TOOL} get mcp worker-cnf -o jsonpath='{.spec.configuration.source[?(@.name=="50-performance-'$CLUSTER'")].name}')"
+ # MC with old generated name
+ mc_old="$(${OC_TOOL} get mcp worker-cnf -o jsonpath='{.spec.configuration.source[?(@.name=="performance-'$CLUSTER'")].name}')"
+ # No output means that the new machine config wasn't picked by MCO yet
+ if [ -z "${mc_new}" ] && [ -z "${mc_old}" ]
+ then
+ iterations=$((iterations + 1))
+ iterations_left=$((max_iterations - iterations))
+ echo "[INFO] Performance MC not picked up yet. $iterations_left retries left."
+ sleep $sleep_time
+ continue
+ fi
+
+ echo "[INFO] Checking if MCP is updated"
+ if ! ${OC_TOOL} wait mcp/worker-cnf --for condition=Updated --timeout 1s &> /dev/null
+ then
+ iterations=$((iterations + 1))
+ iterations_left=$((max_iterations - iterations))
+ if [[ $iterations_left != 0 ]]; then
+ echo "[WARN] MCP not updated yet, retrying in $sleep_time sec, $iterations_left retries left"
+ sleep $sleep_time
+ fi
+ else
+ success=1
+ fi
+
+
+done
+
+if [[ $success -eq 0 ]]; then
+ echo "[ERROR] MCP update failed, giving up."
+ exit 1
+fi
+
+echo "[INFO] MCP update successful."
diff --git a/manifests/20-performance-profile.crd.yaml b/manifests/20-performance-profile.crd.yaml
new file mode 100644
index 000000000..70afc2677
--- /dev/null
+++ b/manifests/20-performance-profile.crd.yaml
@@ -0,0 +1,490 @@
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ include.release.openshift.io/ibm-cloud-managed: "true"
+ include.release.openshift.io/self-managed-high-availability: "true"
+ include.release.openshift.io/single-node-developer: "true"
+ service.beta.openshift.io/inject-cabundle: "true"
+ name: performanceprofiles.performance.openshift.io
+spec:
+ conversion:
+ strategy: Webhook
+ webhook:
+ clientConfig:
+ service:
+ name: performance-addon-operator-service
+ namespace: openshift-cluster-node-tuning-operator
+ path: /convert
+ port: 443
+ conversionReviewVersions:
+ - v1
+ - v1alpha1
+ group: performance.openshift.io
+ names:
+ kind: PerformanceProfile
+ listKind: PerformanceProfileList
+ plural: performanceprofiles
+ singular: performanceprofile
+ scope: Cluster
+ versions:
+ - deprecated: true
+ deprecationWarning: v1 is deprecated and should be removed in next three releases, use v2 instead
+ name: v1
+ schema:
+ openAPIV3Schema:
+ description: PerformanceProfile is the Schema for the performanceprofiles API
+ type: object
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: PerformanceProfileSpec defines the desired state of PerformanceProfile.
+ type: object
+ required:
+ - cpu
+ - nodeSelector
+ properties:
+ additionalKernelArgs:
+              description: Additional kernel arguments.
+ type: array
+ items:
+ type: string
+ cpu:
+ description: CPU defines a set of CPU related parameters.
+ type: object
+ required:
+ - isolated
+ properties:
+ balanceIsolated:
+ description: BalanceIsolated toggles whether or not the Isolated CPU set is eligible for load balancing work loads. When this option is set to "false", the Isolated CPU set will be static, meaning workloads have to explicitly assign each thread to a specific cpu in order to work across multiple CPUs. Setting this to "true" allows workloads to be balanced across CPUs. Setting this to "false" offers the most predictable performance for guaranteed workloads, but it offloads the complexity of cpu load balancing to the application. Defaults to "true"
+ type: boolean
+ isolated:
+ description: 'Isolated defines a set of CPUs that will be used to give to application threads the most execution time possible, which means removing as many extraneous tasks off a CPU as possible. It is important to notice the CPU manager can choose any CPU to run the workload except the reserved CPUs. In order to guarantee that your workload will run on the isolated CPU: 1. The union of reserved CPUs and isolated CPUs should include all online CPUs 2. The isolated CPUs field should be the complementary to reserved CPUs field'
+ type: string
+ reserved:
+ description: Reserved defines a set of CPUs that will not be used for any container workloads initiated by kubelet.
+ type: string
+ globallyDisableIrqLoadBalancing:
+ description: GloballyDisableIrqLoadBalancing toggles whether IRQ load balancing will be disabled for the Isolated CPU set. When the option is set to "true" it disables IRQs load balancing for the Isolated CPU set. Setting the option to "false" allows the IRQs to be balanced across all CPUs, however the IRQs load balancing can be disabled per pod CPUs when using irq-load-balancing.crio.io/cpu-quota.crio.io annotations. Defaults to "false"
+ type: boolean
+ hugepages:
+ description: HugePages defines a set of huge pages related parameters. It is possible to set huge pages with multiple size values at the same time. For example, hugepages can be set with 1G and 2M, both values will be set on the node by the performance-addon-operator. It is important to notice that setting hugepages default size to 1G will remove all 2M related folders from the node and it will be impossible to configure 2M hugepages under the node.
+ type: object
+ properties:
+ defaultHugepagesSize:
+ description: DefaultHugePagesSize defines huge pages default size under kernel boot parameters.
+ type: string
+ pages:
+ description: Pages defines huge pages that we want to allocate at boot time.
+ type: array
+ items:
+ description: HugePage defines the number of allocated huge pages of the specific size.
+ type: object
+ properties:
+ count:
+ description: Count defines amount of huge pages, maps to the 'hugepages' kernel boot parameter.
+ type: integer
+ format: int32
+ node:
+ description: Node defines the NUMA node where hugepages will be allocated, if not specified, pages will be allocated equally between NUMA nodes
+ type: integer
+ format: int32
+ size:
+ description: Size defines huge page size, maps to the 'hugepagesz' kernel boot parameter.
+ type: string
+ machineConfigLabel:
+ description: MachineConfigLabel defines the label to add to the MachineConfigs the operator creates. It has to be used in the MachineConfigSelector of the MachineConfigPool which targets this performance profile. Defaults to "machineconfiguration.openshift.io/role="
+ type: object
+ additionalProperties:
+ type: string
+ machineConfigPoolSelector:
+ description: MachineConfigPoolSelector defines the MachineConfigPool label to use in the MachineConfigPoolSelector of resources like KubeletConfigs created by the operator. Defaults to "machineconfiguration.openshift.io/role="
+ type: object
+ additionalProperties:
+ type: string
+ net:
+ description: Net defines a set of network related features
+ type: object
+ properties:
+ devices:
+ description: Devices contains a list of network device representations that will be set with a netqueue count equal to CPU.Reserved . If no devices are specified then the default is all devices.
+ type: array
+ items:
+ description: 'Device defines a way to represent a network device in several options: device name, vendor ID, model ID, PCI path and MAC address'
+ type: object
+ properties:
+ deviceID:
+                        description: Network device ID (model) represented as a 16 bit hexadecimal number.
+ type: string
+ interfaceName:
+ description: Network device name to be matched. It uses a syntax of shell-style wildcards which are either positive or negative.
+ type: string
+ vendorID:
+                        description: Network device vendor ID represented as a 16 bit hexadecimal number.
+ type: string
+ userLevelNetworking:
+ description: UserLevelNetworking when enabled - sets either all or specified network devices queue size to the amount of reserved CPUs. Defaults to "false".
+ type: boolean
+ nodeSelector:
+ description: 'NodeSelector defines the Node label to use in the NodeSelectors of resources like Tuned created by the operator. It most likely should, but does not have to match the node label in the NodeSelector of the MachineConfigPool which targets this performance profile. In the case when machineConfigLabels or machineConfigPoolSelector are not set, we are expecting a certain NodeSelector format /: "" in order to be able to calculate the default values for the former mentioned fields.'
+ type: object
+ additionalProperties:
+ type: string
+ numa:
+ description: NUMA defines options related to topology aware affinities
+ type: object
+ properties:
+ topologyPolicy:
+ description: Name of the policy applied when TopologyManager is enabled Operator defaults to "best-effort"
+ type: string
+ realTimeKernel:
+ description: RealTimeKernel defines a set of real time kernel related parameters. RT kernel won't be installed when not set.
+ type: object
+ properties:
+ enabled:
+ description: Enabled defines if the real time kernel packages should be installed. Defaults to "false"
+ type: boolean
+ status:
+ description: PerformanceProfileStatus defines the observed state of PerformanceProfile.
+ type: object
+ properties:
+ conditions:
+ description: Conditions represents the latest available observations of current state.
+ type: array
+ items:
+ description: Condition represents the state of the operator's reconciliation functionality.
+ type: object
+ required:
+ - status
+ - type
+ properties:
+ lastHeartbeatTime:
+ type: string
+ format: date-time
+ lastTransitionTime:
+ type: string
+ format: date-time
+ message:
+ type: string
+ reason:
+ type: string
+ status:
+ type: string
+ type:
+ description: ConditionType is the state of the operator's reconciliation functionality.
+ type: string
+ runtimeClass:
+ description: RuntimeClass contains the name of the RuntimeClass resource created by the operator.
+ type: string
+ tuned:
+ description: Tuned points to the Tuned custom resource object that contains the tuning values generated by this operator.
+ type: string
+ served: true
+ storage: false
+ subresources:
+ status: {}
+ - deprecated: true
+ deprecationWarning: v1alpha1 is deprecated and should be removed in the next release, use v2 instead
+ name: v1alpha1
+ schema:
+ openAPIV3Schema:
+ description: PerformanceProfile is the Schema for the performanceprofiles API
+ type: object
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: PerformanceProfileSpec defines the desired state of PerformanceProfile.
+ type: object
+ properties:
+ additionalKernelArgs:
+              description: Additional kernel arguments.
+ type: array
+ items:
+ type: string
+ cpu:
+ description: CPU defines a set of CPU related parameters.
+ type: object
+ properties:
+ balanceIsolated:
+ description: BalanceIsolated toggles whether or not the Isolated CPU set is eligible for load balancing work loads. When this option is set to "false", the Isolated CPU set will be static, meaning workloads have to explicitly assign each thread to a specific cpu in order to work across multiple CPUs. Setting this to "true" allows workloads to be balanced across CPUs. Setting this to "false" offers the most predictable performance for guaranteed workloads, but it offloads the complexity of cpu load balancing to the application. Defaults to "true"
+ type: boolean
+ isolated:
+ description: 'Isolated defines a set of CPUs that will be used to give to application threads the most execution time possible, which means removing as many extraneous tasks off a CPU as possible. It is important to notice the CPU manager can choose any CPU to run the workload except the reserved CPUs. In order to guarantee that your workload will run on the isolated CPU: 1. The union of reserved CPUs and isolated CPUs should include all online CPUs 2. The isolated CPUs field should be the complementary to reserved CPUs field'
+ type: string
+ reserved:
+ description: Reserved defines a set of CPUs that will not be used for any container workloads initiated by kubelet.
+ type: string
+ hugepages:
+ description: HugePages defines a set of huge pages related parameters. It is possible to set huge pages with multiple size values at the same time. For example, hugepages can be set with 1G and 2M, both values will be set on the node by the performance-addon-operator. It is important to notice that setting hugepages default size to 1G will remove all 2M related folders from the node and it will be impossible to configure 2M hugepages under the node.
+ type: object
+ properties:
+ defaultHugepagesSize:
+ description: DefaultHugePagesSize defines huge pages default size under kernel boot parameters.
+ type: string
+ pages:
+ description: Pages defines huge pages that we want to allocate at boot time.
+ type: array
+ items:
+ description: HugePage defines the number of allocated huge pages of the specific size.
+ type: object
+ properties:
+ count:
+ description: Count defines amount of huge pages, maps to the 'hugepages' kernel boot parameter.
+ type: integer
+ format: int32
+ node:
+ description: Node defines the NUMA node where hugepages will be allocated, if not specified, pages will be allocated equally between NUMA nodes
+ type: integer
+ format: int32
+ size:
+ description: Size defines huge page size, maps to the 'hugepagesz' kernel boot parameter.
+ type: string
+ machineConfigLabel:
+ description: MachineConfigLabel defines the label to add to the MachineConfigs the operator creates. It has to be used in the MachineConfigSelector of the MachineConfigPool which targets this performance profile. Defaults to "machineconfiguration.openshift.io/role="
+ type: object
+ additionalProperties:
+ type: string
+ machineConfigPoolSelector:
+ description: MachineConfigPoolSelector defines the MachineConfigPool label to use in the MachineConfigPoolSelector of resources like KubeletConfigs created by the operator. Defaults to "machineconfiguration.openshift.io/role="
+ type: object
+ additionalProperties:
+ type: string
+ nodeSelector:
+ description: NodeSelector defines the Node label to use in the NodeSelectors of resources like Tuned created by the operator. It most likely should, but does not have to match the node label in the NodeSelector of the MachineConfigPool which targets this performance profile.
+ type: object
+ additionalProperties:
+ type: string
+ numa:
+ description: NUMA defines options related to topology aware affinities
+ type: object
+ properties:
+ topologyPolicy:
+ description: Name of the policy applied when TopologyManager is enabled Operator defaults to "best-effort"
+ type: string
+ realTimeKernel:
+ description: RealTimeKernel defines a set of real time kernel related parameters. RT kernel won't be installed when not set.
+ type: object
+ properties:
+ enabled:
+ description: Enabled defines if the real time kernel packages should be installed. Defaults to "false"
+ type: boolean
+ status:
+ description: PerformanceProfileStatus defines the observed state of PerformanceProfile.
+ type: object
+ properties:
+ conditions:
+ description: Conditions represents the latest available observations of current state.
+ type: array
+ items:
+ description: Condition represents the state of the operator's reconciliation functionality.
+ type: object
+ required:
+ - status
+ - type
+ properties:
+ lastHeartbeatTime:
+ type: string
+ format: date-time
+ lastTransitionTime:
+ type: string
+ format: date-time
+ message:
+ type: string
+ reason:
+ type: string
+ status:
+ type: string
+ type:
+ description: ConditionType is the state of the operator's reconciliation functionality.
+ type: string
+ runtimeClass:
+ description: RuntimeClass contains the name of the RuntimeClass resource created by the operator.
+ type: string
+ tuned:
+ description: Tuned points to the Tuned custom resource object that contains the tuning values generated by this operator.
+ type: string
+ served: true
+ storage: false
+ subresources:
+ status: {}
+ - name: v2
+ schema:
+ openAPIV3Schema:
+ description: PerformanceProfile is the Schema for the performanceprofiles API
+ type: object
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: PerformanceProfileSpec defines the desired state of PerformanceProfile.
+ type: object
+ required:
+ - cpu
+ - nodeSelector
+ properties:
+ additionalKernelArgs:
+              description: Additional kernel arguments.
+ type: array
+ items:
+ type: string
+ cpu:
+ description: CPU defines a set of CPU related parameters.
+ type: object
+ required:
+ - isolated
+ - reserved
+ properties:
+ balanceIsolated:
+ description: BalanceIsolated toggles whether or not the Isolated CPU set is eligible for load balancing work loads. When this option is set to "false", the Isolated CPU set will be static, meaning workloads have to explicitly assign each thread to a specific cpu in order to work across multiple CPUs. Setting this to "true" allows workloads to be balanced across CPUs. Setting this to "false" offers the most predictable performance for guaranteed workloads, but it offloads the complexity of cpu load balancing to the application. Defaults to "true"
+ type: boolean
+ isolated:
+ description: 'Isolated defines a set of CPUs that will be used to give to application threads the most execution time possible, which means removing as many extraneous tasks off a CPU as possible. It is important to notice the CPU manager can choose any CPU to run the workload except the reserved CPUs. In order to guarantee that your workload will run on the isolated CPU: 1. The union of reserved CPUs and isolated CPUs should include all online CPUs 2. The isolated CPUs field should be the complementary to reserved CPUs field'
+ type: string
+ reserved:
+ description: Reserved defines a set of CPUs that will not be used for any container workloads initiated by kubelet.
+ type: string
+ globallyDisableIrqLoadBalancing:
+ description: GloballyDisableIrqLoadBalancing toggles whether IRQ load balancing will be disabled for the Isolated CPU set. When the option is set to "true" it disables IRQs load balancing for the Isolated CPU set. Setting the option to "false" allows the IRQs to be balanced across all CPUs, however the IRQs load balancing can be disabled per pod CPUs when using irq-load-balancing.crio.io/cpu-quota.crio.io annotations. Defaults to "false"
+ type: boolean
+ hugepages:
+ description: HugePages defines a set of huge pages related parameters. It is possible to set huge pages with multiple size values at the same time. For example, hugepages can be set with 1G and 2M, both values will be set on the node by the performance-addon-operator. It is important to notice that setting hugepages default size to 1G will remove all 2M related folders from the node and it will be impossible to configure 2M hugepages under the node.
+ type: object
+ properties:
+ defaultHugepagesSize:
+ description: DefaultHugePagesSize defines huge pages default size under kernel boot parameters.
+ type: string
+ pages:
+ description: Pages defines huge pages that we want to allocate at boot time.
+ type: array
+ items:
+ description: HugePage defines the number of allocated huge pages of the specific size.
+ type: object
+ properties:
+ count:
+ description: Count defines amount of huge pages, maps to the 'hugepages' kernel boot parameter.
+ type: integer
+ format: int32
+ node:
+ description: Node defines the NUMA node where hugepages will be allocated, if not specified, pages will be allocated equally between NUMA nodes
+ type: integer
+ format: int32
+ size:
+ description: Size defines huge page size, maps to the 'hugepagesz' kernel boot parameter.
+ type: string
+ machineConfigLabel:
+ description: MachineConfigLabel defines the label to add to the MachineConfigs the operator creates. It has to be used in the MachineConfigSelector of the MachineConfigPool which targets this performance profile. Defaults to "machineconfiguration.openshift.io/role="
+ type: object
+ additionalProperties:
+ type: string
+ machineConfigPoolSelector:
+ description: MachineConfigPoolSelector defines the MachineConfigPool label to use in the MachineConfigPoolSelector of resources like KubeletConfigs created by the operator. Defaults to "machineconfiguration.openshift.io/role="
+ type: object
+ additionalProperties:
+ type: string
+ net:
+ description: Net defines a set of network related features
+ type: object
+ properties:
+ devices:
+ description: Devices contains a list of network device representations that will be set with a netqueue count equal to CPU.Reserved . If no devices are specified then the default is all devices.
+ type: array
+ items:
+ description: 'Device defines a way to represent a network device in several options: device name, vendor ID, model ID, PCI path and MAC address'
+ type: object
+ properties:
+ deviceID:
+                        description: Network device ID (model) represented as a 16 bit hexadecimal number.
+ type: string
+ interfaceName:
+ description: Network device name to be matched. It uses a syntax of shell-style wildcards which are either positive or negative.
+ type: string
+ vendorID:
+                        description: Network device vendor ID represented as a 16 bit hexadecimal number.
+ type: string
+ userLevelNetworking:
+ description: UserLevelNetworking when enabled - sets either all or specified network devices queue size to the amount of reserved CPUs. Defaults to "false".
+ type: boolean
+ nodeSelector:
+ description: 'NodeSelector defines the Node label to use in the NodeSelectors of resources like Tuned created by the operator. It most likely should, but does not have to match the node label in the NodeSelector of the MachineConfigPool which targets this performance profile. In the case when machineConfigLabels or machineConfigPoolSelector are not set, we are expecting a certain NodeSelector format /: "" in order to be able to calculate the default values for the former mentioned fields.'
+ type: object
+ additionalProperties:
+ type: string
+ numa:
+ description: NUMA defines options related to topology aware affinities
+ type: object
+ properties:
+ topologyPolicy:
+ description: Name of the policy applied when TopologyManager is enabled Operator defaults to "best-effort"
+ type: string
+ realTimeKernel:
+ description: RealTimeKernel defines a set of real time kernel related parameters. RT kernel won't be installed when not set.
+ type: object
+ properties:
+ enabled:
+ description: Enabled defines if the real time kernel packages should be installed. Defaults to "false"
+ type: boolean
+ status:
+ description: PerformanceProfileStatus defines the observed state of PerformanceProfile.
+ type: object
+ properties:
+ conditions:
+ description: Conditions represents the latest available observations of current state.
+ type: array
+ items:
+ description: Condition represents the state of the operator's reconciliation functionality.
+ type: object
+ required:
+ - status
+ - type
+ properties:
+ lastHeartbeatTime:
+ type: string
+ format: date-time
+ lastTransitionTime:
+ type: string
+ format: date-time
+ message:
+ type: string
+ reason:
+ type: string
+ status:
+ type: string
+ type:
+ description: ConditionType is the state of the operator's reconciliation functionality.
+ type: string
+ runtimeClass:
+ description: RuntimeClass contains the name of the RuntimeClass resource created by the operator.
+ type: string
+ tuned:
+ description: Tuned points to the Tuned custom resource object that contains the tuning values generated by this operator.
+ type: string
+ served: true
+ storage: true
+ subresources:
+ status: {}
+status:
+ acceptedNames:
+ kind: ""
+ plural: ""
+ conditions: []
+ storedVersions: []
diff --git a/manifests/40-rbac.yaml b/manifests/40-rbac.yaml
index 419a06a49..a31bee605 100644
--- a/manifests/40-rbac.yaml
+++ b/manifests/40-rbac.yaml
@@ -38,7 +38,7 @@ rules:
# "" indicates the core API group
- apiGroups: [""]
resources: ["configmaps","events"]
- verbs: ["create","get","delete","list","update","watch"]
+ verbs: ["create","get","delete","list","update","watch","patch"]
# The pod-matching functionality will likely be deprecated in the
# future. When it is, remove "pods" below.
- apiGroups: [""]
@@ -59,7 +59,7 @@ rules:
verbs: ["update"]
# Needed by the core operator functionality.
- apiGroups: ["machineconfiguration.openshift.io"]
- resources: ["machineconfigs"]
+ resources: ["kubeletconfigs", "machineconfigs"]
verbs: ["create","get","delete","list","update","watch"]
# Needed by the core operator functionality.
- apiGroups: ["machineconfiguration.openshift.io"]
@@ -69,6 +69,18 @@ rules:
- apiGroups: ["coordination.k8s.io"]
resources: ["leases"]
verbs: ["create","get","update","patch"]
+# Needed by the performance-addon-controller.
+# The PAO creates a runtime class for each profile that can be used by pods to
+# extend CRI-O functionality.
+- apiGroups: ["node.k8s.io"]
+ resources: ["runtimeclasses"]
+ verbs: ["create","get","delete","list","update","watch"]
+- apiGroups: ["performance.openshift.io"]
+ resources: ["*"]
+ verbs: ["*"]
+- apiGroups: ["operators.coreos.com"]
+ resources: ["clusterserviceversions","operatorgroups","subscriptions"]
+ verbs: ["get","delete","list","update","watch"]
---
# Bind the operator cluster role to its Service Account.
diff --git a/manifests/45-webhook-configuration.yaml b/manifests/45-webhook-configuration.yaml
new file mode 100644
index 000000000..498174c9d
--- /dev/null
+++ b/manifests/45-webhook-configuration.yaml
@@ -0,0 +1,59 @@
+---
+apiVersion: v1
+kind: Service
+metadata:
+ annotations:
+ include.release.openshift.io/self-managed-high-availability: "true"
+ include.release.openshift.io/single-node-developer: "true"
+ include.release.openshift.io/ibm-cloud-managed: "true"
+ service.beta.openshift.io/serving-cert-secret-name: performance-addon-operator-webhook-cert
+ labels:
+ name: performance-addon-operator-service
+ name: performance-addon-operator-service
+ namespace: openshift-cluster-node-tuning-operator
+spec:
+ ports:
+ - name: "443"
+ port: 443
+ protocol: TCP
+ targetPort: 4343
+ selector:
+ name: cluster-node-tuning-operator
+ type: ClusterIP
+
+---
+
+apiVersion: admissionregistration.k8s.io/v1
+kind: ValidatingWebhookConfiguration
+metadata:
+ annotations:
+ include.release.openshift.io/self-managed-high-availability: "true"
+ include.release.openshift.io/single-node-developer: "true"
+ include.release.openshift.io/ibm-cloud-managed: "true"
+ service.beta.openshift.io/inject-cabundle: "true"
+ name: performance-addon-operator
+webhooks:
+ - admissionReviewVersions:
+ - v1
+ clientConfig:
+ service:
+ name: performance-addon-operator-service
+ namespace: openshift-cluster-node-tuning-operator
+ path: /validate-performance-openshift-io-v2-performanceprofile
+ port: 443
+ failurePolicy: Fail
+ matchPolicy: Equivalent
+ name: vwb.performance.openshift.io
+ rules:
+ - apiGroups:
+ - performance.openshift.io
+ apiVersions:
+ - v2
+ operations:
+ - CREATE
+ - UPDATE
+ resources:
+ - performanceprofiles
+ scope: '*'
+ sideEffects: None
+ timeoutSeconds: 10
diff --git a/manifests/50-operator-ibm-cloud-managed.yaml b/manifests/50-operator-ibm-cloud-managed.yaml
index 7de8b1d40..35a2e940c 100644
--- a/manifests/50-operator-ibm-cloud-managed.yaml
+++ b/manifests/50-operator-ibm-cloud-managed.yaml
@@ -54,6 +54,8 @@ spec:
name: node-tuning-operator-tls
- mountPath: /var/run/configmaps/trusted-ca/
name: trusted-ca
+ - mountPath: /apiserver.local.config/certificates
+ name: apiservice-cert
priorityClassName: system-cluster-critical
securityContext:
runAsNonRoot: true
@@ -75,6 +77,15 @@ spec:
- name: node-tuning-operator-tls
secret:
secretName: node-tuning-operator-tls
+ - name: apiservice-cert
+ secret:
+ defaultMode: 420
+ items:
+ - key: tls.crt
+ path: apiserver.crt
+ - key: tls.key
+ path: apiserver.key
+ secretName: performance-addon-operator-webhook-cert
- configMap:
items:
- key: ca-bundle.crt
diff --git a/manifests/50-operator.yaml b/manifests/50-operator.yaml
index 45b572a5c..8348673d0 100644
--- a/manifests/50-operator.yaml
+++ b/manifests/50-operator.yaml
@@ -1,3 +1,4 @@
+---
apiVersion: apps/v1
kind: Deployment
metadata:
@@ -74,10 +75,21 @@ spec:
mountPath: /etc/secrets
- name: trusted-ca
mountPath: /var/run/configmaps/trusted-ca/
+ - name: apiservice-cert
+ mountPath: /apiserver.local.config/certificates
volumes:
- name: node-tuning-operator-tls
secret:
secretName: node-tuning-operator-tls
+ - name: apiservice-cert
+ secret:
+ defaultMode: 420
+ items:
+ - key: tls.crt
+ path: apiserver.crt
+ - key: tls.key
+ path: apiserver.key
+ secretName: performance-addon-operator-webhook-cert
- name: trusted-ca
configMap:
name: trusted-ca
diff --git a/pkg/apis/pao/performance_suite_test.go b/pkg/apis/pao/performance_suite_test.go
new file mode 100644
index 000000000..90276c5f1
--- /dev/null
+++ b/pkg/apis/pao/performance_suite_test.go
@@ -0,0 +1,13 @@
+package performance
+
+import (
+ "testing"
+
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+)
+
+func TestPerformance(t *testing.T) {
+ RegisterFailHandler(Fail)
+ RunSpecs(t, "Performance Suite")
+}
diff --git a/pkg/apis/pao/performance_test.go b/pkg/apis/pao/performance_test.go
new file mode 100644
index 000000000..495209e55
--- /dev/null
+++ b/pkg/apis/pao/performance_test.go
@@ -0,0 +1,99 @@
+package performance
+
+import (
+ "io/ioutil"
+ "strings"
+
+ "github.com/RHsyseng/operator-utils/pkg/validation"
+ "github.com/ghodss/yaml"
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+ performancev2 "github.com/openshift/cluster-node-tuning-operator/pkg/apis/pao/v2"
+)
+
+const (
+ crFilename = "../../../examples/pao/samples/performance_v2_performanceprofile.yaml"
+ crdFilename = "../../../examples/pao/crd/bases/performance.openshift.io_performanceprofiles.yaml"
+ lastHeartbeatPath = "/status/conditions/lastHeartbeatTime"
+ lastTransitionPath = "/status/conditions/lastTransitionTime"
+)
+
+var _ = Describe("PerformanceProfile CR(D) Schema", func() {
+ var schema validation.Schema
+
+ BeforeEach(func() {
+ var err error
+ schema, err = getSchema(crdFilename)
+ Expect(err).ToNot(HaveOccurred())
+ Expect(schema).ToNot(BeNil())
+ })
+
+ It("should validate PerformanceProfile struct fields are represented recursively in the CRD", func() {
+ // add any CRD paths to omit from validation check [deeply nested properties, generated timestamps, etc.]
+ pathOmissions := []string{
+ lastHeartbeatPath,
+ lastTransitionPath,
+ }
+ missingEntries := getMissingEntries(schema, &performancev2.PerformanceProfile{}, pathOmissions...)
+ Expect(missingEntries).To(BeEmpty())
+ })
+
+ It("should validate CR contents & formatting against provided CRD schema", func() {
+ cr, err := getCR(crFilename)
+ Expect(err).ToNot(HaveOccurred())
+ Expect(cr).ToNot(BeNil())
+
+ // schema.Validate wraps a number of custom validator triggers for slice/string formatting, schema layout, etc.
+ // reference operator-utils/validate/schema:NewSchemaValidator for inclusive list
+ err = schema.Validate(cr)
+ Expect(err).ToNot(HaveOccurred())
+ })
+})
+
+// getSchema reads in & returns CRD schema file as openAPIV3Schema{} for validation usage.
+// See references operator-utils/validation/schema & go-openapi/spec/schema
+func getSchema(crdPath string) (validation.Schema, error) {
+ bytes, err := ioutil.ReadFile(crdPath)
+ if err != nil {
+ return nil, err
+ }
+ schema, err := validation.NewVersioned(bytes, "v2")
+ if err != nil {
+ return nil, err
+ }
+ return schema, nil
+}
+
+// getCR unmarshals a *_cr.yaml file and returns the representing struct
+func getCR(crPath string) (map[string]interface{}, error) {
+ bytes, err := ioutil.ReadFile(crPath)
+ if err != nil {
+ return nil, err
+ }
+ var input map[string]interface{}
+ if err = yaml.Unmarshal(bytes, &input); err != nil {
+ return nil, err
+ }
+ return input, nil
+}
+
+// getMissingEntries recursively walks schemaInstance fields (PerformanceProfile), checking that each (and its fields
+// recursively) are represented in CRD (schema); returns list of missing fields with specified omissions filtered out
+func getMissingEntries(schema validation.Schema, schemaInstance interface{}, omissions ...string) []validation.SchemaEntry {
+ missingEntries := schema.GetMissingEntries(schemaInstance)
+ var filtered bool
+ var filteredMissing []validation.SchemaEntry
+ for _, missing := range missingEntries {
+ filtered = false
+ for _, omit := range omissions {
+ if strings.HasPrefix(missing.Path, omit) {
+ filtered = true
+ break
+ }
+ }
+ if !filtered {
+ filteredMissing = append(filteredMissing, missing)
+ }
+ }
+ return filteredMissing
+}
diff --git a/pkg/apis/pao/v1/groupversion_info.go b/pkg/apis/pao/v1/groupversion_info.go
new file mode 100644
index 000000000..0e54f45db
--- /dev/null
+++ b/pkg/apis/pao/v1/groupversion_info.go
@@ -0,0 +1,36 @@
+/*
+
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package v1 contains API Schema definitions for the performance v1 API group
+// +kubebuilder:object:generate=true
+// +groupName=performance.openshift.io
+package v1
+
+import (
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ "sigs.k8s.io/controller-runtime/pkg/scheme"
+)
+
+var (
+ // GroupVersion is group version used to register these objects
+ GroupVersion = schema.GroupVersion{Group: "performance.openshift.io", Version: "v1"}
+
+ // SchemeBuilder is used to add go types to the GroupVersionKind scheme
+ SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion}
+
+ // AddToScheme adds the types in this group-version to the given scheme.
+ AddToScheme = SchemeBuilder.AddToScheme
+)
diff --git a/pkg/apis/pao/v1/performanceprofile_conversion.go b/pkg/apis/pao/v1/performanceprofile_conversion.go
new file mode 100644
index 000000000..776708923
--- /dev/null
+++ b/pkg/apis/pao/v1/performanceprofile_conversion.go
@@ -0,0 +1,4 @@
+package v1
+
+// Hub marks this type as a conversion hub.
+func (*PerformanceProfile) Hub() {}
diff --git a/pkg/apis/pao/v1/performanceprofile_types.go b/pkg/apis/pao/v1/performanceprofile_types.go
new file mode 100644
index 000000000..d12a92174
--- /dev/null
+++ b/pkg/apis/pao/v1/performanceprofile_types.go
@@ -0,0 +1,198 @@
+/*
+
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+ conditionsv1 "github.com/openshift/custom-resource-status/conditions/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// PerformanceProfilePauseAnnotation allows an admin to suspend the operator's
+// reconcile loop in order to perform manual changes to performance profile owned
+// objects.
+const PerformanceProfilePauseAnnotation = "performance.openshift.io/pause-reconcile"
+
+// PerformanceProfileSpec defines the desired state of PerformanceProfile.
+type PerformanceProfileSpec struct {
+ // CPU defines a set of CPU related parameters.
+ CPU *CPU `json:"cpu"`
+ // HugePages defines a set of huge pages related parameters.
+ // It is possible to set huge pages with multiple size values at the same time.
+ // For example, hugepages can be set with 1G and 2M, both values will be set on the node by the performance-addon-operator.
+ // It is important to notice that setting hugepages default size to 1G will remove all 2M related
+ // folders from the node and it will be impossible to configure 2M hugepages under the node.
+ HugePages *HugePages `json:"hugepages,omitempty"`
+ // MachineConfigLabel defines the label to add to the MachineConfigs the operator creates. It has to be
+ // used in the MachineConfigSelector of the MachineConfigPool which targets this performance profile.
+ // Defaults to "machineconfiguration.openshift.io/role="
+ // +optional
+ MachineConfigLabel map[string]string `json:"machineConfigLabel,omitempty"`
+ // MachineConfigPoolSelector defines the MachineConfigPool label to use in the MachineConfigPoolSelector
+ // of resources like KubeletConfigs created by the operator.
+ // Defaults to "machineconfiguration.openshift.io/role="
+ // +optional
+ MachineConfigPoolSelector map[string]string `json:"machineConfigPoolSelector,omitempty"`
+ // NodeSelector defines the Node label to use in the NodeSelectors of resources like Tuned created by the operator.
+ // It most likely should, but does not have to match the node label in the NodeSelector of the MachineConfigPool
+ // which targets this performance profile.
+ // In the case when machineConfigLabels or machineConfigPoolSelector are not set, we are expecting a certain NodeSelector format
+ // <domain>/<role>: "" in order to be able to calculate the default values for the former mentioned fields.
+ NodeSelector map[string]string `json:"nodeSelector"`
+ // RealTimeKernel defines a set of real time kernel related parameters. RT kernel won't be installed when not set.
+ RealTimeKernel *RealTimeKernel `json:"realTimeKernel,omitempty"`
+ // Additional kernel arguments.
+ // +optional
+ AdditionalKernelArgs []string `json:"additionalKernelArgs,omitempty"`
+ // NUMA defines options related to topology aware affinities
+ // +optional
+ NUMA *NUMA `json:"numa,omitempty"`
+ // Net defines a set of network related features
+ // +optional
+ Net *Net `json:"net,omitempty"`
+ // GloballyDisableIrqLoadBalancing toggles whether IRQ load balancing will be disabled for the Isolated CPU set.
+ // When the option is set to "true" it disables IRQs load balancing for the Isolated CPU set.
+ // Setting the option to "false" allows the IRQs to be balanced across all CPUs, however the IRQs load balancing
+ // can be disabled per pod CPUs when using irq-load-balancing.crio.io/cpu-quota.crio.io annotations.
+ // Defaults to "false"
+ // +optional
+ GloballyDisableIrqLoadBalancing *bool `json:"globallyDisableIrqLoadBalancing,omitempty"`
+}
+
+// CPUSet defines the set of CPUs (0-3,8-11).
+type CPUSet string
+
+// CPU defines a set of CPU related features.
+type CPU struct {
+ // Reserved defines a set of CPUs that will not be used for any container workloads initiated by kubelet.
+ Reserved *CPUSet `json:"reserved,omitempty"`
+ // Isolated defines a set of CPUs that will be used to give to application threads the most execution time possible,
+ // which means removing as many extraneous tasks off a CPU as possible.
+ // It is important to notice the CPU manager can choose any CPU to run the workload
+ // except the reserved CPUs. In order to guarantee that your workload will run on the isolated CPU:
+ // 1. The union of reserved CPUs and isolated CPUs should include all online CPUs
+ // 2. The isolated CPUs field should be the complementary to reserved CPUs field
+ Isolated *CPUSet `json:"isolated"`
+ // BalanceIsolated toggles whether or not the Isolated CPU set is eligible for load balancing work loads.
+ // When this option is set to "false", the Isolated CPU set will be static, meaning workloads have to
+ // explicitly assign each thread to a specific cpu in order to work across multiple CPUs.
+ // Setting this to "true" allows workloads to be balanced across CPUs.
+ // Setting this to "false" offers the most predictable performance for guaranteed workloads, but it
+ // offloads the complexity of cpu load balancing to the application.
+ // Defaults to "true"
+ // +optional
+ BalanceIsolated *bool `json:"balanceIsolated,omitempty"`
+}
+
+// HugePageSize defines size of huge pages, can be 2M or 1G.
+type HugePageSize string
+
+// HugePages defines a set of huge pages that we want to allocate at boot.
+type HugePages struct {
+ // DefaultHugePagesSize defines huge pages default size under kernel boot parameters.
+ DefaultHugePagesSize *HugePageSize `json:"defaultHugepagesSize,omitempty"`
+ // Pages defines huge pages that we want to allocate at boot time.
+ Pages []HugePage `json:"pages,omitempty"`
+}
+
+// HugePage defines the number of allocated huge pages of the specific size.
+type HugePage struct {
+ // Size defines huge page size, maps to the 'hugepagesz' kernel boot parameter.
+ Size HugePageSize `json:"size,omitempty"`
+ // Count defines amount of huge pages, maps to the 'hugepages' kernel boot parameter.
+ Count int32 `json:"count,omitempty"`
+ // Node defines the NUMA node where hugepages will be allocated,
+ // if not specified, pages will be allocated equally between NUMA nodes
+ // +optional
+ Node *int32 `json:"node,omitempty"`
+}
+
+// NUMA defines parameters related to topology awareness and affinity.
+type NUMA struct {
+ // Name of the policy applied when TopologyManager is enabled
+ // Operator defaults to "best-effort"
+ // +optional
+ TopologyPolicy *string `json:"topologyPolicy,omitempty"`
+}
+
+// Net defines a set of network related features
+type Net struct {
+ // UserLevelNetworking when enabled - sets either all or specified network devices queue size to the amount of reserved CPUs. Defaults to "false".
+ UserLevelNetworking *bool `json:"userLevelNetworking,omitempty"`
+ // Devices contains a list of network device representations that will be
+ // set with a netqueue count equal to CPU.Reserved.
+ // If no devices are specified then the default is all devices.
+ Devices []Device `json:"devices,omitempty"`
+}
+
+// Device defines a way to represent a network device in several options:
+// device name, vendor ID, model ID, PCI path and MAC address
+type Device struct {
+ // Network device name to be matched. It uses a syntax of shell-style wildcards which are either positive or negative.
+ // +optional
+ InterfaceName *string `json:"interfaceName,omitempty"`
+ // Network device vendor ID represented as a 16 bit hexadecimal number.
+ // +optional
+ VendorID *string `json:"vendorID,omitempty"`
+ // Network device ID (model) represented as a 16 bit hexadecimal number.
+ // +optional
+ DeviceID *string `json:"deviceID,omitempty"`
+}
+
+// RealTimeKernel defines the set of parameters relevant for the real time kernel.
+type RealTimeKernel struct {
+ // Enabled defines if the real time kernel packages should be installed. Defaults to "false"
+ Enabled *bool `json:"enabled,omitempty"`
+}
+
+// PerformanceProfileStatus defines the observed state of PerformanceProfile.
+type PerformanceProfileStatus struct {
+ // Conditions represents the latest available observations of current state.
+ // +optional
+ Conditions []conditionsv1.Condition `json:"conditions,omitempty"`
+ // Tuned points to the Tuned custom resource object that contains the tuning values generated by this operator.
+ // +optional
+ Tuned *string `json:"tuned,omitempty"`
+ // RuntimeClass contains the name of the RuntimeClass resource created by the operator.
+ RuntimeClass *string `json:"runtimeClass,omitempty"`
+}
+
+// +kubebuilder:object:root=true
+// +kubebuilder:subresource:status
+// +kubebuilder:resource:path=performanceprofiles,scope=Cluster
+// +kubebuilder:deprecatedversion:warning="v1 is deprecated and should be removed in next three releases, use v2 instead"
+
+// PerformanceProfile is the Schema for the performanceprofiles API
+type PerformanceProfile struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+
+ Spec PerformanceProfileSpec `json:"spec,omitempty"`
+ Status PerformanceProfileStatus `json:"status,omitempty"`
+}
+
+// +kubebuilder:object:root=true
+
+// PerformanceProfileList contains a list of PerformanceProfile
+type PerformanceProfileList struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ListMeta `json:"metadata,omitempty"`
+ Items []PerformanceProfile `json:"items"`
+}
+
+func init() {
+ SchemeBuilder.Register(&PerformanceProfile{}, &PerformanceProfileList{})
+}
diff --git a/pkg/apis/pao/v1/performanceprofile_webhook.go b/pkg/apis/pao/v1/performanceprofile_webhook.go
new file mode 100644
index 000000000..c3af72fef
--- /dev/null
+++ b/pkg/apis/pao/v1/performanceprofile_webhook.go
@@ -0,0 +1,12 @@
+package v1
+
+import (
+ ctrl "sigs.k8s.io/controller-runtime"
+)
+
+// SetupWebhookWithManager enables Webhooks - needed for version conversion
+func (r *PerformanceProfile) SetupWebhookWithManager(mgr ctrl.Manager) error {
+ return ctrl.NewWebhookManagedBy(mgr).
+ For(r).
+ Complete()
+}
diff --git a/pkg/apis/pao/v1/zz_generated.deepcopy.go b/pkg/apis/pao/v1/zz_generated.deepcopy.go
new file mode 100644
index 000000000..f5eeee6da
--- /dev/null
+++ b/pkg/apis/pao/v1/zz_generated.deepcopy.go
@@ -0,0 +1,363 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by controller-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ conditionsv1 "github.com/openshift/custom-resource-status/conditions/v1"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CPU) DeepCopyInto(out *CPU) {
+ *out = *in
+ if in.Reserved != nil {
+ in, out := &in.Reserved, &out.Reserved
+ *out = new(CPUSet)
+ **out = **in
+ }
+ if in.Isolated != nil {
+ in, out := &in.Isolated, &out.Isolated
+ *out = new(CPUSet)
+ **out = **in
+ }
+ if in.BalanceIsolated != nil {
+ in, out := &in.BalanceIsolated, &out.BalanceIsolated
+ *out = new(bool)
+ **out = **in
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CPU.
+func (in *CPU) DeepCopy() *CPU {
+ if in == nil {
+ return nil
+ }
+ out := new(CPU)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Device) DeepCopyInto(out *Device) {
+ *out = *in
+ if in.InterfaceName != nil {
+ in, out := &in.InterfaceName, &out.InterfaceName
+ *out = new(string)
+ **out = **in
+ }
+ if in.VendorID != nil {
+ in, out := &in.VendorID, &out.VendorID
+ *out = new(string)
+ **out = **in
+ }
+ if in.DeviceID != nil {
+ in, out := &in.DeviceID, &out.DeviceID
+ *out = new(string)
+ **out = **in
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Device.
+func (in *Device) DeepCopy() *Device {
+ if in == nil {
+ return nil
+ }
+ out := new(Device)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *HugePage) DeepCopyInto(out *HugePage) {
+ *out = *in
+ if in.Node != nil {
+ in, out := &in.Node, &out.Node
+ *out = new(int32)
+ **out = **in
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HugePage.
+func (in *HugePage) DeepCopy() *HugePage {
+ if in == nil {
+ return nil
+ }
+ out := new(HugePage)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *HugePages) DeepCopyInto(out *HugePages) {
+ *out = *in
+ if in.DefaultHugePagesSize != nil {
+ in, out := &in.DefaultHugePagesSize, &out.DefaultHugePagesSize
+ *out = new(HugePageSize)
+ **out = **in
+ }
+ if in.Pages != nil {
+ in, out := &in.Pages, &out.Pages
+ *out = make([]HugePage, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HugePages.
+func (in *HugePages) DeepCopy() *HugePages {
+ if in == nil {
+ return nil
+ }
+ out := new(HugePages)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NUMA) DeepCopyInto(out *NUMA) {
+ *out = *in
+ if in.TopologyPolicy != nil {
+ in, out := &in.TopologyPolicy, &out.TopologyPolicy
+ *out = new(string)
+ **out = **in
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NUMA.
+func (in *NUMA) DeepCopy() *NUMA {
+ if in == nil {
+ return nil
+ }
+ out := new(NUMA)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Net) DeepCopyInto(out *Net) {
+ *out = *in
+ if in.UserLevelNetworking != nil {
+ in, out := &in.UserLevelNetworking, &out.UserLevelNetworking
+ *out = new(bool)
+ **out = **in
+ }
+ if in.Devices != nil {
+ in, out := &in.Devices, &out.Devices
+ *out = make([]Device, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Net.
+func (in *Net) DeepCopy() *Net {
+ if in == nil {
+ return nil
+ }
+ out := new(Net)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PerformanceProfile) DeepCopyInto(out *PerformanceProfile) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PerformanceProfile.
+func (in *PerformanceProfile) DeepCopy() *PerformanceProfile {
+ if in == nil {
+ return nil
+ }
+ out := new(PerformanceProfile)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *PerformanceProfile) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PerformanceProfileList) DeepCopyInto(out *PerformanceProfileList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]PerformanceProfile, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PerformanceProfileList.
+func (in *PerformanceProfileList) DeepCopy() *PerformanceProfileList {
+ if in == nil {
+ return nil
+ }
+ out := new(PerformanceProfileList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *PerformanceProfileList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PerformanceProfileSpec) DeepCopyInto(out *PerformanceProfileSpec) {
+ *out = *in
+ if in.CPU != nil {
+ in, out := &in.CPU, &out.CPU
+ *out = new(CPU)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.HugePages != nil {
+ in, out := &in.HugePages, &out.HugePages
+ *out = new(HugePages)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.MachineConfigLabel != nil {
+ in, out := &in.MachineConfigLabel, &out.MachineConfigLabel
+ *out = make(map[string]string, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ if in.MachineConfigPoolSelector != nil {
+ in, out := &in.MachineConfigPoolSelector, &out.MachineConfigPoolSelector
+ *out = make(map[string]string, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ if in.NodeSelector != nil {
+ in, out := &in.NodeSelector, &out.NodeSelector
+ *out = make(map[string]string, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ if in.RealTimeKernel != nil {
+ in, out := &in.RealTimeKernel, &out.RealTimeKernel
+ *out = new(RealTimeKernel)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.AdditionalKernelArgs != nil {
+ in, out := &in.AdditionalKernelArgs, &out.AdditionalKernelArgs
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.NUMA != nil {
+ in, out := &in.NUMA, &out.NUMA
+ *out = new(NUMA)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Net != nil {
+ in, out := &in.Net, &out.Net
+ *out = new(Net)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.GloballyDisableIrqLoadBalancing != nil {
+ in, out := &in.GloballyDisableIrqLoadBalancing, &out.GloballyDisableIrqLoadBalancing
+ *out = new(bool)
+ **out = **in
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PerformanceProfileSpec.
+func (in *PerformanceProfileSpec) DeepCopy() *PerformanceProfileSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(PerformanceProfileSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PerformanceProfileStatus) DeepCopyInto(out *PerformanceProfileStatus) {
+ *out = *in
+ if in.Conditions != nil {
+ in, out := &in.Conditions, &out.Conditions
+ *out = make([]conditionsv1.Condition, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.Tuned != nil {
+ in, out := &in.Tuned, &out.Tuned
+ *out = new(string)
+ **out = **in
+ }
+ if in.RuntimeClass != nil {
+ in, out := &in.RuntimeClass, &out.RuntimeClass
+ *out = new(string)
+ **out = **in
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PerformanceProfileStatus.
+func (in *PerformanceProfileStatus) DeepCopy() *PerformanceProfileStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(PerformanceProfileStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RealTimeKernel) DeepCopyInto(out *RealTimeKernel) {
+ *out = *in
+ if in.Enabled != nil {
+ in, out := &in.Enabled, &out.Enabled
+ *out = new(bool)
+ **out = **in
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RealTimeKernel.
+func (in *RealTimeKernel) DeepCopy() *RealTimeKernel {
+ if in == nil {
+ return nil
+ }
+ out := new(RealTimeKernel)
+ in.DeepCopyInto(out)
+ return out
+}
diff --git a/pkg/apis/pao/v1alpha1/groupversion_info.go b/pkg/apis/pao/v1alpha1/groupversion_info.go
new file mode 100644
index 000000000..031ebdc99
--- /dev/null
+++ b/pkg/apis/pao/v1alpha1/groupversion_info.go
@@ -0,0 +1,36 @@
+/*
+
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package v1alpha1 contains API Schema definitions for the performance v1alpha1 API group
+// +kubebuilder:object:generate=true
+// +groupName=performance.openshift.io
+package v1alpha1
+
+import (
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ "sigs.k8s.io/controller-runtime/pkg/scheme"
+)
+
+var (
+ // GroupVersion is group version used to register these objects
+ GroupVersion = schema.GroupVersion{Group: "performance.openshift.io", Version: "v1alpha1"}
+
+ // SchemeBuilder is used to add go types to the GroupVersionKind scheme
+ SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion}
+
+ // AddToScheme adds the types in this group-version to the given scheme.
+ AddToScheme = SchemeBuilder.AddToScheme
+)
diff --git a/pkg/apis/pao/v1alpha1/performanceprofile_conversion.go b/pkg/apis/pao/v1alpha1/performanceprofile_conversion.go
new file mode 100644
index 000000000..c27a8073f
--- /dev/null
+++ b/pkg/apis/pao/v1alpha1/performanceprofile_conversion.go
@@ -0,0 +1,221 @@
+package v1alpha1
+
+import (
+ "k8s.io/utils/pointer"
+
+ v1 "github.com/openshift/cluster-node-tuning-operator/pkg/apis/pao/v1"
+ conditionsv1 "github.com/openshift/custom-resource-status/conditions/v1"
+
+ "sigs.k8s.io/controller-runtime/pkg/conversion"
+)
+
+// ConvertTo converts this PerformanceProfile to the Hub version (v1).
+func (curr *PerformanceProfile) ConvertTo(dstRaw conversion.Hub) error {
+ dst := dstRaw.(*v1.PerformanceProfile)
+
+ // ObjectMeta
+ dst.ObjectMeta = curr.ObjectMeta
+
+ // Spec
+ if curr.Spec.CPU != nil {
+ dst.Spec.CPU = new(v1.CPU)
+
+ if curr.Spec.CPU.Reserved != nil {
+ reserved := v1.CPUSet(*curr.Spec.CPU.Reserved)
+ dst.Spec.CPU.Reserved = &reserved
+ }
+ if curr.Spec.CPU.Isolated != nil {
+ isolated := v1.CPUSet(*curr.Spec.CPU.Isolated)
+ dst.Spec.CPU.Isolated = &isolated
+ }
+ if curr.Spec.CPU.BalanceIsolated != nil {
+ dst.Spec.CPU.BalanceIsolated = pointer.BoolPtr(*curr.Spec.CPU.BalanceIsolated)
+ }
+ }
+
+ if curr.Spec.HugePages != nil {
+ dst.Spec.HugePages = new(v1.HugePages)
+
+ if curr.Spec.HugePages.DefaultHugePagesSize != nil {
+ defaultHugePagesSize := v1.HugePageSize(*curr.Spec.HugePages.DefaultHugePagesSize)
+ dst.Spec.HugePages.DefaultHugePagesSize = &defaultHugePagesSize
+ }
+
+ if curr.Spec.HugePages.Pages != nil {
+ dst.Spec.HugePages.Pages = make([]v1.HugePage, len(curr.Spec.HugePages.Pages))
+
+ for i, p := range curr.Spec.HugePages.Pages {
+ dst.Spec.HugePages.Pages[i] = v1.HugePage{
+ Size: v1.HugePageSize(p.Size), Count: p.Count,
+ }
+ if p.Node != nil {
+ dst.Spec.HugePages.Pages[i].Node = pointer.Int32Ptr(*p.Node)
+ }
+ }
+ }
+ }
+
+ if curr.Spec.MachineConfigLabel != nil {
+ dst.Spec.MachineConfigLabel = make(map[string]string)
+ for k, v := range curr.Spec.MachineConfigLabel {
+ dst.Spec.MachineConfigLabel[k] = v
+ }
+ }
+
+ if curr.Spec.MachineConfigPoolSelector != nil {
+ dst.Spec.MachineConfigPoolSelector = make(map[string]string)
+ for k, v := range curr.Spec.MachineConfigPoolSelector {
+ dst.Spec.MachineConfigPoolSelector[k] = v
+ }
+ }
+
+ if curr.Spec.NodeSelector != nil {
+ dst.Spec.NodeSelector = make(map[string]string)
+ for k, v := range curr.Spec.NodeSelector {
+ dst.Spec.NodeSelector[k] = v
+ }
+ }
+
+ if curr.Spec.RealTimeKernel != nil {
+ dst.Spec.RealTimeKernel = new(v1.RealTimeKernel)
+
+ if curr.Spec.RealTimeKernel.Enabled != nil {
+ dst.Spec.RealTimeKernel.Enabled = pointer.BoolPtr(*curr.Spec.RealTimeKernel.Enabled)
+ }
+ }
+
+ if curr.Spec.AdditionalKernelArgs != nil {
+ dst.Spec.AdditionalKernelArgs = make([]string, len(curr.Spec.AdditionalKernelArgs))
+ copy(dst.Spec.AdditionalKernelArgs, curr.Spec.AdditionalKernelArgs)
+ }
+
+ if curr.Spec.NUMA != nil {
+ dst.Spec.NUMA = new(v1.NUMA)
+
+ if curr.Spec.NUMA.TopologyPolicy != nil {
+ dst.Spec.NUMA.TopologyPolicy = pointer.StringPtr(*curr.Spec.NUMA.TopologyPolicy)
+ }
+ }
+
+ // Status
+ if curr.Status.Conditions != nil {
+ dst.Status.Conditions = make([]conditionsv1.Condition, len(curr.Status.Conditions))
+ copy(dst.Status.Conditions, curr.Status.Conditions)
+ }
+
+ if curr.Status.Tuned != nil {
+ dst.Status.Tuned = pointer.StringPtr(*curr.Status.Tuned)
+ }
+
+ if curr.Status.RuntimeClass != nil {
+ dst.Status.RuntimeClass = pointer.StringPtr(*curr.Status.RuntimeClass)
+ }
+
+ // +kubebuilder:docs-gen:collapse=rote conversion
+ return nil
+}
+
+// ConvertFrom converts from the Hub version (v1) to this version.
+func (curr *PerformanceProfile) ConvertFrom(srcRaw conversion.Hub) error {
+ src := srcRaw.(*v1.PerformanceProfile)
+
+ // ObjectMeta
+ curr.ObjectMeta = src.ObjectMeta
+
+ // Spec
+ if src.Spec.CPU != nil {
+ curr.Spec.CPU = new(CPU)
+
+ if src.Spec.CPU.Reserved != nil {
+ reserved := CPUSet(*src.Spec.CPU.Reserved)
+ curr.Spec.CPU.Reserved = &reserved
+ }
+ if src.Spec.CPU.Isolated != nil {
+ isolated := CPUSet(*src.Spec.CPU.Isolated)
+ curr.Spec.CPU.Isolated = &isolated
+ }
+ if src.Spec.CPU.BalanceIsolated != nil {
+ curr.Spec.CPU.BalanceIsolated = pointer.BoolPtr(*src.Spec.CPU.BalanceIsolated)
+ }
+ }
+
+ if src.Spec.HugePages != nil {
+ curr.Spec.HugePages = new(HugePages)
+
+ if src.Spec.HugePages.DefaultHugePagesSize != nil {
+ defaultHugePagesSize := HugePageSize(*src.Spec.HugePages.DefaultHugePagesSize)
+ curr.Spec.HugePages.DefaultHugePagesSize = &defaultHugePagesSize
+ }
+
+ if src.Spec.HugePages.Pages != nil {
+ curr.Spec.HugePages.Pages = make([]HugePage, len(src.Spec.HugePages.Pages))
+ for i, p := range src.Spec.HugePages.Pages {
+ curr.Spec.HugePages.Pages[i] = HugePage{
+ Size: HugePageSize(p.Size), Count: p.Count,
+ }
+ if p.Node != nil {
+ curr.Spec.HugePages.Pages[i].Node = pointer.Int32Ptr(*p.Node)
+ }
+ }
+ }
+ }
+
+ if src.Spec.MachineConfigLabel != nil {
+ curr.Spec.MachineConfigLabel = make(map[string]string)
+ for k, v := range src.Spec.MachineConfigLabel {
+ curr.Spec.MachineConfigLabel[k] = v
+ }
+ }
+
+ if src.Spec.MachineConfigPoolSelector != nil {
+ curr.Spec.MachineConfigPoolSelector = make(map[string]string)
+ for k, v := range src.Spec.MachineConfigPoolSelector {
+ curr.Spec.MachineConfigPoolSelector[k] = v
+ }
+ }
+
+ if src.Spec.NodeSelector != nil {
+ curr.Spec.NodeSelector = make(map[string]string)
+ for k, v := range src.Spec.NodeSelector {
+ curr.Spec.NodeSelector[k] = v
+ }
+ }
+
+ if src.Spec.RealTimeKernel != nil {
+ curr.Spec.RealTimeKernel = new(RealTimeKernel)
+
+ if src.Spec.RealTimeKernel.Enabled != nil {
+ curr.Spec.RealTimeKernel.Enabled = pointer.BoolPtr(*src.Spec.RealTimeKernel.Enabled)
+ }
+ }
+
+ if src.Spec.AdditionalKernelArgs != nil {
+ curr.Spec.AdditionalKernelArgs = make([]string, len(src.Spec.AdditionalKernelArgs))
+ copy(curr.Spec.AdditionalKernelArgs, src.Spec.AdditionalKernelArgs)
+ }
+
+ if src.Spec.NUMA != nil {
+ curr.Spec.NUMA = new(NUMA)
+
+ if src.Spec.NUMA.TopologyPolicy != nil {
+ curr.Spec.NUMA.TopologyPolicy = pointer.StringPtr(*src.Spec.NUMA.TopologyPolicy)
+ }
+ }
+
+ // Status
+ if src.Status.Conditions != nil {
+ curr.Status.Conditions = make([]conditionsv1.Condition, len(src.Status.Conditions))
+ copy(curr.Status.Conditions, src.Status.Conditions)
+ }
+
+ if src.Status.Tuned != nil {
+ curr.Status.Tuned = pointer.StringPtr(*src.Status.Tuned)
+ }
+
+ if src.Status.RuntimeClass != nil {
+ curr.Status.RuntimeClass = pointer.StringPtr(*src.Status.RuntimeClass)
+ }
+
+ // +kubebuilder:docs-gen:collapse=rote conversion
+ return nil
+}
diff --git a/pkg/apis/pao/v1alpha1/performanceprofile_types.go b/pkg/apis/pao/v1alpha1/performanceprofile_types.go
new file mode 100644
index 000000000..e56c3aabf
--- /dev/null
+++ b/pkg/apis/pao/v1alpha1/performanceprofile_types.go
@@ -0,0 +1,163 @@
+/*
+
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha1
+
+import (
+ conditionsv1 "github.com/openshift/custom-resource-status/conditions/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// PerformanceProfilePauseAnnotation allows an admin to suspend the operator's
+// reconcile loop in order to perform manual changes to performance profile owned
+// objects.
+const PerformanceProfilePauseAnnotation = "performance.openshift.io/pause-reconcile"
+
+// PerformanceProfileSpec defines the desired state of PerformanceProfile.
+type PerformanceProfileSpec struct {
+	// CPU defines a set of CPU related parameters.
+	CPU *CPU `json:"cpu,omitempty"`
+	// HugePages defines a set of huge pages related parameters.
+	// It is possible to set huge pages with multiple size values at the same time.
+	// For example, hugepages can be set with 1G and 2M, both values will be set on the node by the performance-addon-operator.
+	// It is important to notice that setting hugepages default size to 1G will remove all 2M related
+	// folders from the node and it will be impossible to configure 2M hugepages under the node.
+	HugePages *HugePages `json:"hugepages,omitempty"`
+	// MachineConfigLabel defines the label to add to the MachineConfigs the operator creates. It has to be
+	// used in the MachineConfigSelector of the MachineConfigPool which targets this performance profile.
+	// Defaults to "machineconfiguration.openshift.io/role="
+	// +optional
+	MachineConfigLabel map[string]string `json:"machineConfigLabel,omitempty"`
+	// MachineConfigPoolSelector defines the MachineConfigPool label to use in the MachineConfigPoolSelector
+	// of resources like KubeletConfigs created by the operator.
+	// Defaults to "machineconfiguration.openshift.io/role="
+	// +optional
+	MachineConfigPoolSelector map[string]string `json:"machineConfigPoolSelector,omitempty"`
+	// NodeSelector defines the Node label to use in the NodeSelectors of resources like Tuned created by the operator.
+	// It most likely should, but does not have to match the node label in the NodeSelector of the MachineConfigPool
+	// which targets this performance profile.
+	NodeSelector map[string]string `json:"nodeSelector,omitempty"`
+	// RealTimeKernel defines a set of real time kernel related parameters. RT kernel won't be installed when not set.
+	RealTimeKernel *RealTimeKernel `json:"realTimeKernel,omitempty"`
+	// Additional kernel arguments.
+	// +optional
+	AdditionalKernelArgs []string `json:"additionalKernelArgs,omitempty"`
+	// NUMA defines options related to topology aware affinities
+	// +optional
+	NUMA *NUMA `json:"numa,omitempty"`
+}
+
+// CPUSet defines the set of CPUs(0-3,8-11).
+type CPUSet string
+
+// CPU defines a set of CPU related features.
+type CPU struct {
+ // Reserved defines a set of CPUs that will not be used for any container workloads initiated by kubelet.
+ Reserved *CPUSet `json:"reserved,omitempty"`
+ // Isolated defines a set of CPUs that will be used to give to application threads the most execution time possible,
+ // which means removing as many extraneous tasks off a CPU as possible.
+ // It is important to notice the CPU manager can choose any CPU to run the workload
+ // except the reserved CPUs. In order to guarantee that your workload will run on the isolated CPU:
+ // 1. The union of reserved CPUs and isolated CPUs should include all online CPUs
+ // 2. The isolated CPUs field should be the complementary to reserved CPUs field
+ // +optional
+ Isolated *CPUSet `json:"isolated,omitempty"`
+ // BalanceIsolated toggles whether or not the Isolated CPU set is eligible for load balancing work loads.
+ // When this option is set to "false", the Isolated CPU set will be static, meaning workloads have to
+ // explicitly assign each thread to a specific cpu in order to work across multiple CPUs.
+ // Setting this to "true" allows workloads to be balanced across CPUs.
+ // Setting this to "false" offers the most predictable performance for guaranteed workloads, but it
+ // offloads the complexity of cpu load balancing to the application.
+ // Defaults to "true"
+ // +optional
+ BalanceIsolated *bool `json:"balanceIsolated,omitempty"`
+}
+
+// HugePageSize defines size of huge pages, can be 2M or 1G.
+type HugePageSize string
+
+// HugePages defines a set of huge pages that we want to allocate at boot.
+type HugePages struct {
+ // DefaultHugePagesSize defines huge pages default size under kernel boot parameters.
+ DefaultHugePagesSize *HugePageSize `json:"defaultHugepagesSize,omitempty"`
+ // Pages defines huge pages that we want to allocate at boot time.
+ Pages []HugePage `json:"pages,omitempty"`
+}
+
+// HugePage defines the number of allocated huge pages of the specific size.
+type HugePage struct {
+ // Size defines huge page size, maps to the 'hugepagesz' kernel boot parameter.
+ Size HugePageSize `json:"size,omitempty"`
+ // Count defines amount of huge pages, maps to the 'hugepages' kernel boot parameter.
+ Count int32 `json:"count,omitempty"`
+ // Node defines the NUMA node where hugepages will be allocated,
+ // if not specified, pages will be allocated equally between NUMA nodes
+ // +optional
+ Node *int32 `json:"node,omitempty"`
+}
+
+// NUMA defines parameters related to topology awareness and affinity.
+type NUMA struct {
+ // Name of the policy applied when TopologyManager is enabled
+ // Operator defaults to "best-effort"
+ // +optional
+ TopologyPolicy *string `json:"topologyPolicy,omitempty"`
+}
+
+// RealTimeKernel defines the set of parameters relevant for the real time kernel.
+type RealTimeKernel struct {
+ // Enabled defines if the real time kernel packages should be installed. Defaults to "false"
+ Enabled *bool `json:"enabled,omitempty"`
+}
+
+// PerformanceProfileStatus defines the observed state of PerformanceProfile.
+type PerformanceProfileStatus struct {
+ // Conditions represents the latest available observations of current state.
+ // +optional
+ Conditions []conditionsv1.Condition `json:"conditions,omitempty"`
+ // Tuned points to the Tuned custom resource object that contains the tuning values generated by this operator.
+ // +optional
+ Tuned *string `json:"tuned,omitempty"`
+ // RuntimeClass contains the name of the RuntimeClass resource created by the operator.
+ RuntimeClass *string `json:"runtimeClass,omitempty"`
+}
+
+// +kubebuilder:object:root=true
+// +kubebuilder:subresource:status
+// +kubebuilder:resource:path=performanceprofiles,scope=Cluster
+// +kubebuilder:deprecatedversion:warning="v1alpha1 is deprecated and should be removed in the next release, use v2 instead"
+
+// PerformanceProfile is the Schema for the performanceprofiles API
+type PerformanceProfile struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+
+ Spec PerformanceProfileSpec `json:"spec,omitempty"`
+ Status PerformanceProfileStatus `json:"status,omitempty"`
+}
+
+// +kubebuilder:object:root=true
+
+// PerformanceProfileList contains a list of PerformanceProfile
+type PerformanceProfileList struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ListMeta `json:"metadata,omitempty"`
+ Items []PerformanceProfile `json:"items"`
+}
+
+func init() {
+ SchemeBuilder.Register(&PerformanceProfile{}, &PerformanceProfileList{})
+}
diff --git a/pkg/apis/pao/v1alpha1/zz_generated.deepcopy.go b/pkg/apis/pao/v1alpha1/zz_generated.deepcopy.go
new file mode 100644
index 000000000..e99aecf49
--- /dev/null
+++ b/pkg/apis/pao/v1alpha1/zz_generated.deepcopy.go
@@ -0,0 +1,296 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by controller-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+ "github.com/openshift/custom-resource-status/conditions/v1"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CPU) DeepCopyInto(out *CPU) {
+ *out = *in
+ if in.Reserved != nil {
+ in, out := &in.Reserved, &out.Reserved
+ *out = new(CPUSet)
+ **out = **in
+ }
+ if in.Isolated != nil {
+ in, out := &in.Isolated, &out.Isolated
+ *out = new(CPUSet)
+ **out = **in
+ }
+ if in.BalanceIsolated != nil {
+ in, out := &in.BalanceIsolated, &out.BalanceIsolated
+ *out = new(bool)
+ **out = **in
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CPU.
+func (in *CPU) DeepCopy() *CPU {
+ if in == nil {
+ return nil
+ }
+ out := new(CPU)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *HugePage) DeepCopyInto(out *HugePage) {
+ *out = *in
+ if in.Node != nil {
+ in, out := &in.Node, &out.Node
+ *out = new(int32)
+ **out = **in
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HugePage.
+func (in *HugePage) DeepCopy() *HugePage {
+ if in == nil {
+ return nil
+ }
+ out := new(HugePage)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *HugePages) DeepCopyInto(out *HugePages) {
+ *out = *in
+ if in.DefaultHugePagesSize != nil {
+ in, out := &in.DefaultHugePagesSize, &out.DefaultHugePagesSize
+ *out = new(HugePageSize)
+ **out = **in
+ }
+ if in.Pages != nil {
+ in, out := &in.Pages, &out.Pages
+ *out = make([]HugePage, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HugePages.
+func (in *HugePages) DeepCopy() *HugePages {
+ if in == nil {
+ return nil
+ }
+ out := new(HugePages)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NUMA) DeepCopyInto(out *NUMA) {
+ *out = *in
+ if in.TopologyPolicy != nil {
+ in, out := &in.TopologyPolicy, &out.TopologyPolicy
+ *out = new(string)
+ **out = **in
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NUMA.
+func (in *NUMA) DeepCopy() *NUMA {
+ if in == nil {
+ return nil
+ }
+ out := new(NUMA)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PerformanceProfile) DeepCopyInto(out *PerformanceProfile) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PerformanceProfile.
+func (in *PerformanceProfile) DeepCopy() *PerformanceProfile {
+ if in == nil {
+ return nil
+ }
+ out := new(PerformanceProfile)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *PerformanceProfile) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PerformanceProfileList) DeepCopyInto(out *PerformanceProfileList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]PerformanceProfile, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PerformanceProfileList.
+func (in *PerformanceProfileList) DeepCopy() *PerformanceProfileList {
+ if in == nil {
+ return nil
+ }
+ out := new(PerformanceProfileList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *PerformanceProfileList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PerformanceProfileSpec) DeepCopyInto(out *PerformanceProfileSpec) {
+ *out = *in
+ if in.CPU != nil {
+ in, out := &in.CPU, &out.CPU
+ *out = new(CPU)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.HugePages != nil {
+ in, out := &in.HugePages, &out.HugePages
+ *out = new(HugePages)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.MachineConfigLabel != nil {
+ in, out := &in.MachineConfigLabel, &out.MachineConfigLabel
+ *out = make(map[string]string, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ if in.MachineConfigPoolSelector != nil {
+ in, out := &in.MachineConfigPoolSelector, &out.MachineConfigPoolSelector
+ *out = make(map[string]string, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ if in.NodeSelector != nil {
+ in, out := &in.NodeSelector, &out.NodeSelector
+ *out = make(map[string]string, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ if in.RealTimeKernel != nil {
+ in, out := &in.RealTimeKernel, &out.RealTimeKernel
+ *out = new(RealTimeKernel)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.AdditionalKernelArgs != nil {
+ in, out := &in.AdditionalKernelArgs, &out.AdditionalKernelArgs
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.NUMA != nil {
+ in, out := &in.NUMA, &out.NUMA
+ *out = new(NUMA)
+ (*in).DeepCopyInto(*out)
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PerformanceProfileSpec.
+func (in *PerformanceProfileSpec) DeepCopy() *PerformanceProfileSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(PerformanceProfileSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PerformanceProfileStatus) DeepCopyInto(out *PerformanceProfileStatus) {
+ *out = *in
+ if in.Conditions != nil {
+ in, out := &in.Conditions, &out.Conditions
+ *out = make([]v1.Condition, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.Tuned != nil {
+ in, out := &in.Tuned, &out.Tuned
+ *out = new(string)
+ **out = **in
+ }
+ if in.RuntimeClass != nil {
+ in, out := &in.RuntimeClass, &out.RuntimeClass
+ *out = new(string)
+ **out = **in
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PerformanceProfileStatus.
+func (in *PerformanceProfileStatus) DeepCopy() *PerformanceProfileStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(PerformanceProfileStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RealTimeKernel) DeepCopyInto(out *RealTimeKernel) {
+ *out = *in
+ if in.Enabled != nil {
+ in, out := &in.Enabled, &out.Enabled
+ *out = new(bool)
+ **out = **in
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RealTimeKernel.
+func (in *RealTimeKernel) DeepCopy() *RealTimeKernel {
+ if in == nil {
+ return nil
+ }
+ out := new(RealTimeKernel)
+ in.DeepCopyInto(out)
+ return out
+}
diff --git a/pkg/apis/pao/v2/groupversion_info.go b/pkg/apis/pao/v2/groupversion_info.go
new file mode 100644
index 000000000..694319047
--- /dev/null
+++ b/pkg/apis/pao/v2/groupversion_info.go
@@ -0,0 +1,36 @@
+/*
+
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package v2 contains API Schema definitions for the performance v2 API group
+// +kubebuilder:object:generate=true
+// +groupName=performance.openshift.io
+package v2
+
+import (
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ "sigs.k8s.io/controller-runtime/pkg/scheme"
+)
+
+var (
+ // GroupVersion is group version used to register these objects
+ GroupVersion = schema.GroupVersion{Group: "performance.openshift.io", Version: "v2"}
+
+ // SchemeBuilder is used to add go types to the GroupVersionKind scheme
+ SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion}
+
+ // AddToScheme adds the types in this group-version to the given scheme.
+ AddToScheme = SchemeBuilder.AddToScheme
+)
diff --git a/pkg/apis/pao/v2/performanceprofile_conversion.go b/pkg/apis/pao/v2/performanceprofile_conversion.go
new file mode 100644
index 000000000..a2292a746
--- /dev/null
+++ b/pkg/apis/pao/v2/performanceprofile_conversion.go
@@ -0,0 +1,293 @@
+package v2
+
+import (
+ "k8s.io/utils/pointer"
+
+ v1 "github.com/openshift/cluster-node-tuning-operator/pkg/apis/pao/v1"
+ conditionsv1 "github.com/openshift/custom-resource-status/conditions/v1"
+
+ "sigs.k8s.io/controller-runtime/pkg/conversion"
+)
+
+// ConvertTo converts this PerformanceProfile to the Hub version (v1).
+func (curr *PerformanceProfile) ConvertTo(dstRaw conversion.Hub) error {
+ dst := dstRaw.(*v1.PerformanceProfile)
+
+ // ObjectMeta
+ dst.ObjectMeta = curr.ObjectMeta
+
+ // Spec
+ if curr.Spec.CPU != nil {
+ dst.Spec.CPU = new(v1.CPU)
+
+ if curr.Spec.CPU.Reserved != nil {
+ reserved := v1.CPUSet(*curr.Spec.CPU.Reserved)
+ dst.Spec.CPU.Reserved = &reserved
+ }
+ if curr.Spec.CPU.Isolated != nil {
+ isolated := v1.CPUSet(*curr.Spec.CPU.Isolated)
+ dst.Spec.CPU.Isolated = &isolated
+ }
+ if curr.Spec.CPU.BalanceIsolated != nil {
+ dst.Spec.CPU.BalanceIsolated = pointer.BoolPtr(*curr.Spec.CPU.BalanceIsolated)
+ }
+ }
+
+ if curr.Spec.HugePages != nil {
+ dst.Spec.HugePages = new(v1.HugePages)
+
+ if curr.Spec.HugePages.DefaultHugePagesSize != nil {
+ defaultHugePagesSize := v1.HugePageSize(*curr.Spec.HugePages.DefaultHugePagesSize)
+ dst.Spec.HugePages.DefaultHugePagesSize = &defaultHugePagesSize
+ }
+
+ if curr.Spec.HugePages.Pages != nil {
+ dst.Spec.HugePages.Pages = make([]v1.HugePage, len(curr.Spec.HugePages.Pages))
+
+ for i, p := range curr.Spec.HugePages.Pages {
+ dst.Spec.HugePages.Pages[i] = v1.HugePage{
+ Size: v1.HugePageSize(p.Size), Count: p.Count,
+ }
+ if p.Node != nil {
+ dst.Spec.HugePages.Pages[i].Node = pointer.Int32Ptr(*p.Node)
+ }
+ }
+ }
+ }
+
+ if curr.Spec.MachineConfigLabel != nil {
+ dst.Spec.MachineConfigLabel = make(map[string]string)
+ for k, v := range curr.Spec.MachineConfigLabel {
+ dst.Spec.MachineConfigLabel[k] = v
+ }
+ }
+
+ if curr.Spec.MachineConfigPoolSelector != nil {
+ dst.Spec.MachineConfigPoolSelector = make(map[string]string)
+ for k, v := range curr.Spec.MachineConfigPoolSelector {
+ dst.Spec.MachineConfigPoolSelector[k] = v
+ }
+ }
+
+ if curr.Spec.NodeSelector != nil {
+ dst.Spec.NodeSelector = make(map[string]string)
+ for k, v := range curr.Spec.NodeSelector {
+ dst.Spec.NodeSelector[k] = v
+ }
+ }
+
+ if curr.Spec.RealTimeKernel != nil {
+ dst.Spec.RealTimeKernel = new(v1.RealTimeKernel)
+
+ if curr.Spec.RealTimeKernel.Enabled != nil {
+ dst.Spec.RealTimeKernel.Enabled = pointer.BoolPtr(*curr.Spec.RealTimeKernel.Enabled)
+ }
+ }
+
+ if curr.Spec.AdditionalKernelArgs != nil {
+ dst.Spec.AdditionalKernelArgs = make([]string, len(curr.Spec.AdditionalKernelArgs))
+ copy(dst.Spec.AdditionalKernelArgs, curr.Spec.AdditionalKernelArgs)
+ }
+
+ if curr.Spec.NUMA != nil {
+ dst.Spec.NUMA = new(v1.NUMA)
+
+ if curr.Spec.NUMA.TopologyPolicy != nil {
+ dst.Spec.NUMA.TopologyPolicy = pointer.StringPtr(*curr.Spec.NUMA.TopologyPolicy)
+ }
+ }
+
+ // Convert Net fields
+ if curr.Spec.Net != nil {
+ dst.Spec.Net = new(v1.Net)
+
+ if curr.Spec.Net.UserLevelNetworking != nil {
+ dst.Spec.Net.UserLevelNetworking = pointer.BoolPtr(*curr.Spec.Net.UserLevelNetworking)
+ }
+
+ if curr.Spec.Net.Devices != nil {
+ dst.Spec.Net.Devices = []v1.Device{}
+
+ for _, d := range curr.Spec.Net.Devices {
+ device := v1.Device{}
+
+ if d.VendorID != nil {
+ device.VendorID = pointer.StringPtr(*d.VendorID)
+ }
+
+ if d.DeviceID != nil {
+ device.DeviceID = pointer.StringPtr(*d.DeviceID)
+ }
+
+ if d.InterfaceName != nil {
+ device.InterfaceName = pointer.StringPtr(*d.InterfaceName)
+ }
+
+ dst.Spec.Net.Devices = append(dst.Spec.Net.Devices, device)
+ }
+ }
+ }
+
+ if curr.Spec.GloballyDisableIrqLoadBalancing != nil {
+ dst.Spec.GloballyDisableIrqLoadBalancing = pointer.BoolPtr(*curr.Spec.GloballyDisableIrqLoadBalancing)
+ }
+
+ // Status
+ if curr.Status.Conditions != nil {
+ dst.Status.Conditions = make([]conditionsv1.Condition, len(curr.Status.Conditions))
+ copy(dst.Status.Conditions, curr.Status.Conditions)
+ }
+
+ if curr.Status.Tuned != nil {
+ dst.Status.Tuned = pointer.StringPtr(*curr.Status.Tuned)
+ }
+
+ if curr.Status.RuntimeClass != nil {
+ dst.Status.RuntimeClass = pointer.StringPtr(*curr.Status.RuntimeClass)
+ }
+
+ // +kubebuilder:docs-gen:collapse=rote conversion
+ return nil
+}
+
+// ConvertFrom converts from the Hub version (v1) to this version.
+func (curr *PerformanceProfile) ConvertFrom(srcRaw conversion.Hub) error {
+ src := srcRaw.(*v1.PerformanceProfile)
+
+ // ObjectMeta
+ curr.ObjectMeta = src.ObjectMeta
+
+ // Spec
+ if src.Spec.CPU != nil {
+ curr.Spec.CPU = new(CPU)
+
+ if src.Spec.CPU.Reserved != nil {
+ reserved := CPUSet(*src.Spec.CPU.Reserved)
+ curr.Spec.CPU.Reserved = &reserved
+ }
+ if src.Spec.CPU.Isolated != nil {
+ isolated := CPUSet(*src.Spec.CPU.Isolated)
+ curr.Spec.CPU.Isolated = &isolated
+ }
+ if src.Spec.CPU.BalanceIsolated != nil {
+ curr.Spec.CPU.BalanceIsolated = pointer.BoolPtr(*src.Spec.CPU.BalanceIsolated)
+ }
+ }
+
+ if src.Spec.HugePages != nil {
+ curr.Spec.HugePages = new(HugePages)
+
+ if src.Spec.HugePages.DefaultHugePagesSize != nil {
+ defaultHugePagesSize := HugePageSize(*src.Spec.HugePages.DefaultHugePagesSize)
+ curr.Spec.HugePages.DefaultHugePagesSize = &defaultHugePagesSize
+ }
+
+ if src.Spec.HugePages.Pages != nil {
+ curr.Spec.HugePages.Pages = make([]HugePage, len(src.Spec.HugePages.Pages))
+ for i, p := range src.Spec.HugePages.Pages {
+ curr.Spec.HugePages.Pages[i] = HugePage{
+ Size: HugePageSize(p.Size), Count: p.Count,
+ }
+ if p.Node != nil {
+ curr.Spec.HugePages.Pages[i].Node = pointer.Int32Ptr(*p.Node)
+ }
+ }
+ }
+ }
+
+ if src.Spec.MachineConfigLabel != nil {
+ curr.Spec.MachineConfigLabel = make(map[string]string)
+ for k, v := range src.Spec.MachineConfigLabel {
+ curr.Spec.MachineConfigLabel[k] = v
+ }
+ }
+
+ if src.Spec.MachineConfigPoolSelector != nil {
+ curr.Spec.MachineConfigPoolSelector = make(map[string]string)
+ for k, v := range src.Spec.MachineConfigPoolSelector {
+ curr.Spec.MachineConfigPoolSelector[k] = v
+ }
+ }
+
+ if src.Spec.NodeSelector != nil {
+ curr.Spec.NodeSelector = make(map[string]string)
+ for k, v := range src.Spec.NodeSelector {
+ curr.Spec.NodeSelector[k] = v
+ }
+ }
+
+ if src.Spec.RealTimeKernel != nil {
+ curr.Spec.RealTimeKernel = new(RealTimeKernel)
+
+ if src.Spec.RealTimeKernel.Enabled != nil {
+ curr.Spec.RealTimeKernel.Enabled = pointer.BoolPtr(*src.Spec.RealTimeKernel.Enabled)
+ }
+ }
+
+ if src.Spec.AdditionalKernelArgs != nil {
+ curr.Spec.AdditionalKernelArgs = make([]string, len(src.Spec.AdditionalKernelArgs))
+ copy(curr.Spec.AdditionalKernelArgs, src.Spec.AdditionalKernelArgs)
+ }
+
+ if src.Spec.NUMA != nil {
+ curr.Spec.NUMA = new(NUMA)
+
+ if src.Spec.NUMA.TopologyPolicy != nil {
+ curr.Spec.NUMA.TopologyPolicy = pointer.StringPtr(*src.Spec.NUMA.TopologyPolicy)
+ }
+ }
+
+ // Convert Net fields
+ if src.Spec.Net != nil {
+ curr.Spec.Net = new(Net)
+
+ if src.Spec.Net.UserLevelNetworking != nil {
+ curr.Spec.Net.UserLevelNetworking = pointer.BoolPtr(*src.Spec.Net.UserLevelNetworking)
+ }
+
+ if src.Spec.Net.Devices != nil {
+ curr.Spec.Net.Devices = []Device{}
+
+ for _, d := range src.Spec.Net.Devices {
+ device := Device{}
+
+ if d.VendorID != nil {
+ device.VendorID = pointer.StringPtr(*d.VendorID)
+ }
+
+ if d.DeviceID != nil {
+ device.DeviceID = pointer.StringPtr(*d.DeviceID)
+ }
+
+ if d.InterfaceName != nil {
+ device.InterfaceName = pointer.StringPtr(*d.InterfaceName)
+ }
+
+ curr.Spec.Net.Devices = append(curr.Spec.Net.Devices, device)
+ }
+ }
+ }
+
+ if src.Spec.GloballyDisableIrqLoadBalancing != nil {
+ curr.Spec.GloballyDisableIrqLoadBalancing = pointer.BoolPtr(*src.Spec.GloballyDisableIrqLoadBalancing)
+ } else { // set to true by default
+ curr.Spec.GloballyDisableIrqLoadBalancing = pointer.BoolPtr(true)
+ }
+
+ // Status
+ if src.Status.Conditions != nil {
+ curr.Status.Conditions = make([]conditionsv1.Condition, len(src.Status.Conditions))
+ copy(curr.Status.Conditions, src.Status.Conditions)
+ }
+
+ if src.Status.Tuned != nil {
+ curr.Status.Tuned = pointer.StringPtr(*src.Status.Tuned)
+ }
+
+ if src.Status.RuntimeClass != nil {
+ curr.Status.RuntimeClass = pointer.StringPtr(*src.Status.RuntimeClass)
+ }
+
+ // +kubebuilder:docs-gen:collapse=rote conversion
+ return nil
+}
diff --git a/pkg/apis/pao/v2/performanceprofile_types.go b/pkg/apis/pao/v2/performanceprofile_types.go
new file mode 100644
index 000000000..9022020a9
--- /dev/null
+++ b/pkg/apis/pao/v2/performanceprofile_types.go
@@ -0,0 +1,198 @@
+/*
+
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v2
+
+import (
+ conditionsv1 "github.com/openshift/custom-resource-status/conditions/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// PerformanceProfilePauseAnnotation allows an admin to suspend the operator's
+// reconcile loop in order to perform manual changes to performance profile owned
+// objects. (Annotation key only; how the value is interpreted is not visible
+// here — presumably any presence pauses reconciliation. TODO(review): confirm
+// against the reconciler.)
+const PerformanceProfilePauseAnnotation = "performance.openshift.io/pause-reconcile"
+
+// PerformanceProfileSpec defines the desired state of PerformanceProfile.
+type PerformanceProfileSpec struct {
+	// CPU defines a set of CPU related parameters.
+	CPU *CPU `json:"cpu"`
+	// HugePages defines a set of huge pages related parameters.
+	// It is possible to set huge pages with multiple size values at the same time.
+	// For example, hugepages can be set with 1G and 2M, both values will be set on the node by the performance-addon-operator.
+	// It is important to notice that setting hugepages default size to 1G will remove all 2M related
+	// folders from the node and it will be impossible to configure 2M hugepages under the node.
+	HugePages *HugePages `json:"hugepages,omitempty"`
+	// MachineConfigLabel defines the label to add to the MachineConfigs the operator creates. It has to be
+	// used in the MachineConfigSelector of the MachineConfigPool which targets this performance profile.
+	// Defaults to "machineconfiguration.openshift.io/role=<role>"
+	// +optional
+	MachineConfigLabel map[string]string `json:"machineConfigLabel,omitempty"`
+	// MachineConfigPoolSelector defines the MachineConfigPool label to use in the MachineConfigPoolSelector
+	// of resources like KubeletConfigs created by the operator.
+	// Defaults to "machineconfiguration.openshift.io/role=<role>"
+	// +optional
+	MachineConfigPoolSelector map[string]string `json:"machineConfigPoolSelector,omitempty"`
+	// NodeSelector defines the Node label to use in the NodeSelectors of resources like Tuned created by the operator.
+	// It most likely should, but does not have to match the node label in the NodeSelector of the MachineConfigPool
+	// which targets this performance profile.
+	// In the case when machineConfigLabels or machineConfigPoolSelector are not set, we are expecting a certain NodeSelector format
+	// "<domain>/<role>": "" in order to be able to calculate the default values for the former mentioned fields.
+	NodeSelector map[string]string `json:"nodeSelector"`
+	// RealTimeKernel defines a set of real time kernel related parameters. RT kernel won't be installed when not set.
+	RealTimeKernel *RealTimeKernel `json:"realTimeKernel,omitempty"`
+	// AdditionalKernelArgs lists additional kernel arguments.
+	// +optional
+	AdditionalKernelArgs []string `json:"additionalKernelArgs,omitempty"`
+	// NUMA defines options related to topology aware affinities
+	// +optional
+	NUMA *NUMA `json:"numa,omitempty"`
+	// Net defines a set of network related features
+	// +optional
+	Net *Net `json:"net,omitempty"`
+	// GloballyDisableIrqLoadBalancing toggles whether IRQ load balancing will be disabled for the Isolated CPU set.
+	// When the option is set to "true" it disables IRQs load balancing for the Isolated CPU set.
+	// Setting the option to "false" allows the IRQs to be balanced across all CPUs, however the IRQs load balancing
+	// can be disabled per pod CPUs when using irq-load-balancing.crio.io/cpu-quota.crio.io annotations.
+	// Defaults to "false"
+	// +optional
+	GloballyDisableIrqLoadBalancing *bool `json:"globallyDisableIrqLoadBalancing,omitempty"`
+}
+
+// CPUSet defines the set of CPUs as a kernel-style cpu-list string, e.g. "0-3,8-11".
+type CPUSet string
+
+// CPU defines a set of CPU related features.
+type CPU struct {
+	// Reserved defines a set of CPUs that will not be used for any container workloads initiated by kubelet.
+	Reserved *CPUSet `json:"reserved"`
+	// Isolated defines a set of CPUs that will be used to give to application threads the most execution time possible,
+	// which means removing as many extraneous tasks off a CPU as possible.
+	// It is important to notice the CPU manager can choose any CPU to run the workload
+	// except the reserved CPUs. In order to guarantee that your workload will run on the isolated CPU:
+	// 1. The union of reserved CPUs and isolated CPUs should include all online CPUs
+	// 2. The isolated CPUs field should be the complementary to reserved CPUs field
+	Isolated *CPUSet `json:"isolated"`
+	// BalanceIsolated toggles whether or not the Isolated CPU set is eligible for load balancing work loads.
+	// When this option is set to "false", the Isolated CPU set will be static, meaning workloads have to
+	// explicitly assign each thread to a specific cpu in order to work across multiple CPUs.
+	// Setting this to "true" allows workloads to be balanced across CPUs.
+	// Setting this to "false" offers the most predictable performance for guaranteed workloads, but it
+	// offloads the complexity of cpu load balancing to the application.
+	// Defaults to "true"
+	// +optional
+	BalanceIsolated *bool `json:"balanceIsolated,omitempty"`
+}
+
+// HugePageSize defines size of huge pages, can be 2M or 1G.
+// Only "2M" and "1G" are accepted by the admission validation.
+type HugePageSize string
+
+// HugePages defines a set of huge pages that we want to allocate at boot.
+type HugePages struct {
+	// DefaultHugePagesSize defines huge pages default size under kernel boot parameters.
+	DefaultHugePagesSize *HugePageSize `json:"defaultHugepagesSize,omitempty"`
+	// Pages defines huge pages that we want to allocate at boot time.
+	// Duplicate (size, node) entries are rejected by validation.
+	Pages []HugePage `json:"pages,omitempty"`
+}
+
+// HugePage defines the number of allocated huge pages of the specific size.
+type HugePage struct {
+	// Size defines huge page size, maps to the 'hugepagesz' kernel boot parameter.
+	Size HugePageSize `json:"size,omitempty"`
+	// Count defines amount of huge pages, maps to the 'hugepages' kernel boot parameter.
+	Count int32 `json:"count,omitempty"`
+	// Node defines the NUMA node where hugepages will be allocated,
+	// if not specified, pages will be allocated equally between NUMA nodes
+	// +optional
+	Node *int32 `json:"node,omitempty"`
+}
+
+// NUMA defines parameters related to topology awareness and affinity.
+type NUMA struct {
+	// Name of the policy applied when TopologyManager is enabled
+	// Operator defaults to "best-effort"
+	// Validation accepts the kubelet topology manager policies:
+	// "none", "best-effort", "restricted", "single-numa-node".
+	// +optional
+	TopologyPolicy *string `json:"topologyPolicy,omitempty"`
+}
+
+// Net defines a set of network related features
+type Net struct {
+	// UserLevelNetworking when enabled - sets either all or specified network devices queue size to the amount of reserved CPUs. Defaults to "false".
+	// Requires spec.cpu.reserved to be set (enforced by validation).
+	UserLevelNetworking *bool `json:"userLevelNetworking,omitempty"`
+	// Devices contains a list of network device representations that will be
+	// set with a netqueue count equal to CPU.Reserved .
+	// If no devices are specified then the default is all devices.
+	Devices []Device `json:"devices,omitempty"`
+}
+
+// Device defines a way to represent a network device in several options:
+// device name, vendor ID, model ID, PCI path and MAC address
+// NOTE(review): only interface name, vendor ID and device ID fields exist below —
+// confirm whether PCI path / MAC address matching is actually supported.
+type Device struct {
+	// Network device name to be matched. It uses a syntax of shell-style wildcards which are either positive or negative.
+	// +optional
+	InterfaceName *string `json:"interfaceName,omitempty"`
+	// Network device vendor ID represented as a 16 bit hexadecimal number (e.g. "0x1af4").
+	// +optional
+	VendorID *string `json:"vendorID,omitempty"`
+	// Network device ID (model) represented as a 16 bit hexadecimal number.
+	// Validation requires VendorID to also be set when DeviceID is used.
+	// +optional
+	DeviceID *string `json:"deviceID,omitempty"`
+}
+
+// RealTimeKernel defines the set of parameters relevant for the real time kernel.
+// A nil RealTimeKernel in the spec means the RT kernel won't be installed.
+type RealTimeKernel struct {
+	// Enabled defines if the real time kernel packages should be installed. Defaults to "false"
+	Enabled *bool `json:"enabled,omitempty"`
+}
+
+// PerformanceProfileStatus defines the observed state of PerformanceProfile.
+type PerformanceProfileStatus struct {
+	// Conditions represents the latest available observations of current state.
+	// +optional
+	Conditions []conditionsv1.Condition `json:"conditions,omitempty"`
+	// Tuned points to the Tuned custom resource object that contains the tuning values generated by this operator.
+	// +optional
+	Tuned *string `json:"tuned,omitempty"`
+	// RuntimeClass contains the name of the RuntimeClass resource created by the operator.
+	// NOTE(review): unlike the sibling fields this one carries no "+optional" marker
+	// even though it is a pointer with omitempty — confirm whether the generated CRD
+	// should mark it optional.
+	RuntimeClass *string `json:"runtimeClass,omitempty"`
+}
+
+// +kubebuilder:object:root=true
+// +kubebuilder:subresource:status
+// +kubebuilder:resource:path=performanceprofiles,scope=Cluster
+// +kubebuilder:storageversion
+
+// PerformanceProfile is the Schema for the performanceprofiles API.
+// v2 is the storage version of the API (see the storageversion marker above).
+type PerformanceProfile struct {
+	metav1.TypeMeta   `json:",inline"`
+	metav1.ObjectMeta `json:"metadata,omitempty"`
+
+	Spec   PerformanceProfileSpec   `json:"spec,omitempty"`
+	Status PerformanceProfileStatus `json:"status,omitempty"`
+}
+
+// +kubebuilder:object:root=true
+
+// PerformanceProfileList contains a list of PerformanceProfile
+type PerformanceProfileList struct {
+	metav1.TypeMeta `json:",inline"`
+	metav1.ListMeta `json:"metadata,omitempty"`
+	Items           []PerformanceProfile `json:"items"`
+}
+
+// init registers both object kinds with the package scheme builder so
+// clients generated for this group/version can encode and decode them.
+func init() {
+	SchemeBuilder.Register(&PerformanceProfile{}, &PerformanceProfileList{})
+}
diff --git a/pkg/apis/pao/v2/performanceprofile_validation.go b/pkg/apis/pao/v2/performanceprofile_validation.go
new file mode 100644
index 000000000..baf5601f6
--- /dev/null
+++ b/pkg/apis/pao/v2/performanceprofile_validation.go
@@ -0,0 +1,291 @@
+/*
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+*/
+
+package v2
+
+import (
+ "context"
+ "fmt"
+ "reflect"
+ "regexp"
+
+ "github.com/openshift/cluster-node-tuning-operator/pkg/pao/controller/performanceprofile/components"
+
+ apierrors "k8s.io/apimachinery/pkg/api/errors"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ "k8s.io/apimachinery/pkg/util/validation/field"
+ "k8s.io/klog"
+ kubeletconfigv1beta1 "k8s.io/kubelet/config/v1beta1"
+)
+
+// Supported hugepage size strings (x86_64); these are the only values accepted
+// for HugePages.DefaultHugePagesSize and HugePage.Size by the validators below.
+const (
+	hugepagesSize2M = "2M"
+	hugepagesSize1G = "1G"
+)
+
+// ValidateCreate implements webhook.Validator so a webhook will be registered for the type.
+// Create and update share identical rules, so both delegate to validateCreateOrUpdate.
+func (r *PerformanceProfile) ValidateCreate() error {
+	klog.Infof("Create validation for the performance profile %q", r.Name)
+
+	return r.validateCreateOrUpdate()
+}
+
+// ValidateUpdate implements webhook.Validator so a webhook will be registered for the type.
+// The previous object state (old) is not inspected; validation depends only on the new state.
+func (r *PerformanceProfile) ValidateUpdate(old runtime.Object) error {
+	klog.Infof("Update validation for the performance profile %q", r.Name)
+
+	return r.validateCreateOrUpdate()
+}
+
+// validateCreateOrUpdate runs the full admission validation: node selector
+// uniqueness across all existing profiles plus the per-object field checks.
+// It returns an Invalid API error aggregating every violation, an internal
+// error when listing existing profiles fails, or nil on success.
+func (r *PerformanceProfile) validateCreateOrUpdate() error {
+	// Fetch all existing profiles so node selector duplication can be detected.
+	ppList := &PerformanceProfileList{}
+	if err := validatorClient.List(context.TODO(), ppList); err != nil {
+		return apierrors.NewInternalError(err)
+	}
+
+	var allErrs field.ErrorList
+	allErrs = append(allErrs, r.validateNodeSelectorDuplication(ppList)...)
+	allErrs = append(allErrs, r.validateFields()...)
+
+	if len(allErrs) > 0 {
+		return apierrors.NewInvalid(
+			schema.GroupKind{Group: "performance.openshift.io", Kind: "PerformanceProfile"},
+			r.Name, allErrs)
+	}
+	return nil
+}
+
+// ValidateDelete implements webhook.Validator so a webhook will be registered for the type.
+// Deletion is currently always allowed.
+func (r *PerformanceProfile) ValidateDelete() error {
+	klog.Infof("Delete validation for the performance profile %q", r.Name)
+
+	// TODO(user): fill in your validation logic upon object deletion.
+	return nil
+}
+
+// validateNodeSelectorDuplication rejects the profile when any other existing
+// profile in ppList targets exactly the same node selector.
+func (r *PerformanceProfile) validateNodeSelectorDuplication(ppList *PerformanceProfileList) field.ErrorList {
+	var allErrs field.ErrorList
+
+	for i := range ppList.Items {
+		other := &ppList.Items[i]
+		// exclude the current profile from the check
+		if other.Name == r.Name {
+			continue
+		}
+		if !reflect.DeepEqual(other.Spec.NodeSelector, r.Spec.NodeSelector) {
+			continue
+		}
+		allErrs = append(allErrs, field.Invalid(field.NewPath("spec.nodeSelector"), r.Spec.NodeSelector, fmt.Sprintf("the profile has the same node selector as the performance profile %q", other.Name)))
+	}
+
+	return allErrs
+}
+
+// validateFields aggregates every per-section validation of the object itself
+// (no API calls). The validators run in a fixed order so the resulting error
+// list ordering is stable.
+func (r *PerformanceProfile) validateFields() field.ErrorList {
+	var allErrs field.ErrorList
+
+	validators := []func() field.ErrorList{
+		r.validateCPUs,
+		r.validateSelectors,
+		r.validateHugePages,
+		r.validateNUMA,
+		r.validateNet,
+	}
+	for _, validate := range validators {
+		allErrs = append(allErrs, validate()...)
+	}
+
+	return allErrs
+}
+
+// validateCPUs checks the CPU section: both the reserved and the isolated sets
+// must be present, parse as valid cpu-lists, not overlap, and the isolated set
+// must be non-empty.
+func (r *PerformanceProfile) validateCPUs() field.ErrorList {
+	var allErrs field.ErrorList
+
+	cpu := r.Spec.CPU
+	if cpu == nil {
+		return append(allErrs, field.Required(field.NewPath("spec.cpu"), "cpu section required"))
+	}
+
+	if cpu.Isolated == nil {
+		allErrs = append(allErrs, field.Required(field.NewPath("spec.cpu.isolated"), "isolated CPUs required"))
+	}
+	if cpu.Reserved == nil {
+		allErrs = append(allErrs, field.Required(field.NewPath("spec.cpu.reserved"), "reserved CPUs required"))
+	}
+	// the cross-set checks below need both sets
+	if cpu.Isolated == nil || cpu.Reserved == nil {
+		return allErrs
+	}
+
+	cpuLists, err := components.NewCPULists(string(*cpu.Reserved), string(*cpu.Isolated))
+	if err != nil {
+		allErrs = append(allErrs, field.InternalError(field.NewPath("spec.cpu"), err))
+	}
+	if cpuLists != nil {
+		if cpuLists.CountIsolated() == 0 {
+			allErrs = append(allErrs, field.Invalid(field.NewPath("spec.cpu.isolated"), cpu.Isolated, "isolated CPUs can not be empty"))
+		}
+		if overlap := cpuLists.Intersect(); len(overlap) != 0 {
+			allErrs = append(allErrs, field.Invalid(field.NewPath("spec.cpu"), cpu, fmt.Sprintf("reserved and isolated cpus overlap: %v", overlap)))
+		}
+	}
+
+	return allErrs
+}
+
+// validateSelectors validates the machine config label, the machine config pool
+// selector and the node selector:
+//   - machineConfigLabel and machineConfigPoolSelector may each contain at most one entry,
+//   - the node selector is mandatory and must contain exactly one entry,
+//   - when either machineConfigLabel or machineConfigPoolSelector is omitted, the
+//     node selector key must have the "<domain>/<role>" form, because the defaults
+//     for the omitted fields are derived from it.
+func (r *PerformanceProfile) validateSelectors() field.ErrorList {
+	var allErrs field.ErrorList
+
+	// len(nil map) == 0, so no explicit nil checks are needed here
+	if len(r.Spec.MachineConfigLabel) > 1 {
+		allErrs = append(allErrs, field.Invalid(field.NewPath("spec.machineConfigLabel"), r.Spec.MachineConfigLabel, "you should provide only 1 MachineConfigLabel"))
+	}
+
+	if len(r.Spec.MachineConfigPoolSelector) > 1 {
+		// BUG FIX: this error used to report r.Spec.MachineConfigLabel as the
+		// offending value instead of the pool selector being validated.
+		allErrs = append(allErrs, field.Invalid(field.NewPath("spec.machineConfigPoolSelector"), r.Spec.MachineConfigPoolSelector, "you should provide only 1 MachineConfigPoolSelector"))
+	}
+
+	if r.Spec.NodeSelector == nil {
+		allErrs = append(allErrs, field.Required(field.NewPath("spec.nodeSelector"), "the nodeSelector required"))
+	}
+
+	if len(r.Spec.NodeSelector) > 1 {
+		// typo fix: "ony" -> "only"
+		allErrs = append(allErrs, field.Invalid(field.NewPath("spec.nodeSelector"), r.Spec.NodeSelector, "you should provide only 1 NodeSelector"))
+	}
+
+	// in case MachineConfigLabels or MachineConfigPoolSelector are not set, we expect a certain format (domain/role)
+	// on the NodeSelector in order to be able to calculate the default values for the former mentioned fields.
+	if r.Spec.MachineConfigLabel == nil || r.Spec.MachineConfigPoolSelector == nil {
+		k, _ := components.GetFirstKeyAndValue(r.Spec.NodeSelector)
+		if _, _, err := components.SplitLabelKey(k); err != nil {
+			allErrs = append(allErrs, field.Invalid(
+				field.NewPath("spec.nodeSelector"),
+				r.Spec.NodeSelector,
+				"machineConfigLabels or machineConfigPoolSelector are not set, but we can not set it automatically because of an invalid NodeSelector label key that can't be split into domain/role"))
+		}
+	}
+
+	return allErrs
+}
+
+// validateHugePages checks the hugepages section: the default size and every
+// configured page must use one of the supported sizes, and duplicate page
+// entries are rejected.
+func (r *PerformanceProfile) validateHugePages() field.ErrorList {
+	var allErrs field.ErrorList
+
+	hugePages := r.Spec.HugePages
+	if hugePages == nil {
+		return allErrs
+	}
+
+	// validate that default hugepages size has correct value, currently we support only 2M and 1G(x86_64 architecture)
+	if defaultSizePtr := hugePages.DefaultHugePagesSize; defaultSizePtr != nil {
+		if *defaultSizePtr != hugepagesSize1G && *defaultSizePtr != hugepagesSize2M {
+			allErrs = append(allErrs, field.Invalid(field.NewPath("spec.hugepages.defaultHugepagesSize"), hugePages.DefaultHugePagesSize, fmt.Sprintf("hugepages default size should be equal to %q or %q", hugepagesSize1G, hugepagesSize2M)))
+		}
+	}
+
+	for i := range hugePages.Pages {
+		page := hugePages.Pages[i]
+		if page.Size != hugepagesSize1G && page.Size != hugepagesSize2M {
+			allErrs = append(allErrs, field.Invalid(field.NewPath("spec.hugepages.pages"), hugePages.Pages, fmt.Sprintf("the page size should be equal to %q or %q", hugepagesSize1G, hugepagesSize2M)))
+		}
+
+		// only compare against the remaining entries so each duplicate pair is reported once
+		allErrs = append(allErrs, r.validatePageDuplication(&page, hugePages.Pages[i+1:])...)
+	}
+
+	return allErrs
+}
+
+// validatePageDuplication reports an error for every entry in pages that
+// duplicates page: same size with the same NUMA node, or same size when both
+// entries omit the node. Entries with the same size but only one node set are
+// not considered duplicates.
+func (r *PerformanceProfile) validatePageDuplication(page *HugePage, pages []HugePage) field.ErrorList {
+	var allErrs field.ErrorList
+
+	for _, other := range pages {
+		if other.Size != page.Size {
+			continue
+		}
+
+		switch {
+		case page.Node == nil && other.Node == nil:
+			// both entries are node-agnostic and share a size — duplicate
+			allErrs = append(allErrs, field.Invalid(field.NewPath("spec.hugepages.pages"), r.Spec.HugePages.Pages, fmt.Sprintf("the page with the size %q and without the specified NUMA node, has duplication", page.Size)))
+		case page.Node != nil && other.Node != nil && *page.Node == *other.Node:
+			// same size pinned to the same NUMA node — duplicate
+			allErrs = append(allErrs, field.Invalid(field.NewPath("spec.hugepages.pages"), r.Spec.HugePages.Pages, fmt.Sprintf("the page with the size %q and with specified NUMA node %d, has duplication", page.Size, *page.Node)))
+		}
+	}
+
+	return allErrs
+}
+
+// validateNUMA checks that the topology manager policy, when set, is one of
+// the values recognized by the kubelet.
+func (r *PerformanceProfile) validateNUMA() field.ErrorList {
+	var allErrs field.ErrorList
+
+	if r.Spec.NUMA == nil || r.Spec.NUMA.TopologyPolicy == nil {
+		return allErrs
+	}
+
+	// validate NUMA topology policy matches allowed values
+	switch *r.Spec.NUMA.TopologyPolicy {
+	case kubeletconfigv1beta1.NoneTopologyManagerPolicy,
+		kubeletconfigv1beta1.BestEffortTopologyManagerPolicy,
+		kubeletconfigv1beta1.RestrictedTopologyManagerPolicy,
+		kubeletconfigv1beta1.SingleNumaNodeTopologyManagerPolicy:
+		// recognized kubelet policy — nothing to report
+	default:
+		allErrs = append(allErrs, field.Invalid(field.NewPath("spec.numa.topologyPolicy"), r.Spec.NUMA.TopologyPolicy, "unrecognized value for topologyPolicy"))
+	}
+
+	return allErrs
+}
+
+// validateNet validates the network device tuning section:
+//   - userLevelNetworking requires reserved CPUs (they size the device queues),
+//   - a provided interface name must be non-empty,
+//   - vendor/device IDs must be 16 bit hexadecimal values of the form 0xNNNN,
+//   - a device (model) ID may only be given together with a vendor ID.
+func (r *PerformanceProfile) validateNet() field.ErrorList {
+	var allErrs field.ErrorList
+
+	if r.Spec.Net == nil {
+		return allErrs
+	}
+
+	// BUG FIX: guard against a nil Spec.CPU before dereferencing it. validateCPUs
+	// reports the missing cpu section separately; this must not panic here.
+	// (Also fixes the "specifiying" typo in the message.)
+	if r.Spec.Net.UserLevelNetworking != nil && *r.Spec.Net.UserLevelNetworking &&
+		(r.Spec.CPU == nil || r.Spec.CPU.Reserved == nil) {
+		allErrs = append(allErrs, field.Invalid(field.NewPath("spec.net"), r.Spec.Net, "can not set network devices queues count without specifying spec.cpu.reserved"))
+	}
+
+	for _, device := range r.Spec.Net.Devices {
+		if device.InterfaceName != nil && *device.InterfaceName == "" {
+			allErrs = append(allErrs, field.Invalid(field.NewPath("spec.net.devices"), r.Spec.Net.Devices, "device name cannot be empty"))
+		}
+		if device.VendorID != nil && !isValid16bitsHexID(*device.VendorID) {
+			allErrs = append(allErrs, field.Invalid(field.NewPath("spec.net.devices"), r.Spec.Net.Devices, fmt.Sprintf("device vendor ID %s has an invalid format. Vendor ID should be represented as 0x<4 hexadecimal digits> (16 bit representation)", *device.VendorID)))
+		}
+		if device.DeviceID != nil && !isValid16bitsHexID(*device.DeviceID) {
+			allErrs = append(allErrs, field.Invalid(field.NewPath("spec.net.devices"), r.Spec.Net.Devices, fmt.Sprintf("device model ID %s has an invalid format. Model ID should be represented as 0x<4 hexadecimal digits> (16 bit representation)", *device.DeviceID)))
+		}
+		if device.DeviceID != nil && device.VendorID == nil {
+			// fmt.Sprintf dropped: the message had no format verbs (flagged by go vet)
+			allErrs = append(allErrs, field.Invalid(field.NewPath("spec.net.devices"), r.Spec.Net.Devices, "device model ID can not be used without specifying the device vendor ID."))
+		}
+	}
+	return allErrs
+}
+
+func isValid16bitsHexID(v string) bool {
+ re := regexp.MustCompile("^0x[0-9a-fA-F]+$")
+ return re.MatchString(v) && len(v) < 7
+}
diff --git a/pkg/apis/pao/v2/performanceprofile_validation_suite_test.go b/pkg/apis/pao/v2/performanceprofile_validation_suite_test.go
new file mode 100644
index 000000000..7615c7990
--- /dev/null
+++ b/pkg/apis/pao/v2/performanceprofile_validation_suite_test.go
@@ -0,0 +1,13 @@
+package v2
+
+import (
+ "testing"
+
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+)
+
+// TestProfile wires the Ginkgo specs of this package into `go test`.
+func TestProfile(t *testing.T) {
+	RegisterFailHandler(Fail)
+	RunSpecs(t, "Profile Suite")
+}
diff --git a/pkg/apis/pao/v2/performanceprofile_validation_test.go b/pkg/apis/pao/v2/performanceprofile_validation_test.go
new file mode 100644
index 000000000..5ae98e495
--- /dev/null
+++ b/pkg/apis/pao/v2/performanceprofile_validation_test.go
@@ -0,0 +1,322 @@
+package v2
+
+import (
+ "fmt"
+
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/utils/pointer"
+)
+
+const (
+	// NodeSelectorRole is the node role used by tests that need a valid
+	// "<domain>/<role>" node selector key (see setValidNodeSelector).
+	NodeSelectorRole = "barRole"
+)
+
+// Fixture values shared by the validation specs below.
+const (
+	// HugePageSize defines the huge page size used for tests
+	HugePageSize1G = HugePageSize("1G")
+	// HugePagesCount defines the huge page count used for tests
+	HugePagesCount = 4
+	// IsolatedCPUs defines the isolated CPU set used for tests
+	IsolatedCPUs = CPUSet("4-7")
+	// ReservedCPUs defines the reserved CPU set used for tests
+	ReservedCPUs = CPUSet("0-3")
+	// SingleNUMAPolicy defines the topologyManager policy used for tests
+	SingleNUMAPolicy = "single-numa-node"
+
+	//MachineConfigLabelKey defines the MachineConfig label key of the test profile
+	MachineConfigLabelKey = "mcKey"
+	//MachineConfigLabelValue defines the MachineConfig label value of the test profile
+	MachineConfigLabelValue = "mcValue"
+	//MachineConfigPoolLabelKey defines the MachineConfigPool label key of the test profile
+	MachineConfigPoolLabelKey = "mcpKey"
+	//MachineConfigPoolLabelValue defines the MachineConfigPool label value of the test profile
+	MachineConfigPoolLabelValue = "mcpValue"
+
+	//NetDeviceName defines a net device name for the test profile
+	NetDeviceName = "enp0s4"
+	//NetDeviceVendorID defines a net device vendor ID for the test profile
+	NetDeviceVendorID = "0x1af4"
+	//NetDeviceModelID defines a net device model ID for the test profile
+	NetDeviceModelID = "0x1000"
+)
+
+// NewPerformanceProfile returns a new, fully populated and valid performance
+// profile object to be used as the starting fixture for the tests.
+func NewPerformanceProfile(name string) *PerformanceProfile {
+	size := HugePageSize1G
+	isolatedCPUs := IsolatedCPUs
+	reservedCPUs := ReservedCPUs
+	numaPolicy := SingleNUMAPolicy
+
+	netDeviceName := NetDeviceName
+	netDeviceVendorID := NetDeviceVendorID
+	netDeviceModelID := NetDeviceModelID
+
+	return &PerformanceProfile{
+		TypeMeta: metav1.TypeMeta{Kind: "PerformanceProfile"},
+		ObjectMeta: metav1.ObjectMeta{
+			Name: name,
+			// NOTE(review): not a well-formed RFC 4122 UUID (the last group has
+			// 13 digits instead of 12); harmless for unit tests, but confirm intent.
+			UID:  "11111111-1111-1111-1111-1111111111111",
+		},
+		Spec: PerformanceProfileSpec{
+			CPU: &CPU{
+				Isolated: &isolatedCPUs,
+				Reserved: &reservedCPUs,
+			},
+			HugePages: &HugePages{
+				DefaultHugePagesSize: &size,
+				Pages: []HugePage{
+					{
+						Count: HugePagesCount,
+						Size:  size,
+					},
+				},
+			},
+			RealTimeKernel: &RealTimeKernel{
+				Enabled: pointer.BoolPtr(true),
+			},
+			NUMA: &NUMA{
+				TopologyPolicy: &numaPolicy,
+			},
+			Net: &Net{
+				UserLevelNetworking: pointer.BoolPtr(true),
+				Devices: []Device{
+					{
+						InterfaceName: &netDeviceName,
+						VendorID:      &netDeviceVendorID,
+						DeviceID:      &netDeviceModelID,
+					},
+				},
+			},
+			MachineConfigLabel: map[string]string{
+				MachineConfigLabelKey: MachineConfigLabelValue,
+			},
+			MachineConfigPoolSelector: map[string]string{
+				MachineConfigPoolLabelKey: MachineConfigPoolLabelValue,
+			},
+			NodeSelector: map[string]string{
+				"nodekey": "nodeValue",
+			},
+		},
+	}
+}
+
+// The specs below unit-test the per-section validators directly (no webhook,
+// no API server); each It starts from a freshly built valid profile.
+var _ = Describe("PerformanceProfile", func() {
+	var profile *PerformanceProfile
+
+	BeforeEach(func() {
+		profile = NewPerformanceProfile("test")
+	})
+
+	Describe("CPU validation", func() {
+		It("should have CPU fields populated", func() {
+			errors := profile.validateCPUs()
+			Expect(errors).To(BeEmpty(), "should not have validation errors with populated CPU fields")
+
+			profile.Spec.CPU.Isolated = nil
+			errors = profile.validateCPUs()
+			Expect(errors).NotTo(BeEmpty(), "should have validation error with missing CPU Isolated field")
+			Expect(errors[0].Error()).To(ContainSubstring("isolated CPUs required"))
+
+			cpus := CPUSet("0")
+			profile.Spec.CPU.Isolated = &cpus
+			profile.Spec.CPU.Reserved = nil
+			errors = profile.validateCPUs()
+			Expect(errors).NotTo(BeEmpty(), "should have validation error with missing CPU reserved field")
+			Expect(errors[0].Error()).To(ContainSubstring("reserved CPUs required"))
+
+			invalidCPUs := CPUSet("bla")
+			profile.Spec.CPU.Isolated = &invalidCPUs
+			errors = profile.validateCPUs()
+			Expect(errors).NotTo(BeEmpty(), "should have validation error when isolated CPUs has invalid format")
+
+			profile.Spec.CPU = nil
+			errors = profile.validateCPUs()
+			Expect(errors).NotTo(BeEmpty(), "should have validation error with missing CPU")
+			Expect(errors[0].Error()).To(ContainSubstring("cpu section required"))
+		})
+
+		It("should allow cpus allocation with no reserved CPUs", func() {
+			reservedCPUs := CPUSet("")
+			isolatedCPUs := CPUSet("0-7")
+			profile.Spec.CPU.Reserved = &reservedCPUs
+			profile.Spec.CPU.Isolated = &isolatedCPUs
+			errors := profile.validateCPUs()
+			Expect(errors).To(BeEmpty())
+		})
+
+		It("should reject cpus allocation with no isolated CPUs", func() {
+			reservedCPUs := CPUSet("0-3")
+			isolatedCPUs := CPUSet("")
+			profile.Spec.CPU.Reserved = &reservedCPUs
+			profile.Spec.CPU.Isolated = &isolatedCPUs
+			errors := profile.validateCPUs()
+			Expect(errors).NotTo(BeEmpty())
+			Expect(errors[0].Error()).To(ContainSubstring("isolated CPUs can not be empty"))
+		})
+
+		It("should reject cpus allocation with overlapping sets", func() {
+			reservedCPUs := CPUSet("0-7")
+			isolatedCPUs := CPUSet("0-15")
+			profile.Spec.CPU.Reserved = &reservedCPUs
+			profile.Spec.CPU.Isolated = &isolatedCPUs
+			errors := profile.validateCPUs()
+			Expect(errors).NotTo(BeEmpty(), "should have validation error when reserved and isolation CPUs have overlap")
+			Expect(errors[0].Error()).To(ContainSubstring("reserved and isolated cpus overlap"))
+		})
+	})
+
+	Describe("Label selectors validation", func() {
+		It("should have 0 or 1 MachineConfigLabels", func() {
+			errors := profile.validateSelectors()
+			Expect(errors).To(BeEmpty(), "should not have validation errors when the profile has only 1 MachineConfigSelector")
+
+			profile.Spec.MachineConfigLabel["foo"] = "bar"
+			errors = profile.validateSelectors()
+			Expect(errors).NotTo(BeEmpty(), "should have validation error when the profile has two machine config selectors")
+			Expect(errors[0].Error()).To(ContainSubstring("you should provide only 1 MachineConfigLabel"))
+
+			profile.Spec.MachineConfigLabel = nil
+			setValidNodeSelector(profile)
+
+			// FIX: assert on the computed result instead of invoking the
+			// validator a second time inside Expect.
+			errors = profile.validateSelectors()
+			Expect(errors).To(BeEmpty(), "should not have validation errors when machine config selector nil")
+		})
+
+		// FIX: dropped the duplicated word in the spec description ("should should").
+		It("should have 0 or 1 MachineConfigPoolSelector labels", func() {
+			errors := profile.validateSelectors()
+			Expect(errors).To(BeEmpty(), "should not have validation errors when the profile has only 1 MachineConfigPoolSelector")
+
+			profile.Spec.MachineConfigPoolSelector["foo"] = "bar"
+			errors = profile.validateSelectors()
+			Expect(errors).NotTo(BeEmpty(), "should have validation error when the profile has two machine config pool selectors")
+			Expect(errors[0].Error()).To(ContainSubstring("you should provide only 1 MachineConfigPoolSelector"))
+
+			profile.Spec.MachineConfigPoolSelector = nil
+			setValidNodeSelector(profile)
+
+			// FIX: same double-invocation cleanup as above.
+			errors = profile.validateSelectors()
+			Expect(errors).To(BeEmpty(), "should not have validation errors when machine config pool selector nil")
+		})
+
+		It("should have sensible NodeSelector in case MachineConfigLabel or MachineConfigPoolSelector is empty", func() {
+			profile.Spec.MachineConfigLabel = nil
+			errors := profile.validateSelectors()
+			Expect(errors).NotTo(BeEmpty(), "should have validation error with invalid NodeSelector")
+			Expect(errors[0].Error()).To(ContainSubstring("invalid NodeSelector label key that can't be split into domain/role"))
+
+			setValidNodeSelector(profile)
+			errors = profile.validateSelectors()
+			Expect(errors).To(BeEmpty(), "should not have validation errors when the node selector has correct format")
+		})
+	})
+
+	Describe("Hugepages validation", func() {
+		It("should reject on incorrect default hugepages size", func() {
+			incorrectDefaultSize := HugePageSize("!#@")
+			profile.Spec.HugePages.DefaultHugePagesSize = &incorrectDefaultSize
+
+			errors := profile.validateHugePages()
+			Expect(errors).NotTo(BeEmpty(), "should have validation error when default huge pages size has invalid value")
+			Expect(errors[0].Error()).To(ContainSubstring("hugepages default size should be equal"))
+		})
+
+		It("should reject hugepages allocation with unexpected page size", func() {
+			profile.Spec.HugePages.Pages = append(profile.Spec.HugePages.Pages, HugePage{
+				Count: 128,
+				Node:  pointer.Int32Ptr(0),
+				Size:  "14M",
+			})
+			errors := profile.validateHugePages()
+			Expect(errors).NotTo(BeEmpty(), "should have validation error when page with invalid format presents")
+			Expect(errors[0].Error()).To(ContainSubstring(fmt.Sprintf("the page size should be equal to %q or %q", hugepagesSize1G, hugepagesSize2M)))
+		})
+
+		When("pages have duplication", func() {
+			Context("with specified NUMA node", func() {
+				It("should raise the validation error", func() {
+					profile.Spec.HugePages.Pages = append(profile.Spec.HugePages.Pages, HugePage{
+						Count: 128,
+						Size:  hugepagesSize1G,
+						Node:  pointer.Int32Ptr(0),
+					})
+					profile.Spec.HugePages.Pages = append(profile.Spec.HugePages.Pages, HugePage{
+						Count: 64,
+						Size:  hugepagesSize1G,
+						Node:  pointer.Int32Ptr(0),
+					})
+					errors := profile.validateHugePages()
+					Expect(errors).NotTo(BeEmpty())
+					Expect(errors[0].Error()).To(ContainSubstring(fmt.Sprintf("the page with the size %q and with specified NUMA node 0, has duplication", hugepagesSize1G)))
+				})
+			})
+
+			Context("without specified NUMA node", func() {
+				It("should raise the validation error", func() {
+					profile.Spec.HugePages.Pages = append(profile.Spec.HugePages.Pages, HugePage{
+						Count: 128,
+						Size:  hugepagesSize1G,
+					})
+					errors := profile.validateHugePages()
+					Expect(errors).NotTo(BeEmpty())
+					Expect(errors[0].Error()).To(ContainSubstring(fmt.Sprintf("the page with the size %q and without the specified NUMA node, has duplication", hugepagesSize1G)))
+				})
+			})
+
+			Context("with not sequentially duplication blocks", func() {
+				It("should raise the validation error", func() {
+					profile.Spec.HugePages.Pages = append(profile.Spec.HugePages.Pages, HugePage{
+						Count: 128,
+						Size:  hugepagesSize2M,
+					})
+					profile.Spec.HugePages.Pages = append(profile.Spec.HugePages.Pages, HugePage{
+						Count: 128,
+						Size:  hugepagesSize1G,
+					})
+					errors := profile.validateHugePages()
+					Expect(errors).NotTo(BeEmpty())
+					Expect(errors[0].Error()).To(ContainSubstring(fmt.Sprintf("the page with the size %q and without the specified NUMA node, has duplication", hugepagesSize1G)))
+				})
+			})
+		})
+	})
+
+	Describe("Net validation", func() {
+		Context("with properly populated fields", func() {
+			It("should have net fields properly populated", func() {
+				errors := profile.validateNet()
+				Expect(errors).To(BeEmpty(), "should not have validation errors with properly populated net devices fields")
+			})
+		})
+		Context("with misconfigured fields", func() {
+			It("should raise the validation syntax errors", func() {
+				invalidVendor := "123"
+				invalidDevice := "0x12345"
+				profile.Spec.Net.Devices[0].InterfaceName = pointer.StringPtr("")
+				profile.Spec.Net.Devices[0].VendorID = pointer.StringPtr(invalidVendor)
+				profile.Spec.Net.Devices[0].DeviceID = pointer.StringPtr(invalidDevice)
+				errors := profile.validateNet()
+				Expect(len(errors)).To(Equal(3))
+				// FIX: removed no-format-verb fmt.Sprintf wrappers (go vet finding)
+				Expect(errors[0].Error()).To(ContainSubstring("device name cannot be empty"))
+				Expect(errors[1].Error()).To(ContainSubstring(fmt.Sprintf("device vendor ID %s has an invalid format. Vendor ID should be represented as 0x<4 hexadecimal digits> (16 bit representation)", invalidVendor)))
+				Expect(errors[2].Error()).To(ContainSubstring(fmt.Sprintf("device model ID %s has an invalid format. Model ID should be represented as 0x<4 hexadecimal digits> (16 bit representation)", invalidDevice)))
+			})
+			It("should raise the validation errors for missing fields", func() {
+				profile.Spec.Net.Devices[0].VendorID = nil
+				profile.Spec.Net.Devices[0].DeviceID = pointer.StringPtr("0x1")
+				errors := profile.validateNet()
+				Expect(errors).NotTo(BeEmpty())
+				Expect(errors[0].Error()).To(ContainSubstring("device model ID can not be used without specifying the device vendor ID."))
+			})
+		})
+	})
+})
+
+// setValidNodeSelector replaces the profile's node selector with a single
+// well-formed "<domain>/<role>" key, as required when the machine config
+// label / pool selector defaults must be derived from it.
+func setValidNodeSelector(profile *PerformanceProfile) {
+	profile.Spec.NodeSelector = map[string]string{
+		"fooDomain/" + NodeSelectorRole: "",
+	}
+}
diff --git a/pkg/apis/pao/v2/performanceprofile_webhook.go b/pkg/apis/pao/v2/performanceprofile_webhook.go
new file mode 100644
index 000000000..3f6ebba34
--- /dev/null
+++ b/pkg/apis/pao/v2/performanceprofile_webhook.go
@@ -0,0 +1,23 @@
+package v2
+
+import (
+ ctrl "sigs.k8s.io/controller-runtime"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+ "sigs.k8s.io/controller-runtime/pkg/webhook"
+)
+
+var _ webhook.Validator = &PerformanceProfile{}
+
+// we need this variable only because our validate methods should have access to the client
+var validatorClient client.Client
+
+// SetupWebhookWithManager enables Webhooks - needed for version conversion
+func (r *PerformanceProfile) SetupWebhookWithManager(mgr ctrl.Manager) error {
+ if validatorClient == nil {
+ validatorClient = mgr.GetClient()
+ }
+
+ return ctrl.NewWebhookManagedBy(mgr).
+ For(r).
+ Complete()
+}
diff --git a/pkg/apis/pao/v2/zz_generated.deepcopy.go b/pkg/apis/pao/v2/zz_generated.deepcopy.go
new file mode 100644
index 000000000..79b55102e
--- /dev/null
+++ b/pkg/apis/pao/v2/zz_generated.deepcopy.go
@@ -0,0 +1,363 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by controller-gen. DO NOT EDIT.
+
+package v2
+
+import (
+ "github.com/openshift/custom-resource-status/conditions/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CPU) DeepCopyInto(out *CPU) {
+ *out = *in
+ if in.Reserved != nil {
+ in, out := &in.Reserved, &out.Reserved
+ *out = new(CPUSet)
+ **out = **in
+ }
+ if in.Isolated != nil {
+ in, out := &in.Isolated, &out.Isolated
+ *out = new(CPUSet)
+ **out = **in
+ }
+ if in.BalanceIsolated != nil {
+ in, out := &in.BalanceIsolated, &out.BalanceIsolated
+ *out = new(bool)
+ **out = **in
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CPU.
+func (in *CPU) DeepCopy() *CPU {
+ if in == nil {
+ return nil
+ }
+ out := new(CPU)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Device) DeepCopyInto(out *Device) {
+ *out = *in
+ if in.InterfaceName != nil {
+ in, out := &in.InterfaceName, &out.InterfaceName
+ *out = new(string)
+ **out = **in
+ }
+ if in.VendorID != nil {
+ in, out := &in.VendorID, &out.VendorID
+ *out = new(string)
+ **out = **in
+ }
+ if in.DeviceID != nil {
+ in, out := &in.DeviceID, &out.DeviceID
+ *out = new(string)
+ **out = **in
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Device.
+func (in *Device) DeepCopy() *Device {
+ if in == nil {
+ return nil
+ }
+ out := new(Device)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *HugePage) DeepCopyInto(out *HugePage) {
+ *out = *in
+ if in.Node != nil {
+ in, out := &in.Node, &out.Node
+ *out = new(int32)
+ **out = **in
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HugePage.
+func (in *HugePage) DeepCopy() *HugePage {
+ if in == nil {
+ return nil
+ }
+ out := new(HugePage)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *HugePages) DeepCopyInto(out *HugePages) {
+ *out = *in
+ if in.DefaultHugePagesSize != nil {
+ in, out := &in.DefaultHugePagesSize, &out.DefaultHugePagesSize
+ *out = new(HugePageSize)
+ **out = **in
+ }
+ if in.Pages != nil {
+ in, out := &in.Pages, &out.Pages
+ *out = make([]HugePage, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HugePages.
+func (in *HugePages) DeepCopy() *HugePages {
+ if in == nil {
+ return nil
+ }
+ out := new(HugePages)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NUMA) DeepCopyInto(out *NUMA) {
+ *out = *in
+ if in.TopologyPolicy != nil {
+ in, out := &in.TopologyPolicy, &out.TopologyPolicy
+ *out = new(string)
+ **out = **in
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NUMA.
+func (in *NUMA) DeepCopy() *NUMA {
+ if in == nil {
+ return nil
+ }
+ out := new(NUMA)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Net) DeepCopyInto(out *Net) {
+ *out = *in
+ if in.UserLevelNetworking != nil {
+ in, out := &in.UserLevelNetworking, &out.UserLevelNetworking
+ *out = new(bool)
+ **out = **in
+ }
+ if in.Devices != nil {
+ in, out := &in.Devices, &out.Devices
+ *out = make([]Device, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Net.
+func (in *Net) DeepCopy() *Net {
+ if in == nil {
+ return nil
+ }
+ out := new(Net)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PerformanceProfile) DeepCopyInto(out *PerformanceProfile) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PerformanceProfile.
+func (in *PerformanceProfile) DeepCopy() *PerformanceProfile {
+ if in == nil {
+ return nil
+ }
+ out := new(PerformanceProfile)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *PerformanceProfile) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PerformanceProfileList) DeepCopyInto(out *PerformanceProfileList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]PerformanceProfile, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PerformanceProfileList.
+func (in *PerformanceProfileList) DeepCopy() *PerformanceProfileList {
+ if in == nil {
+ return nil
+ }
+ out := new(PerformanceProfileList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *PerformanceProfileList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PerformanceProfileSpec) DeepCopyInto(out *PerformanceProfileSpec) {
+ *out = *in
+ if in.CPU != nil {
+ in, out := &in.CPU, &out.CPU
+ *out = new(CPU)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.HugePages != nil {
+ in, out := &in.HugePages, &out.HugePages
+ *out = new(HugePages)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.MachineConfigLabel != nil {
+ in, out := &in.MachineConfigLabel, &out.MachineConfigLabel
+ *out = make(map[string]string, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ if in.MachineConfigPoolSelector != nil {
+ in, out := &in.MachineConfigPoolSelector, &out.MachineConfigPoolSelector
+ *out = make(map[string]string, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ if in.NodeSelector != nil {
+ in, out := &in.NodeSelector, &out.NodeSelector
+ *out = make(map[string]string, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ if in.RealTimeKernel != nil {
+ in, out := &in.RealTimeKernel, &out.RealTimeKernel
+ *out = new(RealTimeKernel)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.AdditionalKernelArgs != nil {
+ in, out := &in.AdditionalKernelArgs, &out.AdditionalKernelArgs
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.NUMA != nil {
+ in, out := &in.NUMA, &out.NUMA
+ *out = new(NUMA)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Net != nil {
+ in, out := &in.Net, &out.Net
+ *out = new(Net)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.GloballyDisableIrqLoadBalancing != nil {
+ in, out := &in.GloballyDisableIrqLoadBalancing, &out.GloballyDisableIrqLoadBalancing
+ *out = new(bool)
+ **out = **in
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PerformanceProfileSpec.
+func (in *PerformanceProfileSpec) DeepCopy() *PerformanceProfileSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(PerformanceProfileSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PerformanceProfileStatus) DeepCopyInto(out *PerformanceProfileStatus) {
+ *out = *in
+ if in.Conditions != nil {
+ in, out := &in.Conditions, &out.Conditions
+ *out = make([]v1.Condition, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.Tuned != nil {
+ in, out := &in.Tuned, &out.Tuned
+ *out = new(string)
+ **out = **in
+ }
+ if in.RuntimeClass != nil {
+ in, out := &in.RuntimeClass, &out.RuntimeClass
+ *out = new(string)
+ **out = **in
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PerformanceProfileStatus.
+func (in *PerformanceProfileStatus) DeepCopy() *PerformanceProfileStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(PerformanceProfileStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RealTimeKernel) DeepCopyInto(out *RealTimeKernel) {
+ *out = *in
+ if in.Enabled != nil {
+ in, out := &in.Enabled, &out.Enabled
+ *out = new(bool)
+ **out = **in
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RealTimeKernel.
+func (in *RealTimeKernel) DeepCopy() *RealTimeKernel {
+ if in == nil {
+ return nil
+ }
+ out := new(RealTimeKernel)
+ in.DeepCopyInto(out)
+ return out
+}
diff --git a/pkg/pao/cmd/render/render.go b/pkg/pao/cmd/render/render.go
new file mode 100644
index 000000000..2aceeb849
--- /dev/null
+++ b/pkg/pao/cmd/render/render.go
@@ -0,0 +1,165 @@
+/*
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package render
+
+import (
+ "errors"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "strings"
+
+ "github.com/ghodss/yaml"
+ performancev2 "github.com/openshift/cluster-node-tuning-operator/pkg/apis/pao/v2"
+ "github.com/openshift/cluster-node-tuning-operator/pkg/pao/controller/performanceprofile/components"
+ "github.com/openshift/cluster-node-tuning-operator/pkg/pao/controller/performanceprofile/components/manifestset"
+ "github.com/spf13/cobra"
+ "github.com/spf13/pflag"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/klog"
+)
+
+type renderOpts struct {
+ performanceProfileInputFiles performanceProfileFiles
+ assetsInDir string
+ assetsOutDir string
+}
+
+type performanceProfileFiles []string
+
+func (ppf *performanceProfileFiles) String() string {
+ return fmt.Sprint(*ppf)
+}
+
+func (ppf *performanceProfileFiles) Type() string {
+ return "performanceProfileFiles"
+}
+
+// Set parses the performance-profile-input-files flag value and stores it in ppf
+func (ppf *performanceProfileFiles) Set(value string) error {
+ if len(*ppf) > 0 {
+ return errors.New("performance-profile-input-files flag already set")
+ }
+
+ for _, s := range strings.Split(value, ",") {
+ *ppf = append(*ppf, s)
+ }
+ return nil
+}
+
+// NewRenderCommand creates a render command.
+func NewRenderCommand() *cobra.Command {
+ renderOpts := renderOpts{}
+
+ cmd := &cobra.Command{
+ Use: "render",
+ Short: "Render performance-addon-operator manifests",
+ Run: func(cmd *cobra.Command, args []string) {
+
+ if err := renderOpts.Validate(); err != nil {
+ klog.Fatal(err)
+ }
+
+ if err := renderOpts.Run(); err != nil {
+ klog.Fatal(err)
+ }
+ },
+ }
+
+ renderOpts.AddFlags(cmd.Flags())
+
+ return cmd
+}
+
+func (r *renderOpts) AddFlags(fs *pflag.FlagSet) {
+ fs.Var(&r.performanceProfileInputFiles, "performance-profile-input-files", "A comma-separated list of performance-profile manifests.")
+ fs.StringVar(&r.assetsInDir, "asset-input-dir", components.AssetsDir, "Input path for the assets directory.")
+ fs.StringVar(&r.assetsOutDir, "asset-output-dir", r.assetsOutDir, "Output path for the rendered manifests.")
+ // environment variables have precedence over standard input
+ r.readFlagsFromEnv()
+}
+
+func (r *renderOpts) readFlagsFromEnv() {
+ if ppInFiles := os.Getenv("PERFORMANCE_PROFILE_INPUT_FILES"); len(ppInFiles) > 0 {
+ r.performanceProfileInputFiles.Set(ppInFiles)
+ }
+
+ if assetInDir := os.Getenv("ASSET_INPUT_DIR"); len(assetInDir) > 0 {
+ r.assetsInDir = assetInDir
+ }
+
+ if assetsOutDir := os.Getenv("ASSET_OUTPUT_DIR"); len(assetsOutDir) > 0 {
+ r.assetsOutDir = assetsOutDir
+ }
+}
+
+func (r *renderOpts) Validate() error {
+ if len(r.performanceProfileInputFiles) == 0 {
+ return fmt.Errorf("performance-profile-input-files must be specified")
+ }
+
+ if len(r.assetsOutDir) == 0 {
+ return fmt.Errorf("asset-output-dir must be specified")
+ }
+
+ return nil
+}
+
+func (r *renderOpts) Run() error {
+ for _, pp := range r.performanceProfileInputFiles {
+ b, err := ioutil.ReadFile(pp)
+ if err != nil {
+ return err
+ }
+
+ profile := &performancev2.PerformanceProfile{}
+ err = yaml.Unmarshal(b, profile)
+ if err != nil {
+ return err
+ }
+
+ components, err := manifestset.GetNewComponents(profile, nil)
+ if err != nil {
+ return err
+ }
+ or := []v1.OwnerReference{
+ {
+ Kind: profile.Kind,
+ Name: profile.Name,
+ },
+ }
+
+ for _, componentObj := range components.ToObjects() {
+ componentObj.SetOwnerReferences(or)
+ }
+
+ for kind, manifest := range components.ToManifestTable() {
+ b, err := yaml.Marshal(manifest)
+ if err != nil {
+ return err
+ }
+
+ fileName := fmt.Sprintf("%s_%s.yaml", profile.Name, strings.ToLower(kind))
+ err = ioutil.WriteFile(filepath.Join(r.assetsOutDir, fileName), b, 0644)
+ if err != nil {
+ return err
+ }
+ }
+ }
+
+ return nil
+}
diff --git a/pkg/pao/controller/performanceprofile/components/consts.go b/pkg/pao/controller/performanceprofile/components/consts.go
new file mode 100644
index 000000000..9318baf14
--- /dev/null
+++ b/pkg/pao/controller/performanceprofile/components/consts.go
@@ -0,0 +1,31 @@
+package components
+
+const (
+ // AssetsDir defines the directory with assets under the operator image
+ AssetsDir = "/assets"
+)
+
+const (
+ // ComponentNamePrefix defines the worker role for performance sensitive workflows
+ // TODO: change it back to longer name once https://bugzilla.redhat.com/show_bug.cgi?id=1787907 fixed
+ // ComponentNamePrefix = "worker-performance"
+ ComponentNamePrefix = "performance"
+ // MachineConfigRoleLabelKey is the label key to use as label and in MachineConfigSelector of MCP which targets the performance profile
+ MachineConfigRoleLabelKey = "machineconfiguration.openshift.io/role"
+ // NodeRoleLabelPrefix is the prefix for the role label of a node
+ NodeRoleLabelPrefix = "node-role.kubernetes.io/"
+)
+
+const (
+ // NamespaceNodeTuningOperator defines the tuned profiles namespace
+ NamespaceNodeTuningOperator = "openshift-cluster-node-tuning-operator"
+ // ProfileNamePerformance defines the performance tuned profile name
+ ProfileNamePerformance = "openshift-node-performance"
+)
+
+const (
+ // HugepagesSize2M contains the size of 2M hugepages
+ HugepagesSize2M = "2M"
+ // HugepagesSize1G contains the size of 1G hugepages
+ HugepagesSize1G = "1G"
+)
diff --git a/pkg/pao/controller/performanceprofile/components/kubeletconfig/kubeletconfig.go b/pkg/pao/controller/performanceprofile/components/kubeletconfig/kubeletconfig.go
new file mode 100644
index 000000000..45d20d24e
--- /dev/null
+++ b/pkg/pao/controller/performanceprofile/components/kubeletconfig/kubeletconfig.go
@@ -0,0 +1,167 @@
+package kubeletconfig
+
+import (
+ "encoding/json"
+ "time"
+
+ corev1 "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/api/resource"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ kubeletconfigv1beta1 "k8s.io/kubelet/config/v1beta1"
+
+ performancev2 "github.com/openshift/cluster-node-tuning-operator/pkg/apis/pao/v2"
+ "github.com/openshift/cluster-node-tuning-operator/pkg/pao/controller/performanceprofile/components"
+ machineconfigv1 "github.com/openshift/machine-config-operator/pkg/apis/machineconfiguration.openshift.io/v1"
+)
+
+const (
+ // experimentalKubeletSnippetAnnotation contains the annotation key that should be used to provide a KubeletConfig snippet with additional
+ // configurations you want to apply on top of the generated KubeletConfig resource.
+ // To find the specific argument see https://kubernetes.io/docs/reference/config-api/kubelet-config.v1beta1/.
+ // By default, the performance-addon-operator will override:
+ // 1. CPU manager policy
+ // 2. CPU manager reconcile period
+ // 3. Topology manager policy
+ // 4. Reserved CPUs
+ // 5. Memory manager policy
+ // Please avoid specifying them and use the relevant API to configure these parameters.
+ experimentalKubeletSnippetAnnotation = "kubeletconfig.experimental"
+ cpuManagerPolicyStatic = "static"
+ cpuManagerPolicyOptionFullPCPUsOnly = "full-pcpus-only"
+ memoryManagerPolicyStatic = "Static"
+ defaultKubeReservedMemory = "500Mi"
+ defaultSystemReservedMemory = "500Mi"
+ defaultHardEvictionThreshold = "100Mi"
+ evictionHardMemoryAvailable = "memory.available"
+)
+
+// New returns a new KubeletConfig object for performance-sensitive workflows
+func New(profile *performancev2.PerformanceProfile, profileMCPLabels map[string]string) (*machineconfigv1.KubeletConfig, error) {
+ name := components.GetComponentName(profile.Name, components.ComponentNamePrefix)
+ kubeletConfig := &kubeletconfigv1beta1.KubeletConfiguration{}
+ if v, ok := profile.Annotations[experimentalKubeletSnippetAnnotation]; ok {
+ if err := json.Unmarshal([]byte(v), kubeletConfig); err != nil {
+ return nil, err
+ }
+ }
+
+ kubeletConfig.TypeMeta = metav1.TypeMeta{
+ APIVersion: kubeletconfigv1beta1.SchemeGroupVersion.String(),
+ Kind: "KubeletConfiguration",
+ }
+
+ kubeletConfig.CPUManagerPolicy = cpuManagerPolicyStatic
+ kubeletConfig.CPUManagerReconcilePeriod = metav1.Duration{Duration: 5 * time.Second}
+ kubeletConfig.TopologyManagerPolicy = kubeletconfigv1beta1.BestEffortTopologyManagerPolicy
+
+ // set the default hard eviction memory threshold
+ if kubeletConfig.EvictionHard == nil {
+ kubeletConfig.EvictionHard = map[string]string{}
+ }
+ if _, ok := kubeletConfig.EvictionHard[evictionHardMemoryAvailable]; !ok {
+ kubeletConfig.EvictionHard[evictionHardMemoryAvailable] = defaultHardEvictionThreshold
+ }
+
+ // set the default memory kube-reserved
+ if kubeletConfig.KubeReserved == nil {
+ kubeletConfig.KubeReserved = map[string]string{}
+ }
+ if _, ok := kubeletConfig.KubeReserved[string(corev1.ResourceMemory)]; !ok {
+ kubeletConfig.KubeReserved[string(corev1.ResourceMemory)] = defaultKubeReservedMemory
+ }
+
+ // set the default memory system-reserved
+ if kubeletConfig.SystemReserved == nil {
+ kubeletConfig.SystemReserved = map[string]string{}
+ }
+ if _, ok := kubeletConfig.SystemReserved[string(corev1.ResourceMemory)]; !ok {
+ kubeletConfig.SystemReserved[string(corev1.ResourceMemory)] = defaultSystemReservedMemory
+ }
+
+ if profile.Spec.CPU != nil && profile.Spec.CPU.Reserved != nil {
+ kubeletConfig.ReservedSystemCPUs = string(*profile.Spec.CPU.Reserved)
+ }
+
+ if profile.Spec.NUMA != nil {
+ if profile.Spec.NUMA.TopologyPolicy != nil {
+ topologyPolicy := *profile.Spec.NUMA.TopologyPolicy
+ kubeletConfig.TopologyManagerPolicy = topologyPolicy
+
+ // set the memory manager policy to static only when the topology policy is
+ // restricted or single NUMA node
+ if topologyPolicy == kubeletconfigv1beta1.RestrictedTopologyManagerPolicy ||
+ topologyPolicy == kubeletconfigv1beta1.SingleNumaNodeTopologyManagerPolicy {
+ kubeletConfig.MemoryManagerPolicy = memoryManagerPolicyStatic
+
+ if kubeletConfig.ReservedMemory == nil {
+ reservedMemory := resource.NewQuantity(0, resource.DecimalSI)
+ if err := addStringToQuantity(reservedMemory, kubeletConfig.KubeReserved[string(corev1.ResourceMemory)]); err != nil {
+ return nil, err
+ }
+ if err := addStringToQuantity(reservedMemory, kubeletConfig.SystemReserved[string(corev1.ResourceMemory)]); err != nil {
+ return nil, err
+ }
+ if err := addStringToQuantity(reservedMemory, kubeletConfig.EvictionHard[evictionHardMemoryAvailable]); err != nil {
+ return nil, err
+ }
+
+ kubeletConfig.ReservedMemory = []kubeletconfigv1beta1.MemoryReservation{
+ {
+ // the NUMA node 0 is the only safe choice for non-NUMA machines
+ // in the future we can extend our API to get this information from a user
+ NumaNode: 0,
+ Limits: map[corev1.ResourceName]resource.Quantity{
+ corev1.ResourceMemory: *reservedMemory,
+ },
+ },
+ }
+ }
+
+ // require full physical CPUs only to ensure maximum isolation
+ if topologyPolicy == kubeletconfigv1beta1.SingleNumaNodeTopologyManagerPolicy {
+ if kubeletConfig.CPUManagerPolicyOptions == nil {
+ kubeletConfig.CPUManagerPolicyOptions = make(map[string]string)
+ }
+
+ if _, ok := kubeletConfig.CPUManagerPolicyOptions[cpuManagerPolicyOptionFullPCPUsOnly]; !ok {
+ kubeletConfig.CPUManagerPolicyOptions[cpuManagerPolicyOptionFullPCPUsOnly] = "true"
+ }
+ }
+ }
+ }
+ }
+
+ raw, err := json.Marshal(kubeletConfig)
+ if err != nil {
+ return nil, err
+ }
+
+ return &machineconfigv1.KubeletConfig{
+ TypeMeta: metav1.TypeMeta{
+ APIVersion: machineconfigv1.GroupVersion.String(),
+ Kind: "KubeletConfig",
+ },
+ ObjectMeta: metav1.ObjectMeta{
+ Name: name,
+ },
+ Spec: machineconfigv1.KubeletConfigSpec{
+ MachineConfigPoolSelector: &metav1.LabelSelector{
+ MatchLabels: profileMCPLabels,
+ },
+ KubeletConfig: &runtime.RawExtension{
+ Raw: raw,
+ },
+ },
+ }, nil
+}
+
+func addStringToQuantity(q *resource.Quantity, value string) error {
+ v, err := resource.ParseQuantity(value)
+ if err != nil {
+ return err
+ }
+ q.Add(v)
+
+ return nil
+}
diff --git a/pkg/pao/controller/performanceprofile/components/kubeletconfig/kubeletconfig_suite_test.go b/pkg/pao/controller/performanceprofile/components/kubeletconfig/kubeletconfig_suite_test.go
new file mode 100644
index 000000000..39830753d
--- /dev/null
+++ b/pkg/pao/controller/performanceprofile/components/kubeletconfig/kubeletconfig_suite_test.go
@@ -0,0 +1,13 @@
+package kubeletconfig
+
+import (
+ "testing"
+
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+)
+
+func TestKubeletConfig(t *testing.T) {
+ RegisterFailHandler(Fail)
+ RunSpecs(t, "Kubelet Config Suite")
+}
diff --git a/pkg/pao/controller/performanceprofile/components/kubeletconfig/kubeletconfig_test.go b/pkg/pao/controller/performanceprofile/components/kubeletconfig/kubeletconfig_test.go
new file mode 100644
index 000000000..df519651d
--- /dev/null
+++ b/pkg/pao/controller/performanceprofile/components/kubeletconfig/kubeletconfig_test.go
@@ -0,0 +1,169 @@
+package kubeletconfig
+
+import (
+ "fmt"
+
+ "github.com/ghodss/yaml"
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+ kubeletconfigv1beta1 "k8s.io/kubelet/config/v1beta1"
+ "k8s.io/utils/pointer"
+
+ "github.com/openshift/cluster-node-tuning-operator/pkg/pao/controller/performanceprofile/components"
+ testutils "github.com/openshift/cluster-node-tuning-operator/pkg/pao/utils/testing"
+)
+
+const testReservedMemory = `reservedMemory:
+ - limits:
+ memory: 1100Mi
+ numaNode: 0`
+
+var _ = Describe("Kubelet Config", func() {
+ It("should generate yaml with expected parameters", func() {
+ profile := testutils.NewPerformanceProfile("test")
+ selectorKey, selectorValue := components.GetFirstKeyAndValue(profile.Spec.MachineConfigPoolSelector)
+ kc, err := New(profile, map[string]string{selectorKey: selectorValue})
+ Expect(err).ToNot(HaveOccurred())
+
+ y, err := yaml.Marshal(kc)
+ Expect(err).ToNot(HaveOccurred())
+
+ manifest := string(y)
+
+ Expect(manifest).To(ContainSubstring(fmt.Sprintf("%s: %s", selectorKey, selectorValue)))
+ Expect(manifest).To(ContainSubstring("reservedSystemCPUs: 0-3"))
+ Expect(manifest).To(ContainSubstring("topologyManagerPolicy: single-numa-node"))
+ Expect(manifest).To(ContainSubstring("cpuManagerPolicy: static"))
+ Expect(manifest).To(ContainSubstring("memoryManagerPolicy: Static"))
+ Expect(manifest).To(ContainSubstring("cpuManagerPolicyOptions"))
+ Expect(manifest).To(ContainSubstring(testReservedMemory))
+ })
+
+ Context("with topology manager restricted policy", func() {
+ It("should have the memory manager related parameters", func() {
+ profile := testutils.NewPerformanceProfile("test")
+ profile.Spec.NUMA.TopologyPolicy = pointer.String(kubeletconfigv1beta1.RestrictedTopologyManagerPolicy)
+ selectorKey, selectorValue := components.GetFirstKeyAndValue(profile.Spec.MachineConfigPoolSelector)
+ kc, err := New(profile, map[string]string{selectorKey: selectorValue})
+ Expect(err).ToNot(HaveOccurred())
+
+ y, err := yaml.Marshal(kc)
+ Expect(err).ToNot(HaveOccurred())
+
+ manifest := string(y)
+ Expect(manifest).To(ContainSubstring("memoryManagerPolicy: Static"))
+ Expect(manifest).To(ContainSubstring(testReservedMemory))
+ })
+
+ It("should not have the cpumanager policy options set", func() {
+ profile := testutils.NewPerformanceProfile("test")
+ profile.Spec.NUMA.TopologyPolicy = pointer.String(kubeletconfigv1beta1.RestrictedTopologyManagerPolicy)
+ selectorKey, selectorValue := components.GetFirstKeyAndValue(profile.Spec.MachineConfigPoolSelector)
+ kc, err := New(profile, map[string]string{selectorKey: selectorValue})
+ Expect(err).ToNot(HaveOccurred())
+
+ y, err := yaml.Marshal(kc)
+ Expect(err).ToNot(HaveOccurred())
+
+ manifest := string(y)
+ Expect(manifest).ToNot(ContainSubstring("cpuManagerPolicyOptions"))
+ })
+
+ })
+
+ Context("with topology manager best-effort policy", func() {
+ It("should not have the memory manager related parameters", func() {
+ profile := testutils.NewPerformanceProfile("test")
+ profile.Spec.NUMA.TopologyPolicy = pointer.String(kubeletconfigv1beta1.BestEffortTopologyManagerPolicy)
+ selectorKey, selectorValue := components.GetFirstKeyAndValue(profile.Spec.MachineConfigPoolSelector)
+ kc, err := New(profile, map[string]string{selectorKey: selectorValue})
+ Expect(err).ToNot(HaveOccurred())
+
+ y, err := yaml.Marshal(kc)
+ Expect(err).ToNot(HaveOccurred())
+
+ manifest := string(y)
+ Expect(manifest).ToNot(ContainSubstring("memoryManagerPolicy: Static"))
+ Expect(manifest).ToNot(ContainSubstring(testReservedMemory))
+ })
+ })
+
+ Context("with additional kubelet arguments", func() {
+ It("should not override CPU manager parameters", func() {
+ profile := testutils.NewPerformanceProfile("test")
+ profile.Annotations = map[string]string{
+ experimentalKubeletSnippetAnnotation: `{"cpuManagerPolicy": "none", "cpuManagerReconcilePeriod": "10s", "reservedSystemCPUs": "4,5"}`,
+ }
+ selectorKey, selectorValue := components.GetFirstKeyAndValue(profile.Spec.MachineConfigPoolSelector)
+ kc, err := New(profile, map[string]string{selectorKey: selectorValue})
+ y, err := yaml.Marshal(kc)
+ Expect(err).ToNot(HaveOccurred())
+
+ manifest := string(y)
+ Expect(manifest).ToNot(ContainSubstring("cpuManagerPolicy: none"))
+ Expect(manifest).ToNot(ContainSubstring("cpuManagerReconcilePeriod: 10s"))
+ Expect(manifest).ToNot(ContainSubstring("reservedSystemCPUs: 4-5"))
+ })
+
+ It("should not override topology manager parameters", func() {
+ profile := testutils.NewPerformanceProfile("test")
+ profile.Annotations = map[string]string{
+ experimentalKubeletSnippetAnnotation: `{"topologyManagerPolicy": "none"}`,
+ }
+ selectorKey, selectorValue := components.GetFirstKeyAndValue(profile.Spec.MachineConfigPoolSelector)
+ kc, err := New(profile, map[string]string{selectorKey: selectorValue})
+ y, err := yaml.Marshal(kc)
+ Expect(err).ToNot(HaveOccurred())
+
+ manifest := string(y)
+ Expect(manifest).ToNot(ContainSubstring("topologyManagerPolicy: none"))
+ })
+
+ It("should not override memory manager policy", func() {
+ profile := testutils.NewPerformanceProfile("test")
+
+ profile.Annotations = map[string]string{
+ experimentalKubeletSnippetAnnotation: `{"memoryManagerPolicy": "None", "reservedMemory": [{"numaNode": 10, "limits": {"test": "1024"}}]}`,
+ }
+ selectorKey, selectorValue := components.GetFirstKeyAndValue(profile.Spec.MachineConfigPoolSelector)
+ kc, err := New(profile, map[string]string{selectorKey: selectorValue})
+ y, err := yaml.Marshal(kc)
+ Expect(err).ToNot(HaveOccurred())
+
+ manifest := string(y)
+ Expect(manifest).ToNot(ContainSubstring("memoryManagerPolicy: None"))
+ Expect(manifest).To(ContainSubstring("numaNode: 10"))
+ })
+
+ It("should set the kubelet config accordingly", func() {
+ profile := testutils.NewPerformanceProfile("test")
+ profile.Annotations = map[string]string{
+ experimentalKubeletSnippetAnnotation: `{"allowedUnsafeSysctls": ["net.core.somaxconn"], "evictionHard": {"memory.available": "200Mi"}}`,
+ }
+ selectorKey, selectorValue := components.GetFirstKeyAndValue(profile.Spec.MachineConfigPoolSelector)
+ kc, err := New(profile, map[string]string{selectorKey: selectorValue})
+ y, err := yaml.Marshal(kc)
+ Expect(err).ToNot(HaveOccurred())
+
+ manifest := string(y)
+ Expect(manifest).To(ContainSubstring("net.core.somaxconn"))
+ Expect(manifest).To(ContainSubstring("memory.available: 200Mi"))
+ })
+
+ It("should allow to override the cpumanager policy options and update the kubelet config accordingly", func() {
+ profile := testutils.NewPerformanceProfile("test")
+ profile.Annotations = map[string]string{
+ experimentalKubeletSnippetAnnotation: `{"allowedUnsafeSysctls": ["net.core.somaxconn"], "cpuManagerPolicyOptions": {"full-pcpus-only": "false"}}`,
+ }
+ selectorKey, selectorValue := components.GetFirstKeyAndValue(profile.Spec.MachineConfigPoolSelector)
+ kc, err := New(profile, map[string]string{selectorKey: selectorValue})
+ data, err := yaml.Marshal(kc)
+ Expect(err).ToNot(HaveOccurred())
+
+ manifest := string(data)
+ Expect(manifest).To(ContainSubstring("net.core.somaxconn"))
+ Expect(manifest).To(ContainSubstring(`full-pcpus-only: "false"`))
+ })
+
+ })
+})
diff --git a/pkg/pao/controller/performanceprofile/components/machineconfig/machineconfig.go b/pkg/pao/controller/performanceprofile/components/machineconfig/machineconfig.go
new file mode 100644
index 000000000..f4b2ef079
--- /dev/null
+++ b/pkg/pao/controller/performanceprofile/components/machineconfig/machineconfig.go
@@ -0,0 +1,354 @@
+package machineconfig
+
+import (
+ "bytes"
+ "encoding/base64"
+ "encoding/json"
+ "fmt"
+ "io/ioutil"
+ "path/filepath"
+ "text/template"
+
+ assets "github.com/openshift/cluster-node-tuning-operator/assets/pao"
+
+ "github.com/coreos/go-systemd/unit"
+ igntypes "github.com/coreos/ignition/v2/config/v3_2/types"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/utils/pointer"
+
+ performancev2 "github.com/openshift/cluster-node-tuning-operator/pkg/apis/pao/v2"
+ "github.com/openshift/cluster-node-tuning-operator/pkg/pao/controller/performanceprofile/components"
+ profilecomponent "github.com/openshift/cluster-node-tuning-operator/pkg/pao/controller/performanceprofile/components/profile"
+ machineconfigv1 "github.com/openshift/machine-config-operator/pkg/apis/machineconfiguration.openshift.io/v1"
+)
+
+const (
+ defaultIgnitionVersion = "3.2.0"
+ defaultIgnitionContentSource = "data:text/plain;charset=utf-8;base64"
+)
+
+const (
+ // MCKernelRT is the value of the kernel setting in MachineConfig for the RT kernel
+ MCKernelRT = "realtime"
+ // MCKernelDefault is the value of the kernel setting in MachineConfig for the default kernel
+ MCKernelDefault = "default"
+ // HighPerformanceRuntime contains the name of the high-performance runtime
+ HighPerformanceRuntime = "high-performance"
+
+ bashScriptsDir = "/usr/local/bin"
+ crioConfd = "/etc/crio/crio.conf.d"
+ crioRuntimesConfig = "99-runtimes.conf"
+ // OCIHooksConfigDir is the default directory for the OCI hooks
+ OCIHooksConfigDir = "/etc/containers/oci/hooks.d"
+ // OCIHooksConfig file contains the low latency hooks configuration
+ OCIHooksConfig = "99-low-latency-hooks.json"
+ ociTemplateRPSMask = "RPSMask"
+ udevRulesDir = "/etc/udev/rules.d"
+ udevRpsRules = "99-netdev-rps.rules"
+ // scripts
+ hugepagesAllocation = "hugepages-allocation"
+ ociHooks = "low-latency-hooks"
+ setRPSMask = "set-rps-mask"
+)
+
+const (
+ systemdSectionUnit = "Unit"
+ systemdSectionService = "Service"
+ systemdSectionInstall = "Install"
+ systemdDescription = "Description"
+ systemdBefore = "Before"
+ systemdEnvironment = "Environment"
+ systemdType = "Type"
+ systemdRemainAfterExit = "RemainAfterExit"
+ systemdExecStart = "ExecStart"
+ systemdWantedBy = "WantedBy"
+)
+
+const (
+ systemdServiceKubelet = "kubelet.service"
+ systemdServiceTypeOneshot = "oneshot"
+ systemdTargetMultiUser = "multi-user.target"
+ systemdTrue = "true"
+)
+
+const (
+ environmentHugepagesSize = "HUGEPAGES_SIZE"
+ environmentHugepagesCount = "HUGEPAGES_COUNT"
+ environmentNUMANode = "NUMA_NODE"
+)
+
+const (
+ templateReservedCpus = "ReservedCpus"
+)
+
+// New returns new machine configuration object for performance sensitive workloads
+func New(profile *performancev2.PerformanceProfile) (*machineconfigv1.MachineConfig, error) {
+ name := GetMachineConfigName(profile)
+ mc := &machineconfigv1.MachineConfig{
+ TypeMeta: metav1.TypeMeta{
+ APIVersion: machineconfigv1.GroupVersion.String(),
+ Kind: "MachineConfig",
+ },
+ ObjectMeta: metav1.ObjectMeta{
+ Name: name,
+ Labels: profilecomponent.GetMachineConfigLabel(profile),
+ },
+ Spec: machineconfigv1.MachineConfigSpec{},
+ }
+
+ ignitionConfig, err := getIgnitionConfig(profile)
+ if err != nil {
+ return nil, err
+ }
+
+ rawIgnition, err := json.Marshal(ignitionConfig)
+ if err != nil {
+ return nil, err
+ }
+ mc.Spec.Config = runtime.RawExtension{Raw: rawIgnition}
+
+ enableRTKernel := profile.Spec.RealTimeKernel != nil &&
+ profile.Spec.RealTimeKernel.Enabled != nil &&
+ *profile.Spec.RealTimeKernel.Enabled
+
+ if enableRTKernel {
+ mc.Spec.KernelType = MCKernelRT
+ } else {
+ mc.Spec.KernelType = MCKernelDefault
+ }
+
+ return mc, nil
+}
+
+// GetMachineConfigName generates machine config name from the performance profile
+func GetMachineConfigName(profile *performancev2.PerformanceProfile) string {
+ name := components.GetComponentName(profile.Name, components.ComponentNamePrefix)
+ return fmt.Sprintf("50-%s", name)
+}
+
+func getIgnitionConfig(profile *performancev2.PerformanceProfile) (*igntypes.Config, error) {
+ ignitionConfig := &igntypes.Config{
+ Ignition: igntypes.Ignition{
+ Version: defaultIgnitionVersion,
+ },
+ Storage: igntypes.Storage{
+ Files: []igntypes.File{},
+ },
+ }
+
+ // add script files under the node /usr/local/bin directory
+ mode := 0700
+ for _, script := range []string{hugepagesAllocation, ociHooks, setRPSMask} {
+ dst := getBashScriptPath(script)
+ content, err := assets.Scripts.ReadFile(fmt.Sprintf("scripts/%s.sh", script))
+ if err != nil {
+ return nil, err
+ }
+ addContent(ignitionConfig, content, dst, &mode)
+ }
+
+ // add crio config snippet under the node /etc/crio/crio.conf.d/ directory
+ crioConfdRuntimesMode := 0644
+ crioConfigSnippetContent, err := renderCrioConfigSnippet(profile, filepath.Join("configs", crioRuntimesConfig))
+ if err != nil {
+ return nil, err
+ }
+ crioConfSnippetDst := filepath.Join(crioConfd, crioRuntimesConfig)
+ addContent(ignitionConfig, crioConfigSnippetContent, crioConfSnippetDst, &crioConfdRuntimesMode)
+
+ // add crio hooks config under the node cri-o hook directory
+ crioHooksConfigsMode := 0644
+ ociHooksConfigContent, err := GetOCIHooksConfigContent(OCIHooksConfig, profile)
+ if err != nil {
+ return nil, err
+ }
+ ociHookConfigDst := filepath.Join(OCIHooksConfigDir, OCIHooksConfig)
+ addContent(ignitionConfig, ociHooksConfigContent, ociHookConfigDst, &crioHooksConfigsMode)
+
+ // add rps udev rule
+ rpsRulesMode := 0644
+ rpsRulesContent, err := assets.Configs.ReadFile(filepath.Join("configs", udevRpsRules))
+ if err != nil {
+ return nil, err
+ }
+ rpsRulesDst := filepath.Join(udevRulesDir, udevRpsRules)
+ addContent(ignitionConfig, rpsRulesContent, rpsRulesDst, &rpsRulesMode)
+
+ if profile.Spec.HugePages != nil {
+ for _, page := range profile.Spec.HugePages.Pages {
+ // we already allocated non NUMA specific hugepages via kernel arguments
+ if page.Node == nil {
+ continue
+ }
+
+ hugepagesSize, err := GetHugepagesSizeKilobytes(page.Size)
+ if err != nil {
+ return nil, err
+ }
+
+ hugepagesService, err := getSystemdContent(getHugepagesAllocationUnitOptions(
+ hugepagesSize,
+ page.Count,
+ *page.Node,
+ ))
+ if err != nil {
+ return nil, err
+ }
+
+ ignitionConfig.Systemd.Units = append(ignitionConfig.Systemd.Units, igntypes.Unit{
+ Contents: &hugepagesService,
+ Enabled: pointer.BoolPtr(true),
+ Name: getSystemdService(fmt.Sprintf("%s-%skB-NUMA%d", hugepagesAllocation, hugepagesSize, *page.Node)),
+ })
+ }
+ }
+
+ if profile.Spec.CPU != nil && profile.Spec.CPU.Reserved != nil {
+ rpsMask, err := components.CPUListToMaskList(string(*profile.Spec.CPU.Reserved))
+ if err != nil {
+ return nil, err
+ }
+
+ rpsService, err := getSystemdContent(getRPSUnitOptions(rpsMask))
+ if err != nil {
+ return nil, err
+ }
+
+ ignitionConfig.Systemd.Units = append(ignitionConfig.Systemd.Units, igntypes.Unit{
+ Contents: &rpsService,
+ Name: getSystemdService("update-rps@"),
+ })
+ }
+
+ return ignitionConfig, nil
+}
+
+func getBashScriptPath(scriptName string) string {
+ return fmt.Sprintf("%s/%s.sh", bashScriptsDir, scriptName)
+}
+
+func getSystemdEnvironment(key string, value string) string {
+ return fmt.Sprintf("%s=%s", key, value)
+}
+
+func getSystemdService(serviceName string) string {
+ return fmt.Sprintf("%s.service", serviceName)
+}
+
+func getSystemdContent(options []*unit.UnitOption) (string, error) {
+ outReader := unit.Serialize(options)
+ outBytes, err := ioutil.ReadAll(outReader)
+ if err != nil {
+ return "", err
+ }
+ return string(outBytes), nil
+}
+
+// GetOCIHooksConfigContent reads and returns the content of the OCI hook file
+func GetOCIHooksConfigContent(configFile string, profile *performancev2.PerformanceProfile) ([]byte, error) {
+ ociHookConfigTemplate, err := template.ParseFS(assets.Configs, filepath.Join("configs", configFile))
+ if err != nil {
+ return nil, err
+ }
+
+ rpsMask := "0" // RPS disabled
+ if profile.Spec.CPU != nil && profile.Spec.CPU.Reserved != nil {
+ rpsMask, err = components.CPUListToMaskList(string(*profile.Spec.CPU.Reserved))
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ outContent := &bytes.Buffer{}
+ templateArgs := map[string]string{ociTemplateRPSMask: rpsMask}
+ if err := ociHookConfigTemplate.Execute(outContent, templateArgs); err != nil {
+ return nil, err
+ }
+
+ return outContent.Bytes(), nil
+}
+
+// GetHugepagesSizeKilobytes returns hugepages size in kilobytes
+func GetHugepagesSizeKilobytes(hugepagesSize performancev2.HugePageSize) (string, error) {
+ switch hugepagesSize {
+ case "1G":
+ return "1048576", nil
+ case "2M":
+ return "2048", nil
+ default:
+ return "", fmt.Errorf("can not convert size %q to kilobytes", hugepagesSize)
+ }
+}
+
+func getHugepagesAllocationUnitOptions(hugepagesSize string, hugepagesCount int32, numaNode int32) []*unit.UnitOption {
+ return []*unit.UnitOption{
+ // [Unit]
+ // Description
+ unit.NewUnitOption(systemdSectionUnit, systemdDescription, fmt.Sprintf("Hugepages-%skB allocation on the node %d", hugepagesSize, numaNode)),
+ // Before
+ unit.NewUnitOption(systemdSectionUnit, systemdBefore, systemdServiceKubelet),
+ // [Service]
+ // Environment
+ unit.NewUnitOption(systemdSectionService, systemdEnvironment, getSystemdEnvironment(environmentHugepagesCount, fmt.Sprint(hugepagesCount))),
+ unit.NewUnitOption(systemdSectionService, systemdEnvironment, getSystemdEnvironment(environmentHugepagesSize, hugepagesSize)),
+ unit.NewUnitOption(systemdSectionService, systemdEnvironment, getSystemdEnvironment(environmentNUMANode, fmt.Sprint(numaNode))),
+ // Type
+ unit.NewUnitOption(systemdSectionService, systemdType, systemdServiceTypeOneshot),
+ // RemainAfterExit
+ unit.NewUnitOption(systemdSectionService, systemdRemainAfterExit, systemdTrue),
+ // ExecStart
+ unit.NewUnitOption(systemdSectionService, systemdExecStart, getBashScriptPath(hugepagesAllocation)),
+ // [Install]
+ // WantedBy
+ unit.NewUnitOption(systemdSectionInstall, systemdWantedBy, systemdTargetMultiUser),
+ }
+}
+
+func getRPSUnitOptions(rpsMask string) []*unit.UnitOption {
+ cmd := fmt.Sprintf("%s %%i %s", getBashScriptPath(setRPSMask), rpsMask)
+ return []*unit.UnitOption{
+ // [Unit]
+ // Description
+ unit.NewUnitOption(systemdSectionUnit, systemdDescription, "Sets network devices RPS mask"),
+ // [Service]
+ // Type
+ unit.NewUnitOption(systemdSectionService, systemdType, systemdServiceTypeOneshot),
+ // ExecStart
+ unit.NewUnitOption(systemdSectionService, systemdExecStart, cmd),
+ }
+}
+
+func addContent(ignitionConfig *igntypes.Config, content []byte, dst string, mode *int) {
+ contentBase64 := base64.StdEncoding.EncodeToString(content)
+ ignitionConfig.Storage.Files = append(ignitionConfig.Storage.Files, igntypes.File{
+ Node: igntypes.Node{
+ Path: dst,
+ },
+ FileEmbedded1: igntypes.FileEmbedded1{
+ Contents: igntypes.Resource{
+ Source: pointer.StringPtr(fmt.Sprintf("%s,%s", defaultIgnitionContentSource, contentBase64)),
+ },
+ Mode: mode,
+ },
+ })
+}
+
+func renderCrioConfigSnippet(profile *performancev2.PerformanceProfile, src string) ([]byte, error) {
+ templateArgs := make(map[string]string)
+ if profile.Spec.CPU.Reserved != nil {
+ templateArgs[templateReservedCpus] = string(*profile.Spec.CPU.Reserved)
+ }
+
+ profileTemplate, err := template.ParseFS(assets.Configs, src)
+ if err != nil {
+ return nil, err
+ }
+
+ crioConfig := &bytes.Buffer{}
+ if err := profileTemplate.Execute(crioConfig, templateArgs); err != nil {
+ return nil, err
+ }
+
+ return crioConfig.Bytes(), nil
+}
diff --git a/pkg/pao/controller/performanceprofile/components/machineconfig/machineconfig_suite_test.go b/pkg/pao/controller/performanceprofile/components/machineconfig/machineconfig_suite_test.go
new file mode 100644
index 000000000..84ce7ad85
--- /dev/null
+++ b/pkg/pao/controller/performanceprofile/components/machineconfig/machineconfig_suite_test.go
@@ -0,0 +1,13 @@
+package machineconfig
+
+import (
+ "testing"
+
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+)
+
+func TestMachineConfig(t *testing.T) {
+ RegisterFailHandler(Fail)
+ RunSpecs(t, "Machine Config Suite")
+}
diff --git a/pkg/pao/controller/performanceprofile/components/machineconfig/machineconfig_test.go b/pkg/pao/controller/performanceprofile/components/machineconfig/machineconfig_test.go
new file mode 100644
index 000000000..e88e1df92
--- /dev/null
+++ b/pkg/pao/controller/performanceprofile/components/machineconfig/machineconfig_test.go
@@ -0,0 +1,77 @@
+package machineconfig
+
+import (
+ "fmt"
+
+ "k8s.io/utils/pointer"
+
+ "github.com/ghodss/yaml"
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+
+ "github.com/openshift/cluster-node-tuning-operator/pkg/pao/controller/performanceprofile/components"
+ testutils "github.com/openshift/cluster-node-tuning-operator/pkg/pao/utils/testing"
+)
+
+const hugepagesAllocationService = `
+ - contents: |
+ [Unit]
+ Description=Hugepages-1048576kB allocation on the node 0
+ Before=kubelet.service
+
+ [Service]
+ Environment=HUGEPAGES_COUNT=4
+ Environment=HUGEPAGES_SIZE=1048576
+ Environment=NUMA_NODE=0
+ Type=oneshot
+ RemainAfterExit=true
+ ExecStart=/usr/local/bin/hugepages-allocation.sh
+
+ [Install]
+ WantedBy=multi-user.target
+ enabled: true
+ name: hugepages-allocation-1048576kB-NUMA0.service
+`
+
+var _ = Describe("Machine Config", func() {
+
+ Context("machine config creation ", func() {
+ It("should create machine config with valid assets", func() {
+ profile := testutils.NewPerformanceProfile("test")
+ profile.Spec.HugePages.Pages[0].Node = pointer.Int32Ptr(0)
+
+ _, err := New(profile)
+ Expect(err).ToNot(HaveOccurred())
+ })
+ })
+
+ Context("with hugepages with specified NUMA node", func() {
+ var manifest string
+
+ BeforeEach(func() {
+ profile := testutils.NewPerformanceProfile("test")
+ profile.Spec.HugePages.Pages[0].Node = pointer.Int32Ptr(0)
+
+ labelKey, labelValue := components.GetFirstKeyAndValue(profile.Spec.MachineConfigLabel)
+ mc, err := New(profile)
+ Expect(err).ToNot(HaveOccurred())
+ Expect(mc.Spec.KernelType).To(Equal(MCKernelRT))
+
+ y, err := yaml.Marshal(mc)
+ Expect(err).ToNot(HaveOccurred())
+
+ manifest = string(y)
+ Expect(manifest).To(ContainSubstring(fmt.Sprintf("%s: %s", labelKey, labelValue)))
+ })
+
+ It("should not add hugepages kernel boot parameters", func() {
+ Expect(manifest).ToNot(ContainSubstring("- hugepagesz=1G"))
+ Expect(manifest).ToNot(ContainSubstring("- hugepages=4"))
+ })
+
+ It("should add systemd unit to allocate hugepages", func() {
+ Expect(manifest).To(ContainSubstring(hugepagesAllocationService))
+ })
+
+ })
+})
diff --git a/pkg/pao/controller/performanceprofile/components/manifestset/manifestset.go b/pkg/pao/controller/performanceprofile/components/manifestset/manifestset.go
new file mode 100644
index 000000000..e1eab5518
--- /dev/null
+++ b/pkg/pao/controller/performanceprofile/components/manifestset/manifestset.go
@@ -0,0 +1,79 @@
+package manifestset
+
+import (
+ performancev2 "github.com/openshift/cluster-node-tuning-operator/pkg/apis/pao/v2"
+ tunedv1 "github.com/openshift/cluster-node-tuning-operator/pkg/apis/tuned/v1"
+ "github.com/openshift/cluster-node-tuning-operator/pkg/pao/controller/performanceprofile/components/kubeletconfig"
+ "github.com/openshift/cluster-node-tuning-operator/pkg/pao/controller/performanceprofile/components/machineconfig"
+ profilecomponent "github.com/openshift/cluster-node-tuning-operator/pkg/pao/controller/performanceprofile/components/profile"
+ "github.com/openshift/cluster-node-tuning-operator/pkg/pao/controller/performanceprofile/components/runtimeclass"
+ "github.com/openshift/cluster-node-tuning-operator/pkg/pao/controller/performanceprofile/components/tuned"
+ mcov1 "github.com/openshift/machine-config-operator/pkg/apis/machineconfiguration.openshift.io/v1"
+
+ nodev1beta1 "k8s.io/api/node/v1beta1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// ManifestResultSet contains all component's instances that should be created according to performance-profile
+type ManifestResultSet struct {
+ MachineConfig *mcov1.MachineConfig
+ KubeletConfig *mcov1.KubeletConfig
+ Tuned *tunedv1.Tuned
+ RuntimeClass *nodev1beta1.RuntimeClass
+}
+
+// ManifestTable is map with Kind name as key and component's instance as value
+type ManifestTable map[string]interface{}
+
+// ToObjects return a list of all manifests converted to objects
+func (ms *ManifestResultSet) ToObjects() []metav1.Object {
+ objs := make([]metav1.Object, 0)
+
+ objs = append(objs,
+ ms.MachineConfig.GetObjectMeta(),
+ ms.KubeletConfig.GetObjectMeta(),
+ ms.Tuned.GetObjectMeta(),
+ ms.RuntimeClass.GetObjectMeta(),
+ )
+ return objs
+}
+
+// ToManifestTable return a map with Kind name as key and component's instance as value
+func (ms *ManifestResultSet) ToManifestTable() ManifestTable {
+ manifests := make(map[string]interface{}, 0)
+ manifests[ms.MachineConfig.Kind] = ms.MachineConfig
+ manifests[ms.KubeletConfig.Kind] = ms.KubeletConfig
+ manifests[ms.Tuned.Kind] = ms.Tuned
+ manifests[ms.RuntimeClass.Kind] = ms.RuntimeClass
+ return manifests
+}
+
+// GetNewComponents return a list of all component's instances that should be created according to profile
+func GetNewComponents(profile *performancev2.PerformanceProfile, profileMCP *mcov1.MachineConfigPool) (*ManifestResultSet, error) {
+ machineConfigPoolSelector := profilecomponent.GetMachineConfigPoolSelector(profile, profileMCP)
+
+ mc, err := machineconfig.New(profile)
+ if err != nil {
+ return nil, err
+ }
+
+ kc, err := kubeletconfig.New(profile, machineConfigPoolSelector)
+ if err != nil {
+ return nil, err
+ }
+
+ performanceTuned, err := tuned.NewNodePerformance(profile)
+ if err != nil {
+ return nil, err
+ }
+
+ runtimeClass := runtimeclass.New(profile, machineconfig.HighPerformanceRuntime)
+
+ manifestResultSet := ManifestResultSet{
+ MachineConfig: mc,
+ KubeletConfig: kc,
+ Tuned: performanceTuned,
+ RuntimeClass: runtimeClass,
+ }
+ return &manifestResultSet, nil
+}
diff --git a/pkg/pao/controller/performanceprofile/components/profile/profile.go b/pkg/pao/controller/performanceprofile/components/profile/profile.go
new file mode 100644
index 000000000..7c2d1134b
--- /dev/null
+++ b/pkg/pao/controller/performanceprofile/components/profile/profile.go
@@ -0,0 +1,57 @@
+package profile
+
+import (
+ performancev2 "github.com/openshift/cluster-node-tuning-operator/pkg/apis/pao/v2"
+ "github.com/openshift/cluster-node-tuning-operator/pkg/pao/controller/performanceprofile/components"
+
+ mcov1 "github.com/openshift/machine-config-operator/pkg/apis/machineconfiguration.openshift.io/v1"
+)
+
+// GetMachineConfigPoolSelector returns the MachineConfigPoolSelector from the CR or a default value calculated based on NodeSelector
+func GetMachineConfigPoolSelector(profile *performancev2.PerformanceProfile, profileMCP *mcov1.MachineConfigPool) map[string]string {
+	// we do not really need profile.spec.machineConfigPoolSelector anymore, but we should use it for backward compatibility
+ if profile.Spec.MachineConfigPoolSelector != nil {
+ return profile.Spec.MachineConfigPoolSelector
+ }
+
+ if profileMCP != nil {
+ return profileMCP.Labels
+ }
+
+	// we still need to construct the machineConfigPoolSelector when called from the render command
+ return getDefaultLabel(profile)
+}
+
+// GetMachineConfigLabel returns the MachineConfigLabels from the CR or a default value calculated based on NodeSelector
+func GetMachineConfigLabel(profile *performancev2.PerformanceProfile) map[string]string {
+ if profile.Spec.MachineConfigLabel != nil {
+ return profile.Spec.MachineConfigLabel
+ }
+
+ return getDefaultLabel(profile)
+}
+
+func getDefaultLabel(profile *performancev2.PerformanceProfile) map[string]string {
+ nodeSelectorKey, _ := components.GetFirstKeyAndValue(profile.Spec.NodeSelector)
+ // no error handling needed, it's validated already
+ _, nodeRole, _ := components.SplitLabelKey(nodeSelectorKey)
+
+ labels := make(map[string]string)
+ labels[components.MachineConfigRoleLabelKey] = nodeRole
+
+ return labels
+}
+
+// IsPaused returns whether or not a performance profile's reconcile loop is paused
+func IsPaused(profile *performancev2.PerformanceProfile) bool {
+ if profile.Annotations == nil {
+ return false
+ }
+
+ isPaused, ok := profile.Annotations[performancev2.PerformanceProfilePauseAnnotation]
+ if ok && isPaused == "true" {
+ return true
+ }
+
+ return false
+}
diff --git a/pkg/pao/controller/performanceprofile/components/profile/profile_suite_test.go b/pkg/pao/controller/performanceprofile/components/profile/profile_suite_test.go
new file mode 100644
index 000000000..06be7f26b
--- /dev/null
+++ b/pkg/pao/controller/performanceprofile/components/profile/profile_suite_test.go
@@ -0,0 +1,13 @@
+package profile
+
+import (
+ "testing"
+
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+)
+
+func TestProfile(t *testing.T) {
+ RegisterFailHandler(Fail)
+ RunSpecs(t, "Profile Suite")
+}
diff --git a/pkg/pao/controller/performanceprofile/components/profile/profile_test.go b/pkg/pao/controller/performanceprofile/components/profile/profile_test.go
new file mode 100644
index 000000000..b83639813
--- /dev/null
+++ b/pkg/pao/controller/performanceprofile/components/profile/profile_test.go
@@ -0,0 +1,67 @@
+package profile
+
+import (
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+ performancev2 "github.com/openshift/cluster-node-tuning-operator/pkg/apis/pao/v2"
+ "github.com/openshift/cluster-node-tuning-operator/pkg/pao/controller/performanceprofile/components"
+
+ testutils "github.com/openshift/cluster-node-tuning-operator/pkg/pao/utils/testing"
+)
+
+const (
+ NodeSelectorRole = "barRole"
+)
+
+var _ = Describe("PerformanceProfile", func() {
+ var profile *performancev2.PerformanceProfile
+
+ BeforeEach(func() {
+ profile = testutils.NewPerformanceProfile("test")
+ })
+
+ Describe("Defaulting", func() {
+ It("should return given MachineConfigLabel", func() {
+ labels := GetMachineConfigLabel(profile)
+ k, v := components.GetFirstKeyAndValue(labels)
+ Expect(k).To(Equal(testutils.MachineConfigLabelKey))
+ Expect(v).To(Equal(testutils.MachineConfigLabelValue))
+
+ })
+
+ It("should return given MachineConfigPoolSelector", func() {
+ labels := GetMachineConfigPoolSelector(profile, nil)
+ k, v := components.GetFirstKeyAndValue(labels)
+ Expect(k).To(Equal(testutils.MachineConfigPoolLabelKey))
+ Expect(v).To(Equal(testutils.MachineConfigPoolLabelValue))
+ })
+
+ It("should return default MachineConfigLabels", func() {
+ profile.Spec.MachineConfigLabel = nil
+ setValidNodeSelector(profile)
+
+ labels := GetMachineConfigLabel(profile)
+ k, v := components.GetFirstKeyAndValue(labels)
+ Expect(k).To(Equal(components.MachineConfigRoleLabelKey))
+ Expect(v).To(Equal(NodeSelectorRole))
+
+ })
+
+ It("should return default MachineConfigPoolSelector", func() {
+ profile.Spec.MachineConfigPoolSelector = nil
+ setValidNodeSelector(profile)
+
+ labels := GetMachineConfigPoolSelector(profile, nil)
+ k, v := components.GetFirstKeyAndValue(labels)
+ Expect(k).To(Equal(components.MachineConfigRoleLabelKey))
+ Expect(v).To(Equal(NodeSelectorRole))
+
+ })
+ })
+})
+
+func setValidNodeSelector(profile *performancev2.PerformanceProfile) {
+ selector := make(map[string]string)
+ selector["fooDomain/"+NodeSelectorRole] = ""
+ profile.Spec.NodeSelector = selector
+}
diff --git a/pkg/pao/controller/performanceprofile/components/runtimeclass/runtimeclass.go b/pkg/pao/controller/performanceprofile/components/runtimeclass/runtimeclass.go
new file mode 100644
index 000000000..dcac37736
--- /dev/null
+++ b/pkg/pao/controller/performanceprofile/components/runtimeclass/runtimeclass.go
@@ -0,0 +1,27 @@
+package runtimeclass
+
+import (
+ performancev2 "github.com/openshift/cluster-node-tuning-operator/pkg/apis/pao/v2"
+ "github.com/openshift/cluster-node-tuning-operator/pkg/pao/controller/performanceprofile/components"
+
+ nodev1beta1 "k8s.io/api/node/v1beta1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// New returns a new RuntimeClass object
+func New(profile *performancev2.PerformanceProfile, handler string) *nodev1beta1.RuntimeClass {
+ name := components.GetComponentName(profile.Name, components.ComponentNamePrefix)
+ return &nodev1beta1.RuntimeClass{
+ TypeMeta: metav1.TypeMeta{
+ Kind: "RuntimeClass",
+ APIVersion: "node.k8s.io/v1beta1",
+ },
+ ObjectMeta: metav1.ObjectMeta{
+ Name: name,
+ },
+ Handler: handler,
+ Scheduling: &nodev1beta1.Scheduling{
+ NodeSelector: profile.Spec.NodeSelector,
+ },
+ }
+}
diff --git a/pkg/pao/controller/performanceprofile/components/tuned/tuned.go b/pkg/pao/controller/performanceprofile/components/tuned/tuned.go
new file mode 100644
index 000000000..5268949ab
--- /dev/null
+++ b/pkg/pao/controller/performanceprofile/components/tuned/tuned.go
@@ -0,0 +1,204 @@
+package tuned
+
+import (
+ "bytes"
+ "fmt"
+ "path/filepath"
+ "strconv"
+ "strings"
+ "text/template"
+
+ tunedv1 "github.com/openshift/cluster-node-tuning-operator/pkg/apis/tuned/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/kubernetes/pkg/kubelet/cm/cpuset"
+ "k8s.io/utils/pointer"
+
+ assets "github.com/openshift/cluster-node-tuning-operator/assets/pao"
+ performancev2 "github.com/openshift/cluster-node-tuning-operator/pkg/apis/pao/v2"
+ "github.com/openshift/cluster-node-tuning-operator/pkg/pao/controller/performanceprofile/components"
+ profilecomponent "github.com/openshift/cluster-node-tuning-operator/pkg/pao/controller/performanceprofile/components/profile"
+)
+
+const (
+ cmdlineDelimiter = " "
+ templateIsolatedCpus = "IsolatedCpus"
+ templateStaticIsolation = "StaticIsolation"
+ templateDefaultHugepagesSize = "DefaultHugepagesSize"
+ templateHugepages = "Hugepages"
+ templateAdditionalArgs = "AdditionalArgs"
+ templateGloballyDisableIrqLoadBalancing = "GloballyDisableIrqLoadBalancing"
+ templateNetDevices = "NetDevices"
+ nfConntrackHashsize = "nf_conntrack_hashsize=131072"
+)
+
+func new(name string, profiles []tunedv1.TunedProfile, recommends []tunedv1.TunedRecommend) *tunedv1.Tuned {
+ return &tunedv1.Tuned{
+ TypeMeta: metav1.TypeMeta{
+ APIVersion: tunedv1.SchemeGroupVersion.String(),
+ Kind: "Tuned",
+ },
+ ObjectMeta: metav1.ObjectMeta{
+ Name: name,
+ Namespace: components.NamespaceNodeTuningOperator,
+ },
+ Spec: tunedv1.TunedSpec{
+ Profile: profiles,
+ Recommend: recommends,
+ },
+ }
+}
+
+// NewNodePerformance returns tuned profile for performance sensitive workflows
+func NewNodePerformance(profile *performancev2.PerformanceProfile) (*tunedv1.Tuned, error) {
+ templateArgs := make(map[string]string)
+
+ if profile.Spec.CPU.Isolated != nil {
+ templateArgs[templateIsolatedCpus] = string(*profile.Spec.CPU.Isolated)
+ if profile.Spec.CPU.BalanceIsolated != nil && *profile.Spec.CPU.BalanceIsolated == false {
+ templateArgs[templateStaticIsolation] = strconv.FormatBool(true)
+ }
+ }
+
+ if profile.Spec.HugePages != nil {
+ var defaultHugepageSize performancev2.HugePageSize
+ if profile.Spec.HugePages.DefaultHugePagesSize != nil {
+ defaultHugepageSize = *profile.Spec.HugePages.DefaultHugePagesSize
+ templateArgs[templateDefaultHugepagesSize] = string(defaultHugepageSize)
+ }
+
+ var is2MHugepagesRequested *bool
+ var hugepages []string
+ for _, page := range profile.Spec.HugePages.Pages {
+ // we can not allocate huge pages on the specific NUMA node via kernel boot arguments
+ if page.Node != nil {
+ // a user requested to allocate 2M huge pages on the specific NUMA node,
+ // append dummy kernel arguments
+ if page.Size == components.HugepagesSize2M && is2MHugepagesRequested == nil {
+ is2MHugepagesRequested = pointer.BoolPtr(true)
+ }
+ continue
+ }
+
+		// a user requested to allocate 2M huge pages without specifying the node
+ // we need to append 2M hugepages kernel arguments anyway, no need to add dummy
+ // kernel arguments
+ if page.Size == components.HugepagesSize2M {
+ is2MHugepagesRequested = pointer.BoolPtr(false)
+ }
+
+ hugepages = append(hugepages, fmt.Sprintf("hugepagesz=%s", string(page.Size)))
+ hugepages = append(hugepages, fmt.Sprintf("hugepages=%d", page.Count))
+ }
+
+ // append dummy 2M huge pages kernel arguments to guarantee that the kernel will create 2M related files
+ // and directories under the filesystem
+ if is2MHugepagesRequested != nil && *is2MHugepagesRequested {
+ if defaultHugepageSize == components.HugepagesSize1G {
+ hugepages = append(hugepages, fmt.Sprintf("hugepagesz=%s", components.HugepagesSize2M))
+ hugepages = append(hugepages, fmt.Sprintf("hugepages=%d", 0))
+ }
+ }
+
+ hugepagesArgs := strings.Join(hugepages, cmdlineDelimiter)
+ templateArgs[templateHugepages] = hugepagesArgs
+ }
+
+ if profile.Spec.AdditionalKernelArgs != nil {
+ templateArgs[templateAdditionalArgs] = strings.Join(profile.Spec.AdditionalKernelArgs, cmdlineDelimiter)
+ }
+
+ if profile.Spec.GloballyDisableIrqLoadBalancing != nil &&
+ *profile.Spec.GloballyDisableIrqLoadBalancing == true {
+ templateArgs[templateGloballyDisableIrqLoadBalancing] = strconv.FormatBool(true)
+ }
+
+ //set default [net] field first, override if needed.
+ templateArgs[templateNetDevices] = fmt.Sprintf("[net]\n%s", nfConntrackHashsize)
+ if profile.Spec.Net != nil && *profile.Spec.Net.UserLevelNetworking && profile.Spec.CPU.Reserved != nil {
+
+ reservedSet, err := cpuset.Parse(string(*profile.Spec.CPU.Reserved))
+ if err != nil {
+ return nil, err
+ }
+ reserveCPUcount := reservedSet.Size()
+
+ var devices []string
+ var tunedNetDevicesOutput []string
+ netPluginSequence := 0
+ netPluginString := ""
+
+ for _, device := range profile.Spec.Net.Devices {
+ devices = make([]string, 0)
+ if device.DeviceID != nil {
+ devices = append(devices, "^ID_MODEL_ID="+*device.DeviceID)
+ }
+ if device.VendorID != nil {
+ devices = append(devices, "^ID_VENDOR_ID="+*device.VendorID)
+ }
+ if device.InterfaceName != nil {
+ deviceNameAmendedRegex := strings.Replace(*device.InterfaceName, "*", ".*", -1)
+ if strings.HasPrefix(*device.InterfaceName, "!") {
+ devices = append(devices, "^INTERFACE="+"(?!"+deviceNameAmendedRegex+")")
+ } else {
+ devices = append(devices, "^INTERFACE="+deviceNameAmendedRegex)
+ }
+ }
+ // Final regex format can be one of the following formats:
+ // devicesUdevRegex = ^INTERFACE=InterfaceName' (InterfaceName can also hold .* representing * wildcard)
+		// devicesUdevRegex = ^INTERFACE=(?!InterfaceName)' (InterfaceName starting with ! represents the ! wildcard)
+ // devicesUdevRegex = ^ID_VENDOR_ID=VendorID'
+ // devicesUdevRegex = ^ID_MODEL_ID=DeviceID[\s\S]*^ID_VENDOR_ID=VendorID'
+ // devicesUdevRegex = ^ID_MODEL_ID=DeviceID[\s\S]*^ID_VENDOR_ID=VendorID[\s\S]*^INTERFACE=InterfaceName'
+ // devicesUdevRegex = ^ID_MODEL_ID=DeviceID[\s\S]*^ID_VENDOR_ID=VendorID[\s\S]*^INTERFACE=(?!InterfaceName)'
+ // Important note: The order of the key must be preserved - INTERFACE, ID_MODEL_ID, ID_VENDOR_ID (in that order)
+ devicesUdevRegex := strings.Join(devices, `[\s\S]*`)
+ if netPluginSequence > 0 {
+ netPluginString = "_" + strconv.Itoa(netPluginSequence)
+ }
+ tunedNetDevicesOutput = append(tunedNetDevicesOutput, fmt.Sprintf("\n[net%s]\ntype=net\ndevices_udev_regex=%s\nchannels=combined %d\n%s", netPluginString, devicesUdevRegex, reserveCPUcount, nfConntrackHashsize))
+ netPluginSequence++
+ }
+ //nfConntrackHashsize
+ if len(tunedNetDevicesOutput) == 0 {
+ templateArgs[templateNetDevices] = fmt.Sprintf("[net]\nchannels=combined %d\n%s", reserveCPUcount, nfConntrackHashsize)
+ } else {
+ templateArgs[templateNetDevices] = strings.Join(tunedNetDevicesOutput, "")
+ }
+ }
+
+ profileData, err := getProfileData(filepath.Join("tuned", components.ProfileNamePerformance), templateArgs)
+ if err != nil {
+ return nil, err
+ }
+
+ name := components.GetComponentName(profile.Name, components.ProfileNamePerformance)
+ profiles := []tunedv1.TunedProfile{
+ {
+ Name: &name,
+ Data: &profileData,
+ },
+ }
+
+ priority := uint64(20)
+ recommends := []tunedv1.TunedRecommend{
+ {
+ Profile: &name,
+ Priority: &priority,
+ MachineConfigLabels: profilecomponent.GetMachineConfigLabel(profile),
+ },
+ }
+ return new(name, profiles, recommends), nil
+}
+
+func getProfileData(tunedTemplate string, data interface{}) (string, error) {
+ profileTemplate, err := template.ParseFS(assets.Tuned, tunedTemplate)
+ if err != nil {
+ return "", err
+ }
+
+ profile := &bytes.Buffer{}
+ if err := profileTemplate.Execute(profile, data); err != nil {
+ return "", err
+ }
+ return profile.String(), nil
+}
diff --git a/pkg/pao/controller/performanceprofile/components/tuned/tuned_suite_test.go b/pkg/pao/controller/performanceprofile/components/tuned/tuned_suite_test.go
new file mode 100644
index 000000000..57672cce1
--- /dev/null
+++ b/pkg/pao/controller/performanceprofile/components/tuned/tuned_suite_test.go
@@ -0,0 +1,13 @@
+package tuned
+
+import (
+ "testing"
+
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+)
+
+func TestTuned(t *testing.T) {
+ RegisterFailHandler(Fail)
+ RunSpecs(t, "Tuned Suite")
+}
diff --git a/pkg/pao/controller/performanceprofile/components/tuned/tuned_test.go b/pkg/pao/controller/performanceprofile/components/tuned/tuned_test.go
new file mode 100644
index 000000000..cf202a78d
--- /dev/null
+++ b/pkg/pao/controller/performanceprofile/components/tuned/tuned_test.go
@@ -0,0 +1,305 @@
+package tuned
+
+import (
+ "fmt"
+ "regexp"
+ "strconv"
+ "strings"
+
+ "github.com/ghodss/yaml"
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+ performancev2 "github.com/openshift/cluster-node-tuning-operator/pkg/apis/pao/v2"
+ "github.com/openshift/cluster-node-tuning-operator/pkg/pao/controller/performanceprofile/components"
+ testutils "github.com/openshift/cluster-node-tuning-operator/pkg/pao/utils/testing"
+
+ cpuset "k8s.io/kubernetes/pkg/kubelet/cm/cpuset"
+ "k8s.io/utils/pointer"
+)
+
+const expectedMatchSelector = `
+ - machineConfigLabels:
+ mcKey: mcValue
+`
+
+var (
+ cmdlineCPUsPartitioning = regexp.MustCompile(`\s*cmdline_cpu_part=\+\s*nohz=on\s+rcu_nocbs=\${isolated_cores}\s+tuned.non_isolcpus=\${not_isolated_cpumask}\s+intel_pstate=disable\s+nosoftlockup\s*`)
+ cmdlineRealtimeWithCPUBalancing = regexp.MustCompile(`\s*cmdline_realtime=\+\s*tsc=nowatchdog\s+intel_iommu=on\s+iommu=pt\s+isolcpus=managed_irq,\${isolated_cores}\s+systemd.cpu_affinity=\${not_isolated_cores_expanded}\s*`)
+ cmdlineRealtimeWithoutCPUBalancing = regexp.MustCompile(`\s*cmdline_realtime=\+\s*tsc=nowatchdog\s+intel_iommu=on\s+iommu=pt\s+isolcpus=domain,managed_irq,\${isolated_cores}\s+systemd.cpu_affinity=\${not_isolated_cores_expanded}\s*`)
+ cmdlineHugepages = regexp.MustCompile(`\s*cmdline_hugepages=\+\s*default_hugepagesz=1G\s+hugepagesz=1G\s+hugepages=4\s*`)
+ cmdlineAdditionalArg = regexp.MustCompile(`\s*cmdline_additionalArg=\+\s*test1=val1\s+test2=val2\s*`)
+ cmdlineDummy2MHugePages = regexp.MustCompile(`\s*cmdline_hugepages=\+\s*default_hugepagesz=1G\s+hugepagesz=1G\s+hugepages=4\s+hugepagesz=2M\s+hugepages=0\s*`)
+ cmdlineMultipleHugePages = regexp.MustCompile(`\s*cmdline_hugepages=\+\s*default_hugepagesz=1G\s+hugepagesz=1G\s+hugepages=4\s+hugepagesz=2M\s+hugepages=128\s*`)
+)
+
+var additionalArgs = []string{"test1=val1", "test2=val2"}
+
+var _ = Describe("Tuned", func() {
+ var profile *performancev2.PerformanceProfile
+
+ BeforeEach(func() {
+ profile = testutils.NewPerformanceProfile("test")
+ })
+
+ getTunedManifest := func(profile *performancev2.PerformanceProfile) string {
+ tuned, err := NewNodePerformance(profile)
+ Expect(err).ToNot(HaveOccurred())
+ y, err := yaml.Marshal(tuned)
+ Expect(err).ToNot(HaveOccurred())
+ return string(y)
+ }
+
+ Context("with worker performance profile", func() {
+ It("should generate yaml with expected parameters", func() {
+ manifest := getTunedManifest(profile)
+
+ Expect(manifest).To(ContainSubstring(expectedMatchSelector))
+			Expect(manifest).To(ContainSubstring("isolated_cores=4-7"))
+ By("Populating CPU partitioning cmdline")
+ Expect(cmdlineCPUsPartitioning.MatchString(manifest)).To(BeTrue())
+ By("Populating realtime cmdline")
+ Expect(cmdlineRealtimeWithCPUBalancing.MatchString(manifest)).To(BeTrue())
+ By("Populating hugepages cmdline")
+ Expect(cmdlineHugepages.MatchString(manifest)).To(BeTrue())
+ By("Populating empty additional kernel arguments cmdline")
+ Expect(manifest).To(ContainSubstring("cmdline_additionalArg="))
+
+ })
+
+ It("should generate yaml with expected parameters for Isolated balancing disabled", func() {
+ profile.Spec.CPU.BalanceIsolated = pointer.BoolPtr(false)
+ manifest := getTunedManifest(profile)
+
+ Expect(cmdlineRealtimeWithoutCPUBalancing.MatchString(manifest)).To(BeTrue())
+ })
+
+ It("should generate yaml with expected parameters for additional kernel arguments", func() {
+ profile.Spec.AdditionalKernelArgs = additionalArgs
+ manifest := getTunedManifest(profile)
+
+ Expect(cmdlineAdditionalArg.MatchString(manifest)).To(BeTrue())
+ })
+
+ It("should not allocate hugepages on the specific NUMA node via kernel arguments", func() {
+ manifest := getTunedManifest(profile)
+ Expect(strings.Count(manifest, "hugepagesz=")).Should(BeNumerically("==", 2))
+ Expect(strings.Count(manifest, "hugepages=")).Should(BeNumerically("==", 3))
+
+ profile.Spec.HugePages.Pages[0].Node = pointer.Int32Ptr(1)
+ manifest = getTunedManifest(profile)
+ Expect(strings.Count(manifest, "hugepagesz=")).Should(BeNumerically("==", 1))
+ Expect(strings.Count(manifest, "hugepages=")).Should(BeNumerically("==", 2))
+ })
+
+ Context("with 1G default huge pages", func() {
+ Context("with requested 2M huge pages allocation on the specified node", func() {
+ It("should append the dummy 2M huge pages kernel arguments", func() {
+ profile.Spec.HugePages.Pages = append(profile.Spec.HugePages.Pages, performancev2.HugePage{
+ Size: components.HugepagesSize2M,
+ Count: 128,
+ Node: pointer.Int32Ptr(0),
+ })
+
+ manifest := getTunedManifest(profile)
+ Expect(cmdlineDummy2MHugePages.MatchString(manifest)).To(BeTrue())
+ })
+ })
+
+ Context("with requested 2M huge pages allocation via kernel arguments", func() {
+ It("should not append the dummy 2M kernel arguments", func() {
+ profile.Spec.HugePages.Pages = append(profile.Spec.HugePages.Pages, performancev2.HugePage{
+ Size: components.HugepagesSize2M,
+ Count: 128,
+ })
+
+ manifest := getTunedManifest(profile)
+ Expect(cmdlineDummy2MHugePages.MatchString(manifest)).To(BeFalse())
+ Expect(cmdlineMultipleHugePages.MatchString(manifest)).To(BeTrue())
+ })
+ })
+
+ Context("without requested 2M hugepages", func() {
+ It("should not append dummy 2M huge pages kernel arguments", func() {
+ manifest := getTunedManifest(profile)
+ Expect(cmdlineDummy2MHugePages.MatchString(manifest)).To(BeFalse())
+ })
+ })
+
+ Context("with requested 2M huge pages allocation on the specified node and via kernel arguments", func() {
+ It("should not append the dummy 2M kernel arguments", func() {
+ profile.Spec.HugePages.Pages = append(profile.Spec.HugePages.Pages, performancev2.HugePage{
+ Size: components.HugepagesSize2M,
+ Count: 128,
+ Node: pointer.Int32Ptr(0),
+ })
+ profile.Spec.HugePages.Pages = append(profile.Spec.HugePages.Pages, performancev2.HugePage{
+ Size: components.HugepagesSize2M,
+ Count: 128,
+ })
+
+ manifest := getTunedManifest(profile)
+ Expect(cmdlineDummy2MHugePages.MatchString(manifest)).To(BeFalse())
+ Expect(cmdlineMultipleHugePages.MatchString(manifest)).To(BeTrue())
+ })
+ })
+ })
+
+ Context("with 2M default huge pages", func() {
+ Context("with requested 2M huge pages allocation on the specified node", func() {
+ It("should not append the dummy 2M huge pages kernel arguments", func() {
+ defaultSize := performancev2.HugePageSize(components.HugepagesSize2M)
+ profile.Spec.HugePages.DefaultHugePagesSize = &defaultSize
+ profile.Spec.HugePages.Pages = append(profile.Spec.HugePages.Pages, performancev2.HugePage{
+ Size: components.HugepagesSize2M,
+ Count: 128,
+ Node: pointer.Int32Ptr(0),
+ })
+
+ manifest := getTunedManifest(profile)
+ Expect(cmdlineDummy2MHugePages.MatchString(manifest)).To(BeFalse())
+ Expect(cmdlineMultipleHugePages.MatchString(manifest)).To(BeFalse())
+ })
+ })
+ })
+
+ Context("with user level networking enabled", func() {
+ Context("with default net device queues (all devices set)", func() {
+ It("should set the default netqueues count to reserved CPUs count", func() {
+ profile.Spec.Net = &performancev2.Net{
+ UserLevelNetworking: pointer.BoolPtr(true),
+ }
+ manifest := getTunedManifest(profile)
+ reservedSet, err := cpuset.Parse(string(*profile.Spec.CPU.Reserved))
+ Expect(err).ToNot(HaveOccurred())
+ reserveCPUcount := reservedSet.Size()
+ channelsRegex := regexp.MustCompile(`\s*channels=combined\s*` + strconv.Itoa(reserveCPUcount) + `\s*`)
+ Expect(channelsRegex.MatchString(manifest)).To(BeTrue())
+ })
+ It("should set by interface name with reserved CPUs count", func() {
+ netDeviceName := "eth*"
+ //regex field should be: devices_udev_regex=^INTERFACE=eth.*
+ devicesUdevRegex := "\\^INTERFACE=" + strings.Replace(netDeviceName, "*", "\\.\\*", -1)
+
+ profile.Spec.Net = &performancev2.Net{
+ UserLevelNetworking: pointer.BoolPtr(true),
+ Devices: []performancev2.Device{
+ {
+ InterfaceName: &netDeviceName,
+ },
+ }}
+ manifest := getTunedManifest(profile)
+ reservedSet, err := cpuset.Parse(string(*profile.Spec.CPU.Reserved))
+ Expect(err).ToNot(HaveOccurred())
+ reserveCPUcount := reservedSet.Size()
+ channelsRegex := regexp.MustCompile(`\s*\[net\]\\ntype=net\\ndevices_udev_regex=` + devicesUdevRegex + `\\nchannels=combined\s*` + strconv.Itoa(reserveCPUcount) + `\s*`)
+ Expect(channelsRegex.MatchString(manifest)).To(BeTrue())
+ })
+ It("should set by negative interface name with reserved CPUs count", func() {
+ netDeviceName := "!ens5"
+ //regex field should be: devices_udev_regex=^INTERFACE=(?!ens5)
+ devicesUdevRegex := "\\^INTERFACE=\\(\\?!" + strings.Replace(netDeviceName, "*", "\\.\\*", -1) + "\\)"
+
+ profile.Spec.Net = &performancev2.Net{
+ UserLevelNetworking: pointer.BoolPtr(true),
+ Devices: []performancev2.Device{
+ {
+ InterfaceName: &netDeviceName,
+ },
+ }}
+ manifest := getTunedManifest(profile)
+ reservedSet, err := cpuset.Parse(string(*profile.Spec.CPU.Reserved))
+ Expect(err).ToNot(HaveOccurred())
+ reserveCPUcount := reservedSet.Size()
+ channelsRegex := regexp.MustCompile(`\s*\[net\]\\ntype=net\\ndevices_udev_regex=` + devicesUdevRegex + `\\nchannels=combined\s*` + strconv.Itoa(reserveCPUcount) + `\s*`)
+ Expect(channelsRegex.MatchString(manifest)).To(BeTrue())
+ })
+ It("should set by specific vendor with reserved CPUs count", func() {
+ netDeviceVendorID := "0x1af4"
+ //regex field should be: devices_udev_regex=^ID_VENDOR_ID=0x1af4
+ devicesUdevRegex := "\\^ID_VENDOR_ID=" + netDeviceVendorID
+
+ profile.Spec.Net = &performancev2.Net{
+ UserLevelNetworking: pointer.BoolPtr(true),
+ Devices: []performancev2.Device{
+ {
+ VendorID: &netDeviceVendorID,
+ },
+ }}
+ manifest := getTunedManifest(profile)
+ reservedSet, err := cpuset.Parse(string(*profile.Spec.CPU.Reserved))
+ Expect(err).ToNot(HaveOccurred())
+ reserveCPUcount := reservedSet.Size()
+ channelsRegex := regexp.MustCompile(`\s*\[net\]\\ntype=net\\ndevices_udev_regex=` + devicesUdevRegex + `\\nchannels=combined\s*` + strconv.Itoa(reserveCPUcount) + `\s*`)
+ Expect(channelsRegex.MatchString(manifest)).To(BeTrue())
+ })
+ It("should set by specific vendor and model with reserved CPUs count", func() {
+ netDeviceVendorID := "0x1af4"
+ netDeviceModelID := "0x1000"
+ //regex field should be: devices_udev_regex=^ID_MODEL_ID=0x1000[\s\S]*^ID_VENDOR_ID=0x1af4
+ devicesUdevRegex := `\^ID_MODEL_ID=` + netDeviceModelID + `\[\\\\s\\\\S]\*\^ID_VENDOR_ID=` + netDeviceVendorID
+
+ profile.Spec.Net = &performancev2.Net{
+ UserLevelNetworking: pointer.BoolPtr(true),
+ Devices: []performancev2.Device{
+ {
+ DeviceID: &netDeviceModelID,
+ VendorID: &netDeviceVendorID,
+ },
+ }}
+ manifest := getTunedManifest(profile)
+ reservedSet, err := cpuset.Parse(string(*profile.Spec.CPU.Reserved))
+ Expect(err).ToNot(HaveOccurred())
+ reserveCPUcount := reservedSet.Size()
+ channelsRegex := regexp.MustCompile(`\s*\[net\]\\ntype=net\\ndevices_udev_regex=` + devicesUdevRegex + `\\nchannels=combined\s*` + strconv.Itoa(reserveCPUcount) + `\s*`)
+ Expect(channelsRegex.MatchString(manifest)).To(BeTrue())
+ })
+ It("should set by specific vendor,model and interface name with reserved CPUs count", func() {
+ netDeviceName := "ens5"
+ netDeviceVendorID := "0x1af4"
+ netDeviceModelID := "0x1000"
+ //regex field should be: devices_udev_regex=^ID_MODEL_ID=0x1000[\s\S]*^ID_VENDOR_ID=0x1af4[\s\S]*^INTERFACE=ens5
+ devicesUdevRegex := `\^ID_MODEL_ID=` + netDeviceModelID + `\[\\\\s\\\\S]\*\^ID_VENDOR_ID=` + netDeviceVendorID + `\[\\\\s\\\\S]\*\^INTERFACE=` + netDeviceName
+
+ profile.Spec.Net = &performancev2.Net{
+ UserLevelNetworking: pointer.BoolPtr(true),
+ Devices: []performancev2.Device{
+ {
+ InterfaceName: &netDeviceName,
+ DeviceID: &netDeviceModelID,
+ VendorID: &netDeviceVendorID,
+ },
+ }}
+ manifest := getTunedManifest(profile)
+ reservedSet, err := cpuset.Parse(string(*profile.Spec.CPU.Reserved))
+ Expect(err).ToNot(HaveOccurred())
+ reserveCPUcount := reservedSet.Size()
+ channelsRegex := regexp.MustCompile(`\s*\[net\]\\ntype=net\\ndevices_udev_regex=` + devicesUdevRegex + `\\nchannels=combined\s*` + strconv.Itoa(reserveCPUcount) + `\s*`)
+ Expect(channelsRegex.MatchString(manifest)).To(BeTrue())
+ })
+ It("should set by specific vendor,model and negative interface name with reserved CPUs count", func() {
+ netDeviceName := "!ens5"
+ netDeviceVendorID := "0x1af4"
+ netDeviceModelID := "0x1000"
+ //regex field should be: devices_udev_regex=^ID_MODEL_ID=0x1000[\\s\\S]*^ID_VENDOR_ID=0x1af4[\\s\\S]*^INTERFACE=(?!ens5)
+ devicesUdevRegex := `\^ID_MODEL_ID=` + netDeviceModelID + `\[\\\\s\\\\S]\*\^ID_VENDOR_ID=` + netDeviceVendorID + `\[\\\\s\\\\S]\*\^INTERFACE=\(\?!` + netDeviceName + `\)`
+
+ profile.Spec.Net = &performancev2.Net{
+ UserLevelNetworking: pointer.BoolPtr(true),
+ Devices: []performancev2.Device{
+ {
+ InterfaceName: &netDeviceName,
+ DeviceID: &netDeviceModelID,
+ VendorID: &netDeviceVendorID,
+ },
+ }}
+ manifest := getTunedManifest(profile)
+ reservedSet, err := cpuset.Parse(string(*profile.Spec.CPU.Reserved))
+ Expect(err).ToNot(HaveOccurred())
+ reserveCPUcount := reservedSet.Size()
+ channelsRegex := regexp.MustCompile(`\s*\[net\]\\ntype=net\\ndevices_udev_regex=` + devicesUdevRegex + `\\nchannels=combined\s*` + strconv.Itoa(reserveCPUcount) + `\s*`)
+ Expect(channelsRegex.MatchString(manifest)).To(BeTrue())
+ })
+ })
+ })
+ })
+})
diff --git a/pkg/pao/controller/performanceprofile/components/utils.go b/pkg/pao/controller/performanceprofile/components/utils.go
new file mode 100644
index 000000000..81869b04b
--- /dev/null
+++ b/pkg/pao/controller/performanceprofile/components/utils.go
@@ -0,0 +1,141 @@
+package components
+
+import (
+ "bytes"
+ "fmt"
+ "math/big"
+ "strconv"
+ "strings"
+
+ "k8s.io/kubernetes/pkg/kubelet/cm/cpuset"
+)
+
+const bitsInWord = 32
+
+// GetComponentName returns the component name for the specific performance profile
+func GetComponentName(profileName string, prefix string) string {
+ return fmt.Sprintf("%s-%s", prefix, profileName)
+}
+
+// GetFirstKeyAndValue returns an arbitrary key/value pair of the map (Go map iteration order is not deterministic), or empty strings if the map is empty
+func GetFirstKeyAndValue(m map[string]string) (string, string) {
+ for k, v := range m {
+ return k, v
+ }
+ return "", ""
+}
+
+// SplitLabelKey returns the given label key split up into its domain and role parts
+func SplitLabelKey(s string) (domain, role string, err error) {
+ parts := strings.Split(s, "/")
+ if len(parts) != 2 {
+ return "", "", fmt.Errorf("Can't split %s", s)
+ }
+ return parts[0], parts[1], nil
+}
+
+// CPUListToHexMask converts a list of cpus into a cpu mask represented in hexadecimal
+func CPUListToHexMask(cpulist string) (hexMask string, err error) {
+ cpus, err := cpuset.Parse(cpulist)
+ if err != nil {
+ return "", err
+ }
+
+ reservedCPUs := cpus.ToSlice()
+ currMask := big.NewInt(0)
+ for _, cpu := range reservedCPUs {
+ x := new(big.Int).Lsh(big.NewInt(1), uint(cpu))
+ currMask.Or(currMask, x)
+ }
+ return fmt.Sprintf("%064x", currMask), nil
+}
+
+// CPUListToMaskList converts a list of cpus into a cpu mask represented
+// in a list of hexadecimal masks divided by a delimiter ","
+func CPUListToMaskList(cpulist string) (hexMask string, err error) {
+ maskStr, err := CPUListToHexMask(cpulist)
+ if err != nil {
+		return "", err
+ }
+ index := 0
+ for index < (len(maskStr) - 8) {
+ if maskStr[index:index+8] != "00000000" {
+ break
+ }
+ index = index + 8
+ }
+ var b bytes.Buffer
+ for index <= (len(maskStr) - 16) {
+ b.WriteString(maskStr[index : index+8])
+ b.WriteString(",")
+ index = index + 8
+ }
+ b.WriteString(maskStr[index : index+8])
+ trimmedCPUMaskList := b.String()
+ return trimmedCPUMaskList, nil
+}
+
+// CPULists allows easy checks between reserved and isolated cpu set definitions
+type CPULists struct {
+ reserved cpuset.CPUSet
+ isolated cpuset.CPUSet
+}
+
+// Intersect returns cpu ids found in both the provided cpuLists, if any
+func (c *CPULists) Intersect() []int {
+ commonSet := c.reserved.Intersection(c.isolated)
+ return commonSet.ToSlice()
+}
+
+// CountIsolated returns how many isolated cpus were specified
+func (c *CPULists) CountIsolated() int {
+ return c.isolated.Size()
+}
+
+// NewCPULists parses text representations of the reserved and isolated cpuset definitions and returns a CPULists object
+func NewCPULists(reservedList, isolatedList string) (*CPULists, error) {
+ var err error
+ reserved, err := cpuset.Parse(reservedList)
+ if err != nil {
+ return nil, err
+ }
+ isolated, err := cpuset.Parse(isolatedList)
+ if err != nil {
+ return nil, err
+ }
+ return &CPULists{
+ reserved: reserved,
+ isolated: isolated,
+ }, nil
+}
+
+// CPUMaskToCPUSet parses a CPUSet received in a Mask Format, see:
+// https://man7.org/linux/man-pages/man7/cpuset.7.html#FORMATS
+func CPUMaskToCPUSet(cpuMask string) (cpuset.CPUSet, error) {
+ chunks := strings.Split(cpuMask, ",")
+
+ // reverse the chunks order
+ n := len(chunks)
+ for i := 0; i < n/2; i++ {
+ chunks[i], chunks[n-i-1] = chunks[n-i-1], chunks[i]
+ }
+
+ builder := cpuset.NewBuilder()
+ for i, chunk := range chunks {
+ if chunk == "" {
+ return cpuset.NewCPUSet(), fmt.Errorf("malformed CPU mask %q chunk %q", cpuMask, chunk)
+ }
+ mask, err := strconv.ParseUint(chunk, 16, bitsInWord)
+ if err != nil {
+ return cpuset.NewCPUSet(), fmt.Errorf("failed to parse the CPU mask %q: %v", cpuMask, err)
+ }
+ for j := 0; j < bitsInWord; j++ {
+ if mask&1 == 1 {
+ builder.Add(i*bitsInWord + j)
+ }
+ mask >>= 1
+ }
+ }
+
+ return builder.Result(), nil
+}
diff --git a/pkg/pao/controller/performanceprofile/components/utils_suite_test.go b/pkg/pao/controller/performanceprofile/components/utils_suite_test.go
new file mode 100644
index 000000000..5a874af5f
--- /dev/null
+++ b/pkg/pao/controller/performanceprofile/components/utils_suite_test.go
@@ -0,0 +1,13 @@
+package components
+
+import (
+ "testing"
+
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+)
+
+func TestComponentsUtils(t *testing.T) {
+ RegisterFailHandler(Fail)
+	RunSpecs(t, "Components Utils Suite")
+}
diff --git a/pkg/pao/controller/performanceprofile/components/utils_test.go b/pkg/pao/controller/performanceprofile/components/utils_test.go
new file mode 100644
index 000000000..a8eceea6c
--- /dev/null
+++ b/pkg/pao/controller/performanceprofile/components/utils_test.go
@@ -0,0 +1,101 @@
+package components
+
+import (
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+
+ "k8s.io/kubernetes/pkg/kubelet/cm/cpuset"
+)
+
+type listToMask struct {
+ cpuList string
+ cpuMask string
+}
+
+var cpuListToMask = []listToMask{
+ {"0", "00000001"},
+ {"2-3", "0000000c"},
+ {"3,4,53-55,61-63", "e0e00000,00000018"},
+ {"0-127", "ffffffff,ffffffff,ffffffff,ffffffff"},
+ {"0-255", "ffffffff,ffffffff,ffffffff,ffffffff,ffffffff,ffffffff,ffffffff,ffffffff"},
+}
+
+func intersectHelper(cpuListA, cpuListB string) ([]int, error) {
+ cpuLists, err := NewCPULists(cpuListA, cpuListB)
+ if err != nil {
+ return nil, err
+ }
+ return cpuLists.Intersect(), nil
+}
+
+var _ = Describe("Components utils", func() {
+ Context("Convert CPU list to CPU mask", func() {
+ It("should generate a valid CPU mask from CPU list ", func() {
+ for _, cpuEntry := range cpuListToMask {
+ cpuMask, err := CPUListToMaskList(cpuEntry.cpuList)
+ Expect(err).ToNot(HaveOccurred())
+ Expect(cpuMask).Should(Equal(cpuEntry.cpuMask))
+ }
+ })
+ })
+
+ Context("Convert CPU mask to CPU list", func() {
+ It("should generate a valid CPU list from CPU mask ", func() {
+ for _, cpuEntry := range cpuListToMask {
+ cpuSetFromList, err := cpuset.Parse(cpuEntry.cpuList)
+ Expect(err).ToNot(HaveOccurred())
+ cpuSetFromMask, err := CPUMaskToCPUSet(cpuEntry.cpuMask)
+ Expect(err).ToNot(HaveOccurred())
+
+ Expect(cpuSetFromList).Should(Equal(cpuSetFromMask))
+ }
+ })
+ })
+
+ Context("Check intersections between CPU sets", func() {
+ It("should detect invalid cpulists", func() {
+ var cpuListInvalid = []string{
+ "0-", "-", "-3", ",,", ",2", "-,", "0-1,", "0,1,3,,4",
+ }
+
+ for _, entry := range cpuListInvalid {
+ _, err := intersectHelper(entry, entry)
+ Expect(err).To(HaveOccurred())
+
+ _, err = intersectHelper(entry, "0-3")
+ Expect(err).To(HaveOccurred())
+
+ _, err = intersectHelper("0-3", entry)
+ Expect(err).To(HaveOccurred())
+ }
+ })
+
+ It("should detect cpulist intersections", func() {
+ type cpuListIntersect struct {
+ cpuListA string
+ cpuListB string
+ result []int
+ }
+
+ var cpuListIntersectTestcases = []cpuListIntersect{
+ {"", "0-3", []int{}},
+ {"0-3", "", []int{}},
+ {"0-3", "4-15", []int{}},
+ {"0-3", "8-15", []int{}},
+ {"0-3", "0-15", []int{0, 1, 2, 3}},
+ {"0-3", "3-15", []int{3}},
+ {"3-7", "6-15", []int{6, 7}},
+ }
+
+ for _, entry := range cpuListIntersectTestcases {
+ res, err := intersectHelper(entry.cpuListA, entry.cpuListB)
+ Expect(err).ToNot(HaveOccurred())
+
+ Expect(len(res)).To(Equal(len(entry.result)))
+ for idx, cpuid := range res {
+ Expect(cpuid).To(Equal(entry.result[idx]))
+ }
+ }
+ })
+ })
+})
diff --git a/pkg/pao/controller/performanceprofile_controller.go b/pkg/pao/controller/performanceprofile_controller.go
new file mode 100644
index 000000000..f4667dda5
--- /dev/null
+++ b/pkg/pao/controller/performanceprofile_controller.go
@@ -0,0 +1,703 @@
+/*
+
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package controller
+
+import (
+ "context"
+ "fmt"
+ "reflect"
+ "time"
+
+ performancev2 "github.com/openshift/cluster-node-tuning-operator/pkg/apis/pao/v2"
+ tunedv1 "github.com/openshift/cluster-node-tuning-operator/pkg/apis/tuned/v1"
+ "github.com/openshift/cluster-node-tuning-operator/pkg/pao/controller/performanceprofile/components"
+ "github.com/openshift/cluster-node-tuning-operator/pkg/pao/controller/performanceprofile/components/machineconfig"
+ "github.com/openshift/cluster-node-tuning-operator/pkg/pao/controller/performanceprofile/components/manifestset"
+ profileutil "github.com/openshift/cluster-node-tuning-operator/pkg/pao/controller/performanceprofile/components/profile"
+ mcov1 "github.com/openshift/machine-config-operator/pkg/apis/machineconfiguration.openshift.io/v1"
+
+ olmv1 "github.com/operator-framework/api/pkg/operators/v1"
+ olmv1alpha1 "github.com/operator-framework/api/pkg/operators/v1alpha1"
+
+ corev1 "k8s.io/api/core/v1"
+ nodev1beta1 "k8s.io/api/node/v1beta1"
+ apiequality "k8s.io/apimachinery/pkg/api/equality"
+ "k8s.io/apimachinery/pkg/api/errors"
+ k8serros "k8s.io/apimachinery/pkg/api/errors"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/labels"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/types"
+ "k8s.io/client-go/tools/record"
+ "k8s.io/klog"
+
+ ctrl "sigs.k8s.io/controller-runtime"
+ "sigs.k8s.io/controller-runtime/pkg/builder"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+ "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
+ "sigs.k8s.io/controller-runtime/pkg/event"
+ "sigs.k8s.io/controller-runtime/pkg/handler"
+ "sigs.k8s.io/controller-runtime/pkg/predicate"
+ "sigs.k8s.io/controller-runtime/pkg/reconcile"
+ "sigs.k8s.io/controller-runtime/pkg/source"
+)
+
+const finalizer = "foreground-deletion"
+
+// PerformanceProfileReconciler reconciles a PerformanceProfile object
+type PerformanceProfileReconciler struct {
+ client.Client
+ Scheme *runtime.Scheme
+ Recorder record.EventRecorder
+ olmRemoved bool
+}
+
+// SetupWithManager creates a new PerformanceProfile Controller and adds it to the Manager.
+// The Manager will set fields on the Controller and Start it when the Manager is Started.
+func (r *PerformanceProfileReconciler) SetupWithManager(mgr ctrl.Manager) error {
+
+	// we want to initiate the reconcile loop only on changes to the labels or spec of the object
+ p := predicate.Funcs{
+ UpdateFunc: func(e event.UpdateEvent) bool {
+ if !validateUpdateEvent(&e) {
+ return false
+ }
+
+ return e.ObjectNew.GetGeneration() != e.ObjectOld.GetGeneration() ||
+ !apiequality.Semantic.DeepEqual(e.ObjectNew.GetLabels(), e.ObjectOld.GetLabels())
+ },
+ }
+
+ kubeletPredicates := predicate.Funcs{
+ UpdateFunc: func(e event.UpdateEvent) bool {
+ if !validateUpdateEvent(&e) {
+ return false
+ }
+
+ kubeletOld := e.ObjectOld.(*mcov1.KubeletConfig)
+ kubeletNew := e.ObjectNew.(*mcov1.KubeletConfig)
+
+ return kubeletOld.GetGeneration() != kubeletNew.GetGeneration() ||
+ !reflect.DeepEqual(kubeletOld.Status.Conditions, kubeletNew.Status.Conditions)
+ },
+ }
+
+ mcpPredicates := predicate.Funcs{
+ UpdateFunc: func(e event.UpdateEvent) bool {
+ if !validateUpdateEvent(&e) {
+ return false
+ }
+
+ mcpOld := e.ObjectOld.(*mcov1.MachineConfigPool)
+ mcpNew := e.ObjectNew.(*mcov1.MachineConfigPool)
+
+ return !reflect.DeepEqual(mcpOld.Status.Conditions, mcpNew.Status.Conditions)
+ },
+ }
+
+ tunedProfilePredicates := predicate.Funcs{
+ UpdateFunc: func(e event.UpdateEvent) bool {
+ if !validateUpdateEvent(&e) {
+ return false
+ }
+
+ tunedProfileOld := e.ObjectOld.(*tunedv1.Profile)
+ tunedProfileNew := e.ObjectNew.(*tunedv1.Profile)
+
+ return !reflect.DeepEqual(tunedProfileOld.Status.Conditions, tunedProfileNew.Status.Conditions)
+ },
+ }
+
+ return ctrl.NewControllerManagedBy(mgr).
+ For(&performancev2.PerformanceProfile{}).
+ Owns(&mcov1.MachineConfig{}, builder.WithPredicates(p)).
+ Owns(&mcov1.KubeletConfig{}, builder.WithPredicates(kubeletPredicates)).
+ Owns(&tunedv1.Tuned{}, builder.WithPredicates(p)).
+ Owns(&nodev1beta1.RuntimeClass{}, builder.WithPredicates(p)).
+ Watches(
+ &source.Kind{Type: &mcov1.MachineConfigPool{}},
+ handler.EnqueueRequestsFromMapFunc(r.mcpToPerformanceProfile),
+ builder.WithPredicates(mcpPredicates)).
+ Watches(
+ &source.Kind{Type: &tunedv1.Profile{}},
+ handler.EnqueueRequestsFromMapFunc(r.tunedProfileToPerformanceProfile),
+ builder.WithPredicates(tunedProfilePredicates),
+ ).
+ Complete(r)
+}
+
+// removeOLMOperator uninstalls the PAO OLM operator and all of its artifacts;
+// this should apply only from version 4.11
+func (r *PerformanceProfileReconciler) removeOLMOperator() error {
+ paoCSV := "performance-addon-operator.v4.10.0"
+ subscriptions := &olmv1alpha1.SubscriptionList{}
+
+ if err := r.List(context.TODO(), subscriptions); err != nil {
+ if !errors.IsNotFound(err) {
+ return err
+ }
+ }
+
+ for i := range subscriptions.Items {
+ subscription := &subscriptions.Items[i]
+ if subscription.Name == "performance-addon-operator" {
+ klog.Infof("Removing performance-addon-operator subscription %s", subscription.Name)
+ if subscription.Status.CurrentCSV != paoCSV {
+ return fmt.Errorf("Subscription to be removed contains a current CSV version %s which is different from %s", subscription.Status.CurrentCSV, paoCSV)
+ }
+ if err := r.Delete(context.TODO(), subscription); err != nil {
+ return err
+ }
+ }
+ }
+
+ csvs := &olmv1alpha1.ClusterServiceVersionList{}
+ if err := r.List(context.TODO(), csvs); err != nil {
+ if !errors.IsNotFound(err) {
+ return err
+ }
+ }
+
+ for i := range csvs.Items {
+ csv := &csvs.Items[i]
+ if csv.Name == paoCSV {
+ klog.Infof("Removing performance-addon-operator CSV %s", paoCSV)
+ if err := r.Delete(context.TODO(), csv); err != nil {
+ return err
+ }
+ }
+ }
+
+ operatorGroups := &olmv1.OperatorGroupList{}
+ if err := r.List(context.TODO(), operatorGroups); err != nil {
+ if !errors.IsNotFound(err) {
+ return err
+ }
+ }
+
+ for i := range operatorGroups.Items {
+ operatorGroup := &operatorGroups.Items[i]
+ if operatorGroup.Name == "performance-addon-operator" {
+ klog.Infof("Removing performance-addon-operator operator group %s", operatorGroup.Name)
+ if err := r.Delete(context.TODO(), operatorGroup); err != nil {
+ return err
+ }
+ }
+ }
+
+ return nil
+}
+
+func (r *PerformanceProfileReconciler) getCSV(name, namespace string) (*olmv1alpha1.ClusterServiceVersion, error) {
+ csv := &olmv1alpha1.ClusterServiceVersion{}
+ key := types.NamespacedName{
+ Name: name,
+ Namespace: namespace,
+ }
+ err := r.Get(context.TODO(), key, csv)
+ return csv, err
+}
+
+func (r *PerformanceProfileReconciler) mcpToPerformanceProfile(mcpObj client.Object) []reconcile.Request {
+ mcp := &mcov1.MachineConfigPool{}
+
+ key := types.NamespacedName{
+ Namespace: mcpObj.GetNamespace(),
+ Name: mcpObj.GetName(),
+ }
+ if err := r.Get(context.TODO(), key, mcp); err != nil {
+ klog.Errorf("failed to get the machine config pool %+v: %v", key, err)
+ return nil
+ }
+
+ profiles := &performancev2.PerformanceProfileList{}
+ if err := r.List(context.TODO(), profiles); err != nil {
+ klog.Errorf("failed to get performance profiles: %v", err)
+ return nil
+ }
+
+ var requests []reconcile.Request
+ for i, profile := range profiles.Items {
+ profileNodeSelector := labels.Set(profile.Spec.NodeSelector)
+ mcpNodeSelector, err := metav1.LabelSelectorAsSelector(mcp.Spec.NodeSelector)
+ if err != nil {
+ klog.Errorf("failed to parse the selector %v: %v", mcp.Spec.NodeSelector, err)
+ return nil
+ }
+
+ if mcpNodeSelector.Matches(profileNodeSelector) {
+ requests = append(requests, reconcile.Request{NamespacedName: namespacedName(&profiles.Items[i])})
+ }
+ }
+
+ return requests
+}
+
+func (r *PerformanceProfileReconciler) tunedProfileToPerformanceProfile(tunedProfileObj client.Object) []reconcile.Request {
+ node := &corev1.Node{}
+ key := types.NamespacedName{
+		// the tuned profile name is the same as the node name
+ Name: tunedProfileObj.GetName(),
+ }
+
+ if err := r.Get(context.TODO(), key, node); err != nil {
+ klog.Errorf("failed to get the tuned profile %+v: %v", key, err)
+ return nil
+ }
+
+ profiles := &performancev2.PerformanceProfileList{}
+ if err := r.List(context.TODO(), profiles); err != nil {
+ klog.Errorf("failed to get performance profiles: %v", err)
+ return nil
+ }
+
+ var requests []reconcile.Request
+ for i, profile := range profiles.Items {
+ profileNodeSelector := labels.Set(profile.Spec.NodeSelector)
+ nodeLabels := labels.Set(node.Labels)
+ if profileNodeSelector.AsSelector().Matches(nodeLabels) {
+ requests = append(requests, reconcile.Request{NamespacedName: namespacedName(&profiles.Items[i])})
+ }
+ }
+
+ return requests
+}
+
+func validateUpdateEvent(e *event.UpdateEvent) bool {
+ if e.ObjectOld == nil {
+ klog.Error("Update event has no old runtime object to update")
+ return false
+ }
+ if e.ObjectNew == nil {
+ klog.Error("Update event has no new runtime object for update")
+ return false
+ }
+
+ return true
+}
+
+// +kubebuilder:rbac:groups="",resources=events,verbs=*
+// +kubebuilder:rbac:groups="",resources=nodes,verbs=get;list;watch
+// +kubebuilder:rbac:groups=performance.openshift.io,resources=performanceprofiles;performanceprofiles/status;performanceprofiles/finalizers,verbs=*
+// +kubebuilder:rbac:groups=machineconfiguration.openshift.io,resources=machineconfigs;machineconfigpools;kubeletconfigs,verbs=*
+// +kubebuilder:rbac:groups=tuned.openshift.io,resources=tuneds;profiles,verbs=*
+// +kubebuilder:rbac:groups=node.k8s.io,resources=runtimeclasses,verbs=*
+// +kubebuilder:rbac:groups=config.openshift.io,resources=infrastructures,verbs=get;list;watch
+// +kubebuilder:rbac:namespace="openshift-cluster-node-tuning-operator",groups=core,resources=pods;services;services/finalizers;configmaps,verbs=*
+// +kubebuilder:rbac:namespace="openshift-cluster-node-tuning-operator",groups=coordination.k8s.io,resources=leases,verbs=create;get;list;update
+// +kubebuilder:rbac:namespace="openshift-cluster-node-tuning-operator",groups=apps,resourceNames=performance-operator,resources=deployments/finalizers,verbs=update
+// +kubebuilder:rbac:namespace="openshift-cluster-node-tuning-operator",groups=monitoring.coreos.com,resources=servicemonitors,verbs=*
+
+// Reconcile reads that state of the cluster for a PerformanceProfile object and makes changes based on the state read
+// and what is in the PerformanceProfile.Spec
+// Note:
+// The Controller will requeue the Request to be processed again if the returned error is non-nil or
+// Result.Requeue is true, otherwise upon completion it will remove the work from the queue.
+func (r *PerformanceProfileReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
+ klog.Info("Reconciling PerformanceProfile")
+
+ // One time operation to uninstall PAO optional operator
+ // This should be deprecated in openshift 4.12
+ if !r.olmRemoved {
+ if err := r.removeOLMOperator(); err != nil {
+ return reconcile.Result{}, err
+ } else {
+ r.olmRemoved = true
+ }
+ }
+
+ // Fetch the PerformanceProfile instance
+ instance := &performancev2.PerformanceProfile{}
+ err := r.Get(ctx, req.NamespacedName, instance)
+ if err != nil {
+ if k8serros.IsNotFound(err) {
+ // Request object not found, could have been deleted after reconcile request.
+ // Owned objects are automatically garbage collected. For additional cleanup logic use finalizers.
+ // Return and don't requeue
+ return reconcile.Result{}, nil
+ }
+ // Error reading the object - requeue the request.
+ return reconcile.Result{}, err
+ }
+
+ if instance.DeletionTimestamp != nil {
+ // delete components
+ if err := r.deleteComponents(instance); err != nil {
+ klog.Errorf("failed to delete components: %v", err)
+ r.Recorder.Eventf(instance, corev1.EventTypeWarning, "Deletion failed", "Failed to delete components: %v", err)
+ return reconcile.Result{}, err
+ }
+ r.Recorder.Eventf(instance, corev1.EventTypeNormal, "Deletion succeeded", "Succeeded to delete all components")
+
+ if r.isComponentsExist(instance) {
+ return reconcile.Result{RequeueAfter: 10 * time.Second}, nil
+ }
+
+ // remove finalizer
+ if hasFinalizer(instance, finalizer) {
+ removeFinalizer(instance, finalizer)
+ if err := r.Update(ctx, instance); err != nil {
+ return reconcile.Result{}, err
+ }
+
+ return reconcile.Result{}, nil
+ }
+ }
+
+ // add finalizer
+ if !hasFinalizer(instance, finalizer) {
+ instance.Finalizers = append(instance.Finalizers, finalizer)
+ instance.Status.Conditions = r.getProgressingConditions("DeploymentStarting", "Deployment is starting")
+ if err := r.Update(ctx, instance); err != nil {
+ return reconcile.Result{}, err
+ }
+
+ // we exit reconcile loop because we will have additional update reconcile
+ return reconcile.Result{}, nil
+ }
+
+ profileMCP, err := r.getMachineConfigPoolByProfile(instance)
+ if err != nil {
+ conditions := r.getDegradedConditions(conditionFailedToFindMachineConfigPool, err.Error())
+ if err := r.updateStatus(instance, conditions); err != nil {
+ klog.Errorf("failed to update performance profile %q status: %v", instance.Name, err)
+ return reconcile.Result{}, err
+ }
+
+ return reconcile.Result{}, nil
+ }
+
+ if err := validateProfileMachineConfigPool(instance, profileMCP); err != nil {
+ conditions := r.getDegradedConditions(conditionBadMachineConfigLabels, err.Error())
+ if err := r.updateStatus(instance, conditions); err != nil {
+ klog.Errorf("failed to update performance profile %q status: %v", instance.Name, err)
+ return reconcile.Result{}, err
+ }
+
+ return reconcile.Result{}, nil
+ }
+
+ // remove components with the old name after the upgrade
+ if err := r.deleteDeprecatedComponents(instance); err != nil {
+ return ctrl.Result{}, err
+ }
+
+ // apply components
+ result, err := r.applyComponents(instance, profileMCP)
+ if err != nil {
+ klog.Errorf("failed to deploy performance profile %q components: %v", instance.Name, err)
+ r.Recorder.Eventf(instance, corev1.EventTypeWarning, "Creation failed", "Failed to create all components: %v", err)
+ conditions := r.getDegradedConditions(conditionReasonComponentsCreationFailed, err.Error())
+ if err := r.updateStatus(instance, conditions); err != nil {
+ klog.Errorf("failed to update performance profile %q status: %v", instance.Name, err)
+ return reconcile.Result{}, err
+ }
+ return reconcile.Result{}, err
+ }
+
+ // get kubelet false condition
+ conditions, err := r.getKubeletConditionsByProfile(instance)
+ if err != nil {
+ return r.updateDegradedCondition(instance, conditionFailedGettingKubeletStatus, err)
+ }
+
+ // get MCP degraded conditions
+ if conditions == nil {
+ conditions, err = r.getMCPDegradedCondition(profileMCP)
+ if err != nil {
+ return r.updateDegradedCondition(instance, conditionFailedGettingMCPStatus, err)
+ }
+ }
+
+ // get tuned profile degraded conditions
+ if conditions == nil {
+ conditions, err = r.getTunedConditionsByProfile(instance)
+ if err != nil {
+ return r.updateDegradedCondition(instance, conditionFailedGettingTunedProfileStatus, err)
+ }
+ }
+
+ // if conditions were not added due to machine config pool status change then set as available
+ if conditions == nil {
+ conditions = r.getAvailableConditions()
+ }
+
+ if err := r.updateStatus(instance, conditions); err != nil {
+ klog.Errorf("failed to update performance profile %q status: %v", instance.Name, err)
+		// we still want to requeue after some time, even in case of error, to avoid the chance of multiple reboots
+ if result != nil {
+ return *result, nil
+ }
+
+ return reconcile.Result{}, err
+ }
+
+ if result != nil {
+ return *result, nil
+ }
+
+ return ctrl.Result{}, nil
+}
+
+func (r *PerformanceProfileReconciler) deleteDeprecatedComponents(instance *performancev2.PerformanceProfile) error {
+ // remove the machine config with the deprecated name
+ name := components.GetComponentName(instance.Name, components.ComponentNamePrefix)
+ return r.deleteMachineConfig(name)
+}
+
+func (r *PerformanceProfileReconciler) updateDegradedCondition(instance *performancev2.PerformanceProfile, conditionState string, conditionError error) (ctrl.Result, error) {
+ conditions := r.getDegradedConditions(conditionState, conditionError.Error())
+ if err := r.updateStatus(instance, conditions); err != nil {
+ klog.Errorf("failed to update performance profile %q status: %v", instance.Name, err)
+ return reconcile.Result{}, err
+ }
+ return reconcile.Result{}, conditionError
+}
+
+func (r *PerformanceProfileReconciler) applyComponents(profile *performancev2.PerformanceProfile, profileMCP *mcov1.MachineConfigPool) (*reconcile.Result, error) {
+ if profileutil.IsPaused(profile) {
+		klog.Infof("Ignoring reconcile loop for paused performance profile %s", profile.Name)
+ return nil, nil
+ }
+
+ components, err := manifestset.GetNewComponents(profile, profileMCP)
+ if err != nil {
+ return nil, err
+ }
+ for _, componentObj := range components.ToObjects() {
+ if err := controllerutil.SetControllerReference(profile, componentObj, r.Scheme); err != nil {
+ return nil, err
+ }
+ }
+
+ // get mutated machine config
+ mcMutated, err := r.getMutatedMachineConfig(components.MachineConfig)
+ if err != nil {
+ return nil, err
+ }
+
+ // get mutated kubelet config
+ kcMutated, err := r.getMutatedKubeletConfig(components.KubeletConfig)
+ if err != nil {
+ return nil, err
+ }
+
+ // get mutated performance tuned
+ performanceTunedMutated, err := r.getMutatedTuned(components.Tuned)
+ if err != nil {
+ return nil, err
+ }
+
+ // get mutated RuntimeClass
+ runtimeClassMutated, err := r.getMutatedRuntimeClass(components.RuntimeClass)
+ if err != nil {
+ return nil, err
+ }
+
+ updated := mcMutated != nil ||
+ kcMutated != nil ||
+ performanceTunedMutated != nil ||
+ runtimeClassMutated != nil
+
+	// do not update any resources if there are no changes to the relevant objects; just continue to the status update
+ if !updated {
+ return nil, nil
+ }
+
+ if mcMutated != nil {
+ if err := r.createOrUpdateMachineConfig(mcMutated); err != nil {
+ return nil, err
+ }
+ }
+
+ if performanceTunedMutated != nil {
+ if err := r.createOrUpdateTuned(performanceTunedMutated, profile.Name); err != nil {
+ return nil, err
+ }
+ }
+
+ if kcMutated != nil {
+ if err := r.createOrUpdateKubeletConfig(kcMutated); err != nil {
+ return nil, err
+ }
+ }
+
+ if runtimeClassMutated != nil {
+ if err := r.createOrUpdateRuntimeClass(runtimeClassMutated); err != nil {
+ return nil, err
+ }
+ }
+
+ r.Recorder.Eventf(profile, corev1.EventTypeNormal, "Creation succeeded", "Succeeded to create all components")
+ return &reconcile.Result{}, nil
+}
+
+func (r *PerformanceProfileReconciler) deleteComponents(profile *performancev2.PerformanceProfile) error {
+ tunedName := components.GetComponentName(profile.Name, components.ProfileNamePerformance)
+ if err := r.deleteTuned(tunedName, components.NamespaceNodeTuningOperator); err != nil {
+ return err
+ }
+
+ name := components.GetComponentName(profile.Name, components.ComponentNamePrefix)
+ if err := r.deleteKubeletConfig(name); err != nil {
+ return err
+ }
+
+ if err := r.deleteRuntimeClass(name); err != nil {
+ return err
+ }
+
+ if err := r.deleteMachineConfig(machineconfig.GetMachineConfigName(profile)); err != nil {
+ return err
+ }
+
+ return nil
+
+}
+
+func (r *PerformanceProfileReconciler) isComponentsExist(profile *performancev2.PerformanceProfile) bool {
+ tunedName := components.GetComponentName(profile.Name, components.ProfileNamePerformance)
+ if _, err := r.getTuned(tunedName, components.NamespaceNodeTuningOperator); !k8serros.IsNotFound(err) {
+		klog.Infof("Tuned %q custom resource still exists under the namespace %q", tunedName, components.NamespaceNodeTuningOperator)
+ return true
+ }
+
+ name := components.GetComponentName(profile.Name, components.ComponentNamePrefix)
+ if _, err := r.getKubeletConfig(name); !k8serros.IsNotFound(err) {
+ klog.Infof("Kubelet Config %q exists under the cluster", name)
+ return true
+ }
+
+ if _, err := r.getRuntimeClass(name); !k8serros.IsNotFound(err) {
+ klog.Infof("Runtime class %q exists under the cluster", name)
+ return true
+ }
+
+ if _, err := r.getMachineConfig(machineconfig.GetMachineConfigName(profile)); !k8serros.IsNotFound(err) {
+		klog.Infof("Machine Config %q exists under the cluster", machineconfig.GetMachineConfigName(profile))
+ return true
+ }
+
+ return false
+}
+
+func hasFinalizer(profile *performancev2.PerformanceProfile, finalizer string) bool {
+ for _, f := range profile.Finalizers {
+ if f == finalizer {
+ return true
+ }
+ }
+ return false
+}
+
+func removeFinalizer(profile *performancev2.PerformanceProfile, finalizer string) {
+ var finalizers []string
+ for _, f := range profile.Finalizers {
+ if f == finalizer {
+ continue
+ }
+ finalizers = append(finalizers, f)
+ }
+ profile.Finalizers = finalizers
+}
+
+func namespacedName(obj metav1.Object) types.NamespacedName {
+ return types.NamespacedName{
+ Namespace: obj.GetNamespace(),
+ Name: obj.GetName(),
+ }
+}
+
+func (r *PerformanceProfileReconciler) getMachineConfigPoolByProfile(profile *performancev2.PerformanceProfile) (*mcov1.MachineConfigPool, error) {
+ nodeSelector := labels.Set(profile.Spec.NodeSelector)
+
+ mcpList := &mcov1.MachineConfigPoolList{}
+ if err := r.Client.List(context.TODO(), mcpList); err != nil {
+ return nil, err
+ }
+
+ filteredMCPList := filterMCPDuplications(mcpList.Items)
+
+ var profileMCPs []*mcov1.MachineConfigPool
+ for i := range filteredMCPList {
+		mcp := &filteredMCPList[i]
+
+ if mcp.Spec.NodeSelector == nil {
+ continue
+ }
+
+ mcpNodeSelector, err := metav1.LabelSelectorAsSelector(mcp.Spec.NodeSelector)
+ if err != nil {
+ return nil, err
+ }
+
+ if mcpNodeSelector.Matches(nodeSelector) {
+ profileMCPs = append(profileMCPs, mcp)
+ }
+ }
+
+ if len(profileMCPs) == 0 {
+ return nil, fmt.Errorf("failed to find MCP with the node selector that matches labels %q", nodeSelector.String())
+ }
+
+ if len(profileMCPs) > 1 {
+ return nil, fmt.Errorf("more than one MCP found that matches performance profile node selector %q", nodeSelector.String())
+ }
+
+ return profileMCPs[0], nil
+}
+
+func filterMCPDuplications(mcps []mcov1.MachineConfigPool) []mcov1.MachineConfigPool {
+ var filtered []mcov1.MachineConfigPool
+ items := map[string]mcov1.MachineConfigPool{}
+ for _, mcp := range mcps {
+ if _, exists := items[mcp.Name]; !exists {
+ items[mcp.Name] = mcp
+ filtered = append(filtered, mcp)
+ }
+ }
+
+ return filtered
+}
+
+func validateProfileMachineConfigPool(profile *performancev2.PerformanceProfile, profileMCP *mcov1.MachineConfigPool) error {
+ if profileMCP.Spec.MachineConfigSelector.Size() == 0 {
+ return fmt.Errorf("the MachineConfigPool %q machineConfigSelector is nil", profileMCP.Name)
+ }
+
+ if len(profileMCP.Labels) == 0 {
+		return fmt.Errorf("the MachineConfigPool %q does not have any labels that can be used to bind it together with KubeletConfig", profileMCP.Name)
+ }
+
+ // we can not guarantee that our generated label for the machine config selector will be the right one
+ // but at least we can validate that the MCP will consume our machine config
+ machineConfigLabels := profileutil.GetMachineConfigLabel(profile)
+ mcpMachineConfigSelector, err := metav1.LabelSelectorAsSelector(profileMCP.Spec.MachineConfigSelector)
+ if err != nil {
+ return err
+ }
+
+ if !mcpMachineConfigSelector.Matches(labels.Set(machineConfigLabels)) {
+ if len(profile.Spec.MachineConfigLabel) > 0 {
+ return fmt.Errorf("the machine config labels %v provided via profile.spec.machineConfigLabel do not match the MachineConfigPool %q machineConfigSelector %q", machineConfigLabels, profileMCP.Name, mcpMachineConfigSelector.String())
+ }
+
+ return fmt.Errorf("the machine config labels %v generated from the profile.spec.nodeSelector %v do not match the MachineConfigPool %q machineConfigSelector %q", machineConfigLabels, profile.Spec.NodeSelector, profileMCP.Name, mcpMachineConfigSelector.String())
+ }
+
+ return nil
+}
diff --git a/pkg/pao/controller/performanceprofile_controller_suite_test.go b/pkg/pao/controller/performanceprofile_controller_suite_test.go
new file mode 100644
index 000000000..d8293d661
--- /dev/null
+++ b/pkg/pao/controller/performanceprofile_controller_suite_test.go
@@ -0,0 +1,30 @@
+package controller
+
+import (
+ "testing"
+
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+ configv1 "github.com/openshift/api/config/v1"
+ performancev2 "github.com/openshift/cluster-node-tuning-operator/pkg/apis/pao/v2"
+ tunedv1 "github.com/openshift/cluster-node-tuning-operator/pkg/apis/tuned/v1"
+ mcov1 "github.com/openshift/machine-config-operator/pkg/apis/machineconfiguration.openshift.io/v1"
+ olmv1 "github.com/operator-framework/api/pkg/operators/v1"
+ olmv1alpha1 "github.com/operator-framework/api/pkg/operators/v1alpha1"
+
+ "k8s.io/client-go/kubernetes/scheme"
+)
+
+func TestPerformanceProfile(t *testing.T) {
+ RegisterFailHandler(Fail)
+
+ // add resources API to default scheme
+ performancev2.AddToScheme(scheme.Scheme)
+ configv1.AddToScheme(scheme.Scheme)
+ mcov1.AddToScheme(scheme.Scheme)
+ tunedv1.AddToScheme(scheme.Scheme)
+ olmv1.AddToScheme(scheme.Scheme)
+ olmv1alpha1.AddToScheme(scheme.Scheme)
+
+ RunSpecs(t, "Performance Profile Suite")
+}
diff --git a/pkg/pao/controller/performanceprofile_controller_test.go b/pkg/pao/controller/performanceprofile_controller_test.go
new file mode 100644
index 000000000..95e0a9d4c
--- /dev/null
+++ b/pkg/pao/controller/performanceprofile_controller_test.go
@@ -0,0 +1,884 @@
+package controller
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "regexp"
+ "time"
+
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+ . "github.com/onsi/gomega/gstruct"
+
+ igntypes "github.com/coreos/ignition/config/v2_2/types"
+ performancev2 "github.com/openshift/cluster-node-tuning-operator/pkg/apis/pao/v2"
+ tunedv1 "github.com/openshift/cluster-node-tuning-operator/pkg/apis/tuned/v1"
+ "github.com/openshift/cluster-node-tuning-operator/pkg/pao/controller/performanceprofile/components"
+ "github.com/openshift/cluster-node-tuning-operator/pkg/pao/controller/performanceprofile/components/kubeletconfig"
+ "github.com/openshift/cluster-node-tuning-operator/pkg/pao/controller/performanceprofile/components/machineconfig"
+ "github.com/openshift/cluster-node-tuning-operator/pkg/pao/controller/performanceprofile/components/runtimeclass"
+ "github.com/openshift/cluster-node-tuning-operator/pkg/pao/controller/performanceprofile/components/tuned"
+ testutils "github.com/openshift/cluster-node-tuning-operator/pkg/pao/utils/testing"
+ conditionsv1 "github.com/openshift/custom-resource-status/conditions/v1"
+ mcov1 "github.com/openshift/machine-config-operator/pkg/apis/machineconfiguration.openshift.io/v1"
+
+ corev1 "k8s.io/api/core/v1"
+ nodev1beta1 "k8s.io/api/node/v1beta1"
+ "k8s.io/apimachinery/pkg/api/errors"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/types"
+ "k8s.io/client-go/kubernetes/scheme"
+ "k8s.io/client-go/tools/record"
+ "k8s.io/utils/pointer"
+
+ "sigs.k8s.io/controller-runtime/pkg/client/fake"
+ "sigs.k8s.io/controller-runtime/pkg/reconcile"
+)
+
+var _ = Describe("Controller", func() {
+ var request reconcile.Request
+ var profile *performancev2.PerformanceProfile
+ var profileMCP *mcov1.MachineConfigPool
+
+ BeforeEach(func() {
+ profileMCP = testutils.NewProfileMCP()
+ profile = testutils.NewPerformanceProfile("test")
+ request = reconcile.Request{
+ NamespacedName: types.NamespacedName{
+ Namespace: metav1.NamespaceNone,
+ Name: profile.Name,
+ },
+ }
+ })
+
+ It("should add finalizer to the performance profile", func() {
+ r := newFakeReconciler(profile, profileMCP)
+
+ Expect(reconcileTimes(r, request, 1)).To(Equal(reconcile.Result{}))
+
+ updatedProfile := &performancev2.PerformanceProfile{}
+ key := types.NamespacedName{
+ Name: profile.Name,
+ Namespace: metav1.NamespaceNone,
+ }
+ Expect(r.Get(context.TODO(), key, updatedProfile)).ToNot(HaveOccurred())
+ Expect(hasFinalizer(updatedProfile, finalizer)).To(Equal(true))
+ })
+
+ Context("with profile with finalizer", func() {
+ BeforeEach(func() {
+ profile.Finalizers = append(profile.Finalizers, finalizer)
+ })
+
+ It("should create all resources on first reconcile loop", func() {
+ r := newFakeReconciler(profile, profileMCP)
+
+ Expect(reconcileTimes(r, request, 1)).To(Equal(reconcile.Result{}))
+
+ key := types.NamespacedName{
+ Name: machineconfig.GetMachineConfigName(profile),
+ Namespace: metav1.NamespaceNone,
+ }
+
+ // verify MachineConfig creation
+ mc := &mcov1.MachineConfig{}
+ err := r.Get(context.TODO(), key, mc)
+ Expect(err).ToNot(HaveOccurred())
+
+ key = types.NamespacedName{
+ Name: components.GetComponentName(profile.Name, components.ComponentNamePrefix),
+ Namespace: metav1.NamespaceNone,
+ }
+
+ // verify KubeletConfig creation
+ kc := &mcov1.KubeletConfig{}
+ err = r.Get(context.TODO(), key, kc)
+ Expect(err).ToNot(HaveOccurred())
+
+ // verify RuntimeClass creation
+ runtimeClass := &nodev1beta1.RuntimeClass{}
+ err = r.Get(context.TODO(), key, runtimeClass)
+ Expect(err).ToNot(HaveOccurred())
+
+ // verify tuned performance creation
+ tunedPerformance := &tunedv1.Tuned{}
+ key.Name = components.GetComponentName(profile.Name, components.ProfileNamePerformance)
+ key.Namespace = components.NamespaceNodeTuningOperator
+ err = r.Get(context.TODO(), key, tunedPerformance)
+ Expect(err).ToNot(HaveOccurred())
+ })
+
+ It("should create event on the second reconcile loop", func() {
+ r := newFakeReconciler(profile, profileMCP)
+
+ Expect(reconcileTimes(r, request, 2)).To(Equal(reconcile.Result{}))
+
+ // verify creation event
+ fakeRecorder, ok := r.Recorder.(*record.FakeRecorder)
+ Expect(ok).To(BeTrue())
+ event := <-fakeRecorder.Events
+ Expect(event).To(ContainSubstring("Creation succeeded"))
+ })
+
+ It("should update the profile status", func() {
+ r := newFakeReconciler(profile, profileMCP)
+
+ Expect(reconcileTimes(r, request, 1)).To(Equal(reconcile.Result{}))
+
+ updatedProfile := &performancev2.PerformanceProfile{}
+ key := types.NamespacedName{
+ Name: profile.Name,
+ Namespace: metav1.NamespaceNone,
+ }
+ Expect(r.Get(context.TODO(), key, updatedProfile)).ToNot(HaveOccurred())
+
+ // verify performance profile status
+ Expect(len(updatedProfile.Status.Conditions)).To(Equal(4))
+
+ // verify profile conditions
+ progressingCondition := conditionsv1.FindStatusCondition(updatedProfile.Status.Conditions, conditionsv1.ConditionProgressing)
+ Expect(progressingCondition).ToNot(BeNil())
+ Expect(progressingCondition.Status).To(Equal(corev1.ConditionFalse))
+ availableCondition := conditionsv1.FindStatusCondition(updatedProfile.Status.Conditions, conditionsv1.ConditionAvailable)
+ Expect(availableCondition).ToNot(BeNil())
+ Expect(availableCondition.Status).To(Equal(corev1.ConditionTrue))
+ })
+
+ It("should promote kubelet config failure condition", func() {
+ r := newFakeReconciler(profile, profileMCP)
+ Expect(reconcileTimes(r, request, 1)).To(Equal(reconcile.Result{}))
+
+ name := components.GetComponentName(profile.Name, components.ComponentNamePrefix)
+ key := types.NamespacedName{
+ Name: name,
+ Namespace: metav1.NamespaceNone,
+ }
+
+ kc := &mcov1.KubeletConfig{}
+ err := r.Get(context.TODO(), key, kc)
+ Expect(err).ToNot(HaveOccurred())
+
+ now := time.Now()
+ kc.Status.Conditions = []mcov1.KubeletConfigCondition{
+ {
+ Type: mcov1.KubeletConfigFailure,
+ Status: corev1.ConditionTrue,
+ LastTransitionTime: metav1.Time{Time: now.Add(time.Minute)},
+ Reason: "Test failure condition",
+ Message: "Test failure condition",
+ },
+ {
+ Type: mcov1.KubeletConfigSuccess,
+ Status: corev1.ConditionTrue,
+ LastTransitionTime: metav1.Time{Time: now},
+ Reason: "Test succeed condition",
+ Message: "Test succeed condition",
+ },
+ }
+ Expect(r.Update(context.TODO(), kc)).ToNot(HaveOccurred())
+
+ Expect(reconcileTimes(r, request, 1)).To(Equal(reconcile.Result{}))
+
+ updatedProfile := &performancev2.PerformanceProfile{}
+ key = types.NamespacedName{
+ Name: profile.Name,
+ Namespace: metav1.NamespaceNone,
+ }
+ Expect(r.Get(context.TODO(), key, updatedProfile)).ToNot(HaveOccurred())
+
+ degradedCondition := conditionsv1.FindStatusCondition(updatedProfile.Status.Conditions, conditionsv1.ConditionDegraded)
+ Expect(degradedCondition.Status).To(Equal(corev1.ConditionTrue))
+ Expect(degradedCondition.Message).To(Equal("Test failure condition"))
+ Expect(degradedCondition.Reason).To(Equal(conditionKubeletFailed))
+ })
+
+ It("should not promote old failure condition", func() {
+ r := newFakeReconciler(profile, profileMCP)
+ Expect(reconcileTimes(r, request, 1)).To(Equal(reconcile.Result{}))
+
+ name := components.GetComponentName(profile.Name, components.ComponentNamePrefix)
+ key := types.NamespacedName{
+ Name: name,
+ Namespace: metav1.NamespaceNone,
+ }
+
+ kc := &mcov1.KubeletConfig{}
+ err := r.Get(context.TODO(), key, kc)
+ Expect(err).ToNot(HaveOccurred())
+
+ now := time.Now()
+ kc.Status.Conditions = []mcov1.KubeletConfigCondition{
+ {
+ Type: mcov1.KubeletConfigFailure,
+ Status: corev1.ConditionTrue,
+ LastTransitionTime: metav1.Time{Time: now},
+ Reason: "Test failure condition",
+ Message: "Test failure condition",
+ },
+ {
+ Type: mcov1.KubeletConfigSuccess,
+ Status: corev1.ConditionTrue,
+ LastTransitionTime: metav1.Time{Time: now.Add(time.Minute)},
+ Reason: "Test succeed condition",
+ Message: "Test succeed condition",
+ },
+ }
+ Expect(r.Update(context.TODO(), kc)).ToNot(HaveOccurred())
+
+ Expect(reconcileTimes(r, request, 1)).To(Equal(reconcile.Result{}))
+
+ updatedProfile := &performancev2.PerformanceProfile{}
+ key = types.NamespacedName{
+ Name: profile.Name,
+ Namespace: metav1.NamespaceNone,
+ }
+ Expect(r.Get(context.TODO(), key, updatedProfile)).ToNot(HaveOccurred())
+
+ degradedCondition := conditionsv1.FindStatusCondition(updatedProfile.Status.Conditions, conditionsv1.ConditionDegraded)
+ Expect(degradedCondition.Status).To(Equal(corev1.ConditionFalse))
+ })
+
+ It("should remove outdated tuned objects", func() {
+ tunedOutdatedA, err := tuned.NewNodePerformance(profile)
+ Expect(err).ToNot(HaveOccurred())
+ tunedOutdatedA.Name = "outdated-a"
+ tunedOutdatedA.OwnerReferences = []metav1.OwnerReference{
+ {Name: profile.Name},
+ }
+ tunedOutdatedB, err := tuned.NewNodePerformance(profile)
+ Expect(err).ToNot(HaveOccurred())
+ tunedOutdatedB.Name = "outdated-b"
+ tunedOutdatedB.OwnerReferences = []metav1.OwnerReference{
+ {Name: profile.Name},
+ }
+ r := newFakeReconciler(profile, tunedOutdatedA, tunedOutdatedB, profileMCP)
+
+ keyA := types.NamespacedName{
+ Name: tunedOutdatedA.Name,
+ Namespace: tunedOutdatedA.Namespace,
+ }
+ ta := &tunedv1.Tuned{}
+ err = r.Get(context.TODO(), keyA, ta)
+ Expect(err).ToNot(HaveOccurred())
+
+ keyB := types.NamespacedName{
+			Name:      tunedOutdatedB.Name,
+			Namespace: tunedOutdatedB.Namespace,
+ }
+ tb := &tunedv1.Tuned{}
+ err = r.Get(context.TODO(), keyB, tb)
+ Expect(err).ToNot(HaveOccurred())
+
+ result, err := r.Reconcile(context.TODO(), request)
+ Expect(err).ToNot(HaveOccurred())
+ Expect(result).To(Equal(reconcile.Result{}))
+
+ tunedList := &tunedv1.TunedList{}
+ err = r.List(context.TODO(), tunedList)
+ Expect(err).ToNot(HaveOccurred())
+ Expect(len(tunedList.Items)).To(Equal(1))
+ tunedName := components.GetComponentName(profile.Name, components.ProfileNamePerformance)
+ Expect(tunedList.Items[0].Name).To(Equal(tunedName))
+ })
+
+ It("should create nothing when pause annotation is set", func() {
+ profile.Annotations = map[string]string{performancev2.PerformanceProfilePauseAnnotation: "true"}
+ r := newFakeReconciler(profile, profileMCP)
+
+ Expect(reconcileTimes(r, request, 1)).To(Equal(reconcile.Result{}))
+
+ name := components.GetComponentName(profile.Name, components.ComponentNamePrefix)
+ key := types.NamespacedName{
+ Name: name,
+ Namespace: metav1.NamespaceNone,
+ }
+
+ // verify MachineConfig wasn't created
+ mc := &mcov1.MachineConfig{}
+ err := r.Get(context.TODO(), key, mc)
+ Expect(errors.IsNotFound(err)).To(BeTrue())
+
+ // verify that KubeletConfig wasn't created
+ kc := &mcov1.KubeletConfig{}
+ err = r.Get(context.TODO(), key, kc)
+ Expect(errors.IsNotFound(err)).To(BeTrue())
+
+ // verify no machine config pool was created
+ mcp := &mcov1.MachineConfigPool{}
+ err = r.Get(context.TODO(), key, mcp)
+ Expect(errors.IsNotFound(err)).To(BeTrue())
+
+ // verify tuned Performance wasn't created
+ tunedPerformance := &tunedv1.Tuned{}
+ key.Name = components.ProfileNamePerformance
+ key.Namespace = components.NamespaceNodeTuningOperator
+ err = r.Get(context.TODO(), key, tunedPerformance)
+ Expect(errors.IsNotFound(err)).To(BeTrue())
+
+ // verify that no RuntimeClass was created
+ runtimeClass := &nodev1beta1.RuntimeClass{}
+ err = r.Get(context.TODO(), key, runtimeClass)
+ Expect(errors.IsNotFound(err)).To(BeTrue())
+ })
+
+ Context("when all components exist", func() {
+ var mc *mcov1.MachineConfig
+ var kc *mcov1.KubeletConfig
+ var tunedPerformance *tunedv1.Tuned
+ var runtimeClass *nodev1beta1.RuntimeClass
+
+ BeforeEach(func() {
+ var err error
+
+ mc, err = machineconfig.New(profile)
+ Expect(err).ToNot(HaveOccurred())
+
+ mcpSelectorKey, mcpSelectorValue := components.GetFirstKeyAndValue(profile.Spec.MachineConfigPoolSelector)
+ kc, err = kubeletconfig.New(profile, map[string]string{mcpSelectorKey: mcpSelectorValue})
+ Expect(err).ToNot(HaveOccurred())
+
+ tunedPerformance, err = tuned.NewNodePerformance(profile)
+ Expect(err).ToNot(HaveOccurred())
+
+ runtimeClass = runtimeclass.New(profile, machineconfig.HighPerformanceRuntime)
+ })
+
+ It("should not record new create event", func() {
+ r := newFakeReconciler(profile, mc, kc, tunedPerformance, runtimeClass, profileMCP)
+
+ Expect(reconcileTimes(r, request, 1)).To(Equal(reconcile.Result{}))
+
+ // verify that no creation event created
+ fakeRecorder, ok := r.Recorder.(*record.FakeRecorder)
+ Expect(ok).To(BeTrue())
+
+ select {
+ case _ = <-fakeRecorder.Events:
+ Fail("the recorder should not have new events")
+ default:
+ }
+ })
+
+ It("should update MC when RT kernel gets disabled", func() {
+ profile.Spec.RealTimeKernel.Enabled = pointer.BoolPtr(false)
+ r := newFakeReconciler(profile, mc, kc, tunedPerformance, profileMCP)
+
+ Expect(reconcileTimes(r, request, 1)).To(Equal(reconcile.Result{}))
+
+ key := types.NamespacedName{
+ Name: machineconfig.GetMachineConfigName(profile),
+ Namespace: metav1.NamespaceNone,
+ }
+
+ // verify MachineConfig update
+ mc := &mcov1.MachineConfig{}
+ err := r.Get(context.TODO(), key, mc)
+ Expect(err).ToNot(HaveOccurred())
+
+ Expect(mc.Spec.KernelType).To(Equal(machineconfig.MCKernelDefault))
+ })
+
+ It("should update MC, KC and Tuned when CPU params change", func() {
+ reserved := performancev2.CPUSet("0-1")
+ isolated := performancev2.CPUSet("2-3")
+ profile.Spec.CPU = &performancev2.CPU{
+ Reserved: &reserved,
+ Isolated: &isolated,
+ }
+
+ r := newFakeReconciler(profile, mc, kc, tunedPerformance, profileMCP)
+
+ Expect(reconcileTimes(r, request, 1)).To(Equal(reconcile.Result{}))
+
+ key := types.NamespacedName{
+ Name: components.GetComponentName(profile.Name, components.ComponentNamePrefix),
+ Namespace: metav1.NamespaceNone,
+ }
+
+ By("Verifying KC update for reserved")
+ kc := &mcov1.KubeletConfig{}
+ err := r.Get(context.TODO(), key, kc)
+ Expect(err).ToNot(HaveOccurred())
+ Expect(string(kc.Spec.KubeletConfig.Raw)).To(ContainSubstring(fmt.Sprintf(`"reservedSystemCPUs":"%s"`, string(*profile.Spec.CPU.Reserved))))
+
+ By("Verifying Tuned update for isolated")
+ key = types.NamespacedName{
+ Name: components.GetComponentName(profile.Name, components.ProfileNamePerformance),
+ Namespace: components.NamespaceNodeTuningOperator,
+ }
+ t := &tunedv1.Tuned{}
+ err = r.Get(context.TODO(), key, t)
+ Expect(err).ToNot(HaveOccurred())
+ Expect(*t.Spec.Profile[0].Data).To(ContainSubstring("isolated_cores=" + string(*profile.Spec.CPU.Isolated)))
+ })
+
+ It("should add isolcpus with managed_irq flag to tuned profile when balanced set to true", func() {
+ reserved := performancev2.CPUSet("0-1")
+ isolated := performancev2.CPUSet("2-3")
+ profile.Spec.CPU = &performancev2.CPU{
+ Reserved: &reserved,
+ Isolated: &isolated,
+ BalanceIsolated: pointer.BoolPtr(true),
+ }
+
+ r := newFakeReconciler(profile, mc, kc, tunedPerformance, profileMCP)
+
+ Expect(reconcileTimes(r, request, 1)).To(Equal(reconcile.Result{}))
+
+ key := types.NamespacedName{
+ Name: components.GetComponentName(profile.Name, components.ProfileNamePerformance),
+ Namespace: components.NamespaceNodeTuningOperator,
+ }
+ t := &tunedv1.Tuned{}
+ err := r.Get(context.TODO(), key, t)
+ Expect(err).ToNot(HaveOccurred())
+ cmdlineRealtimeWithoutCPUBalancing := regexp.MustCompile(`\s*cmdline_realtime=\+\s*tsc=nowatchdog\s+intel_iommu=on\s+iommu=pt\s+isolcpus=managed_irq\s*`)
+ Expect(cmdlineRealtimeWithoutCPUBalancing.MatchString(*t.Spec.Profile[0].Data)).To(BeTrue())
+ })
+
+ It("should add isolcpus with domain,managed_irq flags to tuned profile when balanced set to false", func() {
+ reserved := performancev2.CPUSet("0-1")
+ isolated := performancev2.CPUSet("2-3")
+ profile.Spec.CPU = &performancev2.CPU{
+ Reserved: &reserved,
+ Isolated: &isolated,
+ BalanceIsolated: pointer.BoolPtr(false),
+ }
+
+ r := newFakeReconciler(profile, mc, kc, tunedPerformance, profileMCP)
+
+ Expect(reconcileTimes(r, request, 1)).To(Equal(reconcile.Result{}))
+
+ key := types.NamespacedName{
+ Name: components.GetComponentName(profile.Name, components.ProfileNamePerformance),
+ Namespace: components.NamespaceNodeTuningOperator,
+ }
+ t := &tunedv1.Tuned{}
+ err := r.Get(context.TODO(), key, t)
+ Expect(err).ToNot(HaveOccurred())
+ cmdlineRealtimeWithoutCPUBalancing := regexp.MustCompile(`\s*cmdline_realtime=\+\s*tsc=nowatchdog\s+intel_iommu=on\s+iommu=pt\s+isolcpus=domain,managed_irq,\s*`)
+ Expect(cmdlineRealtimeWithoutCPUBalancing.MatchString(*t.Spec.Profile[0].Data)).To(BeTrue())
+ })
+
+ It("should update MC when Hugepages params change without node added", func() {
+ size := performancev2.HugePageSize("2M")
+ profile.Spec.HugePages = &performancev2.HugePages{
+ DefaultHugePagesSize: &size,
+ Pages: []performancev2.HugePage{
+ {
+ Count: 8,
+ Size: size,
+ },
+ },
+ }
+
+ r := newFakeReconciler(profile, mc, kc, tunedPerformance, profileMCP)
+
+ Expect(reconcileTimes(r, request, 1)).To(Equal(reconcile.Result{}))
+
+ By("Verifying Tuned profile update")
+ key := types.NamespacedName{
+ Name: components.GetComponentName(profile.Name, components.ProfileNamePerformance),
+ Namespace: components.NamespaceNodeTuningOperator,
+ }
+ t := &tunedv1.Tuned{}
+ err := r.Get(context.TODO(), key, t)
+ Expect(err).ToNot(HaveOccurred())
+ cmdlineHugepages := regexp.MustCompile(`\s*cmdline_hugepages=\+\s*default_hugepagesz=2M\s+hugepagesz=2M\s+hugepages=8\s*`)
+ Expect(cmdlineHugepages.MatchString(*t.Spec.Profile[0].Data)).To(BeTrue())
+ })
+
+ It("should update Tuned when Hugepages params change with node added", func() {
+ size := performancev2.HugePageSize("2M")
+ profile.Spec.HugePages = &performancev2.HugePages{
+ DefaultHugePagesSize: &size,
+ Pages: []performancev2.HugePage{
+ {
+ Count: 8,
+ Size: size,
+ Node: pointer.Int32Ptr(0),
+ },
+ },
+ }
+
+ r := newFakeReconciler(profile, mc, kc, tunedPerformance, profileMCP)
+
+ Expect(reconcileTimes(r, request, 1)).To(Equal(reconcile.Result{}))
+
+ By("Verifying Tuned update")
+ key := types.NamespacedName{
+ Name: components.GetComponentName(profile.Name, components.ProfileNamePerformance),
+ Namespace: components.NamespaceNodeTuningOperator,
+ }
+ t := &tunedv1.Tuned{}
+ err := r.Get(context.TODO(), key, t)
+ Expect(err).ToNot(HaveOccurred())
+ cmdlineHugepages := regexp.MustCompile(`\s*cmdline_hugepages=\+\s*`)
+ Expect(cmdlineHugepages.MatchString(*t.Spec.Profile[0].Data)).To(BeTrue())
+
+ By("Verifying MC update")
+ key = types.NamespacedName{
+ Name: machineconfig.GetMachineConfigName(profile),
+ Namespace: metav1.NamespaceNone,
+ }
+ mc := &mcov1.MachineConfig{}
+ err = r.Get(context.TODO(), key, mc)
+ Expect(err).ToNot(HaveOccurred())
+
+ config := &igntypes.Config{}
+ err = json.Unmarshal(mc.Spec.Config.Raw, config)
+ Expect(err).ToNot(HaveOccurred())
+
+ Expect(config.Systemd.Units).To(ContainElement(MatchFields(IgnoreMissing|IgnoreExtras, Fields{
+ "Contents": And(
+ ContainSubstring("Description=Hugepages"),
+ ContainSubstring("Environment=HUGEPAGES_COUNT=8"),
+ ContainSubstring("Environment=HUGEPAGES_SIZE=2048"),
+ ContainSubstring("Environment=NUMA_NODE=0"),
+ ),
+ })))
+
+ })
+
+ It("should update status with generated tuned", func() {
+ r := newFakeReconciler(profile, mc, kc, tunedPerformance, profileMCP)
+ Expect(reconcileTimes(r, request, 1)).To(Equal(reconcile.Result{}))
+ key := types.NamespacedName{
+ Name: components.GetComponentName(profile.Name, components.ProfileNamePerformance),
+ Namespace: components.NamespaceNodeTuningOperator,
+ }
+ t := &tunedv1.Tuned{}
+ err := r.Get(context.TODO(), key, t)
+ Expect(err).ToNot(HaveOccurred())
+ tunedNamespacedName := namespacedName(t).String()
+ updatedProfile := &performancev2.PerformanceProfile{}
+ key = types.NamespacedName{
+ Name: profile.Name,
+ Namespace: metav1.NamespaceNone,
+ }
+ Expect(r.Get(context.TODO(), key, updatedProfile)).ToNot(HaveOccurred())
+ Expect(updatedProfile.Status.Tuned).NotTo(BeNil())
+ Expect(*updatedProfile.Status.Tuned).To(Equal(tunedNamespacedName))
+ })
+
+ It("should update status with generated runtime class", func() {
+ r := newFakeReconciler(profile, mc, kc, tunedPerformance, runtimeClass, profileMCP)
+ Expect(reconcileTimes(r, request, 1)).To(Equal(reconcile.Result{}))
+
+ key := types.NamespacedName{
+ Name: components.GetComponentName(profile.Name, components.ComponentNamePrefix),
+ Namespace: metav1.NamespaceAll,
+ }
+ runtimeClass := &nodev1beta1.RuntimeClass{}
+ err := r.Get(context.TODO(), key, runtimeClass)
+ Expect(err).ToNot(HaveOccurred())
+
+ updatedProfile := &performancev2.PerformanceProfile{}
+ key = types.NamespacedName{
+ Name: profile.Name,
+ Namespace: metav1.NamespaceAll,
+ }
+ Expect(r.Get(context.TODO(), key, updatedProfile)).ToNot(HaveOccurred())
+ Expect(updatedProfile.Status.RuntimeClass).NotTo(BeNil())
+ Expect(*updatedProfile.Status.RuntimeClass).To(Equal(runtimeClass.Name))
+ })
+
+ It("should update status when MCP is degraded", func() {
+ mcpReason := "mcpReason"
+ mcpMessage := "MCP message"
+
+ mcp := &mcov1.MachineConfigPool{
+ TypeMeta: metav1.TypeMeta{
+ APIVersion: mcov1.GroupVersion.String(),
+ Kind: "MachineConfigPool",
+ },
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "mcp-test",
+ Labels: map[string]string{
+ testutils.MachineConfigPoolLabelKey: testutils.MachineConfigPoolLabelValue,
+ },
+ },
+ Spec: mcov1.MachineConfigPoolSpec{
+ NodeSelector: &metav1.LabelSelector{
+ MatchLabels: map[string]string{"nodekey": "nodeValue"},
+ },
+ MachineConfigSelector: &metav1.LabelSelector{
+ MatchExpressions: []metav1.LabelSelectorRequirement{
+ {
+ Key: testutils.MachineConfigLabelKey,
+ Operator: metav1.LabelSelectorOpIn,
+ Values: []string{testutils.MachineConfigLabelValue},
+ },
+ },
+ },
+ },
+ Status: mcov1.MachineConfigPoolStatus{
+ Conditions: []mcov1.MachineConfigPoolCondition{
+ {
+ Type: mcov1.MachineConfigPoolNodeDegraded,
+ Status: corev1.ConditionTrue,
+ Reason: mcpReason,
+ Message: mcpMessage,
+ },
+ },
+ },
+ }
+
+ r := newFakeReconciler(profile, mc, kc, tunedPerformance, mcp)
+
+ Expect(reconcileTimes(r, request, 1)).To(Equal(reconcile.Result{}))
+
+ updatedProfile := &performancev2.PerformanceProfile{}
+ key := types.NamespacedName{
+ Name: profile.Name,
+ Namespace: metav1.NamespaceNone,
+ }
+ Expect(r.Get(context.TODO(), key, updatedProfile)).ToNot(HaveOccurred())
+
+ // verify performance profile status
+ Expect(len(updatedProfile.Status.Conditions)).To(Equal(4))
+
+ // verify profile conditions
+ degradedCondition := conditionsv1.FindStatusCondition(updatedProfile.Status.Conditions, conditionsv1.ConditionDegraded)
+ Expect(degradedCondition).ToNot(BeNil())
+ Expect(degradedCondition.Status).To(Equal(corev1.ConditionTrue))
+ Expect(degradedCondition.Reason).To(Equal(conditionReasonMCPDegraded))
+ Expect(degradedCondition.Message).To(ContainSubstring(mcpMessage))
+ })
+
+ It("should update status when TunedProfile is degraded", func() {
+ tunedReason := "tunedReason"
+ tunedMessage := "Tuned message"
+
+ tuned := &tunedv1.Profile{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "tuned-profile-test",
+ },
+ Status: tunedv1.ProfileStatus{
+ Conditions: []tunedv1.ProfileStatusCondition{
+ {
+ Type: tunedv1.TunedDegraded,
+ Status: corev1.ConditionTrue,
+ Reason: tunedReason,
+ Message: tunedMessage,
+ },
+ {
+ Type: tunedv1.TunedProfileApplied,
+ Status: corev1.ConditionFalse,
+ Reason: tunedReason,
+ Message: tunedMessage,
+ },
+ },
+ },
+ }
+
+ nodes := &corev1.NodeList{
+ Items: []corev1.Node{
+ {
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "tuned-profile-test",
+ Labels: map[string]string{
+ "nodekey": "nodeValue",
+ },
+ },
+ },
+ {
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "tuned-profile-test2",
+ },
+ },
+ },
+ }
+
+ r := newFakeReconciler(profile, mc, kc, tunedPerformance, tuned, nodes, profileMCP)
+
+ Expect(reconcileTimes(r, request, 1)).To(Equal(reconcile.Result{}))
+
+ updatedProfile := &performancev2.PerformanceProfile{}
+ key := types.NamespacedName{
+ Name: profile.Name,
+ Namespace: metav1.NamespaceNone,
+ }
+ Expect(r.Get(context.TODO(), key, updatedProfile)).ToNot(HaveOccurred())
+
+ // verify performance profile status
+ Expect(len(updatedProfile.Status.Conditions)).To(Equal(4))
+
+ // verify profile conditions
+ degradedCondition := conditionsv1.FindStatusCondition(updatedProfile.Status.Conditions, conditionsv1.ConditionDegraded)
+ Expect(degradedCondition).ToNot(BeNil())
+ Expect(degradedCondition.Status).To(Equal(corev1.ConditionTrue))
+ Expect(degradedCondition.Reason).To(Equal(conditionReasonTunedDegraded))
+ Expect(degradedCondition.Message).To(ContainSubstring(tunedMessage))
+ })
+ })
+
+ When("the provided machine config labels are different from one specified under the machine config pool", func() {
+ It("should move the performance profile to the degraded state", func() {
+ profileMCP.Spec.MachineConfigSelector = &metav1.LabelSelector{
+ MatchLabels: map[string]string{"wrongKey": "bad"},
+ }
+ r := newFakeReconciler(profile, profileMCP)
+ Expect(reconcileTimes(r, request, 1)).To(Equal(reconcile.Result{}))
+
+ updatedProfile := &performancev2.PerformanceProfile{}
+ key := types.NamespacedName{
+ Name: profile.Name,
+ Namespace: metav1.NamespaceNone,
+ }
+ Expect(r.Get(context.TODO(), key, updatedProfile)).ToNot(HaveOccurred())
+
+ // verify performance profile status
+ Expect(len(updatedProfile.Status.Conditions)).To(Equal(4))
+
+ // verify profile conditions
+ degradedCondition := conditionsv1.FindStatusCondition(updatedProfile.Status.Conditions, conditionsv1.ConditionDegraded)
+ Expect(degradedCondition).ToNot(BeNil())
+ Expect(degradedCondition.Status).To(Equal(corev1.ConditionTrue))
+ Expect(degradedCondition.Reason).To(Equal(conditionBadMachineConfigLabels))
+ Expect(degradedCondition.Message).To(ContainSubstring("provided via profile.spec.machineConfigLabel do not match the MachineConfigPool"))
+ })
+ })
+
+ When("the generated machine config labels are different from one specified under the machine config pool", func() {
+ It("should move the performance profile to the degraded state", func() {
+ profileMCP.Spec.MachineConfigSelector = &metav1.LabelSelector{
+ MatchLabels: map[string]string{"wrongKey": "bad"},
+ }
+ profile.Spec.MachineConfigLabel = nil
+ r := newFakeReconciler(profile, profileMCP)
+ Expect(reconcileTimes(r, request, 1)).To(Equal(reconcile.Result{}))
+
+ updatedProfile := &performancev2.PerformanceProfile{}
+ key := types.NamespacedName{
+ Name: profile.Name,
+ Namespace: metav1.NamespaceNone,
+ }
+ Expect(r.Get(context.TODO(), key, updatedProfile)).ToNot(HaveOccurred())
+
+ // verify performance profile status
+ Expect(len(updatedProfile.Status.Conditions)).To(Equal(4))
+
+ // verify profile conditions
+ degradedCondition := conditionsv1.FindStatusCondition(updatedProfile.Status.Conditions, conditionsv1.ConditionDegraded)
+ Expect(degradedCondition).ToNot(BeNil())
+ Expect(degradedCondition.Status).To(Equal(corev1.ConditionTrue))
+ Expect(degradedCondition.Reason).To(Equal(conditionBadMachineConfigLabels))
+ Expect(degradedCondition.Message).To(ContainSubstring("generated from the profile.spec.nodeSelector"))
+ })
+ })
+ })
+
+ Context("with profile with deletion timestamp", func() {
+ BeforeEach(func() {
+ profile.DeletionTimestamp = &metav1.Time{
+ Time: time.Now(),
+ }
+ profile.Finalizers = append(profile.Finalizers, finalizer)
+ })
+
+ It("should remove all components and remove the finalizer on first reconcile loop", func() {
+ mc, err := machineconfig.New(profile)
+ Expect(err).ToNot(HaveOccurred())
+
+ mcpSelectorKey, mcpSelectorValue := components.GetFirstKeyAndValue(profile.Spec.MachineConfigPoolSelector)
+ kc, err := kubeletconfig.New(profile, map[string]string{mcpSelectorKey: mcpSelectorValue})
+ Expect(err).ToNot(HaveOccurred())
+
+ tunedPerformance, err := tuned.NewNodePerformance(profile)
+ Expect(err).ToNot(HaveOccurred())
+
+ runtimeClass := runtimeclass.New(profile, machineconfig.HighPerformanceRuntime)
+
+ r := newFakeReconciler(profile, mc, kc, tunedPerformance, runtimeClass, profileMCP)
+ result, err := r.Reconcile(context.TODO(), request)
+ Expect(err).ToNot(HaveOccurred())
+ Expect(result).To(Equal(reconcile.Result{}))
+
+ // verify that controller deleted all components
+ name := components.GetComponentName(profile.Name, components.ComponentNamePrefix)
+ key := types.NamespacedName{
+ Name: name,
+ Namespace: metav1.NamespaceNone,
+ }
+
+ // verify MachineConfig deletion
+ err = r.Get(context.TODO(), key, mc)
+ Expect(errors.IsNotFound(err)).To(Equal(true))
+
+ // verify KubeletConfig deletion
+ err = r.Get(context.TODO(), key, kc)
+ Expect(errors.IsNotFound(err)).To(Equal(true))
+
+ // verify RuntimeClass deletion
+ err = r.Get(context.TODO(), key, runtimeClass)
+ Expect(errors.IsNotFound(err)).To(Equal(true))
+
+ // verify tuned real-time kernel deletion
+ key.Name = components.GetComponentName(profile.Name, components.ProfileNamePerformance)
+ key.Namespace = components.NamespaceNodeTuningOperator
+ err = r.Get(context.TODO(), key, tunedPerformance)
+ Expect(errors.IsNotFound(err)).To(Equal(true))
+
+ // verify profile deletion
+ key.Name = profile.Name
+ key.Namespace = metav1.NamespaceNone
+ updatedProfile := &performancev2.PerformanceProfile{}
+ err = r.Get(context.TODO(), key, updatedProfile)
+ Expect(errors.IsNotFound(err)).To(Equal(true))
+ })
+ })
+
+ It("should map machine config pool to the performance profile", func() {
+ mcp := &mcov1.MachineConfigPool{
+ TypeMeta: metav1.TypeMeta{
+ APIVersion: mcov1.GroupVersion.String(),
+ Kind: "MachineConfigPool",
+ },
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "mcp-test",
+ },
+ Spec: mcov1.MachineConfigPoolSpec{
+ NodeSelector: &metav1.LabelSelector{
+ MatchLabels: map[string]string{"nodekey": "nodeValue"},
+ },
+ MachineConfigSelector: &metav1.LabelSelector{
+ MatchExpressions: []metav1.LabelSelectorRequirement{
+ {
+ Key: testutils.MachineConfigLabelKey,
+ Operator: metav1.LabelSelectorOpIn,
+ Values: []string{testutils.MachineConfigLabelValue},
+ },
+ },
+ },
+ },
+ }
+ r := newFakeReconciler(profile, mcp)
+ requests := r.mcpToPerformanceProfile(mcp)
+ Expect(requests).NotTo(BeEmpty())
+ Expect(requests[0].Name).To(Equal(profile.Name))
+ })
+})
+
+func reconcileTimes(reconciler *PerformanceProfileReconciler, request reconcile.Request, times int) reconcile.Result {
+ var result reconcile.Result
+ var err error
+ for i := 0; i < times; i++ {
+ result, err = reconciler.Reconcile(context.TODO(), request)
+ Expect(err).ToNot(HaveOccurred())
+ }
+ return result
+}
+
+// newFakeReconciler returns a new reconcile.Reconciler with a fake client
+func newFakeReconciler(initObjects ...runtime.Object) *PerformanceProfileReconciler {
+ fakeClient := fake.NewClientBuilder().WithScheme(scheme.Scheme).WithRuntimeObjects(initObjects...).Build()
+ fakeRecorder := record.NewFakeRecorder(10)
+ return &PerformanceProfileReconciler{
+ Client: fakeClient,
+ Scheme: scheme.Scheme,
+ Recorder: fakeRecorder,
+ }
+}
diff --git a/pkg/pao/controller/resources.go b/pkg/pao/controller/resources.go
new file mode 100644
index 000000000..4830836a5
--- /dev/null
+++ b/pkg/pao/controller/resources.go
@@ -0,0 +1,331 @@
+package controller
+
+import (
+ "context"
+ "encoding/json"
+ "reflect"
+
+ tunedv1 "github.com/openshift/cluster-node-tuning-operator/pkg/apis/tuned/v1"
+ mcov1 "github.com/openshift/machine-config-operator/pkg/apis/machineconfiguration.openshift.io/v1"
+
+ nodev1beta1 "k8s.io/api/node/v1beta1"
+ apiequality "k8s.io/apimachinery/pkg/api/equality"
+ "k8s.io/apimachinery/pkg/api/errors"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/types"
+ "k8s.io/klog"
+ kubeletconfigv1beta1 "k8s.io/kubelet/config/v1beta1"
+)
+
+func mergeMaps(src map[string]string, dst map[string]string) {
+ for k, v := range src {
+ // NOTE: it will override destination values
+ dst[k] = v
+ }
+}
+
+// TODO: we should merge all create, get and delete methods
+
+func (r *PerformanceProfileReconciler) getMachineConfig(name string) (*mcov1.MachineConfig, error) {
+ mc := &mcov1.MachineConfig{}
+ key := types.NamespacedName{
+ Name: name,
+ Namespace: metav1.NamespaceNone,
+ }
+ if err := r.Get(context.TODO(), key, mc); err != nil {
+ return nil, err
+ }
+ return mc, nil
+}
+
+func (r *PerformanceProfileReconciler) getMutatedMachineConfig(mc *mcov1.MachineConfig) (*mcov1.MachineConfig, error) {
+ existing, err := r.getMachineConfig(mc.Name)
+ if errors.IsNotFound(err) {
+ return mc, nil
+ }
+
+ if err != nil {
+ return nil, err
+ }
+
+ mutated := existing.DeepCopy()
+ mergeMaps(mc.Annotations, mutated.Annotations)
+ mergeMaps(mc.Labels, mutated.Labels)
+ mutated.Spec = mc.Spec
+
+	// we do not need to update if there is no change between the mutated and existing objects
+ if reflect.DeepEqual(existing.Spec, mutated.Spec) &&
+ apiequality.Semantic.DeepEqual(existing.Labels, mutated.Labels) &&
+ apiequality.Semantic.DeepEqual(existing.Annotations, mutated.Annotations) {
+ return nil, nil
+ }
+
+ return mutated, nil
+}
+
+func (r *PerformanceProfileReconciler) createOrUpdateMachineConfig(mc *mcov1.MachineConfig) error {
+ _, err := r.getMachineConfig(mc.Name)
+ if errors.IsNotFound(err) {
+ klog.Infof("Create machine-config %q", mc.Name)
+ if err := r.Create(context.TODO(), mc); err != nil {
+ return err
+ }
+ return nil
+ }
+
+ if err != nil {
+ return err
+ }
+
+ klog.Infof("Update machine-config %q", mc.Name)
+ return r.Update(context.TODO(), mc)
+}
+
+func (r *PerformanceProfileReconciler) deleteMachineConfig(name string) error {
+ mc, err := r.getMachineConfig(name)
+ if errors.IsNotFound(err) {
+ return nil
+ }
+ if err != nil {
+ return err
+ }
+ return r.Delete(context.TODO(), mc)
+}
+
+func (r *PerformanceProfileReconciler) getKubeletConfig(name string) (*mcov1.KubeletConfig, error) {
+ kc := &mcov1.KubeletConfig{}
+ key := types.NamespacedName{
+ Name: name,
+ Namespace: metav1.NamespaceNone,
+ }
+ if err := r.Get(context.TODO(), key, kc); err != nil {
+ return nil, err
+ }
+ return kc, nil
+}
+
+func (r *PerformanceProfileReconciler) getMutatedKubeletConfig(kc *mcov1.KubeletConfig) (*mcov1.KubeletConfig, error) {
+ existing, err := r.getKubeletConfig(kc.Name)
+ if errors.IsNotFound(err) {
+ return kc, nil
+ }
+
+ if err != nil {
+ return nil, err
+ }
+
+ mutated := existing.DeepCopy()
+ mergeMaps(kc.Annotations, mutated.Annotations)
+ mergeMaps(kc.Labels, mutated.Labels)
+ mutated.Spec = kc.Spec
+
+ existingKubeletConfig := &kubeletconfigv1beta1.KubeletConfiguration{}
+ err = json.Unmarshal(existing.Spec.KubeletConfig.Raw, existingKubeletConfig)
+ if err != nil {
+ return nil, err
+ }
+
+ mutatedKubeletConfig := &kubeletconfigv1beta1.KubeletConfiguration{}
+ err = json.Unmarshal(mutated.Spec.KubeletConfig.Raw, mutatedKubeletConfig)
+ if err != nil {
+ return nil, err
+ }
+
+	// we do not need to update if there is no change between the mutated and existing objects
+ if apiequality.Semantic.DeepEqual(existingKubeletConfig, mutatedKubeletConfig) &&
+ apiequality.Semantic.DeepEqual(existing.Spec.MachineConfigPoolSelector, mutated.Spec.MachineConfigPoolSelector) &&
+ apiequality.Semantic.DeepEqual(existing.Labels, mutated.Labels) &&
+ apiequality.Semantic.DeepEqual(existing.Annotations, mutated.Annotations) {
+ return nil, nil
+ }
+
+ return mutated, nil
+}
+
+func (r *PerformanceProfileReconciler) createOrUpdateKubeletConfig(kc *mcov1.KubeletConfig) error {
+ _, err := r.getKubeletConfig(kc.Name)
+ if errors.IsNotFound(err) {
+ klog.Infof("Create kubelet-config %q", kc.Name)
+ if err := r.Create(context.TODO(), kc); err != nil {
+ return err
+ }
+ return nil
+ }
+
+ if err != nil {
+ return err
+ }
+
+ klog.Infof("Update kubelet-config %q", kc.Name)
+ return r.Update(context.TODO(), kc)
+}
+
+func (r *PerformanceProfileReconciler) deleteKubeletConfig(name string) error {
+ kc, err := r.getKubeletConfig(name)
+ if errors.IsNotFound(err) {
+ return nil
+ }
+ if err != nil {
+ return err
+ }
+ return r.Delete(context.TODO(), kc)
+}
+
+func (r *PerformanceProfileReconciler) getTuned(name string, namespace string) (*tunedv1.Tuned, error) {
+ tuned := &tunedv1.Tuned{}
+ key := types.NamespacedName{
+ Name: name,
+ Namespace: namespace,
+ }
+ if err := r.Get(context.TODO(), key, tuned); err != nil {
+ return nil, err
+ }
+ return tuned, nil
+}
+
+func (r *PerformanceProfileReconciler) getMutatedTuned(tuned *tunedv1.Tuned) (*tunedv1.Tuned, error) {
+ existing, err := r.getTuned(tuned.Name, tuned.Namespace)
+ if errors.IsNotFound(err) {
+ return tuned, nil
+ }
+
+ if err != nil {
+ return nil, err
+ }
+
+ mutated := existing.DeepCopy()
+ mergeMaps(tuned.Annotations, mutated.Annotations)
+ mergeMaps(tuned.Labels, mutated.Labels)
+ mutated.Spec = tuned.Spec
+
+	// we do not need to update if there is no change between the mutated and existing objects
+ if apiequality.Semantic.DeepEqual(existing.Spec, mutated.Spec) &&
+ apiequality.Semantic.DeepEqual(existing.Labels, mutated.Labels) &&
+ apiequality.Semantic.DeepEqual(existing.Annotations, mutated.Annotations) {
+ return nil, nil
+ }
+
+ return mutated, nil
+}
+
+func (r *PerformanceProfileReconciler) createOrUpdateTuned(tuned *tunedv1.Tuned, profileName string) error {
+
+ if err := r.removeOutdatedTuned(tuned, profileName); err != nil {
+ return err
+ }
+
+ _, err := r.getTuned(tuned.Name, tuned.Namespace)
+ if errors.IsNotFound(err) {
+ klog.Infof("Create tuned %q under the namespace %q", tuned.Name, tuned.Namespace)
+ if err := r.Create(context.TODO(), tuned); err != nil {
+ return err
+ }
+ return nil
+ }
+
+ if err != nil {
+ return err
+ }
+
+ klog.Infof("Update tuned %q under the namespace %q", tuned.Name, tuned.Namespace)
+ return r.Update(context.TODO(), tuned)
+}
+
+func (r *PerformanceProfileReconciler) removeOutdatedTuned(tuned *tunedv1.Tuned, profileName string) error {
+ tunedList := &tunedv1.TunedList{}
+ if err := r.List(context.TODO(), tunedList); err != nil {
+ klog.Errorf("Unable to list tuned objects for outdated removal procedure: %v", err)
+ return err
+ }
+
+ for t := range tunedList.Items {
+ tunedItem := tunedList.Items[t]
+ ownerReferences := tunedItem.ObjectMeta.OwnerReferences
+ for o := range ownerReferences {
+ if ownerReferences[o].Name == profileName && tunedItem.Name != tuned.Name {
+ if err := r.deleteTuned(tunedItem.Name, tunedItem.Namespace); err != nil {
+ return err
+ }
+ }
+ }
+ }
+ return nil
+}
+
+func (r *PerformanceProfileReconciler) deleteTuned(name string, namespace string) error {
+ tuned, err := r.getTuned(name, namespace)
+ if errors.IsNotFound(err) {
+ return nil
+ }
+ if err != nil {
+ return err
+ }
+ return r.Delete(context.TODO(), tuned)
+}
+
+func (r *PerformanceProfileReconciler) getRuntimeClass(name string) (*nodev1beta1.RuntimeClass, error) {
+ runtimeClass := &nodev1beta1.RuntimeClass{}
+ key := types.NamespacedName{
+ Name: name,
+ }
+ if err := r.Get(context.TODO(), key, runtimeClass); err != nil {
+ return nil, err
+ }
+ return runtimeClass, nil
+}
+
+func (r *PerformanceProfileReconciler) getMutatedRuntimeClass(runtimeClass *nodev1beta1.RuntimeClass) (*nodev1beta1.RuntimeClass, error) {
+ existing, err := r.getRuntimeClass(runtimeClass.Name)
+ if errors.IsNotFound(err) {
+ return runtimeClass, nil
+ }
+
+ if err != nil {
+ return nil, err
+ }
+
+ mutated := existing.DeepCopy()
+ mergeMaps(runtimeClass.Annotations, mutated.Annotations)
+ mergeMaps(runtimeClass.Labels, mutated.Labels)
+ mutated.Handler = runtimeClass.Handler
+ mutated.Scheduling = runtimeClass.Scheduling
+
+	// we do not need to update if there is no change between the mutated and existing objects
+ if apiequality.Semantic.DeepEqual(existing.Handler, mutated.Handler) &&
+ apiequality.Semantic.DeepEqual(existing.Scheduling, mutated.Scheduling) &&
+ apiequality.Semantic.DeepEqual(existing.Labels, mutated.Labels) &&
+ apiequality.Semantic.DeepEqual(existing.Annotations, mutated.Annotations) {
+ return nil, nil
+ }
+
+ return mutated, nil
+}
+
+func (r *PerformanceProfileReconciler) createOrUpdateRuntimeClass(runtimeClass *nodev1beta1.RuntimeClass) error {
+ _, err := r.getRuntimeClass(runtimeClass.Name)
+ if errors.IsNotFound(err) {
+ klog.Infof("Create runtime class %q", runtimeClass.Name)
+ if err := r.Create(context.TODO(), runtimeClass); err != nil {
+ return err
+ }
+ return nil
+ }
+
+ if err != nil {
+ return err
+ }
+
+ klog.Infof("Update runtime class %q", runtimeClass.Name)
+ return r.Update(context.TODO(), runtimeClass)
+}
+
+func (r *PerformanceProfileReconciler) deleteRuntimeClass(name string) error {
+ runtimeClass, err := r.getRuntimeClass(name)
+ if errors.IsNotFound(err) {
+ return nil
+ }
+ if err != nil {
+ return err
+ }
+ return r.Delete(context.TODO(), runtimeClass)
+}
diff --git a/pkg/pao/controller/status.go b/pkg/pao/controller/status.go
new file mode 100644
index 000000000..09b8858f1
--- /dev/null
+++ b/pkg/pao/controller/status.go
@@ -0,0 +1,296 @@
+package controller
+
+import (
+ "bytes"
+ "context"
+ "time"
+
+ performancev2 "github.com/openshift/cluster-node-tuning-operator/pkg/apis/pao/v2"
+ tunedv1 "github.com/openshift/cluster-node-tuning-operator/pkg/apis/tuned/v1"
+ "github.com/openshift/cluster-node-tuning-operator/pkg/pao/controller/performanceprofile/components"
+ conditionsv1 "github.com/openshift/custom-resource-status/conditions/v1"
+ mcov1 "github.com/openshift/machine-config-operator/pkg/apis/machineconfiguration.openshift.io/v1"
+ corev1 "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/api/errors"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/labels"
+ "k8s.io/apimachinery/pkg/types"
+ "k8s.io/klog"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+)
+
+const (
+ conditionFailedToFindMachineConfigPool = "GettingMachineConfigPoolFailed"
+ conditionBadMachineConfigLabels = "BadMachineConfigLabels"
+ conditionReasonComponentsCreationFailed = "ComponentCreationFailed"
+ conditionReasonMCPDegraded = "MCPDegraded"
+ conditionFailedGettingMCPStatus = "GettingMCPStatusFailed"
+ conditionKubeletFailed = "KubeletConfig failure"
+ conditionFailedGettingKubeletStatus = "GettingKubeletStatusFailed"
+ conditionReasonTunedDegraded = "TunedProfileDegraded"
+ conditionFailedGettingTunedProfileStatus = "GettingTunedStatusFailed"
+)
+
+func (r *PerformanceProfileReconciler) updateStatus(profile *performancev2.PerformanceProfile, conditions []conditionsv1.Condition) error {
+ profileCopy := profile.DeepCopy()
+
+ if conditions != nil {
+ profileCopy.Status.Conditions = conditions
+ }
+
+ // check if we need to update the status
+ modified := false
+
+ // since we always set the same four conditions, we don't need to check if we need to remove old conditions
+ for _, newCondition := range profileCopy.Status.Conditions {
+ oldCondition := conditionsv1.FindStatusCondition(profile.Status.Conditions, newCondition.Type)
+ if oldCondition == nil {
+ modified = true
+ break
+ }
+
+ // ignore timestamps to avoid infinite reconcile loops
+ if oldCondition.Status != newCondition.Status ||
+ oldCondition.Reason != newCondition.Reason ||
+ oldCondition.Message != newCondition.Message {
+
+ modified = true
+ break
+ }
+ }
+
+ if profileCopy.Status.Tuned == nil {
+ tunedNamespacedname := types.NamespacedName{
+ Name: components.GetComponentName(profile.Name, components.ProfileNamePerformance),
+ Namespace: components.NamespaceNodeTuningOperator,
+ }
+ tunedStatus := tunedNamespacedname.String()
+ profileCopy.Status.Tuned = &tunedStatus
+ modified = true
+ }
+
+ if profileCopy.Status.RuntimeClass == nil {
+ runtimeClassName := components.GetComponentName(profile.Name, components.ComponentNamePrefix)
+ profileCopy.Status.RuntimeClass = &runtimeClassName
+ modified = true
+ }
+
+ if !modified {
+ return nil
+ }
+
+ klog.Infof("Updating the performance profile %q status", profile.Name)
+ return r.Status().Update(context.TODO(), profileCopy)
+}
+
+func (r *PerformanceProfileReconciler) getAvailableConditions() []conditionsv1.Condition {
+ now := time.Now()
+ return []conditionsv1.Condition{
+ {
+ Type: conditionsv1.ConditionAvailable,
+ Status: corev1.ConditionTrue,
+ LastTransitionTime: metav1.Time{Time: now},
+ LastHeartbeatTime: metav1.Time{Time: now},
+ },
+ {
+ Type: conditionsv1.ConditionUpgradeable,
+ Status: corev1.ConditionTrue,
+ LastTransitionTime: metav1.Time{Time: now},
+ LastHeartbeatTime: metav1.Time{Time: now},
+ },
+ {
+ Type: conditionsv1.ConditionProgressing,
+ Status: corev1.ConditionFalse,
+ LastTransitionTime: metav1.Time{Time: now},
+ LastHeartbeatTime: metav1.Time{Time: now},
+ },
+ {
+ Type: conditionsv1.ConditionDegraded,
+ Status: corev1.ConditionFalse,
+ LastTransitionTime: metav1.Time{Time: now},
+ LastHeartbeatTime: metav1.Time{Time: now},
+ },
+ }
+}
+
+func (r *PerformanceProfileReconciler) getDegradedConditions(reason string, message string) []conditionsv1.Condition {
+ now := time.Now()
+ return []conditionsv1.Condition{
+ {
+ Type: conditionsv1.ConditionAvailable,
+ Status: corev1.ConditionFalse,
+ LastTransitionTime: metav1.Time{Time: now},
+ LastHeartbeatTime: metav1.Time{Time: now},
+ },
+ {
+ Type: conditionsv1.ConditionUpgradeable,
+ Status: corev1.ConditionFalse,
+ LastTransitionTime: metav1.Time{Time: now},
+ LastHeartbeatTime: metav1.Time{Time: now},
+ },
+ {
+ Type: conditionsv1.ConditionProgressing,
+ Status: corev1.ConditionFalse,
+ LastTransitionTime: metav1.Time{Time: now},
+ LastHeartbeatTime: metav1.Time{Time: now},
+ },
+ {
+ Type: conditionsv1.ConditionDegraded,
+ Status: corev1.ConditionTrue,
+ LastTransitionTime: metav1.Time{Time: now},
+ LastHeartbeatTime: metav1.Time{Time: now},
+ Reason: reason,
+ Message: message,
+ },
+ }
+}
+
+func (r *PerformanceProfileReconciler) getProgressingConditions(reason string, message string) []conditionsv1.Condition {
+ now := time.Now()
+
+ return []conditionsv1.Condition{
+ {
+ Type: conditionsv1.ConditionAvailable,
+ Status: corev1.ConditionFalse,
+ LastTransitionTime: metav1.Time{Time: now},
+ },
+ {
+ Type: conditionsv1.ConditionUpgradeable,
+ Status: corev1.ConditionFalse,
+ LastTransitionTime: metav1.Time{Time: now},
+ },
+ {
+ Type: conditionsv1.ConditionProgressing,
+ Status: corev1.ConditionTrue,
+ LastTransitionTime: metav1.Time{Time: now},
+ Reason: reason,
+ Message: message,
+ },
+ {
+ Type: conditionsv1.ConditionDegraded,
+ Status: corev1.ConditionFalse,
+ LastTransitionTime: metav1.Time{Time: now},
+ },
+ }
+}
+
+func (r *PerformanceProfileReconciler) getMCPDegradedCondition(profileMCP *mcov1.MachineConfigPool) ([]conditionsv1.Condition, error) {
+ message := bytes.Buffer{}
+ for _, condition := range profileMCP.Status.Conditions {
+ if (condition.Type == mcov1.MachineConfigPoolNodeDegraded || condition.Type == mcov1.MachineConfigPoolRenderDegraded) && condition.Status == corev1.ConditionTrue {
+ if len(condition.Reason) > 0 {
+ message.WriteString("Machine config pool " + profileMCP.Name + " Degraded Reason: " + condition.Reason + ".\n")
+ }
+ if len(condition.Message) > 0 {
+ message.WriteString("Machine config pool " + profileMCP.Name + " Degraded Message: " + condition.Message + ".\n")
+ }
+ }
+ }
+
+ messageString := message.String()
+ if len(messageString) == 0 {
+ return nil, nil
+ }
+
+ return r.getDegradedConditions(conditionReasonMCPDegraded, messageString), nil
+}
+
+func (r *PerformanceProfileReconciler) getKubeletConditionsByProfile(profile *performancev2.PerformanceProfile) ([]conditionsv1.Condition, error) {
+ name := components.GetComponentName(profile.Name, components.ComponentNamePrefix)
+ kc, err := r.getKubeletConfig(name)
+
+ // do not drop an error when kubelet config does not exist
+ if errors.IsNotFound(err) {
+ return nil, nil
+ }
+
+ if err != nil {
+ return nil, err
+ }
+
+ latestCondition := getLatestKubeletConfigCondition(kc.Status.Conditions)
+ if latestCondition == nil {
+ return nil, nil
+ }
+
+ if latestCondition.Type != mcov1.KubeletConfigFailure {
+ return nil, nil
+ }
+
+ return r.getDegradedConditions(conditionKubeletFailed, latestCondition.Message), nil
+}
+
+func (r *PerformanceProfileReconciler) getTunedConditionsByProfile(profile *performancev2.PerformanceProfile) ([]conditionsv1.Condition, error) {
+	tunedProfileList := &tunedv1.ProfileList{}
+	if err := r.List(context.TODO(), tunedProfileList); err != nil {
+		klog.Errorf("Cannot list Tuned Profiles to match with profile %q : %v", profile.Name, err)
+		return nil, err
+	}
+
+	selector := labels.SelectorFromSet(profile.Spec.NodeSelector)
+	nodes := &corev1.NodeList{}
+	if err := r.List(context.TODO(), nodes, &client.ListOptions{LabelSelector: selector}); err != nil {
+		return nil, err
+	}
+
+	// remove Tuned profiles that are not associated with this performance profile;
+	// a Tuned profile's name and its node's name are expected to be equal
+	filtered := removeUnMatchedTunedProfiles(nodes.Items, tunedProfileList.Items)
+	message := bytes.Buffer{}
+	for _, tunedProfile := range filtered {
+		isDegraded := false
+		isApplied := true
+		var tunedDegradedCondition *tunedv1.ProfileStatusCondition
+
+		for i, condition := range tunedProfile.Status.Conditions {
+			if (condition.Type == tunedv1.TunedDegraded) && condition.Status == corev1.ConditionTrue {
+				isDegraded = true
+				tunedDegradedCondition = &tunedProfile.Status.Conditions[i] // not &condition: the loop variable is reused every iteration
+			}
+
+			if (condition.Type == tunedv1.TunedProfileApplied) && condition.Status == corev1.ConditionFalse {
+				isApplied = false
+			}
+		}
+		// both conditions must hold, since there is a scenario where
+		// the Degraded and Applied conditions are true at the same time
+		if isDegraded && !isApplied {
+			if len(tunedDegradedCondition.Reason) > 0 {
+				message.WriteString("Tuned " + tunedProfile.GetName() + " Degraded Reason: " + tunedDegradedCondition.Reason + ".\n")
+			}
+			if len(tunedDegradedCondition.Message) > 0 {
+				message.WriteString("Tuned " + tunedProfile.GetName() + " Degraded Message: " + tunedDegradedCondition.Message + ".\n")
+			}
+		}
+	}
+
+	messageString := message.String()
+	if len(messageString) == 0 {
+		return nil, nil
+	}
+
+	return r.getDegradedConditions(conditionReasonTunedDegraded, messageString), nil
+}
+
+func getLatestKubeletConfigCondition(conditions []mcov1.KubeletConfigCondition) *mcov1.KubeletConfigCondition {
+ var latestCondition *mcov1.KubeletConfigCondition
+ for i := 0; i < len(conditions); i++ {
+ if latestCondition == nil || latestCondition.LastTransitionTime.Before(&conditions[i].LastTransitionTime) {
+ latestCondition = &conditions[i]
+ }
+ }
+ return latestCondition
+}
+
+func removeUnMatchedTunedProfiles(nodes []corev1.Node, profiles []tunedv1.Profile) []tunedv1.Profile {
+ filteredProfiles := make([]tunedv1.Profile, 0)
+ for _, profile := range profiles {
+ for _, node := range nodes {
+ if profile.Name == node.Name {
+ filteredProfiles = append(filteredProfiles, profile)
+ break
+ }
+ }
+ }
+ return filteredProfiles
+}
diff --git a/pkg/pao/profilecreator/helper.go b/pkg/pao/profilecreator/helper.go
new file mode 100644
index 000000000..a3ee64c63
--- /dev/null
+++ b/pkg/pao/profilecreator/helper.go
@@ -0,0 +1,18 @@
+package profilecreator
+
+import (
+ v1 "k8s.io/api/core/v1"
+)
+
+// newTestNode returns a bare Node object carrying only the given name.
+func newTestNode(nodeName string) *v1.Node {
+	node := v1.Node{}
+	node.Name = nodeName
+	return &node
+}
+// newTestNodeList collects the given nodes into a freshly allocated slice.
+func newTestNodeList(nodes ...*v1.Node) []*v1.Node {
+	// append to an empty slice copies the variadic arguments, matching the
+	// previous element-by-element loop.
+	return append([]*v1.Node{}, nodes...)
+}
diff --git a/pkg/pao/profilecreator/mcp.go b/pkg/pao/profilecreator/mcp.go
new file mode 100644
index 000000000..2ef37269e
--- /dev/null
+++ b/pkg/pao/profilecreator/mcp.go
@@ -0,0 +1,197 @@
+package profilecreator
+
+import (
+ "fmt"
+ "strings"
+
+ log "github.com/sirupsen/logrus"
+
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/labels"
+
+ mcfgv1 "github.com/openshift/machine-config-operator/pkg/apis/machineconfiguration.openshift.io/v1"
+)
+
+// GetMCPSelector returns a label that is unique to the target pool, error otherwise
+func GetMCPSelector(pool *mcfgv1.MachineConfigPool, clusterPools []*mcfgv1.MachineConfigPool) (map[string]string, error) {
+	mcpSelector := make(map[string]string)
+
+	// Collect every label on the pool whose key/value pair is not shared
+	// with any other pool in the cluster.
+	for key, value := range pool.Labels {
+		unique := true
+		for _, mcp := range clusterPools {
+			if mcp.Name == pool.Name {
+				continue
+			}
+			if mcpValue, found := mcp.Labels[key]; found && value == mcpValue {
+				unique = false
+				break
+			}
+		}
+		if unique {
+			mcpSelector[key] = value
+		}
+	}
+
+	if len(mcpSelector) == 0 {
+		return nil, fmt.Errorf("can't find a unique label for '%s' MCP", pool.Name)
+	}
+
+	// Narrow multiple candidates down to exactly one. Prefer a label whose
+	// key ends with the MCP name; otherwise fall back to the
+	// lexicographically smallest key so the choice is deterministic
+	// (ranging over a Go map is randomized, so the previous "pick any"
+	// loop could return a different selector on every invocation).
+	if len(mcpSelector) > 1 {
+		chosenKey := ""
+		for key := range mcpSelector {
+			if strings.HasSuffix(key, pool.Name) {
+				chosenKey = key
+				break
+			}
+			if chosenKey == "" || key < chosenKey {
+				chosenKey = key
+			}
+		}
+		mcpSelector = map[string]string{chosenKey: mcpSelector[chosenKey]}
+	}
+
+	return mcpSelector, nil
+}
+
+// GetNodesForPool returns the nodes belonging to the input mcp
+// Adapted (including dependencies) from:
+// https://github.com/openshift/machine-config-operator/blob/e4aa3bc5a405c67fb112b24e24b2c372457b3358/pkg/controller/node/node_controller.go#L745
+func GetNodesForPool(pool *mcfgv1.MachineConfigPool, clusterPools []*mcfgv1.MachineConfigPool, clusterNodes []*corev1.Node) ([]*corev1.Node, error) {
+	poolNodeSelector, err := metav1.LabelSelectorAsSelector(pool.Spec.NodeSelector)
+	if err != nil {
+		return nil, fmt.Errorf("invalid label selector: %v", err)
+	}
+
+	var nodes []*corev1.Node
+	for _, candidate := range clusterNodes {
+		primary, err := getPrimaryPoolForNode(candidate, clusterPools)
+		if err != nil {
+			log.Warningf("can't get pool for node %q: %v", candidate.Name, err)
+			continue
+		}
+		// Skip nodes not managed by any pool, or owned by a different pool.
+		if primary == nil || primary.Name != pool.Name {
+			continue
+		}
+		// Skip nodes made unschedulable by a NoSchedule taint that the
+		// pool's own node selector matches.
+		schedulable := true
+		for _, taint := range candidate.Spec.Taints {
+			if taint.Effect == corev1.TaintEffectNoSchedule && poolNodeSelector.Matches(labels.Set{taint.Key: taint.Value}) {
+				schedulable = false
+				break
+			}
+		}
+		if schedulable {
+			nodes = append(nodes, candidate)
+		}
+	}
+	return nodes, nil
+}
+
+// getPrimaryPoolForNode uses getPoolsForNode and returns the first pool,
+// which is the one the node targets; nil when the node is not managed by
+// any pool.
+func getPrimaryPoolForNode(node *corev1.Node, clusterPools []*mcfgv1.MachineConfigPool) (*mcfgv1.MachineConfigPool, error) {
+	pools, err := getPoolsForNode(node, clusterPools)
+	if err != nil {
+		return nil, err
+	}
+	// Guard on the length rather than on nil: an empty-but-non-nil slice
+	// would have slipped past the previous nil check and panicked on
+	// pools[0].
+	if len(pools) == 0 {
+		return nil, nil
+	}
+	return pools[0], nil
+}
+
+// getPoolsForNode chooses the MachineConfigPools that should be used for a given node.
+// It disambiguates in the case where e.g. a node has both master/worker roles applied,
+// and where a custom role may be used. It returns a slice of all the pools the node belongs to.
+// It also ignores the Windows nodes.
+func getPoolsForNode(node *corev1.Node, clusterPools []*mcfgv1.MachineConfigPool) ([]*mcfgv1.MachineConfigPool, error) {
+	if isWindows(node) {
+		// This is not an error, is this a Windows Node and it won't be managed by MCO. We're explicitly logging
+		// here at a high level to disambiguate this from other pools = nil scenario
+		log.Infof("Node %v is a windows node so won't be managed by MCO", node.Name)
+		return nil, nil
+	}
+
+	// Collect every pool whose node selector matches this node's labels.
+	var pools []*mcfgv1.MachineConfigPool
+	for _, p := range clusterPools {
+		selector, err := metav1.LabelSelectorAsSelector(p.Spec.NodeSelector)
+		if err != nil {
+			return nil, fmt.Errorf("invalid label selector: %v", err)
+		}
+
+		// If a pool with a nil or empty selector creeps in, it should match nothing, not everything.
+		if selector.Empty() || !selector.Matches(labels.Set(node.Labels)) {
+			continue
+		}
+
+		pools = append(pools, p)
+	}
+
+	if len(pools) == 0 {
+		// This is not an error, as there might be nodes in cluster that are not managed by machineconfigpool.
+		return nil, nil
+	}
+
+	// Bucket the matching pools into the three possible kinds: the built-in
+	// "master" and "worker" pools, and any user-defined (custom) pools.
+	var master, worker *mcfgv1.MachineConfigPool
+	var custom []*mcfgv1.MachineConfigPool
+	for _, pool := range pools {
+		if pool.Name == "master" {
+			master = pool
+		} else if pool.Name == "worker" {
+			worker = pool
+		} else {
+			custom = append(custom, pool)
+		}
+	}
+
+	// Precedence: a single custom pool wins (optionally backed by worker);
+	// otherwise master wins over worker; otherwise plain worker.
+	if len(custom) > 1 {
+		return nil, fmt.Errorf("node %s belongs to %d custom roles, cannot proceed with this Node", node.Name, len(custom))
+	} else if len(custom) == 1 {
+		// We don't support making custom pools for masters
+		if master != nil {
+			return nil, fmt.Errorf("node %s has both master role and custom role %s", node.Name, custom[0].Name)
+		}
+		// One custom role, let's use its pool
+		pls := []*mcfgv1.MachineConfigPool{custom[0]}
+		if worker != nil {
+			pls = append(pls, worker)
+		}
+		return pls, nil
+	} else if master != nil {
+		// In the case where a node is both master/worker, have it live under
+		// the master pool. This occurs in CodeReadyContainers and general
+		// "single node" deployments, which one may want to do for testing bare
+		// metal, etc.
+		return []*mcfgv1.MachineConfigPool{master}, nil
+	}
+
+	// Otherwise, it's a worker with no custom roles.
+	return []*mcfgv1.MachineConfigPool{worker}, nil
+}
+
+// isWindows checks if given node is a Windows node or a Linux node
+func isWindows(node *corev1.Node) bool {
+	windowsOsValue := "windows"
+	if value, ok := node.ObjectMeta.Labels["kubernetes.io/os"]; ok {
+		return value == windowsOsValue
+	}
+	// All the nodes should have an OS label populated by kubelet; when the
+	// label is missing we assume a Linux node, to maintain backwards
+	// compatibility with clusters that predate the label. (The previous
+	// comment claimed "true" was returned here, contradicting the code.)
+	return false
+}
diff --git a/pkg/pao/profilecreator/profilecreator.go b/pkg/pao/profilecreator/profilecreator.go
new file mode 100644
index 000000000..dd556d69e
--- /dev/null
+++ b/pkg/pao/profilecreator/profilecreator.go
@@ -0,0 +1,556 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * Copyright 2021 Red Hat, Inc.
+ */
+
+package profilecreator
+
+import (
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path"
+ "path/filepath"
+ "reflect"
+ "sort"
+ "strings"
+
+ "github.com/jaypipes/ghw"
+ "github.com/jaypipes/ghw/pkg/cpu"
+ "github.com/jaypipes/ghw/pkg/option"
+ "github.com/jaypipes/ghw/pkg/topology"
+ log "github.com/sirupsen/logrus"
+
+ k8syaml "k8s.io/apimachinery/pkg/util/yaml"
+ "k8s.io/kubernetes/pkg/kubelet/cm/cpuset"
+
+ machineconfigv1 "github.com/openshift/machine-config-operator/pkg/apis/machineconfiguration.openshift.io/v1"
+ v1 "k8s.io/api/core/v1"
+)
+
+const (
+ // ClusterScopedResources defines the subpath, relative to the top-level must-gather directory.
+ // A top-level must-gather directory is of the following format:
+ // must-gather-dir/quay-io-openshift-kni-performance-addon-operator-must-gather-sha256-
+ // Here we find the cluster-scoped definitions saved by must-gather
+ ClusterScopedResources = "cluster-scoped-resources"
+ // CoreNodes defines the subpath, relative to ClusterScopedResources, on which we find node-specific data
+ CoreNodes = "core/nodes"
+ // MCPools defines the subpath, relative to ClusterScopedResources, on which we find the machine config pool definitions
+ MCPools = "machineconfiguration.openshift.io/machineconfigpools"
+ // YAMLSuffix is the extension of the yaml files saved by must-gather
+ YAMLSuffix = ".yaml"
+ // Nodes defines the subpath, relative to top-level must-gather directory, on which we find node-specific data
+ Nodes = "nodes"
+ // SysInfoFileName defines the name of the file where ghw snapshot is stored
+ SysInfoFileName = "sysinfo.tgz"
+ // noSMTKernelArg is the kernel arg value to disable SMT in a system
+ noSMTKernelArg = "nosmt"
+ // allCores correspond to the value when all the processorCores need to be added to the generated CPUset
+ allCores = -1
+)
+
+var (
+ // ValidPowerConsumptionModes are a set of valid power consumption modes
+ // default => no args
+ // low-latency => "nmi_watchdog=0", "audit=0", "mce=off"
+ // ultra-low-latency: low-latency values + "processor.max_cstate=1", "intel_idle.max_cstate=0", "idle=poll"
+ // For more information on CPU "C-states" please refer to https://gist.github.com/wmealing/2dd2b543c4d3cff6cab7
+ ValidPowerConsumptionModes = []string{"default", "low-latency", "ultra-low-latency"}
+ lowLatencyKernelArgs = map[string]bool{"nmi_watchdog=0": true, "audit=0": true, "mce=off": true}
+ ultraLowLatencyKernelArgs = map[string]bool{"processor.max_cstate=1": true, "intel_idle.max_cstate=0": true, "idle=poll": true}
+)
+
+// getMustGatherFullPathsWithFilter walks mustGatherPath and returns the single
+// path that ends with suffix, skipping any path containing filter. It errors
+// when there is no match or more than one match.
+func getMustGatherFullPathsWithFilter(mustGatherPath string, suffix string, filter string) (string, error) {
+	var paths []string
+
+	// don't assume directory names, only look for the suffix, filter out files having "filter" in their names
+	err := filepath.Walk(mustGatherPath, func(path string, info os.FileInfo, err error) error {
+		// Propagate filesystem errors instead of silently skipping entries
+		// (the previous version ignored the callback's err argument).
+		if err != nil {
+			return err
+		}
+		if strings.HasSuffix(path, suffix) {
+			if len(filter) == 0 || !strings.Contains(path, filter) {
+				paths = append(paths, path)
+			}
+		}
+		return nil
+	})
+	if err != nil {
+		return "", fmt.Errorf("failed to get the path mustGatherPath:%s, suffix:%s %v", mustGatherPath, suffix, err)
+	}
+
+	if len(paths) == 0 {
+		return "", fmt.Errorf("no match for the specified must gather directory path: %s and suffix: %s", mustGatherPath, suffix)
+	}
+	if len(paths) > 1 {
+		log.Infof("Multiple matches for the specified must gather directory path: %s and suffix: %s", mustGatherPath, suffix)
+		return "", fmt.Errorf("Multiple matches for the specified must gather directory path: %s and suffix: %s.\n Expected only one performance-addon-operator-must-gather* directory, please check the must-gather tarball", mustGatherPath, suffix)
+	}
+	// returning the single matching path
+	return paths[0], nil
+}
+
+// getMustGatherFullPaths returns the unique path under mustGatherPath that
+// ends with suffix, with no filtering applied.
+func getMustGatherFullPaths(mustGatherPath string, suffix string) (string, error) {
+	return getMustGatherFullPathsWithFilter(mustGatherPath, suffix, "")
+}
+
+// getNode decodes and returns the Node object named nodeName from the
+// must-gather directory tree.
+func getNode(mustGatherDirPath, nodeName string) (*v1.Node, error) {
+	var node v1.Node
+	nodePathSuffix := path.Join(ClusterScopedResources, CoreNodes, nodeName)
+	// Named nodePath (not "path") so the "path" package is not shadowed.
+	nodePath, err := getMustGatherFullPaths(mustGatherDirPath, nodePathSuffix)
+	if err != nil {
+		// Previous message wrongly said "MachineConfigPool" for a Node lookup.
+		return nil, fmt.Errorf("failed to get Node for %s: %v", nodeName, err)
+	}
+
+	src, err := os.Open(nodePath)
+	if err != nil {
+		return nil, fmt.Errorf("failed to open %q: %v", nodePath, err)
+	}
+	defer src.Close()
+
+	dec := k8syaml.NewYAMLOrJSONDecoder(src, 1024)
+	if err := dec.Decode(&node); err != nil {
+		return nil, fmt.Errorf("failed to decode %q: %v", nodePath, err)
+	}
+	return &node, nil
+}
+
+// GetNodeList returns the list of nodes using the Node YAMLs stored in Must Gather
+func GetNodeList(mustGatherDirPath string) ([]*v1.Node, error) {
+	machines := make([]*v1.Node, 0)
+
+	nodePathSuffix := path.Join(ClusterScopedResources, CoreNodes)
+	nodePath, err := getMustGatherFullPaths(mustGatherDirPath, nodePathSuffix)
+	if err != nil {
+		return nil, fmt.Errorf("failed to get Nodes from must gather directory: %v", err)
+	}
+	if nodePath == "" {
+		return nil, fmt.Errorf("failed to get Nodes from must gather directory: %v", err)
+	}
+
+	entries, err := ioutil.ReadDir(nodePath)
+	if err != nil {
+		return nil, fmt.Errorf("failed to list mustGatherPath directories: %v", err)
+	}
+	// Each directory entry is named after a node; load the corresponding
+	// Node object. (Distinct variable names avoid shadowing the entry with
+	// the decoded node, as the previous version did.)
+	for _, entry := range entries {
+		nodeName := entry.Name()
+		parsedNode, err := getNode(mustGatherDirPath, nodeName)
+		if err != nil {
+			return nil, fmt.Errorf("failed to get Nodes %s: %v", nodeName, err)
+		}
+		machines = append(machines, parsedNode)
+	}
+	return machines, nil
+}
+
+// GetMCPList returns the list of MCPs using the mcp YAMLs stored in Must Gather
+func GetMCPList(mustGatherDirPath string) ([]*machineconfigv1.MachineConfigPool, error) {
+	pools := make([]*machineconfigv1.MachineConfigPool, 0)
+
+	mcpPathSuffix := path.Join(ClusterScopedResources, MCPools)
+	mcpPath, err := getMustGatherFullPaths(mustGatherDirPath, mcpPathSuffix)
+	if err != nil {
+		return nil, fmt.Errorf("failed to get MCPs: %v", err)
+	}
+	if mcpPath == "" {
+		return nil, fmt.Errorf("failed to get MCPs path: %v", err)
+	}
+
+	mcpFiles, err := ioutil.ReadDir(mcpPath)
+	if err != nil {
+		return nil, fmt.Errorf("failed to list mustGatherPath directories: %v", err)
+	}
+	for _, mcpFile := range mcpFiles {
+		// Derive the MCP name from the file name by dropping the extension.
+		mcpName := strings.TrimSuffix(mcpFile.Name(), filepath.Ext(mcpFile.Name()))
+
+		mcp, err := GetMCP(mustGatherDirPath, mcpName)
+		if err != nil {
+			// master pool relevant only when pods can be scheduled on masters, e.g. SNO;
+			// tolerate a missing/broken master MCP, but skip it instead of
+			// appending a nil pool as before (a nil pool would panic later
+			// consumers such as getPoolsForNode).
+			if mcpName != "master" {
+				return nil, fmt.Errorf("can't obtain MCP %s: %v", mcpName, err)
+			}
+			continue
+		}
+		pools = append(pools, mcp)
+	}
+	return pools, nil
+}
+
+// GetMCP returns an MCP object corresponding to a specified MCP Name
+func GetMCP(mustGatherDirPath, mcpName string) (*machineconfigv1.MachineConfigPool, error) {
+	mcpPathSuffix := path.Join(ClusterScopedResources, MCPools, mcpName+YAMLSuffix)
+	mcpPath, err := getMustGatherFullPaths(mustGatherDirPath, mcpPathSuffix)
+	if err != nil {
+		return nil, fmt.Errorf("failed to obtain MachineConfigPool %s: %v", mcpName, err)
+	}
+	if mcpPath == "" {
+		return nil, fmt.Errorf("failed to obtain MachineConfigPool, mcp:%s does not exist: %v", mcpName, err)
+	}
+
+	src, err := os.Open(mcpPath)
+	if err != nil {
+		return nil, fmt.Errorf("failed to open %q: %v", mcpPath, err)
+	}
+	defer src.Close()
+
+	// Decode the YAML (or JSON) definition saved by must-gather into the
+	// typed MachineConfigPool object.
+	var mcp machineconfigv1.MachineConfigPool
+	if err := k8syaml.NewYAMLOrJSONDecoder(src, 1024).Decode(&mcp); err != nil {
+		return nil, fmt.Errorf("failed to decode %q: %v", mcpPath, err)
+	}
+	return &mcp, nil
+}
+
+// NewGHWHandler is a handler to use ghw options corresponding to a node
+func NewGHWHandler(mustGatherDirPath string, node *v1.Node) (*GHWHandler, error) {
+	nodeName := node.GetName()
+	nodePathSuffix := path.Join(Nodes)
+	nodepath, err := getMustGatherFullPathsWithFilter(mustGatherDirPath, nodePathSuffix, ClusterScopedResources)
+	if err != nil {
+		return nil, fmt.Errorf("can't obtain the node path %s: %v", nodeName, err)
+	}
+	_, err = os.Stat(path.Join(nodepath, nodeName, SysInfoFileName))
+	if err != nil {
+		// Fixed argument order: the first %s is the path, the second the
+		// node name (they were swapped before).
+		return nil, fmt.Errorf("can't obtain the path: %s for node %s: %v", nodepath, nodeName, err)
+	}
+	// Point ghw at the snapshot tarball captured by must-gather instead of
+	// the live host.
+	options := ghw.WithSnapshot(ghw.SnapshotOptions{
+		Path: path.Join(nodepath, nodeName, SysInfoFileName),
+	})
+	ghwHandler := &GHWHandler{snapShotOptions: options, Node: node}
+	return ghwHandler, nil
+}
+
+// GHWHandler is a wrapper around ghw to get the API object
+type GHWHandler struct {
+	// snapShotOptions points ghw at the sysinfo snapshot captured by
+	// must-gather instead of the live host.
+	snapShotOptions *option.Option
+	// Node is the node whose snapshot this handler reads.
+	Node *v1.Node
+}
+
+// CPU returns a CPUInfo struct that contains information about the CPUs on the host system
+// (read from the must-gather ghw snapshot, not from the live host).
+func (ghwHandler GHWHandler) CPU() (*cpu.Info, error) {
+	return ghw.CPU(ghwHandler.snapShotOptions)
+}
+
+// SortedTopology returns a TopologyInfo struct that contains information about the Topology sorted by numa ids and cpu ids on the host system
+func (ghwHandler GHWHandler) SortedTopology() (*topology.Info, error) {
+	topologyInfo, err := ghw.Topology(ghwHandler.snapShotOptions)
+	if err != nil {
+		return nil, fmt.Errorf("can't obtain topology info from GHW snapshot: %v", err)
+	}
+	// Order NUMA cells by their numeric ID.
+	sort.Slice(topologyInfo.Nodes, func(a, b int) bool {
+		return topologyInfo.Nodes[a].ID < topologyInfo.Nodes[b].ID
+	})
+	for _, numaNode := range topologyInfo.Nodes {
+		// Within each core, order the logical processor IDs ascending.
+		for _, processorCore := range numaNode.Cores {
+			lps := processorCore.LogicalProcessors
+			sort.Slice(lps, func(a, b int) bool { return lps[a] < lps[b] })
+		}
+		// Order cores by their first (lowest) logical processor ID.
+		cores := numaNode.Cores
+		sort.Slice(cores, func(a, b int) bool {
+			return cores[a].LogicalProcessors[0] < cores[b].LogicalProcessors[0]
+		})
+	}
+	return topologyInfo, nil
+}
+
+// topologyHTDisabled returns topologyinfo in case Hyperthreading needs to be disabled.
+// It receives a pointer to Topology.Info and deletes logicalprocessors from individual cores.
+// The behaviour of this function depends on ghw data representation.
+func topologyHTDisabled(info *topology.Info) *topology.Info {
+ disabledHTTopology := &topology.Info{
+ Architecture: info.Architecture,
+ }
+ newNodes := []*topology.Node{}
+ for _, node := range info.Nodes {
+ var newNode *topology.Node
+ cores := []*cpu.ProcessorCore{}
+ for _, processorCore := range node.Cores {
+ newCore := cpu.ProcessorCore{ID: processorCore.ID,
+ Index: processorCore.Index,
+ NumThreads: 1,
+ }
+ // LogicalProcessors is a slice of ints representing the logical processor IDs assigned to
+ // a processing unit for a core. GHW API gurantees that the logicalProcessors correspond
+ // to hyperthread pairs and in the code below we select only the first hyperthread (id=0)
+ // of the available logical processors.
+ for id, logicalProcessor := range processorCore.LogicalProcessors {
+ // Please refer to https://www.kernel.org/doc/Documentation/x86/topology.txt for more information on
+ // x86 hardware topology. This document clarifies the main aspects of x86 topology modelling and
+ // representation in the linux kernel and explains why we select id=0 for obtaining the first
+ // hyperthread (logical core).
+ if id == 0 {
+ newCore.LogicalProcessors = []int{logicalProcessor}
+ cores = append(cores, &newCore)
+ }
+ }
+ newNode = &topology.Node{Cores: cores,
+ ID: node.ID,
+ }
+ }
+ newNodes = append(newNodes, newNode)
+ disabledHTTopology.Nodes = newNodes
+ }
+ return disabledHTTopology
+}
+
+// GetReservedAndIsolatedCPUs returns Reserved and Isolated CPUs
+func (ghwHandler GHWHandler) GetReservedAndIsolatedCPUs(reservedCPUCount int, splitReservedCPUsAcrossNUMA bool, disableHTFlag bool) (cpuset.CPUSet, cpuset.CPUSet, error) {
+	cpuInfo, err := ghwHandler.CPU()
+	if err != nil {
+		return cpuset.CPUSet{}, cpuset.CPUSet{}, fmt.Errorf("can't obtain CPU info from GHW snapshot: %v", err)
+	}
+
+	// At least one CPU must remain isolated, hence the upper bound of TotalThreads-1.
+	if reservedCPUCount <= 0 || reservedCPUCount >= int(cpuInfo.TotalThreads) {
+		return cpuset.CPUSet{}, cpuset.CPUSet{}, fmt.Errorf("please specify the reserved CPU count in the range [1,%d]", cpuInfo.TotalThreads-1)
+	}
+	topologyInfo, err := ghwHandler.SortedTopology()
+	if err != nil {
+		return cpuset.CPUSet{}, cpuset.CPUSet{}, fmt.Errorf("can't obtain Topology Info from GHW snapshot: %v", err)
+	}
+	htEnabled, err := ghwHandler.IsHyperthreadingEnabled()
+	if err != nil {
+		return cpuset.CPUSet{}, cpuset.CPUSet{}, fmt.Errorf("can't determine if Hyperthreading is enabled or not: %v", err)
+	}
+	// currently HT is enabled on the system and the user wants to disable HT:
+	// from here on treat the topology as if each core had a single thread.
+	if htEnabled && disableHTFlag {
+		htEnabled = false
+		log.Infof("Currently hyperthreading is enabled and the performance profile will disable it")
+		topologyInfo = topologyHTDisabled(topologyInfo)
+
+	}
+	// Log the NUMA layout so users can sanity-check the generated CPU sets.
+	log.Infof("NUMA cell(s): %d", len(topologyInfo.Nodes))
+	totalCPUs := 0
+	for id, node := range topologyInfo.Nodes {
+		coreList := []int{}
+		for _, core := range node.Cores {
+			coreList = append(coreList, core.LogicalProcessors...)
+		}
+		log.Infof("NUMA cell %d : %v", id, coreList)
+		totalCPUs += len(coreList)
+	}
+
+	log.Infof("CPU(s): %d", totalCPUs)
+
+	// Delegate to the requested allocation strategy.
+	if splitReservedCPUsAcrossNUMA {
+		return ghwHandler.getCPUsSplitAcrossNUMA(reservedCPUCount, htEnabled, topologyInfo.Nodes)
+	}
+	return ghwHandler.getCPUsSequentially(reservedCPUCount, htEnabled, topologyInfo.Nodes)
+}
+
+// cpuAccumulator incrementally builds a CPUSet while tracking how many CPUs
+// have been added so far.
+type cpuAccumulator struct {
+	// builder collects the accepted logical CPU IDs.
+	builder *cpuset.Builder
+	// count is the number of CPUs added so far (used to honor a budget).
+	count int
+}
+
+// newCPUAccumulator returns an empty accumulator ready for AddCores calls.
+func newCPUAccumulator() *cpuAccumulator {
+	return &cpuAccumulator{
+		builder: cpuset.NewBuilder(),
+	}
+}
+
+// AddCores adds logical cores from the slice of *cpu.ProcessorCore to a CPUset till the cpuset size is equal to the max value specified
+// In case the max is specified as allCores, all the cores from the slice of *cpu.ProcessorCore are added to the CPUSet
+func (ca *cpuAccumulator) AddCores(max int, cores []*cpu.ProcessorCore) {
+	for _, processorCore := range cores {
+		for _, logicalCore := range processorCore.LogicalProcessors {
+			// Stop as soon as the budget is exhausted instead of iterating
+			// over all remaining cores with a false condition, as before.
+			if max != allCores && ca.count >= max {
+				return
+			}
+			ca.builder.Add(logicalCore)
+			ca.count++
+		}
+	}
+}
+
+// Result returns the CPUSet accumulated so far.
+func (ca *cpuAccumulator) Result() cpuset.CPUSet {
+	return ca.builder.Result()
+}
+
+// getCPUsSplitAcrossNUMA returns Reserved and Isolated CPUs split across NUMA nodes
+// We identify the right number of CPUs that need to be allocated per NUMA node, meaning reservedPerNuma + (the additional number based on the remainder and the NUMA node)
+// E.g. If the user requests 15 reserved cpus and we have 4 numa nodes, we find reservedPerNuma in this case is 3 and remainder = 3.
+// For each numa node we find a max which keeps track of the cumulative resources that should be allocated for each NUMA node:
+// max = (numaID+1)*reservedPerNuma + (numaNodeNum - remainder)
+// For NUMA node 0 max = (0+1)*3 + 4-3 = 4 remainder is decremented => remainder is 2
+// For NUMA node 1 max = (1+1)*3 + 4-2 = 8 remainder is decremented => remainder is 1
+// For NUMA node 2 max = (2+1)*3 + 4-1 = 12 remainder is decremented => remainder is 0
+// For NUMA Node 3 remainder = 0 so max = 12 + 3 = 15.
+func (ghwHandler GHWHandler) getCPUsSplitAcrossNUMA(reservedCPUCount int, htEnabled bool, topologyInfoNodes []*topology.Node) (cpuset.CPUSet, cpuset.CPUSet, error) {
+	reservedCPUs := newCPUAccumulator()
+	var isolatedCPUSet cpuset.CPUSet
+	numaNodeNum := len(topologyInfoNodes)
+
+	max := 0
+	reservedPerNuma := reservedCPUCount / numaNodeNum
+	remainder := reservedCPUCount % numaNodeNum
+	if remainder != 0 {
+		log.Warnf("The reserved CPUs cannot be split equally across NUMA Nodes")
+	}
+	for numaID, node := range topologyInfoNodes {
+		// While a remainder is pending, each node's cumulative budget absorbs
+		// one extra CPU via the (numaNodeNum - remainder) term; afterwards the
+		// budget simply grows by reservedPerNuma per node.
+		if remainder != 0 {
+			max = (numaID+1)*reservedPerNuma + (numaNodeNum - remainder)
+			remainder--
+		} else {
+			max = max + reservedPerNuma
+		}
+		// With SMT enabled, an odd budget would split a hyperthread pair.
+		if max%2 != 0 && htEnabled {
+			return reservedCPUs.Result(), isolatedCPUSet, fmt.Errorf("can't allocate odd number of CPUs from a NUMA Node")
+		}
+		reservedCPUs.AddCores(max, node.Cores)
+	}
+	// Isolated = every CPU in the topology minus the reserved set.
+	totalCPUSet := totalCPUSetFromTopology(topologyInfoNodes)
+	reservedCPUSet := reservedCPUs.Result()
+	isolatedCPUSet = totalCPUSet.Difference(reservedCPUSet)
+	return reservedCPUSet, isolatedCPUSet, nil
+}
+
+// getCPUsSequentially returns Reserved and Isolated CPUs sequentially
+func (ghwHandler GHWHandler) getCPUsSequentially(reservedCPUCount int, htEnabled bool, topologyInfoNodes []*topology.Node) (cpuset.CPUSet, cpuset.CPUSet, error) {
+ reservedCPUs := newCPUAccumulator()
+ var isolatedCPUSet cpuset.CPUSet
+ if reservedCPUCount%2 != 0 && htEnabled {
+ return reservedCPUs.Result(), isolatedCPUSet, fmt.Errorf("can't allocate odd number of CPUs from a NUMA Node")
+ }
+ for _, node := range topologyInfoNodes {
+ reservedCPUs.AddCores(reservedCPUCount, node.Cores)
+ }
+ totalCPUSet := totalCPUSetFromTopology(topologyInfoNodes)
+ reservedCPUSet := reservedCPUs.Result()
+ isolatedCPUSet = totalCPUSet.Difference(reservedCPUSet)
+ return reservedCPUSet, isolatedCPUSet, nil
+}
+
+// totalCPUSetFromTopology accumulates every logical CPU present in the given
+// NUMA nodes into a single CPUSet.
+func totalCPUSetFromTopology(topologyInfoNodes []*topology.Node) cpuset.CPUSet {
+	acc := newCPUAccumulator()
+	for _, numaNode := range topologyInfoNodes {
+		// allCores disables the size cap so every core gets included.
+		acc.AddCores(allCores, numaNode.Cores)
+	}
+	return acc.Result()
+}
+
+// IsHyperthreadingEnabled checks if hyperthreading is enabled on the system or not
+func (ghwHandler GHWHandler) IsHyperthreadingEnabled() (bool, error) {
+	cpuInfo, err := ghwHandler.CPU()
+	if err != nil {
+		return false, fmt.Errorf("can't obtain CPU Info from GHW snapshot: %v", err)
+	}
+	// Guard against an empty snapshot: indexing Processors[0] below would
+	// otherwise panic.
+	if len(cpuInfo.Processors) == 0 {
+		return false, fmt.Errorf("no processors found in the GHW snapshot")
+	}
+	// Since there is no way to disable flags per-processor (not system wide) we check the flags of the first available processor.
+	// A following implementation will leverage the /sys/devices/system/cpu/smt/active file which is the "standard" way to query HT.
+	return contains(cpuInfo.Processors[0].Capabilities, "ht"), nil
+}
+
+// contains reports whether str is present in the slice s.
+func contains(s []string, str string) bool {
+	for i := range s {
+		if s[i] == str {
+			return true
+		}
+	}
+	return false
+}
+
+// EnsureNodesHaveTheSameHardware returns an error if all the input nodes do not have the same hardware configuration
+func EnsureNodesHaveTheSameHardware(nodeHandlers []*GHWHandler) error {
+	if len(nodeHandlers) < 1 {
+		return fmt.Errorf("no suitable nodes to compare")
+	}
+
+	// The first node's topology is the reference all others are compared to.
+	firstHandle := nodeHandlers[0]
+	firstTopology, err := firstHandle.SortedTopology()
+	if err != nil {
+		return fmt.Errorf("can't obtain Topology info from GHW snapshot for %s: %v", firstHandle.Node.GetName(), err)
+	}
+
+	for _, handle := range nodeHandlers[1:] {
+		// NOTE: a stale "if err != nil" re-check of the pre-loop error was
+		// removed here; it could never fire and only obscured the flow.
+		topology, err := handle.SortedTopology()
+		if err != nil {
+			return fmt.Errorf("can't obtain Topology info from GHW snapshot for %s: %v", handle.Node.GetName(), err)
+		}
+		if err := ensureSameTopology(firstTopology, topology); err != nil {
+			return fmt.Errorf("nodes %s and %s have different topology: %v", firstHandle.Node.GetName(), handle.Node.GetName(), err)
+		}
+	}
+
+	return nil
+}
+
+// ensureSameTopology returns nil when the two topologies are identical
+// (architecture, NUMA node IDs, and per-node core layout), or an error
+// describing the first difference found.
+func ensureSameTopology(topology1, topology2 *topology.Info) error {
+	if topology1.Architecture != topology2.Architecture {
+		return fmt.Errorf("the architecture is different: %v vs %v", topology1.Architecture, topology2.Architecture)
+	}
+
+	if len(topology1.Nodes) != len(topology2.Nodes) {
+		return fmt.Errorf("the number of NUMA nodes differ: %v vs %v", len(topology1.Nodes), len(topology2.Nodes))
+	}
+
+	for i, node1 := range topology1.Nodes {
+		node2 := topology2.Nodes[i]
+		if node1.ID != node2.ID {
+			return fmt.Errorf("the NUMA node ids differ: %v vs %v", node1.ID, node2.ID)
+		}
+
+		cores1 := node1.Cores
+		cores2 := node2.Cores
+		if len(cores1) != len(cores2) {
+			// Fixed: the message previously printed the NUMA-node counts
+			// instead of the differing core counts.
+			return fmt.Errorf("the number of CPU cores in NUMA node %d differ: %v vs %v",
+				node1.ID, len(cores1), len(cores2))
+		}
+
+		for j, core1 := range cores1 {
+			if !reflect.DeepEqual(core1, cores2[j]) {
+				return fmt.Errorf("the CPU cores differ: %v vs %v", core1, cores2[j])
+			}
+		}
+	}
+
+	return nil
+}
+
+// GetAdditionalKernelArgs returns a set of kernel parameters based on the power mode
+func GetAdditionalKernelArgs(powerMode string, disableHT bool) []string {
+	argSet := make(map[string]bool)
+	switch powerMode {
+	case ValidPowerConsumptionModes[1]: // low-latency
+		for arg, exist := range lowLatencyKernelArgs {
+			argSet[arg] = exist
+		}
+	case ValidPowerConsumptionModes[2]: // ultra-low-latency
+		// union of the low-latency and ultra-low-latency argument sets
+		for arg, exist := range lowLatencyKernelArgs {
+			argSet[arg] = exist
+		}
+		for arg, exist := range ultraLowLatencyKernelArgs {
+			argSet[arg] = exist
+		}
+	}
+	// "default" (ValidPowerConsumptionModes[0]) contributes no args.
+
+	args := make([]string, 0, len(argSet)+1)
+	for arg, exist := range argSet {
+		if exist {
+			args = append(args, arg)
+		}
+	}
+	if disableHT {
+		args = append(args, noSMTKernelArg)
+	}
+	// Sort for a deterministic, reproducible argument list.
+	sort.Strings(args)
+	log.Infof("Additional Kernel Args based on the power consumption mode (%s):%v", powerMode, args)
+	return args
+}
diff --git a/pkg/pao/profilecreator/profilecreator_suite_test.go b/pkg/pao/profilecreator/profilecreator_suite_test.go
new file mode 100644
index 000000000..861f71249
--- /dev/null
+++ b/pkg/pao/profilecreator/profilecreator_suite_test.go
@@ -0,0 +1,13 @@
+package profilecreator
+
+import (
+ "testing"
+
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+)
+
+// TestProfileCreator wires the Ginkgo "Profile Creator Suite" into `go test`.
+func TestProfileCreator(t *testing.T) {
+	RegisterFailHandler(Fail)
+	RunSpecs(t, "Profile Creator Suite")
+}
diff --git a/pkg/pao/profilecreator/profilecreator_test.go b/pkg/pao/profilecreator/profilecreator_test.go
new file mode 100644
index 000000000..8fadc73cd
--- /dev/null
+++ b/pkg/pao/profilecreator/profilecreator_test.go
@@ -0,0 +1,844 @@
+package profilecreator
+
+import (
+ "path/filepath"
+ "sort"
+
+ "github.com/jaypipes/ghw/pkg/cpu"
+ "github.com/jaypipes/ghw/pkg/topology"
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+ "github.com/openshift/cluster-node-tuning-operator/pkg/pao/controller/performanceprofile/components"
+
+ mcfgv1 "github.com/openshift/machine-config-operator/pkg/apis/machineconfiguration.openshift.io/v1"
+ v1 "k8s.io/api/core/v1"
+)
+
+const (
+	// Fixture paths (relative to this package) to pre-collected must-gather
+	// snapshots: a multi-node bare-metal cluster and a Single-Node OpenShift
+	// (SNO) cluster, used as inputs for the specs below.
+	mustGatherDirPath    = "../../../test/e2e/pao/testdata/must-gather/must-gather.bare-metal"
+	mustGatherSNODirPath = "../../../test/e2e/pao/testdata/must-gather/must-gather.sno"
+)
+
+// Verifies MCP-to-node matching against the bare-metal must-gather fixture:
+// per-pool node selection and extraction of the MCP label selector.
+var _ = Describe("PerformanceProfileCreator: MCP and Node Matching", func() {
+	var nodes []*v1.Node
+	var mcps []*mcfgv1.MachineConfigPool
+
+	BeforeEach(func() {
+		var err error
+
+		// Load all Node and MachineConfigPool objects from the fixture dir.
+		nodes, err = GetNodeList(mustGatherDirPath)
+		Expect(err).ToNot(HaveOccurred())
+		mcps, err = GetMCPList(mustGatherDirPath)
+		Expect(err).ToNot(HaveOccurred())
+	})
+
+	Context("Identifying Nodes targeted by MCP", func() {
+		It("should find one machine in cnf-worker MCP", func() {
+			mcp, err := GetMCP(mustGatherDirPath, "worker-cnf")
+			Expect(err).ToNot(HaveOccurred())
+
+			matchedNodes, err := GetNodesForPool(mcp, mcps, nodes)
+			Expect(err).ToNot(HaveOccurred())
+			Expect(matchedNodes).ToNot(BeNil())
+			Expect(len(matchedNodes)).To(Equal(1))
+			Expect(matchedNodes[0].GetName()).To(Equal("worker1"))
+		})
+		It("should find 1 machine in worker MCP", func() {
+			mcp, err := GetMCP(mustGatherDirPath, "worker")
+			Expect(err).ToNot(HaveOccurred())
+
+			matchedNodes, err := GetNodesForPool(mcp, mcps, nodes)
+			Expect(err).ToNot(HaveOccurred())
+			Expect(matchedNodes).ToNot(BeNil())
+			Expect(len(matchedNodes)).To(Equal(1))
+			Expect(matchedNodes[0].GetName()).To(Equal("worker2"))
+		})
+	})
+
+	Context("Ensure the correct MCP selector is used", func() {
+		It("should detect the cnf-worker MCP selector", func() {
+			mcp, err := GetMCP(mustGatherDirPath, "worker-cnf")
+			Expect(err).ToNot(HaveOccurred())
+
+			mcpSelector, err := GetMCPSelector(mcp, mcps)
+			Expect(err).ToNot(HaveOccurred())
+			Expect(len(mcpSelector)).To(Equal(1))
+
+			// The selector map is asserted above to have exactly one entry,
+			// so the loop inspects that single key/value pair and breaks.
+			for key, value := range mcpSelector {
+				Expect(key).To(Equal("machineconfiguration.openshift.io/role"))
+				Expect(value).To(Equal("worker-cnf"))
+				break
+			}
+		})
+
+		It("should detect the worker MCP selector", func() {
+			mcp, err := GetMCP(mustGatherDirPath, "worker")
+			Expect(err).ToNot(HaveOccurred())
+
+			mcpSelector, err := GetMCPSelector(mcp, mcps)
+			Expect(err).ToNot(HaveOccurred())
+			Expect(len(mcpSelector)).To(Equal(1))
+
+			for key, value := range mcpSelector {
+				Expect(key).To(Equal("pools.operator.machineconfiguration.openshift.io/worker"))
+				Expect(value).To(Equal(""))
+				break
+			}
+		})
+	})
+})
+
+// Same MCP/node matching checks as above, but against the SNO fixture where
+// the single node belongs to the master pool and the worker pool is empty.
+var _ = Describe("PerformanceProfileCreator: MCP and Node Matching in SNO", func() {
+	var nodes []*v1.Node
+	var mcps []*mcfgv1.MachineConfigPool
+
+	BeforeEach(func() {
+		var err error
+
+		nodes, err = GetNodeList(mustGatherSNODirPath)
+		Expect(err).ToNot(HaveOccurred())
+		mcps, err = GetMCPList(mustGatherSNODirPath)
+		Expect(err).ToNot(HaveOccurred())
+	})
+
+	Context("Identifying Nodes targeted by MCP in SNO", func() {
+		It("should find no nodes in worker MCP", func() {
+			mcp, err := GetMCP(mustGatherSNODirPath, "worker")
+			Expect(err).ToNot(HaveOccurred())
+
+			matchedNodes, err := GetNodesForPool(mcp, mcps, nodes)
+			Expect(err).ToNot(HaveOccurred())
+			Expect(len(matchedNodes)).To(Equal(0))
+		})
+		It("should find 1 machine in master MCP", func() {
+			mcp, err := GetMCP(mustGatherSNODirPath, "master")
+			Expect(err).ToNot(HaveOccurred())
+
+			matchedNodes, err := GetNodesForPool(mcp, mcps, nodes)
+			Expect(err).ToNot(HaveOccurred())
+			Expect(matchedNodes).ToNot(BeNil())
+			Expect(len(matchedNodes)).To(Equal(1))
+			Expect(matchedNodes[0].GetName()).To(Equal("ocp47sno-master-0.demo.lab"))
+		})
+	})
+
+	Context("Ensure the correct MCP selector is used in SNO", func() {
+		It("should detect the worker MCP selector", func() {
+			mcp, err := GetMCP(mustGatherSNODirPath, "worker")
+			Expect(err).ToNot(HaveOccurred())
+
+			mcpSelector, err := GetMCPSelector(mcp, mcps)
+			Expect(err).ToNot(HaveOccurred())
+			Expect(len(mcpSelector)).To(Equal(1))
+
+			// Exactly one selector entry is asserted above; inspect it and break.
+			for key, value := range mcpSelector {
+				Expect(key).To(Equal("pools.operator.machineconfiguration.openshift.io/worker"))
+				Expect(value).To(Equal(""))
+				break
+			}
+		})
+		It("should detect the master MCP selector", func() {
+			mcp, err := GetMCP(mustGatherSNODirPath, "master")
+			Expect(err).ToNot(HaveOccurred())
+
+			mcpSelector, err := GetMCPSelector(mcp, mcps)
+			Expect(err).ToNot(HaveOccurred())
+			Expect(len(mcpSelector)).To(Equal(1))
+
+			for key, value := range mcpSelector {
+				Expect(key).To(Equal("pools.operator.machineconfiguration.openshift.io/master"))
+				Expect(value).To(Equal(""))
+				break
+			}
+		})
+	})
+})
+
+// Covers GetMCP: success, unknown MCP name, and a bad must-gather path.
+var _ = Describe("PerformanceProfileCreator: Getting MCP from Must Gather", func() {
+	var mcpName, mcpNodeSelectorKey, mustGatherDirAbsolutePath string
+	var err error
+	// NOTE(review): "targetted" in the Context description below is a
+	// misspelling of "targeted"; left as-is since the string is the spec name.
+	Context("Identifying Nodes targetted by MCP", func() {
+		It("gets the MCP successfully", func() {
+			mcpName = "worker-cnf"
+			mcpNodeSelectorKey = "node-role.kubernetes.io/worker-cnf"
+			mustGatherDirAbsolutePath, err = filepath.Abs(mustGatherDirPath)
+			Expect(err).ToNot(HaveOccurred())
+			mcp, err := GetMCP(mustGatherDirAbsolutePath, mcpName)
+			k, _ := components.GetFirstKeyAndValue(mcp.Spec.NodeSelector.MatchLabels)
+			Expect(err).ToNot(HaveOccurred())
+			Expect(k).To(Equal(mcpNodeSelectorKey))
+		})
+		It("fails to get MCP as an MCP with that name doesn't exist", func() {
+			mcpName = "foo"
+			// NOTE(review): the filepath.Abs error is never checked here —
+			// it is immediately shadowed by the := on the next line.
+			mustGatherDirAbsolutePath, err = filepath.Abs(mustGatherDirPath)
+			mcp, err := GetMCP(mustGatherDirAbsolutePath, mcpName)
+			Expect(mcp).To(BeNil())
+			Expect(err).To(HaveOccurred())
+		})
+		It("fails to get MCP due to misconfigured must-gather path", func() {
+			mcpName = "worker-cnf"
+			mustGatherDirAbsolutePath, err = filepath.Abs("foo-path")
+			Expect(err).ToNot(HaveOccurred())
+			_, err := GetMCP(mustGatherDirAbsolutePath, mcpName)
+			Expect(err).To(HaveOccurred())
+		})
+
+	})
+})
+
+// Covers GetNodeList: expected node count from the fixture, and failure on a
+// nonexistent must-gather path.
+var _ = Describe("PerformanceProfileCreator: Getting Nodes from Must Gather", func() {
+	var mustGatherDirAbsolutePath string
+	var err error
+
+	Context("Identifying Nodes in the cluster", func() {
+		It("gets the Nodes successfully", func() {
+			mustGatherDirAbsolutePath, err = filepath.Abs(mustGatherDirPath)
+			Expect(err).ToNot(HaveOccurred())
+			nodes, err := GetNodeList(mustGatherDirAbsolutePath)
+			Expect(err).ToNot(HaveOccurred())
+			// The bare-metal fixture contains 5 nodes.
+			Expect(len(nodes)).To(Equal(5))
+		})
+		It("fails to get Nodes due to misconfigured must-gather path", func() {
+			// NOTE(review): the filepath.Abs error is not checked here — it
+			// is shadowed by the := on the next line.
+			mustGatherDirAbsolutePath, err = filepath.Abs("foo-path")
+			_, err := GetNodeList(mustGatherDirAbsolutePath)
+			Expect(err).To(HaveOccurred())
+		})
+
+	})
+})
+
+// Covers NewGHWHandler plus its CPU() and SortedTopology() accessors against
+// the worker1 hardware snapshot in the bare-metal fixture.
+var _ = Describe("PerformanceProfileCreator: Consuming GHW Snapshot from Must Gather", func() {
+	var mustGatherDirAbsolutePath string
+	var node *v1.Node
+	var err error
+
+	Context("Identifying Nodes Info of the nodes cluster", func() {
+		It("gets the Nodes Info successfully", func() {
+			node = newTestNode("worker1")
+			mustGatherDirAbsolutePath, err = filepath.Abs(mustGatherDirPath)
+			Expect(err).ToNot(HaveOccurred())
+			handle, err := NewGHWHandler(mustGatherDirAbsolutePath, node)
+			Expect(err).ToNot(HaveOccurred())
+			cpuInfo, err := handle.CPU()
+			Expect(err).ToNot(HaveOccurred())
+			// worker1 fixture: 2 sockets, 40 cores, 80 threads (HT on).
+			Expect(len(cpuInfo.Processors)).To(Equal(2))
+			Expect(int(cpuInfo.TotalCores)).To(Equal(40))
+			Expect(int(cpuInfo.TotalThreads)).To(Equal(80))
+			topologyInfo, err := handle.SortedTopology()
+			Expect(err).ToNot(HaveOccurred())
+			Expect(len(topologyInfo.Nodes)).To(Equal(2))
+		})
+		It("fails to get Nodes Info due to misconfigured must-gather path", func() {
+			// NOTE(review): the filepath.Abs error is not checked in these two
+			// failure specs — it is shadowed by the := on the following line.
+			mustGatherDirAbsolutePath, err = filepath.Abs("foo-path")
+			_, err := NewGHWHandler(mustGatherDirAbsolutePath, node)
+			Expect(err).To(HaveOccurred())
+		})
+		It("fails to get Nodes Info for a node that does not exist", func() {
+			node = newTestNode("foo")
+			mustGatherDirAbsolutePath, err = filepath.Abs(mustGatherDirPath)
+			_, err := NewGHWHandler(mustGatherDirAbsolutePath, node)
+			Expect(err).To(HaveOccurred())
+		})
+
+	})
+})
+
+// Exercises GetReservedAndIsolatedCPUs across the combinations of
+// reservedCPUCount, splitReservedCPUsAcrossNUMA, and disableHT, asserting the
+// exact reserved/isolated cpusets (or an error) for the fixture hardware.
+var _ = Describe("PerformanceProfileCreator: Populating Reserved and Isolated CPUs in Performance Profile", func() {
+	var mustGatherDirAbsolutePath string
+	var node *v1.Node
+	var handle *GHWHandler
+	var splitReservedCPUsAcrossNUMA, disableHT bool
+	var reservedCPUCount int
+	var err error
+
+	BeforeEach(func() {
+		// worker1 is the HT-enabled, 2-NUMA, 80-thread fixture node.
+		node = newTestNode("worker1")
+	})
+	Context("Check if reserved and isolated CPUs are properly populated in the performance profile", func() {
+		It("Ensure reserved CPUs populated are correctly when splitReservedCPUsAcrossNUMA is disabled and disableHT is disabled", func() {
+			reservedCPUCount = 20 // random number, no special meaning
+			splitReservedCPUsAcrossNUMA = false
+			disableHT = false
+			mustGatherDirAbsolutePath, err = filepath.Abs(mustGatherDirPath)
+			Expect(err).ToNot(HaveOccurred())
+			handle, err = NewGHWHandler(mustGatherDirAbsolutePath, node)
+			Expect(err).ToNot(HaveOccurred())
+			reservedCPUSet, isolatedCPUSet, err := handle.GetReservedAndIsolatedCPUs(reservedCPUCount, splitReservedCPUsAcrossNUMA, disableHT)
+			Expect(err).ToNot(HaveOccurred())
+			// Sequential allocation takes whole cores (both HT siblings, e.g. 0+40).
+			Expect(reservedCPUSet.String()).To(Equal("0,2,4,6,8,10,12,14,16,18,40,42,44,46,48,50,52,54,56,58"))
+			Expect(isolatedCPUSet.String()).To(Equal("1,3,5,7,9,11,13,15,17,19-39,41,43,45,47,49,51,53,55,57,59-79"))
+		})
+		It("Ensure reserved CPUs populated are correctly when splitReservedCPUsAcrossNUMA is enabled and disableHT is disabled", func() {
+			reservedCPUCount = 20 // random number, no special meaning
+			splitReservedCPUsAcrossNUMA = true
+			disableHT = false
+			mustGatherDirAbsolutePath, err = filepath.Abs(mustGatherDirPath)
+			Expect(err).ToNot(HaveOccurred())
+			handle, err = NewGHWHandler(mustGatherDirAbsolutePath, node)
+			Expect(err).ToNot(HaveOccurred())
+			reservedCPUSet, isolatedCPUSet, err := handle.GetReservedAndIsolatedCPUs(reservedCPUCount, splitReservedCPUsAcrossNUMA, disableHT)
+			Expect(err).ToNot(HaveOccurred())
+			// Split mode reserves 10 CPUs per NUMA node (10 + 10).
+			Expect(reservedCPUSet.String()).To(Equal("0-9,40-49"))
+			Expect(isolatedCPUSet.String()).To(Equal("10-39,50-79"))
+		})
+		It("Errors out in case negative reservedCPUCount is specified", func() {
+			reservedCPUCount = -2 // random negative number, no special meaning
+			splitReservedCPUsAcrossNUMA = true
+			disableHT = false
+			mustGatherDirAbsolutePath, err = filepath.Abs(mustGatherDirPath)
+			Expect(err).ToNot(HaveOccurred())
+			handle, err = NewGHWHandler(mustGatherDirAbsolutePath, node)
+			Expect(err).ToNot(HaveOccurred())
+			_, _, err := handle.GetReservedAndIsolatedCPUs(reservedCPUCount, splitReservedCPUsAcrossNUMA, disableHT)
+			Expect(err).To(HaveOccurred())
+		})
+		It("Errors out in case specified reservedCPUCount is greater than the total CPUs present in the system and disableHT is disabled", func() {
+			reservedCPUCount = 100 // random positive number greater than that total number of CPUs
+			splitReservedCPUsAcrossNUMA = true
+			disableHT = false
+			mustGatherDirAbsolutePath, err = filepath.Abs(mustGatherDirPath)
+			Expect(err).ToNot(HaveOccurred())
+			handle, err = NewGHWHandler(mustGatherDirAbsolutePath, node)
+			Expect(err).ToNot(HaveOccurred())
+			_, _, err := handle.GetReservedAndIsolatedCPUs(reservedCPUCount, splitReservedCPUsAcrossNUMA, disableHT)
+			Expect(err).To(HaveOccurred())
+		})
+		It("Errors out in case hyperthreading is enabled, splitReservedCPUsAcrossNUMA is enabled, disableHT is disabled and number of reserved CPUs per number of NUMA nodes are odd", func() {
+			reservedCPUCount = 21 // random number which results in a CPU split per NUMA node (11 + 10 in this case) such that odd number of reserved CPUs (11) have to be allocated from a NUMA node
+			splitReservedCPUsAcrossNUMA = true
+			disableHT = false
+			mustGatherDirAbsolutePath, err = filepath.Abs(mustGatherDirPath)
+			Expect(err).ToNot(HaveOccurred())
+			handle, err = NewGHWHandler(mustGatherDirAbsolutePath, node)
+			Expect(err).ToNot(HaveOccurred())
+			_, _, err := handle.GetReservedAndIsolatedCPUs(reservedCPUCount, splitReservedCPUsAcrossNUMA, disableHT)
+			Expect(err).To(HaveOccurred())
+		})
+		// NOTE(review): ",," (double comma) in the spec description below is a
+		// typo; left untouched since the string is the spec name.
+		It("Errors out in case hyperthreading is enabled, splitReservedCPUsAcrossNUMA is disabled,, disableHT is disabled and number of reserved CPUs are odd", func() {
+			reservedCPUCount = 21 // random number which results in odd number (21) of CPUs to be allocated from a NUMA node
+			splitReservedCPUsAcrossNUMA = false
+			disableHT = false
+			mustGatherDirAbsolutePath, err = filepath.Abs(mustGatherDirPath)
+			Expect(err).ToNot(HaveOccurred())
+			handle, err = NewGHWHandler(mustGatherDirAbsolutePath, node)
+			Expect(err).ToNot(HaveOccurred())
+			_, _, err := handle.GetReservedAndIsolatedCPUs(reservedCPUCount, splitReservedCPUsAcrossNUMA, disableHT)
+			Expect(err).To(HaveOccurred())
+		})
+		It("Ensure reserved CPUs populated are correctly when splitReservedCPUsAcrossNUMA is disabled, disableHT is enabled", func() {
+			reservedCPUCount = 20 // random number, no special meaning
+			splitReservedCPUsAcrossNUMA = false
+			disableHT = true
+			mustGatherDirAbsolutePath, err = filepath.Abs(mustGatherDirPath)
+			Expect(err).ToNot(HaveOccurred())
+			handle, err = NewGHWHandler(mustGatherDirAbsolutePath, node)
+			Expect(err).ToNot(HaveOccurred())
+			reservedCPUSet, isolatedCPUSet, err := handle.GetReservedAndIsolatedCPUs(reservedCPUCount, splitReservedCPUsAcrossNUMA, disableHT)
+			Expect(err).ToNot(HaveOccurred())
+			// With HT disabled only the 40 physical cores (0-39) are allocatable.
+			Expect(reservedCPUSet.String()).To(Equal("0,2,4,6,8,10,12,14,16,18,20,22,24,26,28,30,32,34,36,38"))
+			Expect(isolatedCPUSet.String()).To(Equal("1,3,5,7,9,11,13,15,17,19,21,23,25,27,29,31,33,35,37,39"))
+		})
+		It("Ensure reserved CPUs populated are correctly when splitReservedCPUsAcrossNUMA is enabled and disableHT is enabled", func() {
+			reservedCPUCount = 20 // random number, no special meaning
+			splitReservedCPUsAcrossNUMA = true
+			disableHT = true
+			mustGatherDirAbsolutePath, err = filepath.Abs(mustGatherDirPath)
+			Expect(err).ToNot(HaveOccurred())
+			handle, err = NewGHWHandler(mustGatherDirAbsolutePath, node)
+			Expect(err).ToNot(HaveOccurred())
+			reservedCPUSet, isolatedCPUSet, err := handle.GetReservedAndIsolatedCPUs(reservedCPUCount, splitReservedCPUsAcrossNUMA, disableHT)
+			Expect(err).ToNot(HaveOccurred())
+			Expect(reservedCPUSet.String()).To(Equal("0-19"))
+			Expect(isolatedCPUSet.String()).To(Equal("20-39"))
+		})
+		It("Do not error out in case hyperthreading is currently enabled, splitReservedCPUsAcrossNUMA is disabled, disableHT is enabled and number of reserved CPUs allocated from a NUMA node are odd", func() {
+			reservedCPUCount = 11 // random number which results in odd number (11) of CPUs to be allocated from a NUMA node
+			splitReservedCPUsAcrossNUMA = false
+			disableHT = true
+			mustGatherDirAbsolutePath, err = filepath.Abs(mustGatherDirPath)
+			Expect(err).ToNot(HaveOccurred())
+			handle, err = NewGHWHandler(mustGatherDirAbsolutePath, node)
+			Expect(err).ToNot(HaveOccurred())
+			_, _, err := handle.GetReservedAndIsolatedCPUs(reservedCPUCount, splitReservedCPUsAcrossNUMA, disableHT)
+			Expect(err).ToNot(HaveOccurred())
+		})
+		It("Do not error out in case hyperthreading is currently enabled, splitReservedCPUsAcrossNUMA is enabled, disableHT is enabled and number of reserved CPUs allocated from a NUMA node are odd", func() {
+			reservedCPUCount = 2 // random number which results in odd number (1) of CPUs to be allocated from a NUMA node
+			splitReservedCPUsAcrossNUMA = true
+			disableHT = true
+			mustGatherDirAbsolutePath, err = filepath.Abs(mustGatherDirPath)
+			Expect(err).ToNot(HaveOccurred())
+			handle, err = NewGHWHandler(mustGatherDirAbsolutePath, node)
+			Expect(err).ToNot(HaveOccurred())
+			_, _, err := handle.GetReservedAndIsolatedCPUs(reservedCPUCount, splitReservedCPUsAcrossNUMA, disableHT)
+			Expect(err).ToNot(HaveOccurred())
+		})
+		It("Do not error out in case of a system where hyperthreading is not enabled initially, splitReservedCPUsAcrossNUMA is disabled, disableHT is enabled and number of reserved CPUs allocated are odd", func() {
+			// Uses the SNO fixture, whose node has hyperthreading off already.
+			node = newTestNode("ocp47sno-master-0.demo.lab")
+			reservedCPUCount = 3 // random number which results in odd number (3) of CPUs to be allocated
+			splitReservedCPUsAcrossNUMA = false
+			disableHT = true
+			mustGatherDirAbsolutePath, err = filepath.Abs(mustGatherSNODirPath)
+			Expect(err).ToNot(HaveOccurred())
+			handle, err = NewGHWHandler(mustGatherDirAbsolutePath, node)
+			Expect(err).ToNot(HaveOccurred())
+			_, _, err := handle.GetReservedAndIsolatedCPUs(reservedCPUCount, splitReservedCPUsAcrossNUMA, disableHT)
+			Expect(err).ToNot(HaveOccurred())
+		})
+
+	})
+})
+
+// Verifies IsHyperthreadingEnabled against two fixture nodes: worker1 (HT on)
+// and worker2 (HT off).
+var _ = Describe("PerformanceProfileCreator: Check if Hyperthreading enabled/disabled in a system to correctly populate reserved and isolated CPUs in the performance profile", func() {
+	var mustGatherDirAbsolutePath string
+	var node *v1.Node
+	var handle *GHWHandler
+	var err error
+
+	Context("Check if hyperthreading is enabled on the system or not", func() {
+		It("Ensure we detect correctly that hyperthreading is enabled on a system", func() {
+			node = newTestNode("worker1")
+			mustGatherDirAbsolutePath, err = filepath.Abs(mustGatherDirPath)
+			Expect(err).ToNot(HaveOccurred())
+			handle, err = NewGHWHandler(mustGatherDirAbsolutePath, node)
+			Expect(err).ToNot(HaveOccurred())
+			htEnabled, err := handle.IsHyperthreadingEnabled()
+			Expect(err).ToNot(HaveOccurred())
+			Expect(htEnabled).To(Equal(true))
+		})
+		It("Ensure we detect correctly that hyperthreading is disabled on a system", func() {
+			node = newTestNode("worker2")
+			mustGatherDirAbsolutePath, err = filepath.Abs(mustGatherDirPath)
+			Expect(err).ToNot(HaveOccurred())
+			handle, err = NewGHWHandler(mustGatherDirAbsolutePath, node)
+			Expect(err).ToNot(HaveOccurred())
+			htEnabled, err := handle.IsHyperthreadingEnabled()
+			Expect(err).ToNot(HaveOccurred())
+			Expect(htEnabled).To(Equal(false))
+		})
+	})
+})
+
+// Exercises the unexported allocation helpers directly, feeding hand-built
+// topology fixtures: a 2-NUMA/HT-enabled layout (2 logical processors per
+// core) and a 2-NUMA/HT-disabled layout (1 logical processor per core).
+var _ = Describe("PerformanceProfileCreator: Test Helper Functions getCPUsSplitAcrossNUMA and getCPUsSequentially", func() {
+	var mustGatherDirAbsolutePath string
+	var node *v1.Node
+	var handle *GHWHandler
+	var reservedCPUCount int
+	var topologyInfoNodes, htDisabledTopologyInfoNodes []*topology.Node
+	var htEnabled bool
+	var err error
+
+	BeforeEach(func() {
+		node = newTestNode("worker1")
+		// HT-enabled fixture: each core exposes two logical processors
+		// (sibling IDs offset by 40), mirroring the worker1 snapshot.
+		topologyInfoNodes = []*topology.Node{
+			{
+				ID: 0,
+				Cores: []*cpu.ProcessorCore{
+					{ID: 0, Index: 0, NumThreads: 2, LogicalProcessors: []int{0, 40}},
+					{ID: 4, Index: 6, NumThreads: 2, LogicalProcessors: []int{2, 42}},
+					{ID: 1, Index: 17, NumThreads: 2, LogicalProcessors: []int{4, 44}},
+					{ID: 3, Index: 18, NumThreads: 2, LogicalProcessors: []int{6, 46}},
+					{ID: 2, Index: 19, NumThreads: 2, LogicalProcessors: []int{8, 48}},
+					{ID: 12, Index: 1, NumThreads: 2, LogicalProcessors: []int{10, 50}},
+					{ID: 8, Index: 2, NumThreads: 2, LogicalProcessors: []int{12, 52}},
+					{ID: 11, Index: 3, NumThreads: 2, LogicalProcessors: []int{14, 54}},
+					{ID: 9, Index: 4, NumThreads: 2, LogicalProcessors: []int{16, 56}},
+					{ID: 10, Index: 5, NumThreads: 2, LogicalProcessors: []int{18, 58}},
+					{ID: 16, Index: 7, NumThreads: 2, LogicalProcessors: []int{20, 60}},
+					{ID: 20, Index: 8, NumThreads: 2, LogicalProcessors: []int{22, 62}},
+					{ID: 17, Index: 9, NumThreads: 2, LogicalProcessors: []int{24, 64}},
+					{ID: 19, Index: 10, NumThreads: 2, LogicalProcessors: []int{26, 66}},
+					{ID: 18, Index: 11, NumThreads: 2, LogicalProcessors: []int{28, 68}},
+					{ID: 28, Index: 12, NumThreads: 2, LogicalProcessors: []int{30, 70}},
+					{ID: 24, Index: 13, NumThreads: 2, LogicalProcessors: []int{32, 72}},
+					{ID: 27, Index: 14, NumThreads: 2, LogicalProcessors: []int{34, 74}},
+					{ID: 25, Index: 15, NumThreads: 2, LogicalProcessors: []int{36, 76}},
+					{ID: 26, Index: 16, NumThreads: 2, LogicalProcessors: []int{38, 78}},
+				},
+			},
+			{
+				ID: 1,
+				Cores: []*cpu.ProcessorCore{
+					{ID: 0, Index: 0, NumThreads: 2, LogicalProcessors: []int{1, 41}},
+					{ID: 4, Index: 11, NumThreads: 2, LogicalProcessors: []int{3, 43}},
+					{ID: 1, Index: 17, NumThreads: 2, LogicalProcessors: []int{5, 45}},
+					{ID: 3, Index: 18, NumThreads: 2, LogicalProcessors: []int{7, 47}},
+					{ID: 2, Index: 19, NumThreads: 2, LogicalProcessors: []int{9, 49}},
+					{ID: 12, Index: 1, NumThreads: 2, LogicalProcessors: []int{11, 51}},
+					{ID: 8, Index: 2, NumThreads: 2, LogicalProcessors: []int{13, 53}},
+					{ID: 11, Index: 3, NumThreads: 2, LogicalProcessors: []int{15, 55}},
+					{ID: 9, Index: 4, NumThreads: 2, LogicalProcessors: []int{17, 57}},
+					{ID: 10, Index: 5, NumThreads: 2, LogicalProcessors: []int{19, 59}},
+					{ID: 16, Index: 6, NumThreads: 2, LogicalProcessors: []int{21, 61}},
+					{ID: 20, Index: 7, NumThreads: 2, LogicalProcessors: []int{23, 63}},
+					{ID: 17, Index: 8, NumThreads: 2, LogicalProcessors: []int{25, 65}},
+					{ID: 19, Index: 9, NumThreads: 2, LogicalProcessors: []int{27, 67}},
+					{ID: 18, Index: 10, NumThreads: 2, LogicalProcessors: []int{29, 69}},
+					{ID: 28, Index: 12, NumThreads: 2, LogicalProcessors: []int{31, 71}},
+					{ID: 24, Index: 13, NumThreads: 2, LogicalProcessors: []int{33, 73}},
+					{ID: 27, Index: 14, NumThreads: 2, LogicalProcessors: []int{35, 75}},
+					{ID: 25, Index: 15, NumThreads: 2, LogicalProcessors: []int{37, 77}},
+					{ID: 26, Index: 16, NumThreads: 2, LogicalProcessors: []int{39, 79}},
+				},
+			},
+		}
+
+		// HT-disabled fixture: same cores, one logical processor each.
+		htDisabledTopologyInfoNodes = []*topology.Node{
+			{
+				ID: 0,
+				Cores: []*cpu.ProcessorCore{
+					{ID: 0, Index: 0, NumThreads: 1, LogicalProcessors: []int{0}},
+					{ID: 4, Index: 6, NumThreads: 1, LogicalProcessors: []int{2}},
+					{ID: 1, Index: 17, NumThreads: 1, LogicalProcessors: []int{4}},
+					{ID: 3, Index: 18, NumThreads: 1, LogicalProcessors: []int{6}},
+					{ID: 2, Index: 19, NumThreads: 1, LogicalProcessors: []int{8}},
+					{ID: 12, Index: 1, NumThreads: 1, LogicalProcessors: []int{10}},
+					{ID: 8, Index: 2, NumThreads: 1, LogicalProcessors: []int{12}},
+					{ID: 11, Index: 3, NumThreads: 1, LogicalProcessors: []int{14}},
+					{ID: 9, Index: 4, NumThreads: 1, LogicalProcessors: []int{16}},
+					{ID: 10, Index: 5, NumThreads: 1, LogicalProcessors: []int{18}},
+					{ID: 16, Index: 7, NumThreads: 1, LogicalProcessors: []int{20}},
+					{ID: 20, Index: 8, NumThreads: 1, LogicalProcessors: []int{22}},
+					{ID: 17, Index: 9, NumThreads: 1, LogicalProcessors: []int{24}},
+					{ID: 19, Index: 10, NumThreads: 1, LogicalProcessors: []int{26}},
+					{ID: 18, Index: 11, NumThreads: 1, LogicalProcessors: []int{28}},
+					{ID: 28, Index: 12, NumThreads: 1, LogicalProcessors: []int{30}},
+					{ID: 24, Index: 13, NumThreads: 1, LogicalProcessors: []int{32}},
+					{ID: 27, Index: 14, NumThreads: 1, LogicalProcessors: []int{34}},
+					{ID: 25, Index: 15, NumThreads: 1, LogicalProcessors: []int{36}},
+					{ID: 26, Index: 16, NumThreads: 1, LogicalProcessors: []int{38}},
+				},
+			},
+			{
+				ID: 1,
+				Cores: []*cpu.ProcessorCore{
+					{ID: 0, Index: 0, NumThreads: 1, LogicalProcessors: []int{1}},
+					{ID: 4, Index: 11, NumThreads: 1, LogicalProcessors: []int{3}},
+					{ID: 1, Index: 17, NumThreads: 1, LogicalProcessors: []int{5}},
+					{ID: 3, Index: 18, NumThreads: 1, LogicalProcessors: []int{7}},
+					{ID: 2, Index: 19, NumThreads: 1, LogicalProcessors: []int{9}},
+					{ID: 12, Index: 1, NumThreads: 1, LogicalProcessors: []int{11}},
+					{ID: 8, Index: 2, NumThreads: 1, LogicalProcessors: []int{13}},
+					{ID: 11, Index: 3, NumThreads: 1, LogicalProcessors: []int{15}},
+					{ID: 9, Index: 4, NumThreads: 1, LogicalProcessors: []int{17}},
+					{ID: 10, Index: 5, NumThreads: 1, LogicalProcessors: []int{19}},
+					{ID: 16, Index: 6, NumThreads: 1, LogicalProcessors: []int{21}},
+					{ID: 20, Index: 7, NumThreads: 1, LogicalProcessors: []int{23}},
+					{ID: 17, Index: 8, NumThreads: 1, LogicalProcessors: []int{25}},
+					{ID: 19, Index: 9, NumThreads: 1, LogicalProcessors: []int{27}},
+					{ID: 18, Index: 10, NumThreads: 1, LogicalProcessors: []int{29}},
+					{ID: 28, Index: 12, NumThreads: 1, LogicalProcessors: []int{31}},
+					{ID: 24, Index: 13, NumThreads: 1, LogicalProcessors: []int{33}},
+					{ID: 27, Index: 14, NumThreads: 1, LogicalProcessors: []int{35}},
+					{ID: 25, Index: 15, NumThreads: 1, LogicalProcessors: []int{37}},
+					{ID: 26, Index: 16, NumThreads: 1, LogicalProcessors: []int{39}},
+				},
+			},
+		}
+	})
+	Context("Check if getCPUsSplitAcrossNUMA and getCPUsSequentially are working correctly and reserved and isolated CPUs are properly populated in the performance profile", func() {
+		// NOTE(review): "getCPUsSplitAcrossNUMAwhen when" in two spec
+		// descriptions below is a duplicated-word typo; left untouched since
+		// the strings are the spec names.
+		It("Ensure reserved and isolated CPUs populated are correctly by getCPUsSplitAcrossNUMAwhen when splitReservedCPUsAcrossNUMA is enabled", func() {
+			reservedCPUCount = 20 // random number, no special meaning
+			htEnabled = true
+			mustGatherDirAbsolutePath, err = filepath.Abs(mustGatherDirPath)
+			Expect(err).ToNot(HaveOccurred())
+			handle, err = NewGHWHandler(mustGatherDirAbsolutePath, node)
+			Expect(err).ToNot(HaveOccurred())
+			reservedCPUSet, isolatedCPUSet, err := handle.getCPUsSplitAcrossNUMA(reservedCPUCount, htEnabled, topologyInfoNodes)
+			Expect(err).ToNot(HaveOccurred())
+			Expect(reservedCPUSet.String()).To(Equal("0-9,40-49"))
+			Expect(isolatedCPUSet.String()).To(Equal("10-39,50-79"))
+		})
+		It("Ensure reserved and isolated CPUs populated are correctly by getCPUsSplitAcrossNUMAwhen when splitReservedCPUsAcrossNUMA is enabled and htEnabled is disabled ", func() {
+			reservedCPUCount = 20 // random number, no special meaning
+			htEnabled = false
+			mustGatherDirAbsolutePath, err = filepath.Abs(mustGatherDirPath)
+			Expect(err).ToNot(HaveOccurred())
+			handle, err = NewGHWHandler(mustGatherDirAbsolutePath, node)
+			Expect(err).ToNot(HaveOccurred())
+			reservedCPUSet, isolatedCPUSet, err := handle.getCPUsSplitAcrossNUMA(reservedCPUCount, htEnabled, htDisabledTopologyInfoNodes)
+			Expect(err).ToNot(HaveOccurred())
+			Expect(reservedCPUSet.String()).To(Equal("0-19"))
+			Expect(isolatedCPUSet.String()).To(Equal("20-39"))
+		})
+		It("Errors out in case hyperthreading is enabled, splitReservedCPUsAcrossNUMA is enabled, htEnabled is enabled and the number of reserved CPUs per number of NUMA nodes are odd", func() {
+			reservedCPUCount = 21 // random number which results in a CPU split per NUMA node (11 + 10 in this case) such that odd number of reserved CPUs (11) have to be allocated from a NUMA node
+			htEnabled = true
+			mustGatherDirAbsolutePath, err = filepath.Abs(mustGatherDirPath)
+			Expect(err).ToNot(HaveOccurred())
+			handle, err = NewGHWHandler(mustGatherDirAbsolutePath, node)
+			Expect(err).ToNot(HaveOccurred())
+			_, _, err := handle.getCPUsSplitAcrossNUMA(reservedCPUCount, htEnabled, topologyInfoNodes)
+			Expect(err).To(HaveOccurred())
+		})
+		It("Works without error in case hyperthreading is disabled, splitReservedCPUsAcrossNUMA is enabled, htEnabled is disabled and number of reserved CPUs per number of NUMA nodes are odd", func() {
+			reservedCPUCount = 11 // random number which results in a CPU split per NUMA node (5 + 6 in this case) such that odd number of reserved CPUs (5) have to be allocated from a NUMA node
+			htEnabled = false
+			mustGatherDirAbsolutePath, err = filepath.Abs(mustGatherDirPath)
+			Expect(err).ToNot(HaveOccurred())
+			handle, err = NewGHWHandler(mustGatherDirAbsolutePath, node)
+			Expect(err).ToNot(HaveOccurred())
+			_, _, err := handle.getCPUsSplitAcrossNUMA(reservedCPUCount, htEnabled, topologyInfoNodes)
+			Expect(err).ToNot(HaveOccurred())
+		})
+		It("Ensure reserved and isolated CPUs populated are correctly by getCPUsSequentially when splitReservedCPUsAcrossNUMA is disabled", func() {
+			reservedCPUCount = 20 // random number, no special meaning
+			htEnabled = true
+			mustGatherDirAbsolutePath, err = filepath.Abs(mustGatherDirPath)
+			Expect(err).ToNot(HaveOccurred())
+			handle, err = NewGHWHandler(mustGatherDirAbsolutePath, node)
+			Expect(err).ToNot(HaveOccurred())
+			reservedCPUSet, isolatedCPUSet, err := handle.getCPUsSequentially(reservedCPUCount, htEnabled, topologyInfoNodes)
+			Expect(err).ToNot(HaveOccurred())
+			Expect(reservedCPUSet.String()).To(Equal("0,2,4,6,8,10,12,14,16,18,40,42,44,46,48,50,52,54,56,58"))
+			Expect(isolatedCPUSet.String()).To(Equal("1,3,5,7,9,11,13,15,17,19-39,41,43,45,47,49,51,53,55,57,59-79"))
+		})
+		It("Errors out in case hyperthreading is enabled, splitReservedCPUsAcrossNUMA is disabled and number of reserved CPUs are odd", func() {
+			reservedCPUCount = 21 // random number which results in odd number (21) of CPUs to be allocated from a NUMA node
+			htEnabled = true
+			mustGatherDirAbsolutePath, err = filepath.Abs(mustGatherDirPath)
+			Expect(err).ToNot(HaveOccurred())
+			handle, err = NewGHWHandler(mustGatherDirAbsolutePath, node)
+			Expect(err).ToNot(HaveOccurred())
+			_, _, err := handle.getCPUsSequentially(reservedCPUCount, htEnabled, topologyInfoNodes)
+			Expect(err).To(HaveOccurred())
+		})
+		It("Works without error in case hyperthreading is disabled, splitReservedCPUsAcrossNUMA is disabled and number of reserved CPUs are odd", func() {
+			reservedCPUCount = 11 // random number which results in odd number (11) of CPUs to be allocated from a NUMA node
+			htEnabled = false
+			mustGatherDirAbsolutePath, err = filepath.Abs(mustGatherDirPath)
+			Expect(err).ToNot(HaveOccurred())
+			handle, err = NewGHWHandler(mustGatherDirAbsolutePath, node)
+			Expect(err).ToNot(HaveOccurred())
+			_, _, err := handle.getCPUsSequentially(reservedCPUCount, htEnabled, topologyInfoNodes)
+			Expect(err).ToNot(HaveOccurred())
+		})
+
+	})
+})
+
+// Verifies EnsureNodesHaveTheSameHardware: identical hardware passes
+// (worker1 compared against itself), differing hardware fails
+// (worker1 vs worker2).
+var _ = Describe("PerformanceProfileCreator: Ensuring Nodes hardware equality", func() {
+	Context("Testing matching nodes with the same hardware ", func() {
+		It("should pass hardware equality test", func() {
+			mustGatherDirAbsolutePath, err := filepath.Abs(mustGatherDirPath)
+			Expect(err).ToNot(HaveOccurred())
+
+			node1, err := getNode(mustGatherDirAbsolutePath, "worker1.yaml")
+			Expect(err).ToNot(HaveOccurred())
+			node1Handle, err := NewGHWHandler(mustGatherDirAbsolutePath, node1)
+			Expect(err).ToNot(HaveOccurred())
+
+			// Intentionally loads the same worker1.yaml twice so both
+			// handles describe identical hardware.
+			node2, err := getNode(mustGatherDirAbsolutePath, "worker1.yaml")
+			Expect(err).ToNot(HaveOccurred())
+			node2Handle, err := NewGHWHandler(mustGatherDirAbsolutePath, node2)
+			Expect(err).ToNot(HaveOccurred())
+
+			nodeHandles := []*GHWHandler{node1Handle, node2Handle}
+			err = EnsureNodesHaveTheSameHardware(nodeHandles)
+			Expect(err).ToNot(HaveOccurred())
+		})
+	})
+
+	Context("Testing matching nodes with different hardware ", func() {
+		It("should fail hardware equality test", func() {
+			mustGatherDirAbsolutePath, err := filepath.Abs(mustGatherDirPath)
+			Expect(err).ToNot(HaveOccurred())
+
+			node1, err := getNode(mustGatherDirAbsolutePath, "worker1.yaml")
+			Expect(err).ToNot(HaveOccurred())
+			node1Handle, err := NewGHWHandler(mustGatherDirAbsolutePath, node1)
+			Expect(err).ToNot(HaveOccurred())
+
+			node2, err := getNode(mustGatherDirAbsolutePath, "worker2.yaml")
+			Expect(err).ToNot(HaveOccurred())
+			node2Handle, err := NewGHWHandler(mustGatherDirAbsolutePath, node2)
+			Expect(err).ToNot(HaveOccurred())
+
+			nodeHandles := []*GHWHandler{node1Handle, node2Handle}
+			err = EnsureNodesHaveTheSameHardware(nodeHandles)
+			Expect(err).To(HaveOccurred())
+		})
+	})
+})
+
+// Unit-tests ensureSameTopology with two small hand-built topologies:
+// topology1 is fixed, topology2 starts identical and is mutated per spec to
+// trigger each mismatch path (architecture, NUMA count, thread count, IDs).
+var _ = Describe("PerformanceProfileCreator: Test Helper Function ensureSameTopology", func() {
+	var nodes2 []*topology.Node
+	var topology2 topology.Info
+
+	nodes1 := []*topology.Node{
+		{
+			ID: 0,
+			Cores: []*cpu.ProcessorCore{
+				{ID: 0, Index: 0, NumThreads: 2, LogicalProcessors: []int{0, 1}},
+				{ID: 1, Index: 1, NumThreads: 2, LogicalProcessors: []int{2, 3}},
+			},
+		},
+		{
+			ID: 1,
+			Cores: []*cpu.ProcessorCore{
+				{ID: 2, Index: 2, NumThreads: 2, LogicalProcessors: []int{4, 5}},
+				{ID: 3, Index: 3, NumThreads: 2, LogicalProcessors: []int{6, 7}},
+			},
+		},
+	}
+	topology1 := topology.Info{
+		Architecture: topology.ARCHITECTURE_NUMA,
+		Nodes:        nodes1,
+	}
+
+	BeforeEach(func() {
+		// Rebuild topology2 before each spec so per-spec mutations
+		// (below) do not leak between tests.
+		nodes2 = []*topology.Node{
+			{
+				ID: 0,
+				Cores: []*cpu.ProcessorCore{
+					{ID: 0, Index: 0, NumThreads: 2, LogicalProcessors: []int{0, 1}},
+					{ID: 1, Index: 1, NumThreads: 2, LogicalProcessors: []int{2, 3}},
+				},
+			},
+			{
+				ID: 1,
+				Cores: []*cpu.ProcessorCore{
+					{ID: 2, Index: 2, NumThreads: 2, LogicalProcessors: []int{4, 5}},
+					{ID: 3, Index: 3, NumThreads: 2, LogicalProcessors: []int{6, 7}},
+				},
+			},
+		}
+		topology2 = topology.Info{
+			Architecture: topology.ARCHITECTURE_NUMA,
+			Nodes:        nodes2,
+		}
+	})
+
+	Context("Check if ensureSameTopology is working correctly", func() {
+		It("nodes with similar topology should not return error", func() {
+			err := ensureSameTopology(&topology1, &topology2)
+			Expect(err).ToNot(HaveOccurred())
+		})
+		It("nodes with different architecture should return error", func() {
+			topology2.Architecture = topology.ARCHITECTURE_SMP
+			err := ensureSameTopology(&topology1, &topology2)
+			Expect(err).To(HaveOccurred())
+		})
+		It("nodes with different number of NUMA nodes should return error", func() {
+			topology2.Nodes = topology2.Nodes[1:]
+			err := ensureSameTopology(&topology1, &topology2)
+			Expect(err).To(HaveOccurred())
+		})
+		It("nodes with different number threads per core should return error", func() {
+			topology2.Nodes[1].Cores[1].NumThreads = 1
+			err := ensureSameTopology(&topology1, &topology2)
+			Expect(err).To(HaveOccurred())
+		})
+		It("nodes with different thread IDs should return error", func() {
+			topology2.Nodes[1].Cores[1].LogicalProcessors[1] = 15
+			err := ensureSameTopology(&topology1, &topology2)
+			Expect(err).To(HaveOccurred())
+		})
+	})
+})
+
+var _ = Describe("PerformanceProfileCreator: Test Helper Function GetAdditionalKernelArgs", func() {
+ var powerMode string
+ var disableHT bool
+ Context("Ensure kernel args are populated correctly", func() {
+ It("Ensure kernel args are populated correctly in case of default ", func() {
+ powerMode = "default"
+ disableHT = false
+ kernelArgs := GetAdditionalKernelArgs(powerMode, disableHT)
+ Expect(kernelArgs).To(BeEquivalentTo([]string{}))
+ })
+
+ })
+ Context("Ensure kernel args are populated correctly", func() {
+ It("Ensure kernel args are populated correctly in case of low-latency ", func() {
+ powerMode = "low-latency"
+ disableHT = false
+ args := []string{"audit=0",
+ "mce=off",
+ "nmi_watchdog=0",
+ }
+ kernelArgs := GetAdditionalKernelArgs(powerMode, disableHT)
+ sort.Strings(kernelArgs) // sort to avoid inequality due to difference in order
+ Expect(kernelArgs).To(BeEquivalentTo(args))
+ })
+
+ })
+ Context("Ensure kernel args are populated correctly", func() {
+ It("Ensure kernel args are populated correctly in case of ultra-low-latency ", func() {
+ powerMode = "ultra-low-latency"
+ disableHT = false
+ args := []string{"audit=0",
+ "idle=poll",
+ "intel_idle.max_cstate=0",
+ "mce=off",
+ "nmi_watchdog=0",
+ "processor.max_cstate=1",
+ }
+ kernelArgs := GetAdditionalKernelArgs(powerMode, disableHT)
+ sort.Strings(kernelArgs) // sort to avoid inequality due to difference in order
+ Expect(kernelArgs).To(BeEquivalentTo(args))
+ })
+
+ })
+ Context("Ensure kernel args are populated correctly", func() {
+ It("Ensure kernel args are populated correctly in case of disableHT=true ", func() {
+ powerMode = "ultra-low-latency"
+ disableHT = true
+ args := []string{"audit=0",
+ "idle=poll",
+ "intel_idle.max_cstate=0",
+ "mce=off",
+ "nmi_watchdog=0",
+ "nosmt",
+ "processor.max_cstate=1",
+ }
+ kernelArgs := GetAdditionalKernelArgs(powerMode, disableHT)
+ sort.Strings(kernelArgs) // sort to avoid inequality due to difference in order
+ Expect(kernelArgs).To(BeEquivalentTo(args))
+ })
+
+ })
+})
+
+var _ = Describe("PerformanceProfileCreator: Test Helper cpuAccumulator", func() {
+ nodes1 := []*topology.Node{
+ {
+ ID: 0,
+ Cores: []*cpu.ProcessorCore{
+ {ID: 0, Index: 0, NumThreads: 2, LogicalProcessors: []int{0, 1}},
+ {ID: 1, Index: 1, NumThreads: 2, LogicalProcessors: []int{2, 3}},
+ },
+ },
+ {
+ ID: 1,
+ Cores: []*cpu.ProcessorCore{
+ {ID: 2, Index: 2, NumThreads: 2, LogicalProcessors: []int{4, 5}},
+ {ID: 3, Index: 3, NumThreads: 2, LogicalProcessors: []int{6, 7}},
+ },
+ },
+ }
+ topology1 := topology.Info{
+ Architecture: topology.ARCHITECTURE_NUMA,
+ Nodes: nodes1,
+ }
+
+ Context("Check if cpuAccumulator is working correctly", func() {
+ It("should accumulate allCores", func() {
+ acc := newCPUAccumulator()
+ for _, node := range topology1.Nodes {
+ acc.AddCores(allCores, node.Cores)
+ }
+ cores := acc.Result().ToSlice()
+ Expect(cores).Should(Equal([]int{0, 1, 2, 3, 4, 5, 6, 7}))
+ })
+ It("should accumulate cores up to the max", func() {
+ acc := newCPUAccumulator()
+ for _, node := range topology1.Nodes {
+ acc.AddCores(3, node.Cores)
+ }
+ cores := acc.Result().ToSlice()
+ Expect(cores).Should(Equal([]int{0, 1, 2}))
+ })
+
+ })
+})
diff --git a/pkg/pao/utils/testing/testing.go b/pkg/pao/utils/testing/testing.go
new file mode 100644
index 000000000..461ee6987
--- /dev/null
+++ b/pkg/pao/utils/testing/testing.go
@@ -0,0 +1,100 @@
+package testing
+
+import (
+ performancev2 "github.com/openshift/cluster-node-tuning-operator/pkg/apis/pao/v2"
+ mcov1 "github.com/openshift/machine-config-operator/pkg/apis/machineconfiguration.openshift.io/v1"
+
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/types"
+ "k8s.io/utils/pointer"
+)
+
+const (
+ // HugePageSize defines the huge page size used for tests
+ HugePageSize = performancev2.HugePageSize("1G")
+ // HugePagesCount defines the huge page count used for tests
+ HugePagesCount = 4
+ // IsolatedCPUs defines the isolated CPU set used for tests
+ IsolatedCPUs = performancev2.CPUSet("4-7")
+ // ReservedCPUs defines the reserved CPU set used for tests
+ ReservedCPUs = performancev2.CPUSet("0-3")
+ // SingleNUMAPolicy defines the topologyManager policy used for tests
+ SingleNUMAPolicy = "single-numa-node"
+
+ //MachineConfigLabelKey defines the MachineConfig label key of the test profile
+ MachineConfigLabelKey = "mcKey"
+ //MachineConfigLabelValue defines the MachineConfig label value of the test profile
+ MachineConfigLabelValue = "mcValue"
+ //MachineConfigPoolLabelKey defines the MachineConfigPool label key of the test profile
+ MachineConfigPoolLabelKey = "mcpKey"
+ //MachineConfigPoolLabelValue defines the MachineConfigPool label value of the test profile
+ MachineConfigPoolLabelValue = "mcpValue"
+)
+
+// NewPerformanceProfile returns new performance profile object that used for tests
+func NewPerformanceProfile(name string) *performancev2.PerformanceProfile {
+ size := HugePageSize
+ isolatedCPUs := IsolatedCPUs
+ reservedCPUs := ReservedCPUs
+ numaPolicy := SingleNUMAPolicy
+
+ return &performancev2.PerformanceProfile{
+ TypeMeta: metav1.TypeMeta{Kind: "PerformanceProfile"},
+ ObjectMeta: metav1.ObjectMeta{
+ Name: name,
+ UID: types.UID("11111111-1111-1111-1111-1111111111111"),
+ },
+ Spec: performancev2.PerformanceProfileSpec{
+ CPU: &performancev2.CPU{
+ Isolated: &isolatedCPUs,
+ Reserved: &reservedCPUs,
+ },
+ HugePages: &performancev2.HugePages{
+ DefaultHugePagesSize: &size,
+ Pages: []performancev2.HugePage{
+ {
+ Count: HugePagesCount,
+ Size: size,
+ },
+ },
+ },
+ RealTimeKernel: &performancev2.RealTimeKernel{
+ Enabled: pointer.BoolPtr(true),
+ },
+ NUMA: &performancev2.NUMA{
+ TopologyPolicy: &numaPolicy,
+ },
+ MachineConfigLabel: map[string]string{
+ MachineConfigLabelKey: MachineConfigLabelValue,
+ },
+ MachineConfigPoolSelector: map[string]string{
+ MachineConfigPoolLabelKey: MachineConfigPoolLabelValue,
+ },
+ NodeSelector: map[string]string{
+ "nodekey": "nodeValue",
+ },
+ },
+ }
+}
+
+// NewProfileMCP returns new MCP used for testing
+func NewProfileMCP() *mcov1.MachineConfigPool {
+ return &mcov1.MachineConfigPool{
+ TypeMeta: metav1.TypeMeta{Kind: "MachineConfigPool"},
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "test",
+ UID: "11111111-1111-1111-1111-1111111111111",
+ Labels: map[string]string{
+ MachineConfigPoolLabelKey: MachineConfigPoolLabelValue,
+ },
+ },
+ Spec: mcov1.MachineConfigPoolSpec{
+ NodeSelector: &metav1.LabelSelector{
+ MatchLabels: map[string]string{"nodekey": "nodeValue"},
+ },
+ MachineConfigSelector: &metav1.LabelSelector{
+ MatchLabels: map[string]string{MachineConfigLabelKey: MachineConfigLabelValue},
+ },
+ },
+ }
+}
diff --git a/test/e2e/pao/cluster-setup/base/performance/kustomization.yaml b/test/e2e/pao/cluster-setup/base/performance/kustomization.yaml
new file mode 100644
index 000000000..75d1dde24
--- /dev/null
+++ b/test/e2e/pao/cluster-setup/base/performance/kustomization.yaml
@@ -0,0 +1,6 @@
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+
+resources:
+ - machine_config_pool.yaml
+
diff --git a/test/e2e/pao/cluster-setup/base/performance/machine_config_pool.yaml b/test/e2e/pao/cluster-setup/base/performance/machine_config_pool.yaml
new file mode 100644
index 000000000..03a6e18c4
--- /dev/null
+++ b/test/e2e/pao/cluster-setup/base/performance/machine_config_pool.yaml
@@ -0,0 +1,17 @@
+apiVersion: machineconfiguration.openshift.io/v1
+kind: MachineConfigPool
+metadata:
+ name: worker-cnf
+ namespace: openshift-machine-config-operator
+ labels:
+ machineconfiguration.openshift.io/role: worker-cnf
+spec:
+ paused: true
+ machineConfigSelector:
+ matchExpressions:
+ - key: machineconfiguration.openshift.io/role
+ operator: In
+ values: [worker,worker-cnf]
+ nodeSelector:
+ matchLabels:
+ node-role.kubernetes.io/worker-cnf: ""
diff --git a/test/e2e/pao/cluster-setup/ci-cluster/performance/kustomization.yaml b/test/e2e/pao/cluster-setup/ci-cluster/performance/kustomization.yaml
new file mode 100644
index 000000000..87722061e
--- /dev/null
+++ b/test/e2e/pao/cluster-setup/ci-cluster/performance/kustomization.yaml
@@ -0,0 +1,6 @@
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+
+bases:
+ - ../../base/performance
+
diff --git a/test/e2e/pao/cluster-setup/ci-upgrade-test-cluster/performance/kustomization.yaml b/test/e2e/pao/cluster-setup/ci-upgrade-test-cluster/performance/kustomization.yaml
new file mode 100644
index 000000000..beac1882f
--- /dev/null
+++ b/test/e2e/pao/cluster-setup/ci-upgrade-test-cluster/performance/kustomization.yaml
@@ -0,0 +1,9 @@
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+
+bases:
+ - ../../mcp-only-cluster/performance
+
+resources:
+ - performance_profile.yaml
+
diff --git a/test/e2e/pao/cluster-setup/ci-upgrade-test-cluster/performance/performance_profile.yaml b/test/e2e/pao/cluster-setup/ci-upgrade-test-cluster/performance/performance_profile.yaml
new file mode 100644
index 000000000..36d890ef5
--- /dev/null
+++ b/test/e2e/pao/cluster-setup/ci-upgrade-test-cluster/performance/performance_profile.yaml
@@ -0,0 +1,27 @@
+apiVersion: performance.openshift.io/v1alpha1
+kind: PerformanceProfile
+metadata:
+ name: ci-upgrade-test
+spec:
+ additionalKernelArgs:
+ - "nmi_watchdog=0"
+ - "audit=0"
+ - "mce=off"
+ - "processor.max_cstate=1"
+ - "idle=poll"
+ - "intel_idle.max_cstate=0"
+ cpu:
+ isolated: "1-3"
+ reserved: "0"
+ hugepages:
+ defaultHugepagesSize: "1G"
+ pages:
+ - size: "1G"
+ count: 1
+ node: 0
+ realTimeKernel:
+ enabled: true
+ numa:
+ topologyPolicy: "single-numa-node"
+ nodeSelector:
+ node-role.kubernetes.io/worker-cnf: ""
diff --git a/test/e2e/pao/cluster-setup/manual-cluster/performance/kustomization.yaml b/test/e2e/pao/cluster-setup/manual-cluster/performance/kustomization.yaml
new file mode 100644
index 000000000..69a20cd13
--- /dev/null
+++ b/test/e2e/pao/cluster-setup/manual-cluster/performance/kustomization.yaml
@@ -0,0 +1,8 @@
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+
+bases:
+ - ../../base/performance
+
+resources:
+ - performance_profile.yaml
diff --git a/test/e2e/pao/cluster-setup/manual-cluster/performance/performance_profile.yaml b/test/e2e/pao/cluster-setup/manual-cluster/performance/performance_profile.yaml
new file mode 100644
index 000000000..ed6c4e47e
--- /dev/null
+++ b/test/e2e/pao/cluster-setup/manual-cluster/performance/performance_profile.yaml
@@ -0,0 +1,29 @@
+apiVersion: performance.openshift.io/v2
+kind: PerformanceProfile
+metadata:
+ name: manual
+spec:
+ additionalKernelArgs:
+ - "nmi_watchdog=0"
+ - "audit=0"
+ - "mce=off"
+ - "processor.max_cstate=1"
+ - "idle=poll"
+ - "intel_idle.max_cstate=0"
+ cpu:
+ isolated: "1-3"
+ reserved: "0"
+ hugepages:
+ defaultHugepagesSize: "1G"
+ pages:
+ - size: "1G"
+ count: 1
+ node: 0
+ - size: "2M"
+ count: 128
+ realTimeKernel:
+ enabled: true
+ numa:
+ topologyPolicy: "single-numa-node"
+ nodeSelector:
+ node-role.kubernetes.io/worker-cnf: ""
diff --git a/test/e2e/pao/cluster-setup/mcp-only-cluster/performance/kustomization.yaml b/test/e2e/pao/cluster-setup/mcp-only-cluster/performance/kustomization.yaml
new file mode 100644
index 000000000..b07d9d059
--- /dev/null
+++ b/test/e2e/pao/cluster-setup/mcp-only-cluster/performance/kustomization.yaml
@@ -0,0 +1,5 @@
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+
+resources:
+ - machine_config_pool.yaml
diff --git a/test/e2e/pao/cluster-setup/mcp-only-cluster/performance/machine_config_pool.yaml b/test/e2e/pao/cluster-setup/mcp-only-cluster/performance/machine_config_pool.yaml
new file mode 100644
index 000000000..03a6e18c4
--- /dev/null
+++ b/test/e2e/pao/cluster-setup/mcp-only-cluster/performance/machine_config_pool.yaml
@@ -0,0 +1,17 @@
+apiVersion: machineconfiguration.openshift.io/v1
+kind: MachineConfigPool
+metadata:
+ name: worker-cnf
+ namespace: openshift-machine-config-operator
+ labels:
+ machineconfiguration.openshift.io/role: worker-cnf
+spec:
+ paused: true
+ machineConfigSelector:
+ matchExpressions:
+ - key: machineconfiguration.openshift.io/role
+ operator: In
+ values: [worker,worker-cnf]
+ nodeSelector:
+ matchLabels:
+ node-role.kubernetes.io/worker-cnf: ""
diff --git a/test/e2e/pao/functests-performance-profile-creator/1_performance-profile_creator/performance_profile_creator_suite_test.go b/test/e2e/pao/functests-performance-profile-creator/1_performance-profile_creator/performance_profile_creator_suite_test.go
new file mode 100644
index 000000000..314d823d3
--- /dev/null
+++ b/test/e2e/pao/functests-performance-profile-creator/1_performance-profile_creator/performance_profile_creator_suite_test.go
@@ -0,0 +1,22 @@
+package __performance_profile_creator_test
+
+import (
+ "testing"
+
+ "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/junit"
+ ginkgo_reporters "kubevirt.io/qe-tools/pkg/ginkgo-reporters"
+
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+)
+
+func TestPerformanceProfileCreator(t *testing.T) {
+ RegisterFailHandler(Fail)
+
+ rr := []Reporter{}
+ if ginkgo_reporters.Polarion.Run {
+ rr = append(rr, &ginkgo_reporters.Polarion)
+ }
+ rr = append(rr, junit.NewJUnitReporter("performance_profile_creator"))
+ RunSpecsWithDefaultAndCustomReporters(t, "Performance Profile Creator tests", rr)
+}
diff --git a/test/e2e/pao/functests-performance-profile-creator/1_performance-profile_creator/ppc.go b/test/e2e/pao/functests-performance-profile-creator/1_performance-profile_creator/ppc.go
new file mode 100644
index 000000000..1de524feb
--- /dev/null
+++ b/test/e2e/pao/functests-performance-profile-creator/1_performance-profile_creator/ppc.go
@@ -0,0 +1,263 @@
+package __performance_profile_creator
+
+import (
+ "encoding/json"
+ "fmt"
+ "io/ioutil"
+ "path"
+ "path/filepath"
+ "strings"
+
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+
+ "github.com/ghodss/yaml"
+
+ "github.com/openshift/cluster-node-tuning-operator/cmd/performance-profile-creator/cmd"
+ performancev2 "github.com/openshift/cluster-node-tuning-operator/pkg/apis/pao/v2"
+ testutils "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils"
+)
+
+const (
+ mustGatherPath = "../../test/e2e/pao/testdata/must-gather"
+ expectedProfilesPath = "../../test/e2e/pao/testdata/ppc-expected-profiles"
+ expectedInfoPath = "../../test/e2e/pao/testdata/ppc-expected-info"
+ ppcPath = "../../build/_output/bin/performance-profile-creator"
+)
+
+var mustGatherFullPath = path.Join(mustGatherPath, "must-gather.bare-metal")
+
+var defaultArgs = []string{
+ "--disable-ht=false",
+ "--mcp-name=worker-cnf",
+ "--rt-kernel=true",
+ "--user-level-networking=false",
+ "--profile-name=Performance",
+ fmt.Sprintf("--must-gather-dir-path=%s", mustGatherFullPath),
+}
+
+var _ = Describe("[rfe_id:OCP-38968][ppc] Performance Profile Creator", func() {
+ It("[test_id:OCP-40940] performance profile creator regression tests", func() {
+ Expect(ppcPath).To(BeAnExistingFile())
+
+ // directory base name => full path
+ mustGatherDirs := getMustGatherDirs(mustGatherPath)
+ // full profile path => arguments the profile was created with
+ expectedProfiles := getExpectedProfiles(expectedProfilesPath, mustGatherDirs)
+
+ for expectedProfilePath, args := range expectedProfiles {
+ cmdArgs := []string{
+ fmt.Sprintf("--disable-ht=%v", args.DisableHT),
+ fmt.Sprintf("--mcp-name=%s", args.MCPName),
+ fmt.Sprintf("--must-gather-dir-path=%s", args.MustGatherDirPath),
+ fmt.Sprintf("--reserved-cpu-count=%d", args.ReservedCPUCount),
+ fmt.Sprintf("--rt-kernel=%v", args.RTKernel),
+ fmt.Sprintf("--split-reserved-cpus-across-numa=%v", args.SplitReservedCPUsAcrossNUMA),
+ }
+
+ if args.UserLevelNetworking != nil {
+ cmdArgs = append(cmdArgs, fmt.Sprintf("--user-level-networking=%v", *args.UserLevelNetworking))
+ }
+
+ // do not pass empty strings for optional args
+ if len(args.ProfileName) > 0 {
+ cmdArgs = append(cmdArgs, fmt.Sprintf("--profile-name=%s", args.ProfileName))
+ }
+ if len(args.PowerConsumptionMode) > 0 {
+ cmdArgs = append(cmdArgs, fmt.Sprintf("--power-consumption-mode=%s", args.PowerConsumptionMode))
+ }
+ if len(args.TMPolicy) > 0 {
+ cmdArgs = append(cmdArgs, fmt.Sprintf("--topology-manager-policy=%s", args.TMPolicy))
+ }
+
+ out, err := testutils.ExecAndLogCommand(ppcPath, cmdArgs...)
+ Expect(err).To(BeNil(), "failed to run ppc for '%s': %v", expectedProfilePath, err)
+
+ profile := &performancev2.PerformanceProfile{}
+ err = yaml.Unmarshal(out, profile)
+ Expect(err).To(BeNil(), "failed to unmarshal the output yaml for '%s': %v", expectedProfilePath, err)
+
+ bytes, err := ioutil.ReadFile(expectedProfilePath)
+ Expect(err).To(BeNil(), "failed to read the expected yaml for '%s': %v", expectedProfilePath, err)
+
+ expectedProfile := &performancev2.PerformanceProfile{}
+ err = yaml.Unmarshal(bytes, expectedProfile)
+ Expect(err).To(BeNil(), "failed to unmarshal the expected yaml for '%s': %v", expectedProfilePath, err)
+
+ Expect(profile).To(BeEquivalentTo(expectedProfile), "regression test failed for '%s' case", expectedProfilePath)
+ }
+ })
+
+ It("should describe the cluster from must-gather data in info mode", func() {
+ Expect(ppcPath).To(BeAnExistingFile())
+
+ // directory base name => full path
+ mustGatherDirs := getMustGatherDirs(mustGatherPath)
+
+ for name, path := range mustGatherDirs {
+ cmdArgs := []string{
+ "--info=json",
+ fmt.Sprintf("--must-gather-dir-path=%s", path),
+ }
+
+ out, err := testutils.ExecAndLogCommand(ppcPath, cmdArgs...)
+ Expect(err).To(BeNil(), "failed to run ppc for %q: %v", path, err)
+
+ var cInfo cmd.ClusterInfo
+ err = json.Unmarshal(out, &cInfo)
+ Expect(err).To(BeNil(), "failed to unmarshal the output json for %q: %v", path, err)
+ expectedClusterInfoPath := filepath.Join(expectedInfoPath, fmt.Sprintf("%s.json", name))
+ bytes, err := ioutil.ReadFile(expectedClusterInfoPath)
+ Expect(err).To(BeNil(), "failed to read the expected json for %q: %v", expectedClusterInfoPath, err)
+
+ var expectedInfo cmd.ClusterInfo
+ err = json.Unmarshal(bytes, &expectedInfo)
+ Expect(err).To(BeNil(), "failed to unmarshal the expected json for '%s': %v", expectedClusterInfoPath, err)
+
+ expectedInfo.Sort()
+
+ Expect(cInfo).To(BeEquivalentTo(expectedInfo), "regression test failed for '%s' case", expectedClusterInfoPath)
+ }
+ })
+ Context("Systems with Hyperthreading enabled", func() {
+ It("[test_id:41419] Verify PPC script fails when reserved cpu count is 2 and requires to split across numa nodes", func() {
+ Expect(ppcPath).To(BeAnExistingFile())
+ Expect(mustGatherFullPath).To(BeADirectory())
+ ppcArgs := []string{
+ "--reserved-cpu-count=2",
+ "--split-reserved-cpus-across-numa=true",
+ }
+ cmdArgs := append(defaultArgs, ppcArgs...)
+ _, errData, _ := testutils.ExecAndLogCommandWithStderr(ppcPath, cmdArgs...)
+ ppcErrorString := errorStringParser(errData)
+ Expect(ppcErrorString).To(ContainSubstring("can't allocate odd number of CPUs from a NUMA Node"))
+ })
+
+ It("[test_id:41405] Verify PPC fails when splitting of reserved cpus and single numa-node policy is specified", func() {
+ Expect(ppcPath).To(BeAnExistingFile())
+ Expect(mustGatherFullPath).To(BeADirectory())
+ ppcArgs := []string{
+ fmt.Sprintf("--reserved-cpu-count=%d", 2),
+ fmt.Sprintf("--split-reserved-cpus-across-numa=%t", true),
+ fmt.Sprintf("--topology-manager-policy=%s", "single-numa-node"),
+ }
+ cmdArgs := append(defaultArgs, ppcArgs...)
+ _, errData, _ := testutils.ExecAndLogCommandWithStderr(ppcPath, cmdArgs...)
+ ppcErrorString := errorStringParser(errData)
+ Expect(ppcErrorString).To(ContainSubstring("not appropriate to split reserved CPUs in case of topology-manager-policy: single-numa-node"))
+ })
+
+ It("[test_id:41420] Verify PPC fails when reserved cpu count is more than available cpus", func() {
+ Expect(ppcPath).To(BeAnExistingFile())
+ Expect(mustGatherFullPath).To(BeADirectory())
+ ppcArgs := []string{
+ fmt.Sprintf("--reserved-cpu-count=%d", 100),
+ }
+ cmdArgs := append(defaultArgs, ppcArgs...)
+ _, errData, _ := testutils.ExecAndLogCommandWithStderr(ppcPath, cmdArgs...)
+ ppcErrorString := errorStringParser(errData)
+ Expect(ppcErrorString).To(ContainSubstring("please specify the reserved CPU count in the range"))
+ })
+
+ It("[test_id:41421] Verify PPC fails when odd number of reserved cpus are specified", func() {
+ Expect(ppcPath).To(BeAnExistingFile())
+ Expect(mustGatherFullPath).To(BeADirectory())
+ ppcArgs := []string{
+ fmt.Sprintf("--reserved-cpu-count=%d", 5),
+ }
+ cmdArgs := append(defaultArgs, ppcArgs...)
+ _, errData, _ := testutils.ExecAndLogCommandWithStderr(ppcPath, cmdArgs...)
+ ppcErrorString := errorStringParser(errData)
+ Expect(ppcErrorString).To(ContainSubstring("can't allocate odd number of CPUs from a NUMA Node"))
+ })
+ })
+ Context("Systems with Hyperthreading disabled", func() {
+ It("[test_id:42035] verify PPC fails when splitting of reserved cpus and single numa-node policy is specified", func() {
+ Expect(ppcPath).To(BeAnExistingFile())
+ Expect(mustGatherFullPath).To(BeADirectory())
+ ppcArgs := []string{
+ fmt.Sprintf("--reserved-cpu-count=%d", 2),
+ fmt.Sprintf("--split-reserved-cpus-across-numa=%t", true),
+ fmt.Sprintf("--topology-manager-policy=%s", "single-numa-node"),
+ }
+ cmdArgs := append(defaultArgs, ppcArgs...)
+ _, errData, _ := testutils.ExecAndLogCommandWithStderr(ppcPath, cmdArgs...)
+ ppcErrorString := errorStringParser(errData)
+ Expect(ppcErrorString).To(ContainSubstring("not appropriate to split reserved CPUs in case of topology-manager-policy: single-numa-node"))
+ })
+ })
+})
+
+func getMustGatherDirs(mustGatherPath string) map[string]string {
+ Expect(mustGatherPath).To(BeADirectory())
+
+ mustGatherDirs := make(map[string]string)
+ mustGatherPathContent, err := ioutil.ReadDir(mustGatherPath)
+ Expect(err).To(BeNil(), fmt.Errorf("can't list '%s' files: %v", mustGatherPath, err))
+
+ for _, file := range mustGatherPathContent {
+ fullFilePath := filepath.Join(mustGatherPath, file.Name())
+ Expect(fullFilePath).To(BeADirectory())
+
+ mustGatherDirs[file.Name()] = fullFilePath
+ }
+
+ return mustGatherDirs
+}
+
+func getExpectedProfiles(expectedProfilesPath string, mustGatherDirs map[string]string) map[string]cmd.ProfileCreatorArgs {
+ Expect(expectedProfilesPath).To(BeADirectory())
+
+ expectedProfilesPathContent, err := ioutil.ReadDir(expectedProfilesPath)
+ Expect(err).To(BeNil(), fmt.Errorf("can't list '%s' files: %v", expectedProfilesPath, err))
+
+ // read ppc params files
+ ppcParams := make(map[string]cmd.ProfileCreatorArgs)
+ for _, file := range expectedProfilesPathContent {
+ if filepath.Ext(file.Name()) != ".json" {
+ continue
+ }
+
+ fullFilePath := filepath.Join(expectedProfilesPath, file.Name())
+ bytes, err := ioutil.ReadFile(fullFilePath)
+ Expect(err).To(BeNil(), "failed to read the ppc params file for '%s': %v", fullFilePath, err)
+
+ var ppcArgs cmd.ProfileCreatorArgs
+ err = json.Unmarshal(bytes, &ppcArgs)
+ Expect(err).To(BeNil(), "failed to decode the ppc params file for '%s': %v", fullFilePath, err)
+
+ Expect(ppcArgs.MustGatherDirPath).ToNot(BeEmpty(), "must-gather arg missing for '%s'", fullFilePath)
+ ppcArgs.MustGatherDirPath = path.Join(mustGatherPath, ppcArgs.MustGatherDirPath)
+ Expect(ppcArgs.MustGatherDirPath).To(BeADirectory())
+
+ profileKey := strings.TrimSuffix(file.Name(), filepath.Ext(file.Name()))
+ ppcParams[profileKey] = ppcArgs
+ }
+
+ // pickup profile files
+ expectedProfiles := make(map[string]cmd.ProfileCreatorArgs)
+ for _, file := range expectedProfilesPathContent {
+ if filepath.Ext(file.Name()) != ".yaml" {
+ continue
+ }
+ profileKey := strings.TrimSuffix(file.Name(), filepath.Ext(file.Name()))
+ ppcArgs, ok := ppcParams[profileKey]
+ Expect(ok).To(BeTrue(), "can't find ppc params for the expected profile: '%s'", file.Name())
+
+ fullFilePath := filepath.Join(expectedProfilesPath, file.Name())
+ expectedProfiles[fullFilePath] = ppcArgs
+ }
+
+ return expectedProfiles
+}
+
+// PPC stderr parser
+func errorStringParser(errData []byte) string {
+ stdError := string(errData)
+ for _, line := range strings.Split(stdError, "\n") {
+ if strings.Contains(line, "Error: ") {
+ return line
+ }
+ }
+ return ""
+}
diff --git a/test/e2e/pao/functests-render-command/1_render_command/render_suite_test.go b/test/e2e/pao/functests-render-command/1_render_command/render_suite_test.go
new file mode 100644
index 000000000..11690a8f0
--- /dev/null
+++ b/test/e2e/pao/functests-render-command/1_render_command/render_suite_test.go
@@ -0,0 +1,59 @@
+package __render_command_test
+
+import (
+ "fmt"
+ "path/filepath"
+ "runtime"
+ "testing"
+
+ "github.com/ghodss/yaml"
+ "github.com/google/go-cmp/cmp"
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+ "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/junit"
+ ginkgo_reporters "kubevirt.io/qe-tools/pkg/ginkgo-reporters"
+)
+
+var (
+ testDir string
+ workspaceDir string
+ binPath string
+)
+
+func TestRenderCmd(t *testing.T) {
+ RegisterFailHandler(Fail)
+
+ rr := []Reporter{}
+ if ginkgo_reporters.Polarion.Run {
+ rr = append(rr, &ginkgo_reporters.Polarion)
+ }
+ rr = append(rr, junit.NewJUnitReporter("render_manifests"))
+ RunSpecsWithDefaultAndCustomReporters(t, "Performance Operator render tests", rr)
+}
+
+var _ = BeforeSuite(func() {
+ _, file, _, ok := runtime.Caller(0)
+ if !ok {
+ Fail("Cannot retrieve test directory")
+ }
+
+ testDir = filepath.Dir(file)
+ workspaceDir = filepath.Clean(filepath.Join(testDir, "..", ".."))
+ binPath = filepath.Clean(filepath.Join(workspaceDir, "build", "_output", "bin"))
+ fmt.Fprintf(GinkgoWriter, "using binary at %q\n", binPath)
+})
+
+func getFilesDiff(wantFile, gotFile []byte) (string, error) {
+ var wantObj interface{}
+ var gotObj interface{}
+
+ if err := yaml.Unmarshal(wantFile, &wantObj); err != nil {
+ return "", fmt.Errorf("failed to unmarshal data for 'want':%s", err)
+ }
+
+ if err := yaml.Unmarshal(gotFile, &gotObj); err != nil {
+ return "", fmt.Errorf("failed to unmarshal data for 'got':%s", err)
+ }
+
+ return cmp.Diff(wantObj, gotObj), nil
+}
diff --git a/test/e2e/pao/functests-render-command/1_render_command/render_test.go b/test/e2e/pao/functests-render-command/1_render_command/render_test.go
new file mode 100644
index 000000000..3928f3926
--- /dev/null
+++ b/test/e2e/pao/functests-render-command/1_render_command/render_test.go
@@ -0,0 +1,104 @@
+package __render_command_test
+
+import (
+ "fmt"
+ "io/ioutil"
+ "os"
+ "os/exec"
+ "path/filepath"
+
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+)
+
+var (
+ assetsOutDir string
+ assetsInDir string
+ ppInFiles string
+ testDataPath string
+)
+
+var _ = Describe("render command e2e test", func() {
+
+ BeforeEach(func() {
+ assetsOutDir = createTempAssetsDir()
+ assetsInDir = filepath.Join(workspaceDir, "build", "assets")
+ ppInFiles = filepath.Join(workspaceDir, "test", "e2e", "pao", "cluster-setup", "manual-cluster", "performance", "performance_profile.yaml")
+ testDataPath = filepath.Join(workspaceDir, "test", "e2e", "pao", "testdata")
+ })
+
+ Context("With a single performance-profile", func() {
+ It("Gets cli args and produces the expected components to output directory", func() {
+
+ cmdline := []string{
+ filepath.Join(binPath, "performance-addon-operators"),
+ "render",
+ "--performance-profile-input-files", ppInFiles,
+ "--asset-input-dir", assetsInDir,
+ "--asset-output-dir", assetsOutDir,
+ }
+ fmt.Fprintf(GinkgoWriter, "running: %v\n", cmdline)
+
+ cmd := exec.Command(cmdline[0], cmdline[1:]...)
+ runAndCompare(cmd)
+
+ })
+
+ It("Gets environment variables and produces the expected components to output directory", func() {
+ cmdline := []string{
+ filepath.Join(binPath, "performance-addon-operators"),
+ "render",
+ }
+ fmt.Fprintf(GinkgoWriter, "running: %v\n", cmdline)
+
+ cmd := exec.Command(cmdline[0], cmdline[1:]...)
+ cmd.Env = append(cmd.Env,
+ fmt.Sprintf("PERFORMANCE_PROFILE_INPUT_FILES=%s", ppInFiles),
+ fmt.Sprintf("ASSET_INPUT_DIR=%s", assetsInDir),
+ fmt.Sprintf("ASSET_OUTPUT_DIR=%s", assetsOutDir),
+ )
+ runAndCompare(cmd)
+ })
+ })
+
+ AfterEach(func() {
+ cleanArtifacts()
+ })
+
+})
+
+func createTempAssetsDir() string {
+ assets, err := ioutil.TempDir("", "assets")
+ Expect(err).ToNot(HaveOccurred())
+ fmt.Printf("assets output dir at: %q\n", assets)
+ return assets
+}
+
+func cleanArtifacts() {
+ os.RemoveAll(assetsOutDir)
+}
+
+func runAndCompare(cmd *exec.Cmd) {
+ _, err := cmd.Output()
+ Expect(err).ToNot(HaveOccurred())
+
+ outputAssetsFiles, err := ioutil.ReadDir(assetsOutDir)
+ Expect(err).ToNot(HaveOccurred())
+
+ refPath := filepath.Join(testDataPath, "render-expected-output")
+ fmt.Fprintf(GinkgoWriter, "reference data at: %q\n", refPath)
+
+ for _, f := range outputAssetsFiles {
+ refData, err := ioutil.ReadFile(filepath.Join(refPath, f.Name()))
+ Expect(err).ToNot(HaveOccurred())
+
+ data, err := ioutil.ReadFile(filepath.Join(assetsOutDir, f.Name()))
+ Expect(err).ToNot(HaveOccurred())
+
+ diff, err := getFilesDiff(refData, data)
+ Expect(err).ToNot(HaveOccurred())
+ Expect(diff).To(BeZero(), "rendered %s file is not identical to its reference file; diff: %v",
+ f.Name(),
+ diff)
+ }
+}
diff --git a/test/e2e/pao/functests/0_config/config.go b/test/e2e/pao/functests/0_config/config.go
new file mode 100644
index 000000000..8c80fb370
--- /dev/null
+++ b/test/e2e/pao/functests/0_config/config.go
@@ -0,0 +1,197 @@
+package __performance_config
+
+import (
+ "context"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "time"
+
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+
+ corev1 "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/api/errors"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/serializer"
+ "k8s.io/apimachinery/pkg/types"
+ "k8s.io/utils/pointer"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+
+ mcv1 "github.com/openshift/machine-config-operator/pkg/apis/machineconfiguration.openshift.io/v1"
+
+ performancev2 "github.com/openshift/cluster-node-tuning-operator/pkg/apis/pao/v2"
+ "github.com/openshift/cluster-node-tuning-operator/pkg/pao/controller/performanceprofile/components"
+ "github.com/openshift/cluster-node-tuning-operator/pkg/pao/controller/performanceprofile/components/profile"
+ "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils"
+ testutils "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils"
+ testclient "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/client"
+ "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/cluster"
+ "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/discovery"
+ testlog "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/log"
+ "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/mcps"
+ "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/profiles"
+)
+
+var RunningOnSingleNode bool
+
+var _ = Describe("[performance][config] Performance configuration", func() {
+
+ testutils.BeforeAll(func() {
+ isSNO, err := cluster.IsSingleNode()
+ Expect(err).ToNot(HaveOccurred())
+ RunningOnSingleNode = isSNO
+ })
+
+ It("Should successfully deploy the performance profile", func() {
+
+ performanceProfile := testProfile()
+ profileAlreadyExists := false
+
+ performanceManifest, foundOverride := os.LookupEnv("PERFORMANCE_PROFILE_MANIFEST_OVERRIDE")
+ var err error
+ if foundOverride {
+ performanceProfile, err = externalPerformanceProfile(performanceManifest)
+ Expect(err).ToNot(HaveOccurred(), "Failed overriding performance profile", performanceManifest)
+ testlog.Warningf("Consuming performance profile from %s", performanceManifest)
+ }
+ if discovery.Enabled() {
+ performanceProfile, err = profiles.GetByNodeLabels(testutils.NodeSelectorLabels)
+ Expect(err).ToNot(HaveOccurred(), "Failed finding a performance profile in discovery mode using selector %v", testutils.NodeSelectorLabels)
+ testlog.Info("Discovery mode: consuming a deployed performance profile from the cluster")
+ profileAlreadyExists = true
+ }
+
+ By("Getting MCP for profile")
+ mcpLabel := profile.GetMachineConfigLabel(performanceProfile)
+ key, value := components.GetFirstKeyAndValue(mcpLabel)
+ mcpsByLabel, err := mcps.GetByLabel(key, value)
+ Expect(err).ToNot(HaveOccurred(), "Failed getting MCP by label key %v value %v", key, value)
+ Expect(len(mcpsByLabel)).To(Equal(1), fmt.Sprintf("Unexpected number of MCPs found: %v", len(mcpsByLabel)))
+ performanceMCP := &mcpsByLabel[0]
+
+ if !discovery.Enabled() {
+ By("Creating the PerformanceProfile")
+ // this might fail while the operator is still being deployed and the CRD does not exist yet
+ Eventually(func() error {
+ err := testclient.Client.Create(context.TODO(), performanceProfile)
+ if errors.IsAlreadyExists(err) {
+ testlog.Warning(fmt.Sprintf("A PerformanceProfile with name %s already exists! If created externally, tests might have unexpected behaviour", performanceProfile.Name))
+ profileAlreadyExists = true
+ return nil
+ }
+ return err
+ }, cluster.ComputeTestTimeout(15*time.Minute, RunningOnSingleNode), 15*time.Second).ShouldNot(HaveOccurred(), "Failed creating the performance profile")
+ }
+
+ if !performanceMCP.Spec.Paused {
+ By("MCP is already unpaused")
+ } else {
+ By("Unpausing the MCP")
+ Expect(testclient.Client.Patch(context.TODO(), performanceMCP,
+ client.RawPatch(
+ types.JSONPatchType,
+ []byte(fmt.Sprintf(`[{ "op": "replace", "path": "/spec/paused", "value": %v }]`, false)),
+ ),
+ )).ToNot(HaveOccurred(), "Failed unpausing MCP")
+ }
+
+ By("Waiting for the MCP to pick the PerformanceProfile's MC")
+ mcps.WaitForProfilePickedUp(performanceMCP.Name, performanceProfile)
+
+ // If the profile is already there, it's likely to have been through the updating phase, so we only
+ // wait for updated.
+ if !profileAlreadyExists {
+ By("Waiting for MCP starting to update")
+ mcps.WaitForCondition(performanceMCP.Name, mcv1.MachineConfigPoolUpdating, corev1.ConditionTrue)
+ }
+ By("Waiting for MCP being updated")
+ mcps.WaitForCondition(performanceMCP.Name, mcv1.MachineConfigPoolUpdated, corev1.ConditionTrue)
+
+ })
+
+})
+
+func externalPerformanceProfile(performanceManifest string) (*performancev2.PerformanceProfile, error) {
+ performanceScheme := runtime.NewScheme()
+ performancev2.AddToScheme(performanceScheme)
+
+ decode := serializer.NewCodecFactory(performanceScheme).UniversalDeserializer().Decode
+ manifest, err := ioutil.ReadFile(performanceManifest)
+ if err != nil {
+ return nil, fmt.Errorf("Failed to read %s file", performanceManifest)
+ }
+ obj, _, err := decode([]byte(manifest), nil, nil)
+ if err != nil {
+ return nil, fmt.Errorf("Failed to read the manifest file %s", performanceManifest)
+ }
+ profile, ok := obj.(*performancev2.PerformanceProfile)
+ if !ok {
+ return nil, fmt.Errorf("Failed to convert manifest file to profile")
+ }
+ return profile, nil
+}
+
+func testProfile() *performancev2.PerformanceProfile {
+ reserved := performancev2.CPUSet("0")
+ isolated := performancev2.CPUSet("1-3")
+ hugePagesSize := performancev2.HugePageSize("1G")
+
+ profile := &performancev2.PerformanceProfile{
+ TypeMeta: metav1.TypeMeta{
+ Kind: "PerformanceProfile",
+ APIVersion: performancev2.GroupVersion.String(),
+ },
+ ObjectMeta: metav1.ObjectMeta{
+ Name: utils.PerformanceProfileName,
+ },
+ Spec: performancev2.PerformanceProfileSpec{
+ CPU: &performancev2.CPU{
+ Reserved: &reserved,
+ Isolated: &isolated,
+ },
+ HugePages: &performancev2.HugePages{
+ DefaultHugePagesSize: &hugePagesSize,
+ Pages: []performancev2.HugePage{
+ {
+ Size: "1G",
+ Count: 1,
+ Node: pointer.Int32Ptr(0),
+ },
+ {
+ Size: "2M",
+ Count: 128,
+ },
+ },
+ },
+ NodeSelector: testutils.NodeSelectorLabels,
+ RealTimeKernel: &performancev2.RealTimeKernel{
+ Enabled: pointer.BoolPtr(true),
+ },
+ AdditionalKernelArgs: []string{
+ "nmi_watchdog=0",
+ "audit=0",
+ "mce=off",
+ "processor.max_cstate=1",
+ "idle=poll",
+ "intel_idle.max_cstate=0",
+ },
+ NUMA: &performancev2.NUMA{
+ TopologyPolicy: pointer.StringPtr("single-numa-node"),
+ },
+ Net: &performancev2.Net{
+ UserLevelNetworking: pointer.BoolPtr(true),
+ },
+ },
+ }
+ // If the machineConfigPool is master, the automatic selector from PAO won't work
+ // since the machineconfiguration.openshift.io/role label is not applied to the
+ // master pool, hence we put an explicit selector here.
+ if utils.RoleWorkerCNF == "master" {
+ profile.Spec.MachineConfigPoolSelector = map[string]string{
+ "pools.operator.machineconfiguration.openshift.io/master": "",
+ }
+ }
+ return profile
+}
diff --git a/test/e2e/pao/functests/0_config/test_suite_performance_config_test.go b/test/e2e/pao/functests/0_config/test_suite_performance_config_test.go
new file mode 100644
index 000000000..2817d296d
--- /dev/null
+++ b/test/e2e/pao/functests/0_config/test_suite_performance_config_test.go
@@ -0,0 +1,31 @@
+//go:build !unittests
+// +build !unittests
+
+package __performance_config_test
+
+import (
+ "testing"
+
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+ "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/junit"
+
+ testclient "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/client"
+ ginkgo_reporters "kubevirt.io/qe-tools/pkg/ginkgo-reporters"
+)
+
+func TestPerformanceConfig(t *testing.T) {
+ RegisterFailHandler(Fail)
+
+ rr := []Reporter{}
+ if ginkgo_reporters.Polarion.Run {
+ rr = append(rr, &ginkgo_reporters.Polarion)
+ }
+ rr = append(rr, junit.NewJUnitReporter("performance_config"))
+ RunSpecsWithDefaultAndCustomReporters(t, "Performance Addon Operator configuration", rr)
+}
+
+var _ = BeforeSuite(func() {
+ Expect(testclient.ClientsEnabled).To(BeTrue())
+
+})
diff --git a/test/e2e/pao/functests/1_performance/cpu_management.go b/test/e2e/pao/functests/1_performance/cpu_management.go
new file mode 100644
index 000000000..cb3427d9f
--- /dev/null
+++ b/test/e2e/pao/functests/1_performance/cpu_management.go
@@ -0,0 +1,682 @@
+package __performance
+
+import (
+ "context"
+ "fmt"
+ "regexp"
+ "sort"
+ "strconv"
+ "strings"
+ "time"
+
+ corev1 "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/api/errors"
+ "k8s.io/apimachinery/pkg/api/resource"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/kubernetes/pkg/kubelet/cm/cpuset"
+ "k8s.io/utils/pointer"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+
+ . "github.com/onsi/ginkgo"
+ "github.com/onsi/ginkgo/extensions/table"
+ . "github.com/onsi/gomega"
+
+ performancev2 "github.com/openshift/cluster-node-tuning-operator/pkg/apis/pao/v2"
+ "github.com/openshift/cluster-node-tuning-operator/pkg/pao/controller/performanceprofile/components"
+ testutils "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils"
+ testclient "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/client"
+ "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/cluster"
+ "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/discovery"
+ "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/events"
+ "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/images"
+ testlog "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/log"
+ "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/nodes"
+ "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/pods"
+ "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/profiles"
+)
+
+var workerRTNode *corev1.Node
+var profile *performancev2.PerformanceProfile
+
+const (
+ sysDevicesOnlineCPUs = "/sys/devices/system/cpu/online"
+)
+
+var _ = Describe("[rfe_id:27363][performance] CPU Management", func() {
+ var balanceIsolated bool
+ var reservedCPU, isolatedCPU string
+ var listReservedCPU []int
+ var reservedCPUSet cpuset.CPUSet
+ var onlineCPUSet cpuset.CPUSet
+
+ testutils.BeforeAll(func() {
+ isSNO, err := cluster.IsSingleNode()
+ Expect(err).ToNot(HaveOccurred())
+ RunningOnSingleNode = isSNO
+ })
+
+ BeforeEach(func() {
+ if discovery.Enabled() && testutils.ProfileNotFound {
+ Skip("Discovery mode enabled, performance profile not found")
+ }
+
+ workerRTNodes, err := nodes.GetByLabels(testutils.NodeSelectorLabels)
+ Expect(err).ToNot(HaveOccurred())
+ workerRTNodes, err = nodes.MatchingOptionalSelector(workerRTNodes)
+ Expect(err).ToNot(HaveOccurred(), fmt.Sprintf("error looking for the optional selector: %v", err))
+ Expect(workerRTNodes).ToNot(BeEmpty())
+ workerRTNode = &workerRTNodes[0]
+ profile, err = profiles.GetByNodeLabels(testutils.NodeSelectorLabels)
+ Expect(err).ToNot(HaveOccurred())
+
+ By(fmt.Sprintf("Checking the profile %s with cpus %s", profile.Name, cpuSpecToString(profile.Spec.CPU)))
+ balanceIsolated = true
+ if profile.Spec.CPU.BalanceIsolated != nil {
+ balanceIsolated = *profile.Spec.CPU.BalanceIsolated
+ }
+
+ Expect(profile.Spec.CPU.Isolated).NotTo(BeNil())
+ isolatedCPU = string(*profile.Spec.CPU.Isolated)
+
+ Expect(profile.Spec.CPU.Reserved).NotTo(BeNil())
+ reservedCPU = string(*profile.Spec.CPU.Reserved)
+ reservedCPUSet, err = cpuset.Parse(reservedCPU)
+ Expect(err).ToNot(HaveOccurred())
+ listReservedCPU = reservedCPUSet.ToSlice()
+
+ onlineCPUSet, err = nodes.GetOnlineCPUsSet(workerRTNode)
+ Expect(err).ToNot(HaveOccurred())
+ })
+
+ Describe("Verification of configuration on the worker node", func() {
+ It("[test_id:28528][crit:high][vendor:cnf-qe@redhat.com][level:acceptance] Verify CPU reservation on the node", func() {
+ By(fmt.Sprintf("Allocatable CPU should be less than capacity by %d", len(listReservedCPU)))
+ capacityCPU, _ := workerRTNode.Status.Capacity.Cpu().AsInt64()
+ allocatableCPU, _ := workerRTNode.Status.Allocatable.Cpu().AsInt64()
+ differenceCPUGot := capacityCPU - allocatableCPU
+ differenceCPUExpected := int64(len(listReservedCPU))
+ Expect(differenceCPUGot).To(Equal(differenceCPUExpected), "Allocatable CPU %d should be less than capacity %d by %d; got %d instead", allocatableCPU, capacityCPU, differenceCPUExpected, differenceCPUGot)
+ })
+
+ It("[test_id:37862][crit:high][vendor:cnf-qe@redhat.com][level:acceptance] Verify CPU affinity mask, CPU reservation and CPU isolation on worker node", func() {
+ By("checking isolated CPU")
+ cmd := []string{"cat", "/sys/devices/system/cpu/isolated"}
+ sysIsolatedCpus, err := nodes.ExecCommandOnNode(cmd, workerRTNode)
+ Expect(err).ToNot(HaveOccurred())
+ if balanceIsolated {
+ Expect(sysIsolatedCpus).To(BeEmpty())
+ } else {
+ Expect(sysIsolatedCpus).To(Equal(isolatedCPU))
+ }
+
+ By("checking reserved CPU in kubelet config file")
+ cmd = []string{"cat", "/rootfs/etc/kubernetes/kubelet.conf"}
+ conf, err := nodes.ExecCommandOnNode(cmd, workerRTNode)
+ Expect(err).ToNot(HaveOccurred(), "failed to cat kubelet.conf")
+ // kubelet.conf changed formatting, there is a space after colons atm. Let's deal with both cases with a regex
+ Expect(conf).To(MatchRegexp(fmt.Sprintf(`"reservedSystemCPUs": ?"%s"`, reservedCPU)))
+
+ By("checking CPU affinity mask for kernel scheduler")
+ cmd = []string{"/bin/bash", "-c", "taskset -pc 1"}
+ sched, err := nodes.ExecCommandOnNode(cmd, workerRTNode)
+ Expect(err).ToNot(HaveOccurred(), "failed to execute taskset")
+ mask := strings.SplitAfter(sched, " ")
+ maskSet, err := cpuset.Parse(mask[len(mask)-1])
+ Expect(err).ToNot(HaveOccurred())
+
+ Expect(reservedCPUSet.IsSubsetOf(maskSet)).To(Equal(true), fmt.Sprintf("The init process (pid 1) should have cpu affinity: %s", reservedCPU))
+ })
+
+ It("[test_id:34358] Verify rcu_nocbs kernel argument on the node", func() {
+ By("checking that cmdline contains rcu_nocbs with right value")
+ cmd := []string{"cat", "/proc/cmdline"}
+ cmdline, err := nodes.ExecCommandOnNode(cmd, workerRTNode)
+ Expect(err).ToNot(HaveOccurred())
+ re := regexp.MustCompile(`rcu_nocbs=\S+`)
+ rcuNocbsArgument := re.FindString(cmdline)
+ Expect(rcuNocbsArgument).To(ContainSubstring("rcu_nocbs="))
+ rcuNocbsCpu := strings.Split(rcuNocbsArgument, "=")[1]
+ Expect(rcuNocbsCpu).To(Equal(isolatedCPU))
+
+ By("checking that new rcuo processes are running on non_isolated cpu")
+ cmd = []string{"pgrep", "rcuo"}
+ rcuoList, err := nodes.ExecCommandOnNode(cmd, workerRTNode)
+ Expect(err).ToNot(HaveOccurred())
+ for _, rcuo := range strings.Split(rcuoList, "\n") {
+ // check cpu affinity mask
+ cmd = []string{"/bin/bash", "-c", fmt.Sprintf("taskset -pc %s", rcuo)}
+ taskset, err := nodes.ExecCommandOnNode(cmd, workerRTNode)
+ Expect(err).ToNot(HaveOccurred())
+ mask := strings.SplitAfter(taskset, " ")
+ maskSet, err := cpuset.Parse(mask[len(mask)-1])
+ Expect(err).ToNot(HaveOccurred())
+ Expect(reservedCPUSet.IsSubsetOf(maskSet)).To(Equal(true), "The process should have cpu affinity: %s", reservedCPU)
+ }
+ })
+ })
+
+ Describe("Verification of cpu manager functionality", func() {
+ var testpod *corev1.Pod
+ var discoveryFailed bool
+
+ testutils.BeforeAll(func() {
+ discoveryFailed = false
+ if discovery.Enabled() {
+ profile, err := profiles.GetByNodeLabels(testutils.NodeSelectorLabels)
+ Expect(err).ToNot(HaveOccurred())
+ isolatedCPU = string(*profile.Spec.CPU.Isolated)
+ }
+ })
+
+ BeforeEach(func() {
+ if discoveryFailed {
+ Skip("Skipping tests since there are insufficant isolated cores to create a stress pod")
+ }
+ })
+
+ AfterEach(func() {
+ deleteTestPod(testpod)
+ })
+
+ table.DescribeTable("Verify CPU usage by stress PODs", func(guaranteed bool) {
+ cpuID := onlineCPUSet.ToSliceNoSort()[0]
+ smtLevel := nodes.GetSMTLevel(cpuID, workerRTNode)
+ if smtLevel < 2 {
+ Skip(fmt.Sprintf("designated worker node %q has SMT level %d - minimum required 2", workerRTNode.Name, smtLevel))
+ }
+
+ // note: cpuRequest must be a multiple of the smtLevel. Pick the minimum to maximize the chances of running on CI
+ cpuRequest := smtLevel
+ testpod = getStressPod(workerRTNode.Name, cpuRequest)
+ testpod.Namespace = testutils.NamespaceTesting
+
+ listCPU := onlineCPUSet.ToSlice()
+ expectedQos := corev1.PodQOSBurstable
+
+ if guaranteed {
+ listCPU = onlineCPUSet.Difference(reservedCPUSet).ToSlice()
+ expectedQos = corev1.PodQOSGuaranteed
+ promotePodToGuaranteed(testpod)
+ } else if !balanceIsolated {
+ // when balanceIsolated is false, a non-guaranteed pod should run on reserved CPUs
+ listCPU = listReservedCPU
+ }
+
+ var err error
+ err = testclient.Client.Create(context.TODO(), testpod)
+ Expect(err).ToNot(HaveOccurred())
+
+ err = pods.WaitForCondition(testpod, corev1.PodReady, corev1.ConditionTrue, 10*time.Minute)
+ logEventsForPod(testpod)
+ Expect(err).ToNot(HaveOccurred())
+
+ updatedPod := &corev1.Pod{}
+ err = testclient.Client.Get(context.TODO(), client.ObjectKeyFromObject(testpod), updatedPod)
+ Expect(err).ToNot(HaveOccurred())
+ Expect(updatedPod.Status.QOSClass).To(Equal(expectedQos),
+ "unexpected QoS Class for %s/%s: %s (looking for %s)",
+ updatedPod.Namespace, updatedPod.Name, updatedPod.Status.QOSClass, expectedQos)
+
+ output, err := nodes.ExecCommandOnNode(
+ []string{"/bin/bash", "-c", "ps -o psr $(pgrep -n stress) | tail -1"},
+ workerRTNode,
+ )
+ Expect(err).ToNot(HaveOccurred(), "failed to get cpu of stress process")
+ cpu, err := strconv.Atoi(strings.Trim(output, " "))
+ Expect(err).ToNot(HaveOccurred())
+
+ Expect(cpu).To(BeElementOf(listCPU))
+ },
+ table.Entry("[test_id:37860] Non-guaranteed POD can work on any CPU", false),
+ table.Entry("[test_id:27492] Guaranteed POD should work on isolated cpu", true),
+ )
+ })
+
+ When("pod runs with the CPU load balancing runtime class", func() {
+ var smtLevel int
+ var testpod *corev1.Pod
+ var defaultFlags map[int][]int
+
+ getCPUsSchedulingDomainFlags := func() (map[int][]int, error) {
+ cmd := []string{"/bin/bash", "-c", "more /proc/sys/kernel/sched_domain/cpu*/domain*/flags | cat"}
+ out, err := nodes.ExecCommandOnNode(cmd, workerRTNode)
+ if err != nil {
+ return nil, err
+ }
+
+ re, err := regexp.Compile(`/proc/sys/kernel/sched_domain/cpu(\d+)/domain\d+/flags\n:+\n(\d+)`)
+ if err != nil {
+ return nil, err
+ }
+
+ allSubmatch := re.FindAllStringSubmatch(out, -1)
+ cpuToSchedDomains := map[int][]int{}
+ for _, submatch := range allSubmatch {
+ if len(submatch) != 3 {
+ return nil, fmt.Errorf("the sched_domain submatch %v does not have a valid length", submatch)
+ }
+
+ cpu, err := strconv.Atoi(submatch[1])
+ if err != nil {
+ return nil, err
+ }
+
+ if _, ok := cpuToSchedDomains[cpu]; !ok {
+ cpuToSchedDomains[cpu] = []int{}
+ }
+
+ flags, err := strconv.Atoi(submatch[2])
+ if err != nil {
+ return nil, err
+ }
+
+ cpuToSchedDomains[cpu] = append(cpuToSchedDomains[cpu], flags)
+ }
+
+ // sort sched_domain
+ for cpu := range cpuToSchedDomains {
+ sort.Ints(cpuToSchedDomains[cpu])
+ }
+
+ testlog.Infof("Scheduler domains: %v", cpuToSchedDomains)
+ return cpuToSchedDomains, nil
+ }
+
+ BeforeEach(func() {
+ var err error
+ defaultFlags, err = getCPUsSchedulingDomainFlags()
+ Expect(err).ToNot(HaveOccurred())
+
+ annotations := map[string]string{
+ "cpu-load-balancing.crio.io": "disable",
+ }
+ // any random existing cpu is fine
+ cpuID := onlineCPUSet.ToSliceNoSort()[0]
+ smtLevel = nodes.GetSMTLevel(cpuID, workerRTNode)
+ testpod = getTestPodWithAnnotations(annotations, smtLevel)
+ })
+
+ AfterEach(func() {
+ deleteTestPod(testpod)
+ })
+
+ It("[test_id:32646] should disable CPU load balancing for CPU's used by the pod", func() {
+ var err error
+ By("Starting the pod")
+ err = testclient.Client.Create(context.TODO(), testpod)
+ Expect(err).ToNot(HaveOccurred())
+
+ err = pods.WaitForCondition(testpod, corev1.PodReady, corev1.ConditionTrue, 10*time.Minute)
+ logEventsForPod(testpod)
+ Expect(err).ToNot(HaveOccurred())
+
+ By("Getting the container cpuset.cpus cgroup")
+ containerID, err := pods.GetContainerIDByName(testpod, "test")
+ Expect(err).ToNot(HaveOccurred())
+
+ containerCgroup := ""
+ Eventually(func() string {
+ cmd := []string{"/bin/bash", "-c", fmt.Sprintf("find /rootfs/sys/fs/cgroup/cpuset/ -name *%s*", containerID)}
+ containerCgroup, err = nodes.ExecCommandOnNode(cmd, workerRTNode)
+ Expect(err).ToNot(HaveOccurred())
+ return containerCgroup
+ }, (cluster.ComputeTestTimeout(30*time.Second, RunningOnSingleNode)), 5*time.Second).ShouldNot(BeEmpty(),
+ fmt.Sprintf("cannot find cgroup for container %q", containerID))
+
+ By("Checking what CPU the pod is using")
+ cmd := []string{"/bin/bash", "-c", fmt.Sprintf("cat %s/cpuset.cpus", containerCgroup)}
+ output, err := nodes.ExecCommandOnNode(cmd, workerRTNode)
+ Expect(err).ToNot(HaveOccurred())
+
+ cpus, err := cpuset.Parse(output)
+ Expect(err).ToNot(HaveOccurred())
+
+ By("Getting the CPU scheduling flags")
+ flags, err := getCPUsSchedulingDomainFlags()
+ Expect(err).ToNot(HaveOccurred())
+
+ By("Verifying that the CPU load balancing was disabled")
+ for _, cpu := range cpus.ToSlice() {
+ Expect(len(flags[cpu])).To(Equal(len(defaultFlags[cpu])))
+ // the CPU flags should be almost the same except the LSB that should be disabled
+ // see https://github.com/torvalds/linux/blob/0fe5f9ca223573167c4c4156903d751d2c8e160e/include/linux/sched/topology.h#L14
+ // for more information regarding the sched domain flags
+ for i := range flags[cpu] {
+ Expect(flags[cpu][i]).To(Equal(defaultFlags[cpu][i] - 1))
+ }
+ }
+
+ By("Deleting the pod")
+ deleteTestPod(testpod)
+
+ By("Getting the CPU scheduling flags")
+ flags, err = getCPUsSchedulingDomainFlags()
+ Expect(err).ToNot(HaveOccurred())
+
+ By("Verifying that the CPU load balancing was enabled back")
+ for _, cpu := range cpus.ToSlice() {
+ Expect(len(flags[cpu])).To(Equal(len(defaultFlags[cpu])))
+ // the CPU scheduling flags should be restored to the default values
+ for i := range flags[cpu] {
+ Expect(flags[cpu][i]).To(Equal(defaultFlags[cpu][i]))
+ }
+ }
+ })
+ })
+
+ Describe("Verification that IRQ load balance can be disabled per POD", func() {
+ var smtLevel int
+ var testpod *corev1.Pod
+
+ BeforeEach(func() {
+ Skip("part of interrupts does not support CPU affinity change because of underlying hardware")
+
+ if profile.Spec.GloballyDisableIrqLoadBalancing != nil && *profile.Spec.GloballyDisableIrqLoadBalancing {
+ Skip("IRQ load balance should be enabled (GloballyDisableIrqLoadBalancing=false), skipping test")
+ }
+
+ cpuID := onlineCPUSet.ToSliceNoSort()[0]
+ smtLevel = nodes.GetSMTLevel(cpuID, workerRTNode)
+ })
+
+ AfterEach(func() {
+ deleteTestPod(testpod)
+ })
+
+ It("[test_id:36364] should disable IRQ balance for CPU where POD is running", func() {
+ By("checking default smp affinity is equal to all active CPUs")
+ defaultSmpAffinitySet, err := nodes.GetDefaultSmpAffinitySet(workerRTNode)
+ Expect(err).ToNot(HaveOccurred())
+
+ onlineCPUsSet, err := nodes.GetOnlineCPUsSet(workerRTNode)
+ Expect(err).ToNot(HaveOccurred())
+
+ Expect(onlineCPUsSet.IsSubsetOf(defaultSmpAffinitySet)).To(BeTrue(), "All online CPUs %s should be subset of default SMP affinity %s", onlineCPUsSet, defaultSmpAffinitySet)
+
+ By("Running pod with annotations that disable specific CPU from IRQ balancer")
+ annotations := map[string]string{
+ "irq-load-balancing.crio.io": "disable",
+ "cpu-quota.crio.io": "disable",
+ }
+ testpod = getTestPodWithAnnotations(annotations, smtLevel)
+
+ err = testclient.Client.Create(context.TODO(), testpod)
+ Expect(err).ToNot(HaveOccurred())
+ err = pods.WaitForCondition(testpod, corev1.PodReady, corev1.ConditionTrue, 10*time.Minute)
+ logEventsForPod(testpod)
+ Expect(err).ToNot(HaveOccurred())
+
+ By("Checking that the default smp affinity mask was updated and CPU (where POD is running) isolated")
+ defaultSmpAffinitySet, err = nodes.GetDefaultSmpAffinitySet(workerRTNode)
+ Expect(err).ToNot(HaveOccurred())
+
+ getPsr := []string{"/bin/bash", "-c", "grep Cpus_allowed_list /proc/self/status | awk '{print $2}'"}
+ psr, err := pods.WaitForPodOutput(testclient.K8sClient, testpod, getPsr)
+ Expect(err).ToNot(HaveOccurred())
+ psrSet, err := cpuset.Parse(strings.Trim(string(psr), "\n"))
+ Expect(err).ToNot(HaveOccurred())
+
+ Expect(psrSet.IsSubsetOf(defaultSmpAffinitySet)).To(BeFalse(), fmt.Sprintf("Default SMP affinity should not contain isolated CPU %s", psr))
+
+ By("Checking that there are no any active IRQ on isolated CPU")
+ // It may take some time for the system to reschedule active IRQs
+ Eventually(func() bool {
+ getActiveIrq := []string{"/bin/bash", "-c", "for n in $(find /proc/irq/ -name smp_affinity_list); do echo $(cat $n); done"}
+ activeIrq, err := nodes.ExecCommandOnNode(getActiveIrq, workerRTNode)
+ Expect(err).ToNot(HaveOccurred())
+ Expect(activeIrq).ToNot(BeEmpty())
+ for _, irq := range strings.Split(activeIrq, "\n") {
+ irqAffinity, err := cpuset.Parse(irq)
+ Expect(err).ToNot(HaveOccurred())
+ if !irqAffinity.Equals(onlineCPUsSet) && psrSet.IsSubsetOf(irqAffinity) {
+ return false
+ }
+ }
+ return true
+ }, (cluster.ComputeTestTimeout(30*time.Second, RunningOnSingleNode)), 5*time.Second).Should(BeTrue(),
+ fmt.Sprintf("IRQ still active on CPU%s", psr))
+
+ By("Checking that after removing POD default smp affinity is returned back to all active CPUs")
+ deleteTestPod(testpod)
+ defaultSmpAffinitySet, err = nodes.GetDefaultSmpAffinitySet(workerRTNode)
+ Expect(err).ToNot(HaveOccurred())
+
+ Expect(onlineCPUsSet.IsSubsetOf(defaultSmpAffinitySet)).To(BeTrue(), "All online CPUs %s should be subset of default SMP affinity %s", onlineCPUsSet, defaultSmpAffinitySet)
+ })
+ })
+
+ When("reserved CPUs specified", func() {
+ var testpod *corev1.Pod
+
+ BeforeEach(func() {
+ testpod = pods.GetTestPod()
+ testpod.Namespace = testutils.NamespaceTesting
+ testpod.Spec.NodeSelector = map[string]string{testutils.LabelHostname: workerRTNode.Name}
+ testpod.Spec.ShareProcessNamespace = pointer.BoolPtr(true)
+
+ err := testclient.Client.Create(context.TODO(), testpod)
+ Expect(err).ToNot(HaveOccurred())
+
+ err = pods.WaitForCondition(testpod, corev1.PodReady, corev1.ConditionTrue, 10*time.Minute)
+ logEventsForPod(testpod)
+ Expect(err).ToNot(HaveOccurred())
+ })
+
+ It("[test_id:49147] should run infra containers on reserved CPUs", func() {
+ var err error
+ // we use find because crictl does not show infra containers; `runc list` shows them,
+ // but you would still need some way to find the infra containers' IDs
+ podUID := strings.Replace(string(testpod.UID), "-", "_", -1)
+
+ podCgroup := ""
+ Eventually(func() string {
+ cmd := []string{"/bin/bash", "-c", fmt.Sprintf("find /rootfs/sys/fs/cgroup/cpuset/ -name *%s*", podUID)}
+ podCgroup, err = nodes.ExecCommandOnNode(cmd, workerRTNode)
+ Expect(err).ToNot(HaveOccurred())
+ return podCgroup
+ }, cluster.ComputeTestTimeout(30*time.Second, RunningOnSingleNode), 5*time.Second).ShouldNot(BeEmpty(),
+ fmt.Sprintf("cannot find cgroup for pod %q", podUID))
+
+ containersCgroups := ""
+ Eventually(func() string {
+ cmd := []string{"/bin/bash", "-c", fmt.Sprintf("find %s -name crio-*", podCgroup)}
+ containersCgroups, err = nodes.ExecCommandOnNode(cmd, workerRTNode)
+ Expect(err).ToNot(HaveOccurred())
+ return containersCgroups
+ }, cluster.ComputeTestTimeout(30*time.Second, RunningOnSingleNode), 5*time.Second).ShouldNot(BeEmpty(),
+ fmt.Sprintf("cannot find containers cgroups from pod cgroup %q", podCgroup))
+
+ containerID, err := pods.GetContainerIDByName(testpod, "test")
+ Expect(err).ToNot(HaveOccurred())
+
+ containersCgroups = strings.Trim(containersCgroups, "\n")
+ containersCgroupsDirs := strings.Split(containersCgroups, "\n")
+ Expect(len(containersCgroupsDirs)).To(Equal(2), "unexpected amount of containers cgroups")
+
+ for _, dir := range containersCgroupsDirs {
+ // skip application container cgroup
+ if strings.Contains(dir, containerID) {
+ continue
+ }
+
+ By("Checking what CPU the infra container is using")
+ cmd := []string{"/bin/bash", "-c", fmt.Sprintf("cat %s/cpuset.cpus", dir)}
+ output, err := nodes.ExecCommandOnNode(cmd, workerRTNode)
+ Expect(err).ToNot(HaveOccurred())
+
+ cpus, err := cpuset.Parse(output)
+ Expect(err).ToNot(HaveOccurred())
+
+ Expect(cpus.ToSlice()).To(Equal(reservedCPUSet.ToSlice()))
+ }
+ })
+ })
+
+ When("strict NUMA aligment is requested", func() {
+ var testpod *corev1.Pod
+
+ BeforeEach(func() {
+ if profile.Spec.NUMA == nil || profile.Spec.NUMA.TopologyPolicy == nil {
+ Skip("Topology Manager Policy is not configured")
+ }
+ tmPolicy := *profile.Spec.NUMA.TopologyPolicy
+ if tmPolicy != "single-numa-node" {
+ Skip("Topology Manager Policy is not Single NUMA Node")
+ }
+ })
+
+ AfterEach(func() {
+ if testpod == nil {
+ return
+ }
+ deleteTestPod(testpod)
+ })
+
+ It("[test_id:49149] should reject pods which request integral CPUs not aligned with machine SMT level", func() {
+ // any random existing cpu is fine
+ cpuID := onlineCPUSet.ToSliceNoSort()[0]
+ smtLevel := nodes.GetSMTLevel(cpuID, workerRTNode)
+ if smtLevel < 2 {
+ Skip(fmt.Sprintf("designated worker node %q has SMT level %d - minimum required 2", workerRTNode.Name, smtLevel))
+ }
+
+ cpuCount := 1 // must be intentionally < than the smtLevel to trigger the kubelet validation
+ testpod = promotePodToGuaranteed(getStressPod(workerRTNode.Name, cpuCount))
+ testpod.Namespace = testutils.NamespaceTesting
+
+ err := testclient.Client.Create(context.TODO(), testpod)
+ Expect(err).ToNot(HaveOccurred())
+
+ err = pods.WaitForPredicate(testpod, 10*time.Minute, func(pod *corev1.Pod) (bool, error) {
+ if pod.Status.Phase != corev1.PodPending {
+ return true, nil
+ }
+ return false, nil
+ })
+ Expect(err).ToNot(HaveOccurred())
+
+ updatedPod := &corev1.Pod{}
+ err = testclient.Client.Get(context.TODO(), client.ObjectKeyFromObject(testpod), updatedPod)
+ Expect(err).ToNot(HaveOccurred())
+
+ Expect(updatedPod.Status.Phase).To(Equal(corev1.PodFailed), "pod %s not failed: %v", updatedPod.Name, updatedPod.Status)
+ Expect(isSMTAlignmentError(updatedPod)).To(BeTrue(), "pod %s failed for wrong reason: %q", updatedPod.Name, updatedPod.Status.Reason)
+ })
+ })
+
+})
+
+func isSMTAlignmentError(pod *corev1.Pod) bool {
+ re := regexp.MustCompile(`SMT.*Alignment.*Error`)
+ return re.MatchString(pod.Status.Reason)
+}
+
+func getStressPod(nodeName string, cpus int) *corev1.Pod {
+ cpuCount := fmt.Sprintf("%d", cpus)
+ return &corev1.Pod{
+ ObjectMeta: metav1.ObjectMeta{
+ GenerateName: "test-cpu-",
+ Labels: map[string]string{
+ "test": "",
+ },
+ },
+ Spec: corev1.PodSpec{
+ Containers: []corev1.Container{
+ {
+ Name: "stress-test",
+ Image: images.Test(),
+ Resources: corev1.ResourceRequirements{
+ Requests: corev1.ResourceList{
+ corev1.ResourceCPU: resource.MustParse(cpuCount),
+ corev1.ResourceMemory: resource.MustParse("1Gi"),
+ },
+ },
+ Command: []string{"/usr/bin/stresser"},
+ Args: []string{"-cpus", cpuCount},
+ },
+ },
+ NodeSelector: map[string]string{
+ testutils.LabelHostname: nodeName,
+ },
+ },
+ }
+}
+
+func promotePodToGuaranteed(pod *corev1.Pod) *corev1.Pod {
+ for idx := 0; idx < len(pod.Spec.Containers); idx++ {
+ cnt := &pod.Spec.Containers[idx] // shortcut
+ if cnt.Resources.Limits == nil {
+ cnt.Resources.Limits = make(corev1.ResourceList)
+ }
+ for resName, resQty := range cnt.Resources.Requests {
+ cnt.Resources.Limits[resName] = resQty
+ }
+ }
+ return pod
+}
+
+func getTestPodWithAnnotations(annotations map[string]string, cpus int) *corev1.Pod {
+ testpod := pods.GetTestPod()
+ testpod.Annotations = annotations
+ testpod.Namespace = testutils.NamespaceTesting
+
+ cpuCount := fmt.Sprintf("%d", cpus)
+
+ resCpu := resource.MustParse(cpuCount)
+ resMem := resource.MustParse("256Mi")
+
+ // change pod resource requirements, to change the pod QoS class to guaranteed
+ testpod.Spec.Containers[0].Resources = corev1.ResourceRequirements{
+ Limits: corev1.ResourceList{
+ corev1.ResourceCPU: resCpu,
+ corev1.ResourceMemory: resMem,
+ },
+ }
+
+ runtimeClassName := components.GetComponentName(profile.Name, components.ComponentNamePrefix)
+ testpod.Spec.RuntimeClassName = &runtimeClassName
+ testpod.Spec.NodeSelector = map[string]string{testutils.LabelHostname: workerRTNode.Name}
+
+ return testpod
+}
+
+func deleteTestPod(testpod *corev1.Pod) {
+ // it is possible that the pod was already deleted as part of the test; in this case we want to skip teardown
+ err := testclient.Client.Get(context.TODO(), client.ObjectKeyFromObject(testpod), testpod)
+ if errors.IsNotFound(err) {
+ return
+ }
+
+ err = testclient.Client.Delete(context.TODO(), testpod)
+ Expect(err).ToNot(HaveOccurred())
+
+ err = pods.WaitForDeletion(testpod, pods.DefaultDeletionTimeout*time.Second)
+ Expect(err).ToNot(HaveOccurred())
+}
+
+func cpuSpecToString(cpus *performancev2.CPU) string {
+ if cpus == nil {
+ return ""
+ }
+ sb := strings.Builder{}
+ if cpus.Reserved != nil {
+ fmt.Fprintf(&sb, "reserved=[%s]", *cpus.Reserved)
+ }
+ if cpus.Isolated != nil {
+ fmt.Fprintf(&sb, " isolated=[%s]", *cpus.Isolated)
+ }
+ if cpus.BalanceIsolated != nil {
+ fmt.Fprintf(&sb, " balanceIsolated=%t", *cpus.BalanceIsolated)
+ }
+ return sb.String()
+}
+
+func logEventsForPod(testPod *corev1.Pod) {
+ evs, err := events.GetEventsForObject(testclient.Client, testPod.Namespace, testPod.Name, string(testPod.UID))
+ if err != nil {
+ testlog.Error(err)
+ }
+ for _, event := range evs.Items {
+ testlog.Warningf("-> %s %s %s", event.Action, event.Reason, event.Message)
+ }
+}
diff --git a/test/e2e/pao/functests/1_performance/hugepages.go b/test/e2e/pao/functests/1_performance/hugepages.go
new file mode 100644
index 000000000..688ab0b0e
--- /dev/null
+++ b/test/e2e/pao/functests/1_performance/hugepages.go
@@ -0,0 +1,213 @@
+package __performance
+
+import (
+ "context"
+ "fmt"
+ "strconv"
+ "strings"
+ "time"
+
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+
+ corev1 "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/api/resource"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+ performancev2 "github.com/openshift/cluster-node-tuning-operator/pkg/apis/pao/v2"
+ "github.com/openshift/cluster-node-tuning-operator/pkg/pao/controller/performanceprofile/components/machineconfig"
+ testutils "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils"
+ testclient "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/client"
+ "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/cluster"
+ "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/discovery"
+ "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/images"
+ "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/nodes"
+ "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/pods"
+ "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/profiles"
+)
+
+var _ = Describe("[performance]Hugepages", func() {
+	var workerRTNode *corev1.Node
+	var profile *performancev2.PerformanceProfile
+
+	testutils.BeforeAll(func() {
+		isSNO, err := cluster.IsSingleNode()
+		Expect(err).ToNot(HaveOccurred())
+		RunningOnSingleNode = isSNO
+	})
+
+	BeforeEach(func() {
+		if discovery.Enabled() && testutils.ProfileNotFound {
+			Skip("Discovery mode enabled, performance profile not found")
+		}
+
+		var err error
+		workerRTNodes, err := nodes.GetByLabels(testutils.NodeSelectorLabels)
+		Expect(err).ToNot(HaveOccurred())
+		workerRTNodes, err = nodes.MatchingOptionalSelector(workerRTNodes)
+		Expect(err).ToNot(HaveOccurred(), fmt.Sprintf("error looking for the optional selector: %v", err))
+		Expect(workerRTNodes).ToNot(BeEmpty())
+		workerRTNode = &workerRTNodes[0]
+
+		profile, err = profiles.GetByNodeLabels(testutils.NodeSelectorLabels)
+		Expect(err).ToNot(HaveOccurred())
+		if profile.Spec.HugePages == nil || len(profile.Spec.HugePages.Pages) == 0 {
+			Skip("Hugepages is not configured in performance profile")
+		}
+	})
+
+	// Multiple hugepages e2e tests already exist upstream, so here we only
+	// verify that PAO configured the expected number of hugepages on the node.
+	Context("[rfe_id:27369]when NUMA node specified", func() {
+		It("[test_id:27752][crit:high][vendor:cnf-qe@redhat.com][level:acceptance] should be allocated on the specifed NUMA node ", func() {
+			for _, page := range profile.Spec.HugePages.Pages {
+				if page.Node == nil {
+					continue
+				}
+
+				hugepagesSize, err := machineconfig.GetHugepagesSizeKilobytes(page.Size)
+				Expect(err).ToNot(HaveOccurred())
+
+				availableHugepagesFile := fmt.Sprintf("/sys/devices/system/node/node%d/hugepages/hugepages-%skB/nr_hugepages", *page.Node, hugepagesSize)
+				nrHugepages := checkHugepagesStatus(availableHugepagesFile, workerRTNode)
+
+				freeHugepagesFile := fmt.Sprintf("/sys/devices/system/node/node%d/hugepages/hugepages-%skB/free_hugepages", *page.Node, hugepagesSize)
+				freeHugepages := checkHugepagesStatus(freeHugepagesFile, workerRTNode)
+
+				Expect(int32(nrHugepages)).To(Equal(page.Count), "The number of available hugepages should be equal to the number in performance profile")
+				Expect(nrHugepages).To(Equal(freeHugepages), "On idle system the number of available hugepages should be equal to free hugepages")
+			}
+		})
+	})
+
+	Context("with multiple sizes", func() {
+		It("[test_id:34080] should be supported and available for the container usage", func() {
+			for _, page := range profile.Spec.HugePages.Pages {
+				hugepagesSize, err := machineconfig.GetHugepagesSizeKilobytes(page.Size)
+				Expect(err).ToNot(HaveOccurred())
+
+				// node-scoped counters when the page is pinned to a NUMA node,
+				// global counters otherwise
+				availableHugepagesFile := fmt.Sprintf("/sys/kernel/mm/hugepages/hugepages-%skB/nr_hugepages", hugepagesSize)
+				if page.Node != nil {
+					availableHugepagesFile = fmt.Sprintf("/sys/devices/system/node/node%d/hugepages/hugepages-%skB/nr_hugepages", *page.Node, hugepagesSize)
+				}
+				nrHugepages := checkHugepagesStatus(availableHugepagesFile, workerRTNode)
+
+				if discovery.Enabled() && nrHugepages != 0 {
+					Skip("Skipping test since other guests might reside in the cluster affecting results")
+				}
+
+				freeHugepagesFile := fmt.Sprintf("/sys/kernel/mm/hugepages/hugepages-%skB/free_hugepages", hugepagesSize)
+				if page.Node != nil {
+					freeHugepagesFile = fmt.Sprintf("/sys/devices/system/node/node%d/hugepages/hugepages-%skB/free_hugepages", *page.Node, hugepagesSize)
+				}
+
+				freeHugepages := checkHugepagesStatus(freeHugepagesFile, workerRTNode)
+
+				Expect(int32(nrHugepages)).To(Equal(page.Count), "The number of available hugepages should be equal to the number in performance profile")
+				Expect(nrHugepages).To(Equal(freeHugepages), "On idle system the number of available hugepages should be equal to free hugepages")
+			}
+		})
+	})
+
+	Context("[rfe_id:27354]Huge pages support for container workloads", func() {
+		var testpod *corev1.Pod
+
+		AfterEach(func() {
+			// the pod is created inside the It block; when the spec was skipped
+			// or failed before creating it there is nothing to tear down
+			if testpod == nil {
+				return
+			}
+			// deleteTestPod tolerates a pod that was already removed by the test
+			deleteTestPod(testpod)
+		})
+
+		It("[test_id:27477][crit:high][vendor:cnf-qe@redhat.com][level:acceptance] Huge pages support for container workloads", func() {
+			hpSize := profile.Spec.HugePages.Pages[0].Size
+			hpSizeKb, err := machineconfig.GetHugepagesSizeKilobytes(hpSize)
+			Expect(err).ToNot(HaveOccurred())
+
+			By("checking hugepages usage in bytes - should be 0 on idle system")
+			// NOTE(review): cgroup v1 hugetlb path — confirm the target nodes do not run cgroup v2
+			usageHugepagesFile := fmt.Sprintf("/rootfs/sys/fs/cgroup/hugetlb/hugetlb.%sB.usage_in_bytes", hpSize)
+			usageHugepages := checkHugepagesStatus(usageHugepagesFile, workerRTNode)
+			if discovery.Enabled() && usageHugepages != 0 {
+				Skip("Skipping test since other guests might reside in the cluster affecting results")
+			}
+			Expect(usageHugepages).To(Equal(0), "Found used hugepages, expected 0")
+
+			By("running the POD and waiting while it's installing testing tools")
+			testpod = getCentosPod(workerRTNode.Name)
+			testpod.Namespace = testutils.NamespaceTesting
+			// profile sizes are "2M"/"1G", so "%si" yields k8s resource names like "hugepages-1Gi"
+			testpod.Spec.Containers[0].Resources.Limits = map[corev1.ResourceName]resource.Quantity{
+				corev1.ResourceName(fmt.Sprintf("hugepages-%si", hpSize)): resource.MustParse(fmt.Sprintf("%si", hpSize)),
+				corev1.ResourceMemory: resource.MustParse("1Gi"),
+			}
+			err = testclient.Client.Create(context.TODO(), testpod)
+			Expect(err).ToNot(HaveOccurred())
+			err = pods.WaitForCondition(testpod, corev1.PodReady, corev1.ConditionTrue, 10*time.Minute)
+			Expect(err).ToNot(HaveOccurred())
+
+			cmd2 := []string{"/bin/bash", "-c", "tmux new -d 'LD_PRELOAD=libhugetlbfs.so HUGETLB_MORECORE=yes top -b > /dev/null'"}
+			_, err = pods.ExecCommandOnPod(testclient.K8sClient, testpod, cmd2)
+			Expect(err).ToNot(HaveOccurred())
+
+			By("checking free hugepages - one should be used by pod")
+			availableHugepagesFile := fmt.Sprintf("/sys/kernel/mm/hugepages/hugepages-%skB/nr_hugepages", hpSizeKb)
+			availableHugepages := checkHugepagesStatus(availableHugepagesFile, workerRTNode)
+
+			freeHugepagesFile := fmt.Sprintf("/sys/kernel/mm/hugepages/hugepages-%skB/free_hugepages", hpSizeKb)
+			Eventually(func() int {
+				freeHugepages := checkHugepagesStatus(freeHugepagesFile, workerRTNode)
+				return availableHugepages - freeHugepages
+			}, cluster.ComputeTestTimeout(30*time.Second, RunningOnSingleNode), time.Second).Should(Equal(1))
+
+			By("checking hugepages usage in bytes")
+			usageHugepages = checkHugepagesStatus(usageHugepagesFile, workerRTNode)
+			Expect(strconv.Itoa(usageHugepages/1024)).To(Equal(hpSizeKb), "usage in bytes should be %s", hpSizeKb)
+		})
+	})
+})
+
+// checkHugepagesStatus reads the given sysfs hugepages counter file on the
+// node (via the machine-config daemon pod) and returns its value as an int.
+// Any read or parse failure fails the current spec.
+func checkHugepagesStatus(path string, workerRTNode *corev1.Node) int {
+	command := []string{"cat", path}
+	out, err := nodes.ExecCommandOnMachineConfigDaemon(workerRTNode, command)
+	Expect(err).ToNot(HaveOccurred())
+	n, err := strconv.Atoi(strings.Trim(string(out), "\n\r"))
+	Expect(err).ToNot(HaveOccurred())
+	return n
+}
+
+// getCentosPod builds a minimal long-running test pod pinned to the given
+// node, with an emptyDir hugepages volume mounted at /dev/hugepages.
+// The pod is only constructed here, not created on the cluster.
+func getCentosPod(nodeName string) *corev1.Pod {
+	hugepagesVolume := corev1.Volume{
+		Name: "hugepages",
+		VolumeSource: corev1.VolumeSource{
+			EmptyDir: &corev1.EmptyDirVolumeSource{Medium: corev1.StorageMediumHugePages},
+		},
+	}
+	testContainer := corev1.Container{
+		Name:    "test",
+		Image:   images.Test(),
+		Command: []string{"sleep", "10h"},
+		VolumeMounts: []corev1.VolumeMount{
+			{
+				Name:      "hugepages",
+				MountPath: "/dev/hugepages",
+			},
+		},
+	}
+	return &corev1.Pod{
+		ObjectMeta: metav1.ObjectMeta{
+			GenerateName: "test-hugepages-",
+			Labels: map[string]string{
+				"test": "",
+			},
+		},
+		Spec: corev1.PodSpec{
+			Volumes:    []corev1.Volume{hugepagesVolume},
+			Containers: []corev1.Container{testContainer},
+			NodeSelector: map[string]string{
+				testutils.LabelHostname: nodeName,
+			},
+		},
+	}
+}
diff --git a/test/e2e/pao/functests/1_performance/netqueues.go b/test/e2e/pao/functests/1_performance/netqueues.go
new file mode 100644
index 000000000..5cf9b436f
--- /dev/null
+++ b/test/e2e/pao/functests/1_performance/netqueues.go
@@ -0,0 +1,364 @@
+package __performance
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "strconv"
+ "strings"
+ "time"
+
+ corev1 "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/types"
+ "k8s.io/apimachinery/pkg/util/wait"
+ "k8s.io/kubernetes/pkg/kubelet/cm/cpuset"
+ "k8s.io/utils/pointer"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+
+ performancev2 "github.com/openshift/cluster-node-tuning-operator/pkg/apis/pao/v2"
+ testutils "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils"
+ testclient "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/client"
+ "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/cluster"
+ "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/discovery"
+ testlog "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/log"
+ "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/nodes"
+ "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/pods"
+ "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/profiles"
+)
+
+var _ = Describe("[ref_id: 40307][pao]Resizing Network Queues", func() {
+	var workerRTNodes []corev1.Node
+	var profile, initialProfile *performancev2.PerformanceProfile
+	var performanceProfileName string
+
+	testutils.BeforeAll(func() {
+		isSNO, err := cluster.IsSingleNode()
+		Expect(err).ToNot(HaveOccurred())
+		RunningOnSingleNode = isSNO
+
+		workerRTNodes, err = nodes.GetByLabels(testutils.NodeSelectorLabels)
+		Expect(err).ToNot(HaveOccurred())
+
+		workerRTNodes, err = nodes.MatchingOptionalSelector(workerRTNodes)
+		Expect(err).ToNot(HaveOccurred())
+
+		profile, err = profiles.GetByNodeLabels(testutils.NodeSelectorLabels)
+		Expect(err).ToNot(HaveOccurred())
+
+		By("Backing up the profile")
+		initialProfile = profile.DeepCopy()
+
+		performanceProfileName = profile.Name
+
+		tunedPaoProfile := fmt.Sprintf("openshift-node-performance-%s", performanceProfileName)
+		//Verify the tuned profile is created on the worker-cnf nodes:
+		tunedCmd := []string{"tuned-adm", "profile_info", tunedPaoProfile}
+		for _, node := range workerRTNodes {
+			tunedPod := nodes.TunedForNode(&node, RunningOnSingleNode)
+			_, err := pods.WaitForPodOutput(testclient.K8sClient, tunedPod, tunedCmd)
+			Expect(err).ToNot(HaveOccurred())
+		}
+	})
+
+	BeforeEach(func() {
+		if discovery.Enabled() && testutils.ProfileNotFound {
+			Skip("Discovery mode enabled, performance profile not found")
+		}
+		// assign to the outer profile variable; the previous `profile, err := ...`
+		// shadowed it, leaving the specs below with a stale profile
+		var err error
+		profile, err = profiles.GetByNodeLabels(testutils.NodeSelectorLabels)
+		Expect(err).ToNot(HaveOccurred())
+		if profile.Spec.Net == nil {
+			By("Enable UserLevelNetworking in Profile")
+			profile.Spec.Net = &performancev2.Net{
+				UserLevelNetworking: pointer.Bool(true),
+			}
+			By("Updating the performance profile")
+			profiles.UpdateWithRetry(profile)
+		}
+	})
+
+	AfterEach(func() {
+		By("Reverting the Profile")
+		spec, err := json.Marshal(initialProfile.Spec)
+		Expect(err).ToNot(HaveOccurred())
+		Expect(testclient.Client.Patch(context.TODO(), profile,
+			client.RawPatch(
+				types.JSONPatchType,
+				[]byte(fmt.Sprintf(`[{ "op": "replace", "path": "/spec", "value": %s }]`, spec)),
+			),
+		)).ToNot(HaveOccurred())
+	})
+
+	Context("Updating performance profile for netqueues", func() {
+		It("[test_id:40308][crit:high][vendor:cnf-qe@redhat.com][level:acceptance] Network device queues Should be set to the profile's reserved CPUs count ", func() {
+			nodesDevices := make(map[string]map[string]int)
+			if profile.Spec.Net != nil {
+				if profile.Spec.Net.UserLevelNetworking != nil && *profile.Spec.Net.UserLevelNetworking && len(profile.Spec.Net.Devices) == 0 {
+					By("To all non virtual network devices when no devices are specified under profile.Spec.Net.Devices")
+					err := checkDeviceSetWithReservedCPU(workerRTNodes, nodesDevices, *profile)
+					if err != nil {
+						Skip("Skipping Test: Unable to set Network queue size to reserved cpu count")
+					}
+				}
+			}
+		})
+
+		It("[test_id:40542] Verify the number of network queues of all supported network interfaces are equal to reserved cpus count", func() {
+			nodesDevices := make(map[string]map[string]int)
+			err := checkDeviceSetWithReservedCPU(workerRTNodes, nodesDevices, *profile)
+			if err != nil {
+				Skip("Skipping Test: Unable to set Network queue size to reserved cpu count")
+			}
+		})
+
+		It("[test_id:40543] Add interfaceName and verify the interface netqueues are equal to reserved cpus count.", func() {
+			nodesDevices := make(map[string]map[string]int)
+			deviceSupport, err := checkDeviceSupport(workerRTNodes, nodesDevices)
+			Expect(err).ToNot(HaveOccurred())
+			if !deviceSupport {
+				Skip("Skipping Test: There are no supported Network Devices")
+			}
+			nodeName, device := getRandomNodeDevice(nodesDevices)
+			profile, err = profiles.GetByNodeLabels(testutils.NodeSelectorLabels)
+			Expect(err).ToNot(HaveOccurred())
+			if profile.Spec.Net.UserLevelNetworking != nil && *profile.Spec.Net.UserLevelNetworking && len(profile.Spec.Net.Devices) == 0 {
+				By("Enable UserLevelNetworking and add Devices in Profile")
+				profile.Spec.Net = &performancev2.Net{
+					UserLevelNetworking: pointer.Bool(true),
+					Devices: []performancev2.Device{
+						{
+							InterfaceName: &device,
+						},
+					},
+				}
+				By("Updating the performance profile")
+				profiles.UpdateWithRetry(profile)
+			}
+			//Verify the tuned profile is created on the worker-cnf nodes:
+			tunedCmd := []string{"bash", "-c",
+				fmt.Sprintf("cat /etc/tuned/openshift-node-performance-%s/tuned.conf | grep devices_udev_regex", performanceProfileName)}
+
+			node, err := nodes.GetByName(nodeName)
+			Expect(err).ToNot(HaveOccurred())
+			tunedPod := nodes.TunedForNode(node, RunningOnSingleNode)
+
+			Eventually(func() bool {
+				out, err := pods.WaitForPodOutput(testclient.K8sClient, tunedPod, tunedCmd)
+				if err != nil {
+					return false
+				}
+				// strings.Contains, not ContainsAny: ContainsAny matches any single
+				// character of the device name and was therefore almost always true
+				return strings.Contains(string(out), device)
+			}, cluster.ComputeTestTimeout(2*time.Minute, RunningOnSingleNode), 5*time.Second).Should(BeTrue(), "could not get a tuned profile set with devices_udev_regex")
+
+			nodesDevices = make(map[string]map[string]int)
+			err = checkDeviceSetWithReservedCPU(workerRTNodes, nodesDevices, *profile)
+			if err != nil {
+				Skip("Skipping Test: Unable to set Network queue size to reserved cpu count")
+			}
+		})
+
+		It("[test_id:40545] Verify reserved cpus count is applied to specific supported networking devices using wildcard matches", func() {
+			nodesDevices := make(map[string]map[string]int)
+			var device, devicePattern string
+			deviceSupport, err := checkDeviceSupport(workerRTNodes, nodesDevices)
+			Expect(err).ToNot(HaveOccurred())
+			if !deviceSupport {
+				Skip("Skipping Test: There are no supported Network Devices")
+			}
+			nodeName, device := getRandomNodeDevice(nodesDevices)
+			devicePattern = device[:len(device)-1] + "*"
+			profile, err = profiles.GetByNodeLabels(testutils.NodeSelectorLabels)
+			Expect(err).ToNot(HaveOccurred())
+			if profile.Spec.Net.UserLevelNetworking != nil && *profile.Spec.Net.UserLevelNetworking && len(profile.Spec.Net.Devices) == 0 {
+				By("Enable UserLevelNetworking and add Devices in Profile")
+				profile.Spec.Net = &performancev2.Net{
+					UserLevelNetworking: pointer.Bool(true),
+					Devices: []performancev2.Device{
+						{
+							InterfaceName: &devicePattern,
+						},
+					},
+				}
+				profiles.UpdateWithRetry(profile)
+			}
+			//Verify the tuned profile is created on the worker-cnf nodes:
+			tunedCmd := []string{"bash", "-c",
+				fmt.Sprintf("cat /etc/tuned/openshift-node-performance-%s/tuned.conf | grep devices_udev_regex", performanceProfileName)}
+
+			node, err := nodes.GetByName(nodeName)
+			Expect(err).ToNot(HaveOccurred())
+			tunedPod := nodes.TunedForNode(node, RunningOnSingleNode)
+
+			Eventually(func() bool {
+				out, err := pods.WaitForPodOutput(testclient.K8sClient, tunedPod, tunedCmd)
+				if err != nil {
+					return false
+				}
+				// the profile was configured with the wildcard pattern, so look for
+				// the pattern's fixed prefix (Contains, not the always-true ContainsAny)
+				return strings.Contains(string(out), device[:len(device)-1])
+			}, cluster.ComputeTestTimeout(2*time.Minute, RunningOnSingleNode), 5*time.Second).Should(BeTrue(), "could not get a tuned profile set with devices_udev_regex")
+
+			nodesDevices = make(map[string]map[string]int)
+			err = checkDeviceSetWithReservedCPU(workerRTNodes, nodesDevices, *profile)
+			if err != nil {
+				Skip("Skipping Test: Unable to set Network queue size to reserved cpu count")
+			}
+		})
+
+		It("[test_id:40668] Verify reserved cpu count is added to networking devices matched with vendor and Device id", func() {
+			nodesDevices := make(map[string]map[string]int)
+			deviceSupport, err := checkDeviceSupport(workerRTNodes, nodesDevices)
+			Expect(err).ToNot(HaveOccurred())
+			if !deviceSupport {
+				Skip("Skipping Test: There are no supported Network Devices")
+			}
+			nodeName, device := getRandomNodeDevice(nodesDevices)
+			node, err := nodes.GetByName(nodeName)
+			Expect(err).ToNot(HaveOccurred())
+			vid := getVendorID(*node, device)
+			did := getDeviceID(*node, device)
+			profile, err = profiles.GetByNodeLabels(testutils.NodeSelectorLabels)
+			Expect(err).ToNot(HaveOccurred())
+			if profile.Spec.Net.UserLevelNetworking != nil && *profile.Spec.Net.UserLevelNetworking && len(profile.Spec.Net.Devices) == 0 {
+				By("Enable UserLevelNetworking and add DeviceID, VendorID and Interface in Profile")
+				profile.Spec.Net = &performancev2.Net{
+					UserLevelNetworking: pointer.Bool(true),
+					Devices: []performancev2.Device{
+						{
+							InterfaceName: &device,
+						},
+						{
+							VendorID: &vid,
+							DeviceID: &did,
+						},
+					},
+				}
+				profiles.UpdateWithRetry(profile)
+			}
+			//Verify the tuned profile is created on the worker-cnf nodes:
+			tunedCmd := []string{"bash", "-c",
+				fmt.Sprintf("cat /etc/tuned/openshift-node-performance-%s/tuned.conf | grep devices_udev_regex", performanceProfileName)}
+
+			node, err = nodes.GetByName(nodeName)
+			Expect(err).ToNot(HaveOccurred())
+			tunedPod := nodes.TunedForNode(node, RunningOnSingleNode)
+			Eventually(func() bool {
+				out, err := pods.WaitForPodOutput(testclient.K8sClient, tunedPod, tunedCmd)
+				if err != nil {
+					return false
+				}
+				// strings.Contains, not ContainsAny (see test_id:40543)
+				return strings.Contains(string(out), device)
+			}, cluster.ComputeTestTimeout(2*time.Minute, RunningOnSingleNode), 5*time.Second).Should(BeTrue(), "could not get a tuned profile set with devices_udev_regex")
+
+			nodesDevices = make(map[string]map[string]int)
+			err = checkDeviceSetWithReservedCPU(workerRTNodes, nodesDevices, *profile)
+			if err != nil {
+				Skip("Skipping Test: Unable to set Network queue size to reserved cpu count")
+			}
+		})
+	})
+})
+
+// checkDeviceSetWithReservedCPU polls until at least one supported network
+// device on any of the given nodes reports a combined channel count equal to
+// the profile's reserved CPU count, i.e. until the queue resize is applied.
+// nodesDevices is (re)filled by checkDeviceSupport on every poll iteration.
+// Returns the poll timeout error when no matching device shows up in time.
+func checkDeviceSetWithReservedCPU(workerRTNodes []corev1.Node, nodesDevices map[string]map[string]int, profile performancev2.PerformanceProfile) error {
+	return wait.PollImmediate(5*time.Second, 90*time.Second, func() (bool, error) {
+		deviceSupport, err := checkDeviceSupport(workerRTNodes, nodesDevices)
+		Expect(err).ToNot(HaveOccurred())
+		if !deviceSupport {
+			return false, nil
+		}
+		for _, devices := range nodesDevices {
+			for _, size := range devices {
+				if size == getReservedCPUSize(profile.Spec.CPU) {
+					return true, nil
+				}
+			}
+		}
+		return false, nil
+	})
+}
+
+// Check if the device support multiple queues
+func checkDeviceSupport(workernodes []corev1.Node, nodesDevices map[string]map[string]int) (bool, error) {
+ cmdGetPhysicalDevices := []string{"find", "/sys/class/net", "-type", "l", "-not", "-lname", "*virtual*", "-printf", "%f "}
+ var channelCurrentCombined int
+ var noSupportedDevices = true
+ var err error
+ for _, node := range workernodes {
+ if nodesDevices[node.Name] == nil {
+ nodesDevices[node.Name] = make(map[string]int)
+ }
+ tunedPod := nodes.TunedForNode(&node, RunningOnSingleNode)
+ phyDevs, err := pods.WaitForPodOutput(testclient.K8sClient, tunedPod, cmdGetPhysicalDevices)
+ Expect(err).ToNot(HaveOccurred())
+ for _, d := range strings.Split(string(phyDevs), " ") {
+ if d == "" {
+ continue
+ }
+ _, err := pods.WaitForPodOutput(testclient.K8sClient, tunedPod, []string{"ethtool", "-l", d})
+ if err == nil {
+ cmdCombinedChannelsCurrent := []string{"bash", "-c",
+ fmt.Sprintf("ethtool -l %s | sed -n '/Current hardware settings:/,/Combined:/{s/^Combined:\\s*//p}'", d)}
+ out, err := pods.WaitForPodOutput(testclient.K8sClient, tunedPod, cmdCombinedChannelsCurrent)
+ if strings.Contains(string(out), "n/a") {
+ fmt.Printf("Device %s doesn't support multiple queues\n", d)
+ } else {
+ channelCurrentCombined, err = strconv.Atoi(strings.TrimSpace(string(out)))
+ if err != nil {
+ testlog.Warningf(fmt.Sprintf("unable to retrieve current multi-purpose channels hardware settings for device %s on %s",
+ d, node.Name))
+ }
+ if channelCurrentCombined == 1 {
+ fmt.Printf("Device %s doesn't support multiple queues\n", d)
+ } else {
+ fmt.Printf("Device %s supports multiple queues\n", d)
+ nodesDevices[node.Name][d] = channelCurrentCombined
+ noSupportedDevices = false
+ }
+ }
+ }
+ }
+ }
+ if noSupportedDevices {
+ return false, err
+ }
+ return true, err
+}
+
+// getReservedCPUSize returns the number of CPUs in the profile's reserved set.
+// CPU.Reserved is assumed non-nil and a valid cpuset string (e.g. "0-1,4");
+// a parse failure fails the current spec.
+func getReservedCPUSize(CPU *performancev2.CPU) int {
+	reservedCPUs, err := cpuset.Parse(string(*CPU.Reserved))
+	Expect(err).ToNot(HaveOccurred())
+	return reservedCPUs.Size()
+}
+
+// getVendorID returns the PCI vendor id of the given network device, read
+// from sysfs on the given node.
+func getVendorID(node corev1.Node, device string) string {
+	command := []string{"bash", "-c",
+		fmt.Sprintf("cat /sys/class/net/%s/device/vendor", device)}
+	out, err := nodes.ExecCommandOnNode(command, &node)
+	Expect(err).ToNot(HaveOccurred())
+	return out
+}
+
+// getDeviceID returns the PCI device id of the given network device, read
+// from sysfs on the given node.
+func getDeviceID(node corev1.Node, device string) string {
+	command := []string{"bash", "-c",
+		fmt.Sprintf("cat /sys/class/net/%s/device/device", device)}
+	out, err := nodes.ExecCommandOnNode(command, &node)
+	Expect(err).ToNot(HaveOccurred())
+	return out
+}
+
+// getRandomNodeDevice picks an arbitrary (node, device) pair out of the
+// discovered devices map; Go's map iteration order makes the choice
+// effectively random. When no device is recorded anywhere, the device part
+// of the result is empty and the node part is the last node visited.
+func getRandomNodeDevice(nodesDevices map[string]map[string]int) (string, string) {
+	lastNode := ""
+	for nodeName, devices := range nodesDevices {
+		lastNode = nodeName
+		for deviceName := range devices {
+			if deviceName != "" {
+				return nodeName, deviceName
+			}
+		}
+	}
+	return lastNode, ""
+}
diff --git a/test/e2e/pao/functests/1_performance/performance.go b/test/e2e/pao/functests/1_performance/performance.go
new file mode 100644
index 000000000..6c8c065f9
--- /dev/null
+++ b/test/e2e/pao/functests/1_performance/performance.go
@@ -0,0 +1,1333 @@
+package __performance
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "path/filepath"
+ "reflect"
+ "regexp"
+ "strconv"
+ "strings"
+ "time"
+
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+
+ corev1 "k8s.io/api/core/v1"
+ "k8s.io/api/node/v1beta1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/fields"
+ "k8s.io/apimachinery/pkg/types"
+ "k8s.io/klog"
+ "k8s.io/kubernetes/pkg/kubelet/cm/cpuset"
+ "k8s.io/utils/pointer"
+
+ "sigs.k8s.io/controller-runtime/pkg/client"
+
+ performancev1 "github.com/openshift/cluster-node-tuning-operator/pkg/apis/pao/v1"
+ performancev1alpha1 "github.com/openshift/cluster-node-tuning-operator/pkg/apis/pao/v1alpha1"
+ performancev2 "github.com/openshift/cluster-node-tuning-operator/pkg/apis/pao/v2"
+ tunedv1 "github.com/openshift/cluster-node-tuning-operator/pkg/apis/tuned/v1"
+ "github.com/openshift/cluster-node-tuning-operator/pkg/pao/controller/performanceprofile/components"
+ "github.com/openshift/cluster-node-tuning-operator/pkg/pao/controller/performanceprofile/components/machineconfig"
+ componentprofile "github.com/openshift/cluster-node-tuning-operator/pkg/pao/controller/performanceprofile/components/profile"
+ testutils "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils"
+ testclient "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/client"
+ "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/cluster"
+ "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/discovery"
+ testlog "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/log"
+ "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/mcps"
+ "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/nodes"
+ "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/pods"
+ "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/profiles"
+ machineconfigv1 "github.com/openshift/machine-config-operator/pkg/apis/machineconfiguration.openshift.io/v1"
+ mcov1 "github.com/openshift/machine-config-operator/pkg/apis/machineconfiguration.openshift.io/v1"
+)
+
+const (
+	// testTimeout: generic timeout, in seconds, for this suite's polling
+	// checks — usage not visible in this chunk, confirm against the rest of the file
+	testTimeout = 480
+	// testPollInterval: polling interval in seconds, used as
+	// testPollInterval*time.Second in Eventually calls
+	testPollInterval = 2
+)
+
+// RunningOnSingleNode reports whether the cluster under test is a single-node
+// (SNO) deployment; set once in BeforeAll via cluster.IsSingleNode and used to
+// scale test timeouts (cluster.ComputeTestTimeout).
+var RunningOnSingleNode bool
+
+var _ = Describe("[rfe_id:27368][performance]", func() {
+ var workerRTNodes []corev1.Node
+ var profile *performancev2.PerformanceProfile
+
+ testutils.BeforeAll(func() {
+ isSNO, err := cluster.IsSingleNode()
+ Expect(err).ToNot(HaveOccurred())
+ RunningOnSingleNode = isSNO
+ })
+
+ BeforeEach(func() {
+ if discovery.Enabled() && testutils.ProfileNotFound {
+ Skip("Discovery mode enabled, performance profile not found")
+ }
+
+ var err error
+ workerRTNodes, err = nodes.GetByLabels(testutils.NodeSelectorLabels)
+ Expect(err).ToNot(HaveOccurred(), "error looking for node with role %q: %v", testutils.RoleWorkerCNF, err)
+ workerRTNodes, err = nodes.MatchingOptionalSelector(workerRTNodes)
+ Expect(err).ToNot(HaveOccurred(), fmt.Sprintf("error looking for the optional selector: %v", err))
+ Expect(workerRTNodes).ToNot(BeEmpty(), "no nodes with role %q found", testutils.RoleWorkerCNF)
+ profile, err = profiles.GetByNodeLabels(testutils.NodeSelectorLabels)
+ Expect(err).ToNot(HaveOccurred(), "cannot get profile by node labels %v", testutils.NodeSelectorLabels)
+ })
+
+ Context("Tuned CRs generated from profile", func() {
+ tunedExpectedName := components.GetComponentName(testutils.PerformanceProfileName, components.ProfileNamePerformance)
+ It("[test_id:31748] Should have the expected name for tuned from the profile owner object", func() {
+ tunedList := &tunedv1.TunedList{}
+ key := types.NamespacedName{
+ Name: tunedExpectedName,
+ Namespace: components.NamespaceNodeTuningOperator,
+ }
+ tuned := &tunedv1.Tuned{}
+ err := testclient.Client.Get(context.TODO(), key, tuned)
+ Expect(err).ToNot(HaveOccurred(), "cannot find the Cluster Node Tuning Operator object %q", tuned.Name)
+
+ Eventually(func() bool {
+ err := testclient.Client.List(context.TODO(), tunedList)
+ Expect(err).NotTo(HaveOccurred())
+ for t := range tunedList.Items {
+ tunedItem := tunedList.Items[t]
+ ownerReferences := tunedItem.ObjectMeta.OwnerReferences
+ for o := range ownerReferences {
+ if ownerReferences[o].Name == profile.Name && tunedItem.Name != tunedExpectedName {
+ return false
+ }
+ }
+ }
+ return true
+ }, cluster.ComputeTestTimeout(120*time.Second, RunningOnSingleNode), testPollInterval*time.Second).Should(BeTrue(),
+ "tuned CR name owned by a performance profile CR should only be %q", tunedExpectedName)
+ })
+
+ It("[test_id:37127] Node should point to right tuned profile", func() {
+ for _, node := range workerRTNodes {
+ tuned := nodes.TunedForNode(&node, RunningOnSingleNode)
+ activeProfile, err := pods.WaitForPodOutput(testclient.K8sClient, tuned, []string{"cat", "/etc/tuned/active_profile"})
+ Expect(err).ToNot(HaveOccurred(), "Error getting the tuned active profile")
+ activeProfileName := string(activeProfile)
+ Expect(strings.TrimSpace(activeProfileName)).To(Equal(tunedExpectedName), "active profile name mismatch got %q expected %q", activeProfileName, tunedExpectedName)
+ }
+ })
+ })
+
+ Context("Pre boot tuning adjusted by tuned ", func() {
+
+ It("[test_id:31198] Should set CPU affinity kernel argument", func() {
+ for _, node := range workerRTNodes {
+ cmdline, err := nodes.ExecCommandOnMachineConfigDaemon(&node, []string{"cat", "/proc/cmdline"})
+ Expect(err).ToNot(HaveOccurred())
+ // since systemd.cpu_affinity is calculated on node level using tuned we can check only the key in this context.
+ Expect(string(cmdline)).To(ContainSubstring("systemd.cpu_affinity="))
+ }
+ })
+
+ It("[test_id:32702] Should set CPU isolcpu's kernel argument managed_irq flag", func() {
+ for _, node := range workerRTNodes {
+ cmdline, err := nodes.ExecCommandOnMachineConfigDaemon(&node, []string{"cat", "/proc/cmdline"})
+ Expect(err).ToNot(HaveOccurred())
+ if profile.Spec.CPU.BalanceIsolated != nil && *profile.Spec.CPU.BalanceIsolated == false {
+ Expect(string(cmdline)).To(ContainSubstring("isolcpus=domain,managed_irq,"))
+ } else {
+ Expect(string(cmdline)).To(ContainSubstring("isolcpus=managed_irq,"))
+ }
+ }
+ })
+
+ It("[test_id:27081][crit:high][vendor:cnf-qe@redhat.com][level:acceptance] Should set workqueue CPU mask", func() {
+ for _, node := range workerRTNodes {
+ By(fmt.Sprintf("Getting tuned.non_isolcpus kernel argument on %q", node.Name))
+ cmdline, err := nodes.ExecCommandOnMachineConfigDaemon(&node, []string{"cat", "/proc/cmdline"})
+ Expect(err).ToNot(HaveOccurred())
+ re := regexp.MustCompile(`tuned.non_isolcpus=\S+`)
+ nonIsolcpusFullArgument := re.FindString(string(cmdline))
+ Expect(nonIsolcpusFullArgument).To(ContainSubstring("tuned.non_isolcpus="), "tuned.non_isolcpus parameter not found in %q", cmdline)
+ nonIsolcpusMask := strings.Split(string(nonIsolcpusFullArgument), "=")[1]
+ nonIsolcpusMaskNoDelimiters := strings.Replace(nonIsolcpusMask, ",", "", -1)
+
+ getTrimmedMaskFromData := func(maskType string, data []byte) string {
+ trimmed := strings.TrimSpace(string(data))
+ testlog.Infof("workqueue %s mask for %q: %q", maskType, node.Name, trimmed)
+ return strings.Replace(trimmed, ",", "", -1)
+ }
+
+ expectMasksEqual := func(expected, got string) {
+ expectedTrimmed := strings.TrimLeft(expected, "0")
+ gotTrimmed := strings.TrimLeft(got, "0")
+ ExpectWithOffset(1, expectedTrimmed).Should(Equal(gotTrimmed), "wrong workqueue mask on %q - got %q (from %q) expected %q (from %q)", node.Name, expectedTrimmed, expected, got, gotTrimmed)
+ }
+
+ By(fmt.Sprintf("Getting the virtual workqueue mask (/sys/devices/virtual/workqueue/cpumask) on %q", node.Name))
+ workqueueMaskData, err := nodes.ExecCommandOnMachineConfigDaemon(&node, []string{"cat", "/sys/devices/virtual/workqueue/cpumask"})
+ Expect(err).ToNot(HaveOccurred())
+ workqueueMask := getTrimmedMaskFromData("virtual", workqueueMaskData)
+ expectMasksEqual(nonIsolcpusMaskNoDelimiters, workqueueMask)
+
+ By(fmt.Sprintf("Getting the writeback workqueue mask (/sys/bus/workqueue/devices/writeback/cpumask) on %q", node.Name))
+ workqueueWritebackMaskData, err := nodes.ExecCommandOnMachineConfigDaemon(&node, []string{"cat", "/sys/bus/workqueue/devices/writeback/cpumask"})
+ Expect(err).ToNot(HaveOccurred())
+ workqueueWritebackMask := getTrimmedMaskFromData("workqueue", workqueueWritebackMaskData)
+ expectMasksEqual(nonIsolcpusMaskNoDelimiters, workqueueWritebackMask)
+ }
+ })
+
+	It("[test_id:32375][crit:high][vendor:cnf-qe@redhat.com][level:acceptance] initramfs should not have injected configuration", func() {
+		// The CPU-affinity systemd drop-in must be applied at runtime only — it
+		// must never be baked into the deployed initramfs image.
+		for _, node := range workerRTNodes {
+			// The ostree deployment id is the third "/"-separated field of the
+			// boot path recorded on the kernel command line.
+			rhcosId, err := nodes.ExecCommandOnMachineConfigDaemon(&node, []string{"awk", "-F", "/", "{printf $3}", "/rootfs/proc/cmdline"})
+			Expect(err).ToNot(HaveOccurred())
+			// Locate the initramfs image(s) that belong to that deployment.
+			initramfsImagesPath, err := nodes.ExecCommandOnMachineConfigDaemon(&node, []string{"find", filepath.Join("/rootfs/boot/ostree", string(rhcosId)), "-name", "*.img"})
+			Expect(err).ToNot(HaveOccurred())
+			// Strip the MCD chroot prefix so the path is valid inside /rootfs.
+			modifiedImagePath := strings.TrimPrefix(strings.TrimSpace(string(initramfsImagesPath)), "/rootfs")
+			// List the initramfs contents from within the host root.
+			initrd, err := nodes.ExecCommandOnMachineConfigDaemon(&node, []string{"chroot", "/rootfs", "lsinitrd", modifiedImagePath})
+			Expect(err).ToNot(HaveOccurred())
+			Expect(string(initrd)).ShouldNot(ContainSubstring("'/etc/systemd/system.conf /etc/systemd/system.conf.d/setAffinity.conf'"))
+		}
+	})
+
+	It("[test_id:35363][crit:high][vendor:cnf-qe@redhat.com][level:acceptance] stalld daemon is running on the host", func() {
+		// stalld is expected to run on every real-time worker node.
+		for _, node := range workerRTNodes {
+			// Use the node's tuned pod to probe the host PID namespace.
+			tuned := nodes.TunedForNode(&node, RunningOnSingleNode)
+			// pidof exits non-zero when no stalld process exists, which
+			// surfaces here as a command error.
+			_, err := pods.WaitForPodOutput(testclient.K8sClient, tuned, []string{"pidof", "stalld"})
+			Expect(err).ToNot(HaveOccurred())
+		}
+	})
+	It("[test_id:42400][crit:medium][vendor:cnf-qe@redhat.com][level:acceptance] stalld daemon is running as sched_fifo", func() {
+		// stalld must be scheduled with the SCHED_FIFO real-time policy at priority 10.
+		for _, node := range workerRTNodes {
+			pid, err := nodes.ExecCommandOnNode([]string{"pidof", "stalld"}, &node)
+			Expect(err).ToNot(HaveOccurred())
+			Expect(pid).ToNot(BeEmpty())
+			// `chrt -ap <pid>` prints the scheduling policy and priority of
+			// every thread belonging to the pid.
+			sched_tasks, err := nodes.ExecCommandOnNode([]string{"chrt", "-ap", pid}, &node)
+			Expect(err).ToNot(HaveOccurred())
+			Expect(sched_tasks).To(ContainSubstring("scheduling policy: SCHED_FIFO"))
+			Expect(sched_tasks).To(ContainSubstring("scheduling priority: 10"))
+		}
+	})
+	It("[test_id:42696][crit:medium][vendor:cnf-qe@redhat.com][level:acceptance] Stalld runs in higher priority than ksoftirq and rcu{c,b}", func() {
+		// Compares the chrt-reported priorities of stalld, ksoftirqd and the rcu
+		// kthreads. NOTE(review): the assertions expect stalld's printed priority
+		// to be numerically *lower* than the others — confirm this matches the
+		// intended priority ordering for the configured scheduling policies.
+		for _, node := range workerRTNodes {
+			stalld_pid, err := nodes.ExecCommandOnNode([]string{"pidof", "stalld"}, &node)
+			Expect(err).ToNot(HaveOccurred())
+			Expect(stalld_pid).ToNot(BeEmpty())
+			sched_tasks, err := nodes.ExecCommandOnNode([]string{"chrt", "-ap", stalld_pid}, &node)
+			Expect(err).ToNot(HaveOccurred())
+			// Extract the numeric priority from chrt's "scheduling priority: N" line.
+			re := regexp.MustCompile("scheduling priority: ([0-9]+)")
+			match := re.FindStringSubmatch(sched_tasks)
+			stalld_prio, err := strconv.Atoi(match[1])
+			Expect(err).ToNot(HaveOccurred())
+
+			// `pgrep -n` picks the newest matching ksoftirqd thread.
+			ksoftirq_pid, err := nodes.ExecCommandOnNode([]string{"pgrep", "-f", "ksoftirqd", "-n"}, &node)
+			Expect(err).ToNot(HaveOccurred())
+			Expect(ksoftirq_pid).ToNot(BeEmpty())
+			sched_tasks, err = nodes.ExecCommandOnNode([]string{"chrt", "-ap", ksoftirq_pid}, &node)
+			Expect(err).ToNot(HaveOccurred())
+			match = re.FindStringSubmatch(sched_tasks)
+			ksoftirq_prio, err := strconv.Atoi(match[1])
+			Expect(err).ToNot(HaveOccurred())
+
+			// Without the RT kernel the rcuc/rcub kthreads do not exist, so only
+			// the ksoftirqd comparison is performed.
+			if profile.Spec.RealTimeKernel == nil ||
+				profile.Spec.RealTimeKernel.Enabled == nil ||
+				*profile.Spec.RealTimeKernel.Enabled != true {
+				Expect(stalld_prio).To(BeNumerically("<", ksoftirq_prio))
+				testlog.Warning("Skip checking rcu since RT kernel is disabled")
+				return
+			}
+			//rcuc/n : kthreads that are pinned to CPUs & are responsible to execute the callbacks of rcu threads .
+			//rcub/n : are boosting kthreads ,responsible to monitor per-cpu arrays of lists of tasks that were blocked while in an rcu read-side critical sections.
+			rcu_pid, err := nodes.ExecCommandOnNode([]string{"pgrep", "-f", "rcu[c,b]", "-n"}, &node)
+			Expect(err).ToNot(HaveOccurred())
+			Expect(rcu_pid).ToNot(BeEmpty())
+			sched_tasks, err = nodes.ExecCommandOnNode([]string{"chrt", "-ap", rcu_pid}, &node)
+			Expect(err).ToNot(HaveOccurred())
+			match = re.FindStringSubmatch(sched_tasks)
+			rcu_prio, err := strconv.Atoi(match[1])
+			Expect(err).ToNot(HaveOccurred())
+
+			Expect(stalld_prio).To(BeNumerically("<", rcu_prio))
+			Expect(stalld_prio).To(BeNumerically("<", ksoftirq_prio))
+		}
+	})
+
+ })
+
+	// Verifies that every AdditionalKernelArgs entry from the performance
+	// profile ends up on the booted kernel command line of each RT worker.
+	Context("Additional kernel arguments added from performance profile", func() {
+		It("[test_id:28611][crit:high][vendor:cnf-qe@redhat.com][level:acceptance] Should set additional kernel arguments on the machine", func() {
+			// Nothing to verify when the profile requests no extra kernel args.
+			if profile.Spec.AdditionalKernelArgs == nil {
+				return
+			}
+			for _, node := range workerRTNodes {
+				cmdline, err := nodes.ExecCommandOnMachineConfigDaemon(&node, []string{"cat", "/proc/cmdline"})
+				Expect(err).ToNot(HaveOccurred())
+				// Every additional argument must appear verbatim on the command line.
+				for _, arg := range profile.Spec.AdditionalKernelArgs {
+					Expect(string(cmdline)).To(ContainSubstring(arg))
+				}
+			}
+		})
+	})
+
+	Context("Tuned kernel parameters", func() {
+		It("[test_id:28466][crit:high][vendor:cnf-qe@redhat.com][level:acceptance] Should contain configuration injected through openshift-node-performance profile", func() {
+			// sysctl values the performance Tuned profile is expected to apply.
+			sysctlMap := map[string]string{
+				"kernel.hung_task_timeout_secs": "600",
+				"kernel.nmi_watchdog":           "0",
+				"kernel.sched_rt_runtime_us":    "-1",
+				"vm.stat_interval":              "10",
+				"kernel.timer_migration":        "1",
+			}
+
+			// The Tuned CR is named after the performance profile and lives in
+			// the Node Tuning Operator namespace.
+			key := types.NamespacedName{
+				Name:      components.GetComponentName(testutils.PerformanceProfileName, components.ProfileNamePerformance),
+				Namespace: components.NamespaceNodeTuningOperator,
+			}
+			tuned := &tunedv1.Tuned{}
+			err := testclient.Client.Get(context.TODO(), key, tuned)
+			Expect(err).ToNot(HaveOccurred(), "cannot find the Cluster Node Tuning Operator object "+key.String())
+			// The Tuned profile must be active before the sysctls are checked.
+			validateTunedActiveProfile(workerRTNodes)
+			execSysctlOnWorkers(workerRTNodes, sysctlMap)
+		})
+	})
+
+	// Verifies the RPS (Receive Packet Steering) mask derived from the
+	// profile's reserved CPU set is consistently applied in the OCI hook,
+	// the systemd update-rps service, all host net devices and all pod
+	// network devices on every RT worker node.
+	Context("RPS configuration", func() {
+		It("Should have the correct RPS configuration", func() {
+			// The RPS mask is derived from the reserved CPU set; without one
+			// there is nothing to verify. (The previous guard used
+			// `Reserved != nil`, which skipped the test whenever a reserved set
+			// existed and nil-dereferenced *Reserved when it did not.)
+			if profile.Spec.CPU == nil || profile.Spec.CPU.Reserved == nil {
+				return
+			}
+
+			expectedRPSCPUs, err := cpuset.Parse(string(*profile.Spec.CPU.Reserved))
+			Expect(err).ToNot(HaveOccurred())
+			ociHookPath := filepath.Join("/rootfs", machineconfig.OCIHooksConfigDir, machineconfig.OCIHooksConfig)
+			for _, node := range workerRTNodes {
+				// Verify the OCI RPS hook uses the correct RPS mask
+				hooksConfig, err := nodes.ExecCommandOnMachineConfigDaemon(&node, []string{"cat", ociHookPath})
+				Expect(err).ToNot(HaveOccurred())
+
+				var hooks map[string]interface{}
+				err = json.Unmarshal(hooksConfig, &hooks)
+				Expect(err).ToNot(HaveOccurred())
+				hook := hooks["hook"].(map[string]interface{})
+				Expect(hook).ToNot(BeNil())
+				args := hook["args"].([]interface{})
+				Expect(len(args)).To(Equal(2), "unexpected arguments: %v", args)
+
+				// The second hook argument is the RPS CPU mask.
+				rpsCPUs, err := components.CPUMaskToCPUSet(args[1].(string))
+				Expect(err).ToNot(HaveOccurred())
+				Expect(rpsCPUs).To(Equal(expectedRPSCPUs), "the hook rps mask is different from the reserved CPUs")
+
+				// Verify the systemd RPS service uses the correct RPS mask
+				cmd := []string{"sed", "-n", "s/^ExecStart=.*echo \\([A-Fa-f0-9]*\\) .*/\\1/p", "/rootfs/etc/systemd/system/update-rps@.service"}
+				serviceRPSCPUs, err := nodes.ExecCommandOnNode(cmd, &node)
+				Expect(err).ToNot(HaveOccurred())
+
+				rpsCPUs, err = components.CPUMaskToCPUSet(serviceRPSCPUs)
+				Expect(err).ToNot(HaveOccurred())
+				Expect(rpsCPUs).To(Equal(expectedRPSCPUs), "the service rps mask is different from the reserved CPUs")
+
+				// Verify all host network devices have the correct RPS mask
+				cmd = []string{"find", "/rootfs/sys/devices", "-type", "f", "-name", "rps_cpus", "-exec", "cat", "{}", ";"}
+				devsRPS, err := nodes.ExecCommandOnNode(cmd, &node)
+				Expect(err).ToNot(HaveOccurred())
+
+				for _, devRPS := range strings.Split(devsRPS, "\n") {
+					rpsCPUs, err = components.CPUMaskToCPUSet(devRPS)
+					Expect(err).ToNot(HaveOccurred())
+					Expect(rpsCPUs).To(Equal(expectedRPSCPUs), "a host device rps mask is different from the reserved CPUs")
+				}
+
+				// Verify all node pod network devices have the correct RPS mask
+				nodePods := &corev1.PodList{}
+				listOptions := &client.ListOptions{
+					Namespace:     "",
+					FieldSelector: fields.SelectorFromSet(fields.Set{"spec.nodeName": node.Name}),
+				}
+				err = testclient.Client.List(context.TODO(), nodePods, listOptions)
+				Expect(err).ToNot(HaveOccurred())
+
+				for _, pod := range nodePods.Items {
+					cmd := []string{"find", "/sys/devices", "-type", "f", "-name", "rps_cpus", "-exec", "cat", "{}", ";"}
+					devsRPS, err := pods.WaitForPodOutput(testclient.K8sClient, &pod, cmd)
+					// Previously this error was silently ignored and garbage
+					// output could be parsed below.
+					Expect(err).ToNot(HaveOccurred())
+					for _, devRPS := range strings.Split(strings.Trim(string(devsRPS), "\n"), "\n") {
+						rpsCPUs, err = components.CPUMaskToCPUSet(devRPS)
+						Expect(err).ToNot(HaveOccurred())
+						Expect(rpsCPUs).To(Equal(expectedRPSCPUs), pod.Name+" has a device rps mask different from the reserved CPUs")
+					}
+				}
+			}
+		})
+	})
+
+	// Verifies the network-latency sysctls rendered by the
+	// openshift-node-performance Tuned profile are active on the workers.
+	Context("Network latency parameters adjusted by the Node Tuning Operator", func() {
+		It("[test_id:28467][crit:high][vendor:cnf-qe@redhat.com][level:acceptance] Should contain configuration injected through the openshift-node-performance profile", func() {
+			// Expected sysctl values for the latency-oriented tuning.
+			sysctlMap := map[string]string{
+				"net.ipv4.tcp_fastopen":           "3",
+				"kernel.sched_min_granularity_ns": "10000000",
+				"vm.dirty_ratio":                  "10",
+				"vm.dirty_background_ratio":       "3",
+				"vm.swappiness":                   "10",
+				"kernel.sched_migration_cost_ns":  "5000000",
+			}
+			// The Tuned CR is named after the performance profile and lives in
+			// the Node Tuning Operator namespace.
+			key := types.NamespacedName{
+				Name:      components.GetComponentName(testutils.PerformanceProfileName, components.ProfileNamePerformance),
+				Namespace: components.NamespaceNodeTuningOperator,
+			}
+			tuned := &tunedv1.Tuned{}
+			err := testclient.Client.Get(context.TODO(), key, tuned)
+			// Report the object key on failure (was components.ProfileNamePerformance,
+			// which printed only the generic profile suffix), matching the
+			// sibling "Tuned kernel parameters" test.
+			Expect(err).ToNot(HaveOccurred(), "cannot find the Cluster Node Tuning Operator object "+key.String())
+			validateTunedActiveProfile(workerRTNodes)
+			execSysctlOnWorkers(workerRTNodes, sysctlMap)
+		})
+	})
+
+	// Exercises the "kubeletconfig.experimental" profile annotation: its JSON
+	// payload must be merged into the KubeletConfig the operator generates.
+	Context("KubeletConfig experimental annotation", func() {
+		var secondMCP *mcov1.MachineConfigPool
+		var secondProfile *performancev2.PerformanceProfile
+		var newRole = "test-annotation"
+
+		BeforeEach(func() {
+			newLabel := fmt.Sprintf("%s/%s", testutils.LabelRole, newRole)
+
+			reserved := performancev2.CPUSet("0")
+			isolated := performancev2.CPUSet("1-3")
+
+			// Profile carrying the experimental annotation under test.
+			secondProfile = &performancev2.PerformanceProfile{
+				TypeMeta: metav1.TypeMeta{
+					Kind:       "PerformanceProfile",
+					APIVersion: performancev2.GroupVersion.String(),
+				},
+				ObjectMeta: metav1.ObjectMeta{
+					Name: "test-annotation",
+					Annotations: map[string]string{
+						"kubeletconfig.experimental": `{"systemReserved": {"memory": "256Mi"}, "kubeReserved": {"memory": "256Mi"}}`,
+					},
+				},
+				Spec: performancev2.PerformanceProfileSpec{
+					CPU: &performancev2.CPU{
+						Reserved: &reserved,
+						Isolated: &isolated,
+					},
+					NodeSelector: map[string]string{newLabel: ""},
+					RealTimeKernel: &performancev2.RealTimeKernel{
+						Enabled: pointer.BoolPtr(true),
+					},
+					NUMA: &performancev2.NUMA{
+						TopologyPolicy: pointer.StringPtr("restricted"),
+					},
+				},
+			}
+			Expect(testclient.Client.Create(context.TODO(), secondProfile)).ToNot(HaveOccurred())
+
+			// Dedicated MCP matching the profile's machine-config label and
+			// node selector, so the generated configs have a pool to land in.
+			machineConfigSelector := componentprofile.GetMachineConfigLabel(secondProfile)
+			secondMCP = &mcov1.MachineConfigPool{
+				ObjectMeta: metav1.ObjectMeta{
+					Name: "test-annotation",
+					Labels: map[string]string{
+						machineconfigv1.MachineConfigRoleLabelKey: newRole,
+					},
+				},
+				Spec: mcov1.MachineConfigPoolSpec{
+					MachineConfigSelector: &metav1.LabelSelector{
+						MatchLabels: machineConfigSelector,
+					},
+					NodeSelector: &metav1.LabelSelector{
+						MatchLabels: map[string]string{
+							newLabel: "",
+						},
+					},
+				},
+			}
+
+			Expect(testclient.Client.Create(context.TODO(), secondMCP)).ToNot(HaveOccurred())
+		})
+
+		It("should override system-reserved memory", func() {
+			var kubeletConfig *machineconfigv1.KubeletConfig
+
+			// The operator creates the KubeletConfig asynchronously; poll for it.
+			Eventually(func() error {
+				By("Getting that new KubeletConfig")
+				configKey := types.NamespacedName{
+					Name:      components.GetComponentName(secondProfile.Name, components.ComponentNamePrefix),
+					Namespace: metav1.NamespaceNone,
+				}
+				kubeletConfig = &machineconfigv1.KubeletConfig{}
+				if err := testclient.GetWithRetry(context.TODO(), configKey, kubeletConfig); err != nil {
+					klog.Warningf("Failed to get the KubeletConfig %q", configKey.Name)
+					return err
+				}
+
+				return nil
+			}, time.Minute, 5*time.Second).Should(BeNil())
+
+			// The annotation's values must appear in the raw kubelet configuration.
+			kubeletConfigString := string(kubeletConfig.Spec.KubeletConfig.Raw)
+			Expect(kubeletConfigString).To(ContainSubstring(`"kubeReserved":{"memory":"256Mi"}`))
+			Expect(kubeletConfigString).To(ContainSubstring(`"systemReserved":{"memory":"256Mi"}`))
+		})
+
+		AfterEach(func() {
+			// Best-effort cleanup: log (not fail) on deletion errors.
+			if secondProfile != nil {
+				if err := profiles.Delete(secondProfile.Name); err != nil {
+					klog.Warningf("failed to delete the performance profile %q: %v", secondProfile.Name, err)
+				}
+			}
+
+			if secondMCP != nil {
+				if err := mcps.Delete(secondMCP.Name); err != nil {
+					klog.Warningf("failed to delete the machine config pool %q: %v", secondMCP.Name, err)
+				}
+			}
+		})
+	})
+
+	// Verifies that a cluster can host multiple performance profiles at once:
+	// a second profile produces its own KubeletConfig/MachineConfig/RuntimeClass/
+	// Tuned objects without disturbing the initial profile's pool.
+	Context("Create second performance profiles on a cluster", func() {
+		var secondMCP *mcov1.MachineConfigPool
+		var secondProfile *performancev2.PerformanceProfile
+		var newRole = "worker-new"
+
+		BeforeEach(func() {
+			newLabel := fmt.Sprintf("%s/%s", testutils.LabelRole, newRole)
+
+			reserved := performancev2.CPUSet("0")
+			isolated := performancev2.CPUSet("1-3")
+
+			// Note: only the MCP is created here; the profile itself is created
+			// inside the test so the test controls its full lifecycle.
+			secondProfile = &performancev2.PerformanceProfile{
+				TypeMeta: metav1.TypeMeta{
+					Kind:       "PerformanceProfile",
+					APIVersion: performancev2.GroupVersion.String(),
+				},
+				ObjectMeta: metav1.ObjectMeta{
+					Name: "second-profile",
+				},
+				Spec: performancev2.PerformanceProfileSpec{
+					CPU: &performancev2.CPU{
+						Reserved: &reserved,
+						Isolated: &isolated,
+					},
+					NodeSelector: map[string]string{newLabel: ""},
+					RealTimeKernel: &performancev2.RealTimeKernel{
+						Enabled: pointer.BoolPtr(true),
+					},
+					// Marker argument used below to recognize the generated Tuned profile.
+					AdditionalKernelArgs: []string{
+						"NEW_ARGUMENT",
+					},
+					NUMA: &performancev2.NUMA{
+						TopologyPolicy: pointer.StringPtr("restricted"),
+					},
+				},
+			}
+
+			machineConfigSelector := componentprofile.GetMachineConfigLabel(secondProfile)
+			secondMCP = &mcov1.MachineConfigPool{
+				ObjectMeta: metav1.ObjectMeta{
+					Name: "second-mcp",
+					Labels: map[string]string{
+						machineconfigv1.MachineConfigRoleLabelKey: newRole,
+					},
+				},
+				Spec: mcov1.MachineConfigPoolSpec{
+					MachineConfigSelector: &metav1.LabelSelector{
+						MatchLabels: machineConfigSelector,
+					},
+					NodeSelector: &metav1.LabelSelector{
+						MatchLabels: map[string]string{
+							newLabel: "",
+						},
+					},
+				},
+			}
+
+			Expect(testclient.Client.Create(context.TODO(), secondMCP)).ToNot(HaveOccurred())
+		})
+
+		AfterEach(func() {
+			// Best-effort cleanup: log (not fail) on deletion errors.
+			if secondProfile != nil {
+				if err := profiles.Delete(secondProfile.Name); err != nil {
+					klog.Warningf("failed to delete the performance profile %q: %v", secondProfile.Name, err)
+				}
+			}
+
+			if secondMCP != nil {
+				if err := mcps.Delete(secondMCP.Name); err != nil {
+					klog.Warningf("failed to delete the machine config pool %q: %v", secondMCP.Name, err)
+				}
+			}
+		})
+
+		It("[test_id:32364] Verifies that cluster can have multiple profiles", func() {
+			Expect(testclient.Client.Create(context.TODO(), secondProfile)).ToNot(HaveOccurred())
+
+			By("Checking that new KubeletConfig, MachineConfig and RuntimeClass created")
+			configKey := types.NamespacedName{
+				Name:      components.GetComponentName(secondProfile.Name, components.ComponentNamePrefix),
+				Namespace: metav1.NamespaceNone,
+			}
+			kubeletConfig := &machineconfigv1.KubeletConfig{}
+			err := testclient.GetWithRetry(context.TODO(), configKey, kubeletConfig)
+			Expect(err).ToNot(HaveOccurred(), fmt.Sprintf("cannot find KubeletConfig object %s", configKey.Name))
+			Expect(kubeletConfig.Spec.MachineConfigPoolSelector.MatchLabels[machineconfigv1.MachineConfigRoleLabelKey]).Should(Equal(newRole))
+			Expect(kubeletConfig.Spec.KubeletConfig.Raw).Should(ContainSubstring("restricted"), "Can't find value in KubeletConfig")
+
+			// The RuntimeClass shares the KubeletConfig's component name.
+			runtimeClass := &v1beta1.RuntimeClass{}
+			err = testclient.GetWithRetry(context.TODO(), configKey, runtimeClass)
+			Expect(err).ToNot(HaveOccurred(), fmt.Sprintf("cannot find RuntimeClass profile object %s", runtimeClass.Name))
+			Expect(runtimeClass.Handler).Should(Equal(machineconfig.HighPerformanceRuntime))
+
+			machineConfigKey := types.NamespacedName{
+				Name:      machineconfig.GetMachineConfigName(secondProfile),
+				Namespace: metav1.NamespaceNone,
+			}
+			machineConfig := &machineconfigv1.MachineConfig{}
+			err = testclient.GetWithRetry(context.TODO(), machineConfigKey, machineConfig)
+			Expect(err).ToNot(HaveOccurred(), fmt.Sprintf("cannot find MachineConfig object %s", configKey.Name))
+			Expect(machineConfig.Labels[machineconfigv1.MachineConfigRoleLabelKey]).Should(Equal(newRole))
+
+			By("Checking that new Tuned profile created")
+			tunedKey := types.NamespacedName{
+				Name:      components.GetComponentName(secondProfile.Name, components.ProfileNamePerformance),
+				Namespace: components.NamespaceNodeTuningOperator,
+			}
+			tunedProfile := &tunedv1.Tuned{}
+			err = testclient.GetWithRetry(context.TODO(), tunedKey, tunedProfile)
+			Expect(err).ToNot(HaveOccurred(), fmt.Sprintf("cannot find Tuned profile object %s", tunedKey.Name))
+			Expect(tunedProfile.Spec.Recommend[0].MachineConfigLabels[machineconfigv1.MachineConfigRoleLabelKey]).Should(Equal(newRole))
+			// The marker kernel argument proves this Tuned came from the second profile.
+			Expect(*tunedProfile.Spec.Profile[0].Data).Should(ContainSubstring("NEW_ARGUMENT"), "Can't find value in Tuned profile")
+
+			By("Checking that the initial MCP does not start updating")
+			Consistently(func() corev1.ConditionStatus {
+				return mcps.GetConditionStatus(testutils.RoleWorkerCNF, machineconfigv1.MachineConfigPoolUpdating)
+			}, 30, 5).Should(Equal(corev1.ConditionFalse))
+
+			By("Remove second profile and verify that KubeletConfig and MachineConfig were removed")
+			Expect(testclient.Client.Delete(context.TODO(), secondProfile)).ToNot(HaveOccurred())
+
+			profileKey := types.NamespacedName{
+				Name:      secondProfile.Name,
+				Namespace: secondProfile.Namespace,
+			}
+			Expect(profiles.WaitForDeletion(profileKey, 60*time.Second)).ToNot(HaveOccurred())
+
+			// The initial pool must also stay still during the deletion.
+			Consistently(func() corev1.ConditionStatus {
+				return mcps.GetConditionStatus(testutils.RoleWorkerCNF, machineconfigv1.MachineConfigPoolUpdating)
+			}, 30, 5).Should(Equal(corev1.ConditionFalse))
+
+			Expect(testclient.Client.Get(context.TODO(), configKey, kubeletConfig)).To(HaveOccurred(), fmt.Sprintf("KubeletConfig %s should be removed", configKey.Name))
+			Expect(testclient.Client.Get(context.TODO(), machineConfigKey, machineConfig)).To(HaveOccurred(), fmt.Sprintf("MachineConfig %s should be removed", configKey.Name))
+			Expect(testclient.Client.Get(context.TODO(), configKey, runtimeClass)).To(HaveOccurred(), fmt.Sprintf("RuntimeClass %s should be removed", configKey.Name))
+			Expect(testclient.Client.Get(context.TODO(), tunedKey, tunedProfile)).To(HaveOccurred(), fmt.Sprintf("Tuned profile object %s should be removed", tunedKey.Name))
+
+			By("Checking that initial KubeletConfig and MachineConfig still exist")
+			initialKey := types.NamespacedName{
+				Name:      components.GetComponentName(profile.Name, components.ComponentNamePrefix),
+				Namespace: components.NamespaceNodeTuningOperator,
+			}
+			err = testclient.GetWithRetry(context.TODO(), initialKey, &machineconfigv1.KubeletConfig{})
+			Expect(err).ToNot(HaveOccurred(), fmt.Sprintf("cannot find KubeletConfig object %s", initialKey.Name))
+
+			initialMachineConfigKey := types.NamespacedName{
+				Name:      machineconfig.GetMachineConfigName(profile),
+				Namespace: metav1.NamespaceNone,
+			}
+			err = testclient.GetWithRetry(context.TODO(), initialMachineConfigKey, &machineconfigv1.MachineConfig{})
+			Expect(err).ToNot(HaveOccurred(), fmt.Sprintf("cannot find MachineConfig object %s", initialKey.Name))
+		})
+	})
+
+	// Round-trips the performance profile through its API versions
+	// (v1alpha1 <-> v1 <-> v2) and checks field-level conversion fidelity.
+	Context("Verify API Conversions", func() {
+		// verifyV2V1 reads the current profile as v1, creates a copy through
+		// the v1 API and verifies the copy converts back to v2 losslessly.
+		verifyV2V1 := func() {
+			By("Checking v2 -> v1 conversion")
+			v1Profile := &performancev1.PerformanceProfile{}
+			key := types.NamespacedName{
+				Name:      profile.Name,
+				Namespace: profile.Namespace,
+			}
+
+			err := testclient.Client.Get(context.TODO(), key, v1Profile)
+			Expect(err).ToNot(HaveOccurred(), "Failed getting v1Profile")
+			Expect(verifyV2Conversion(profile, v1Profile)).ToNot(HaveOccurred())
+
+			By("Checking v1 -> v2 conversion")
+			// Turn the fetched object into a fresh, non-conflicting v1 profile.
+			v1Profile.Name = "v1"
+			v1Profile.ResourceVersion = ""
+			v1Profile.Spec.NodeSelector = map[string]string{"v1/v1": "v1"}
+			v1Profile.Spec.MachineConfigPoolSelector = nil
+			v1Profile.Spec.MachineConfigLabel = nil
+			Expect(testclient.Client.Create(context.TODO(), v1Profile)).ToNot(HaveOccurred())
+
+			// NOTE: the closure captures `key`, which is reassigned below to the
+			// newly created "v1" profile — that is the object whose deletion is awaited.
+			defer func() {
+				Expect(testclient.Client.Delete(context.TODO(), v1Profile)).ToNot(HaveOccurred())
+				Expect(profiles.WaitForDeletion(key, 60*time.Second)).ToNot(HaveOccurred())
+			}()
+
+			key = types.NamespacedName{
+				Name:      v1Profile.Name,
+				Namespace: v1Profile.Namespace,
+			}
+			err = testclient.Client.Get(context.TODO(), key, v1Profile)
+			Expect(err).ToNot(HaveOccurred(), "Failed getting v1Profile")
+
+			// Fetch the same object through the v2 API and compare.
+			v2Profile := &performancev2.PerformanceProfile{}
+			err = testclient.GetWithRetry(context.TODO(), key, v2Profile)
+			Expect(err).ToNot(HaveOccurred(), "Failed getting v2Profile")
+			Expect(verifyV2Conversion(v2Profile, v1Profile)).ToNot(HaveOccurred())
+		}
+
+		// verifyV1VAlpha1 does the analogous round-trip between v1 and v1alpha1.
+		verifyV1VAlpha1 := func() {
+			By("Acquiring the tests profile as a v1 profile")
+			v1Profile := &performancev1.PerformanceProfile{}
+			key := types.NamespacedName{
+				Name:      profile.Name,
+				Namespace: profile.Namespace,
+			}
+
+			err := testclient.Client.Get(context.TODO(), key, v1Profile)
+			Expect(err).ToNot(HaveOccurred(), "Failed acquiring a v1 profile")
+
+			By("Checking v1 -> v1alpha1 conversion")
+			v1alpha1Profile := &performancev1alpha1.PerformanceProfile{}
+			key = types.NamespacedName{
+				Name:      v1Profile.Name,
+				Namespace: v1Profile.Namespace,
+			}
+
+			err = testclient.Client.Get(context.TODO(), key, v1alpha1Profile)
+			Expect(err).ToNot(HaveOccurred(), "Failed getting v1alpha1Profile")
+			Expect(verifyV1alpha1Conversion(v1alpha1Profile, v1Profile)).ToNot(HaveOccurred())
+
+			By("Checking v1alpha1 -> v1 conversion")
+			// Create a fresh, non-conflicting v1alpha1 profile.
+			v1alpha1Profile.Name = "v1alpha"
+			v1alpha1Profile.ResourceVersion = ""
+			v1alpha1Profile.Spec.NodeSelector = map[string]string{"v1alpha/v1alpha": "v1alpha"}
+			v1alpha1Profile.Spec.MachineConfigPoolSelector = nil
+			v1alpha1Profile.Spec.MachineConfigLabel = nil
+			Expect(testclient.Client.Create(context.TODO(), v1alpha1Profile)).ToNot(HaveOccurred())
+
+			key = types.NamespacedName{
+				Name:      v1alpha1Profile.Name,
+				Namespace: v1alpha1Profile.Namespace,
+			}
+
+			defer func() {
+				Expect(testclient.Client.Delete(context.TODO(), v1alpha1Profile)).ToNot(HaveOccurred())
+				Expect(profiles.WaitForDeletion(key, 60*time.Second)).ToNot(HaveOccurred())
+			}()
+
+			v1Profile = &performancev1.PerformanceProfile{}
+			err = testclient.GetWithRetry(context.TODO(), key, v1Profile)
+			Expect(err).ToNot(HaveOccurred(), "Failed getting v1profile")
+			Expect(verifyV1alpha1Conversion(v1alpha1Profile, v1Profile)).ToNot(HaveOccurred())
+		}
+
+		// empty context to use the same JustBeforeEach and AfterEach
+		Context("", func() {
+			var testProfileName string
+			var globallyDisableIrqLoadBalancing bool
+
+			JustBeforeEach(func() {
+				key := types.NamespacedName{
+					Name:      profile.Name,
+					Namespace: profile.Namespace,
+				}
+				err := testclient.Client.Get(context.TODO(), key, profile)
+				Expect(err).ToNot(HaveOccurred(), "Failed to get profile")
+
+				// Clone the test profile under a per-When name with the
+				// GloballyDisableIrqLoadBalancing value chosen by BeforeEach.
+				profile.Name = testProfileName
+				profile.ResourceVersion = ""
+				profile.Spec.NodeSelector = map[string]string{"test/test": "test"}
+				profile.Spec.GloballyDisableIrqLoadBalancing = pointer.BoolPtr(globallyDisableIrqLoadBalancing)
+				profile.Spec.MachineConfigPoolSelector = nil
+				profile.Spec.MachineConfigLabel = nil
+
+				err = testclient.Client.Create(context.TODO(), profile)
+				Expect(err).ToNot(HaveOccurred(), "Failed to create profile")
+
+				// we need to get updated profile object after the name and spec changes
+				key = types.NamespacedName{
+					Name:      profile.Name,
+					Namespace: profile.Namespace,
+				}
+				err = testclient.Client.Get(context.TODO(), key, profile)
+				Expect(err).ToNot(HaveOccurred(), "Failed to get profile")
+			})
+
+			When("the GloballyDisableIrqLoadBalancing field set to false", func() {
+				BeforeEach(func() {
+					testProfileName = "gdilb-false"
+					globallyDisableIrqLoadBalancing = false
+				})
+
+				It("should preserve the value during the v1 <-> v2 conversion", func() {
+					verifyV2V1()
+				})
+			})
+
+			When("the GloballyDisableIrqLoadBalancing field set to true", func() {
+				BeforeEach(func() {
+					testProfileName = "gdilb-true"
+					globallyDisableIrqLoadBalancing = true
+				})
+
+				It("should preserve the value during the v1 <-> v2 conversion", func() {
+					verifyV2V1()
+				})
+			})
+
+			AfterEach(func() {
+				// Remove the cloned profile and wait until it is fully gone.
+				Expect(testclient.Client.Delete(context.TODO(), profile)).ToNot(HaveOccurred())
+				Expect(profiles.WaitForDeletion(types.NamespacedName{
+					Name:      profile.Name,
+					Namespace: profile.Namespace,
+				}, 60*time.Second)).ToNot(HaveOccurred())
+			})
+
+		})
+
+		When("the performance profile does not contain NUMA field", func() {
+			BeforeEach(func() {
+				key := types.NamespacedName{
+					Name:      profile.Name,
+					Namespace: profile.Namespace,
+				}
+				err := testclient.Client.Get(context.TODO(), key, profile)
+				Expect(err).ToNot(HaveOccurred(), "Failed getting v1Profile")
+
+				// Clone the profile without the NUMA section to exercise
+				// conversion of the optional field.
+				profile.Name = "without-numa"
+				profile.ResourceVersion = ""
+				profile.Spec.NodeSelector = map[string]string{"withoutNUMA/withoutNUMA": "withoutNUMA"}
+				profile.Spec.NUMA = nil
+				profile.Spec.MachineConfigPoolSelector = nil
+				profile.Spec.MachineConfigLabel = nil
+
+				err = testclient.Client.Create(context.TODO(), profile)
+				Expect(err).ToNot(HaveOccurred(), "Failed to create profile without NUMA")
+			})
+
+			AfterEach(func() {
+				Expect(testclient.Client.Delete(context.TODO(), profile)).ToNot(HaveOccurred())
+				Expect(profiles.WaitForDeletion(types.NamespacedName{
+					Name:      profile.Name,
+					Namespace: profile.Namespace,
+				}, 60*time.Second)).ToNot(HaveOccurred())
+			})
+
+			It("Verifies v1 <-> v1alpha1 conversions", func() {
+				verifyV1VAlpha1()
+			})
+
+			It("Verifies v1 <-> v2 conversions", func() {
+				verifyV2V1()
+			})
+		})
+
+		It("[test_id:35887] Verifies v1 <-> v1alpha1 conversions", func() {
+			verifyV1VAlpha1()
+		})
+
+		It("[test_id:35888] Verifies v1 <-> v2 conversions", func() {
+			verifyV2V1()
+		})
+	})
+
+	// Verifies the admission webhook rejects invalid profiles across all
+	// three API versions (v1alpha1, v1, v2) with the expected messages.
+	Context("Validation webhook", func() {
+		BeforeEach(func() {
+			if discovery.Enabled() {
+				Skip("Discovery mode enabled, test skipped because it creates incorrect profiles")
+			}
+		})
+
+		// validateObject asserts that creating obj is rejected and the
+		// rejection contains the given webhook validation message.
+		validateObject := func(obj client.Object, message string) {
+			err := testclient.Client.Create(context.TODO(), obj)
+			Expect(err).To(HaveOccurred(), "expected the validation error")
+			Expect(err.Error()).To(ContainSubstring(message))
+		}
+
+		Context("with API version v1alpha1 profile", func() {
+			var v1alpha1Profile *performancev1alpha1.PerformanceProfile
+
+			BeforeEach(func() {
+				// Minimal valid v1alpha1 skeleton; each It injects the invalid CPU spec.
+				v1alpha1Profile = &performancev1alpha1.PerformanceProfile{
+					TypeMeta: metav1.TypeMeta{
+						Kind:       "PerformanceProfile",
+						APIVersion: performancev1alpha1.GroupVersion.String(),
+					},
+					ObjectMeta: metav1.ObjectMeta{
+						Name: "v1alpha1-profile",
+					},
+					Spec: performancev1alpha1.PerformanceProfileSpec{
+						RealTimeKernel: &performancev1alpha1.RealTimeKernel{
+							Enabled: pointer.BoolPtr(true),
+						},
+						NodeSelector: map[string]string{"v1alpha1/v1alpha1": "v1alpha1"},
+						NUMA: &performancev1alpha1.NUMA{
+							TopologyPolicy: pointer.StringPtr("restricted"),
+						},
+					},
+				}
+			})
+
+			It("should reject the creation of the profile with overlapping CPUs", func() {
+				reserved := performancev1alpha1.CPUSet("0-3")
+				isolated := performancev1alpha1.CPUSet("0-7")
+
+				v1alpha1Profile.Spec.CPU = &performancev1alpha1.CPU{
+					Reserved: &reserved,
+					Isolated: &isolated,
+				}
+				validateObject(v1alpha1Profile, "reserved and isolated cpus overlap")
+			})
+
+			It("should reject the creation of the profile with no isolated CPUs", func() {
+				reserved := performancev1alpha1.CPUSet("0-3")
+				isolated := performancev1alpha1.CPUSet("")
+
+				v1alpha1Profile.Spec.CPU = &performancev1alpha1.CPU{
+					Reserved: &reserved,
+					Isolated: &isolated,
+				}
+				validateObject(v1alpha1Profile, "isolated CPUs can not be empty")
+			})
+
+			It("should reject the creation of the profile with the node selector that already in use", func() {
+				reserved := performancev1alpha1.CPUSet("0,1")
+				isolated := performancev1alpha1.CPUSet("2,3")
+
+				v1alpha1Profile.Spec.CPU = &performancev1alpha1.CPU{
+					Reserved: &reserved,
+					Isolated: &isolated,
+				}
+				// Reuse the selector of the already-deployed test profile.
+				v1alpha1Profile.Spec.NodeSelector = testutils.NodeSelectorLabels
+				validateObject(v1alpha1Profile, "the profile has the same node selector as the performance profile")
+			})
+		})
+
+		Context("with API version v1 profile", func() {
+			var v1Profile *performancev1.PerformanceProfile
+
+			BeforeEach(func() {
+				// Minimal valid v1 skeleton; each It injects the invalid CPU spec.
+				v1Profile = &performancev1.PerformanceProfile{
+					TypeMeta: metav1.TypeMeta{
+						Kind:       "PerformanceProfile",
+						APIVersion: performancev1.GroupVersion.String(),
+					},
+					ObjectMeta: metav1.ObjectMeta{
+						Name: "v1-profile",
+					},
+					Spec: performancev1.PerformanceProfileSpec{
+						RealTimeKernel: &performancev1.RealTimeKernel{
+							Enabled: pointer.BoolPtr(true),
+						},
+						NodeSelector: map[string]string{"v1/v1": "v1"},
+						NUMA: &performancev1.NUMA{
+							TopologyPolicy: pointer.StringPtr("restricted"),
+						},
+					},
+				}
+			})
+
+			It("should reject the creation of the profile with overlapping CPUs", func() {
+				reserved := performancev1.CPUSet("0-3")
+				isolated := performancev1.CPUSet("0-7")
+
+				v1Profile.Spec.CPU = &performancev1.CPU{
+					Reserved: &reserved,
+					Isolated: &isolated,
+				}
+				validateObject(v1Profile, "reserved and isolated cpus overlap")
+			})
+
+			It("should reject the creation of the profile with no isolated CPUs", func() {
+				reserved := performancev1.CPUSet("0-3")
+				isolated := performancev1.CPUSet("")
+
+				v1Profile.Spec.CPU = &performancev1.CPU{
+					Reserved: &reserved,
+					Isolated: &isolated,
+				}
+				validateObject(v1Profile, "isolated CPUs can not be empty")
+			})
+
+			It("should reject the creation of the profile with the node selector that already in use", func() {
+				reserved := performancev1.CPUSet("0,1")
+				isolated := performancev1.CPUSet("2,3")
+
+				v1Profile.Spec.CPU = &performancev1.CPU{
+					Reserved: &reserved,
+					Isolated: &isolated,
+				}
+				// Reuse the selector of the already-deployed test profile.
+				v1Profile.Spec.NodeSelector = testutils.NodeSelectorLabels
+				validateObject(v1Profile, "the profile has the same node selector as the performance profile")
+			})
+		})
+
+		Context("with profile version v2", func() {
+			var v2Profile *performancev2.PerformanceProfile
+
+			BeforeEach(func() {
+				// Minimal valid v2 skeleton; each It injects the invalid CPU spec.
+				v2Profile = &performancev2.PerformanceProfile{
+					TypeMeta: metav1.TypeMeta{
+						Kind:       "PerformanceProfile",
+						APIVersion: performancev2.GroupVersion.String(),
+					},
+					ObjectMeta: metav1.ObjectMeta{
+						Name: "v2-profile",
+					},
+					Spec: performancev2.PerformanceProfileSpec{
+						RealTimeKernel: &performancev2.RealTimeKernel{
+							Enabled: pointer.BoolPtr(true),
+						},
+						NodeSelector: map[string]string{"v2/v2": "v2"},
+						NUMA: &performancev2.NUMA{
+							TopologyPolicy: pointer.StringPtr("restricted"),
+						},
+					},
+				}
+			})
+
+			It("should reject the creation of the profile with overlapping CPUs", func() {
+				reserved := performancev2.CPUSet("0-3")
+				isolated := performancev2.CPUSet("0-7")
+
+				v2Profile.Spec.CPU = &performancev2.CPU{
+					Reserved: &reserved,
+					Isolated: &isolated,
+				}
+				validateObject(v2Profile, "reserved and isolated cpus overlap")
+			})
+
+			It("should reject the creation of the profile with no isolated CPUs", func() {
+				reserved := performancev2.CPUSet("0-3")
+				isolated := performancev2.CPUSet("")
+
+				v2Profile.Spec.CPU = &performancev2.CPU{
+					Reserved: &reserved,
+					Isolated: &isolated,
+				}
+				validateObject(v2Profile, "isolated CPUs can not be empty")
+			})
+
+			It("should reject the creation of the profile with the node selector that already in use", func() {
+				reserved := performancev2.CPUSet("0,1")
+				isolated := performancev2.CPUSet("2,3")
+
+				v2Profile.Spec.CPU = &performancev2.CPU{
+					Reserved: &reserved,
+					Isolated: &isolated,
+				}
+				// Reuse the selector of the already-deployed test profile.
+				v2Profile.Spec.NodeSelector = testutils.NodeSelectorLabels
+				validateObject(v2Profile, "the profile has the same node selector as the performance profile")
+			})
+		})
+	})
+})
+
+func verifyV1alpha1Conversion(v1alpha1Profile *performancev1alpha1.PerformanceProfile, v1Profile *performancev1.PerformanceProfile) error {
+ specCPU := v1alpha1Profile.Spec.CPU
+ if (specCPU == nil) != (v1Profile.Spec.CPU == nil) {
+ return fmt.Errorf("spec CPUs field is different")
+ }
+
+ if specCPU != nil {
+ if (specCPU.Reserved == nil) != (v1Profile.Spec.CPU.Reserved == nil) {
+ return fmt.Errorf("spec CPUs Reserved field is different")
+ }
+ if specCPU.Reserved != nil {
+ if string(*specCPU.Reserved) != string(*v1Profile.Spec.CPU.Reserved) {
+ return fmt.Errorf("reserved CPUs are different [v1alpha1: %s, v1: %s]",
+ *specCPU.Reserved, *v1Profile.Spec.CPU.Reserved)
+ }
+ }
+
+ if (specCPU.Isolated == nil) != (v1Profile.Spec.CPU.Isolated == nil) {
+ return fmt.Errorf("spec CPUs Isolated field is different")
+ }
+ if specCPU.Isolated != nil {
+ if string(*specCPU.Isolated) != string(*v1Profile.Spec.CPU.Isolated) {
+ return fmt.Errorf("isolated CPUs are different [v1alpha1: %s, v1: %s]",
+ *specCPU.Isolated, *v1Profile.Spec.CPU.Isolated)
+ }
+ }
+
+ if (specCPU.BalanceIsolated == nil) != (v1Profile.Spec.CPU.BalanceIsolated == nil) {
+ return fmt.Errorf("spec CPUs BalanceIsolated field is different")
+ }
+ if specCPU.BalanceIsolated != nil {
+ if *specCPU.BalanceIsolated != *v1Profile.Spec.CPU.BalanceIsolated {
+ return fmt.Errorf("balanceIsolated field is different [v1alpha1: %t, v1: %t]",
+ *specCPU.BalanceIsolated, *v1Profile.Spec.CPU.BalanceIsolated)
+ }
+ }
+ }
+
+ specHugePages := v1alpha1Profile.Spec.HugePages
+ if (specHugePages == nil) != (v1Profile.Spec.HugePages == nil) {
+ return fmt.Errorf("spec HugePages field is different")
+ }
+
+ if specHugePages != nil {
+ if (specHugePages.DefaultHugePagesSize == nil) != (v1Profile.Spec.HugePages.DefaultHugePagesSize == nil) {
+ return fmt.Errorf("spec HugePages defaultHugePagesSize field is different")
+ }
+ if specHugePages.DefaultHugePagesSize != nil {
+ if string(*specHugePages.DefaultHugePagesSize) != string(*v1Profile.Spec.HugePages.DefaultHugePagesSize) {
+ return fmt.Errorf("defaultHugePagesSize field is different [v1alpha1: %s, v1: %s]",
+ *specHugePages.DefaultHugePagesSize, *v1Profile.Spec.HugePages.DefaultHugePagesSize)
+ }
+ }
+
+ if len(specHugePages.Pages) != len(v1Profile.Spec.HugePages.Pages) {
+ return fmt.Errorf("pages field is different [v1alpha1: %v, v1: %v]",
+ specHugePages.Pages, v1Profile.Spec.HugePages.Pages)
+ }
+
+ for i, v1alpha1Page := range specHugePages.Pages {
+ v1page := v1Profile.Spec.HugePages.Pages[i]
+ if string(v1alpha1Page.Size) != string(v1page.Size) ||
+ (v1alpha1Page.Node == nil) != (v1page.Node == nil) ||
+ (v1alpha1Page.Node != nil && *v1alpha1Page.Node != *v1page.Node) ||
+ v1alpha1Page.Count != v1page.Count {
+ return fmt.Errorf("pages field is different [v1alpha1: %v, v1: %v]",
+ specHugePages.Pages, v1Profile.Spec.HugePages.Pages)
+ }
+ }
+ }
+
+ if !reflect.DeepEqual(v1alpha1Profile.Spec.MachineConfigLabel, v1Profile.Spec.MachineConfigLabel) {
+ return fmt.Errorf("machineConfigLabel field is different [v1alpha1: %v, v1: %v]",
+ v1alpha1Profile.Spec.MachineConfigLabel, v1Profile.Spec.MachineConfigLabel)
+ }
+
+ if !reflect.DeepEqual(v1alpha1Profile.Spec.MachineConfigPoolSelector, v1Profile.Spec.MachineConfigPoolSelector) {
+ return fmt.Errorf("machineConfigPoolSelector field is different [v1alpha1: %v, v1: %v]",
+ v1alpha1Profile.Spec.MachineConfigPoolSelector, v1Profile.Spec.MachineConfigPoolSelector)
+ }
+
+ if !reflect.DeepEqual(v1alpha1Profile.Spec.NodeSelector, v1Profile.Spec.NodeSelector) {
+ return fmt.Errorf("nodeSelector field is different [v1alpha1: %v, v1: %v]",
+ v1alpha1Profile.Spec.NodeSelector, v1Profile.Spec.NodeSelector)
+ }
+
+ specRealTimeKernel := v1alpha1Profile.Spec.RealTimeKernel
+ if (specRealTimeKernel == nil) != (v1Profile.Spec.RealTimeKernel == nil) {
+ return fmt.Errorf("spec RealTimeKernel field is different")
+ }
+
+ if specRealTimeKernel != nil {
+ if (specRealTimeKernel.Enabled == nil) != (v1Profile.Spec.RealTimeKernel.Enabled == nil) {
+ return fmt.Errorf("spec RealTimeKernel.Enabled field is different")
+ }
+
+ if specRealTimeKernel.Enabled != nil {
+ if *specRealTimeKernel.Enabled != *v1Profile.Spec.RealTimeKernel.Enabled {
+ return fmt.Errorf("specRealTimeKernel field is different [v1alpha1: %t, v1: %t]",
+ *specRealTimeKernel.Enabled, *v1Profile.Spec.RealTimeKernel.Enabled)
+ }
+ }
+ }
+
+ if !reflect.DeepEqual(v1alpha1Profile.Spec.AdditionalKernelArgs, v1Profile.Spec.AdditionalKernelArgs) {
+ return fmt.Errorf("additionalKernelArgs field is different [v1alpha1: %v, v1: %v]",
+ v1alpha1Profile.Spec.AdditionalKernelArgs, v1Profile.Spec.AdditionalKernelArgs)
+ }
+
+ specNUMA := v1alpha1Profile.Spec.NUMA
+ if (specNUMA == nil) != (v1Profile.Spec.NUMA == nil) {
+ return fmt.Errorf("spec NUMA field is different")
+ }
+
+ if specNUMA != nil {
+ if (specNUMA.TopologyPolicy == nil) != (v1Profile.Spec.NUMA.TopologyPolicy == nil) {
+ return fmt.Errorf("spec NUMA topologyPolicy field is different")
+ }
+ if specNUMA.TopologyPolicy != nil {
+ if *specNUMA.TopologyPolicy != *v1Profile.Spec.NUMA.TopologyPolicy {
+ return fmt.Errorf("topologyPolicy field is different [v1alpha1: %s, v1: %s]",
+ *specNUMA.TopologyPolicy, *v1Profile.Spec.NUMA.TopologyPolicy)
+ }
+ }
+ }
+
+ return nil
+}
+
+func verifyV2Conversion(v2Profile *performancev2.PerformanceProfile, v1Profile *performancev1.PerformanceProfile) error {
+ specCPU := v2Profile.Spec.CPU
+ if (specCPU == nil) != (v1Profile.Spec.CPU == nil) {
+ return fmt.Errorf("spec CPUs field is different")
+ }
+
+ if specCPU != nil {
+ if (specCPU.Reserved == nil) != (v1Profile.Spec.CPU.Reserved == nil) {
+ return fmt.Errorf("spec CPUs Reserved field is different")
+ }
+ if specCPU.Reserved != nil {
+ if string(*specCPU.Reserved) != string(*v1Profile.Spec.CPU.Reserved) {
+ return fmt.Errorf("reserved CPUs are different [v2: %s, v1: %s]",
+ *specCPU.Reserved, *v1Profile.Spec.CPU.Reserved)
+ }
+ }
+
+ if (specCPU.Isolated == nil) != (v1Profile.Spec.CPU.Isolated == nil) {
+ return fmt.Errorf("spec CPUs Isolated field is different")
+ }
+ if specCPU.Isolated != nil {
+ if string(*specCPU.Isolated) != string(*v1Profile.Spec.CPU.Isolated) {
+ return fmt.Errorf("isolated CPUs are different [v2: %s, v1: %s]",
+ *specCPU.Isolated, *v1Profile.Spec.CPU.Isolated)
+ }
+ }
+
+ if (specCPU.BalanceIsolated == nil) != (v1Profile.Spec.CPU.BalanceIsolated == nil) {
+ return fmt.Errorf("spec CPUs BalanceIsolated field is different")
+ }
+ if specCPU.BalanceIsolated != nil {
+ if *specCPU.BalanceIsolated != *v1Profile.Spec.CPU.BalanceIsolated {
+ return fmt.Errorf("balanceIsolated field is different [v2: %t, v1: %t]",
+ *specCPU.BalanceIsolated, *v1Profile.Spec.CPU.BalanceIsolated)
+ }
+ }
+ }
+
+ specHugePages := v2Profile.Spec.HugePages
+ if (specHugePages == nil) != (v1Profile.Spec.HugePages == nil) {
+ return fmt.Errorf("spec HugePages field is different")
+ }
+
+ if specHugePages != nil {
+ if (specHugePages.DefaultHugePagesSize == nil) != (v1Profile.Spec.HugePages.DefaultHugePagesSize == nil) {
+ return fmt.Errorf("spec HugePages defaultHugePagesSize field is different")
+ }
+ if specHugePages.DefaultHugePagesSize != nil {
+ if string(*specHugePages.DefaultHugePagesSize) != string(*v1Profile.Spec.HugePages.DefaultHugePagesSize) {
+ return fmt.Errorf("defaultHugePagesSize field is different [v2: %s, v1: %s]",
+ *specHugePages.DefaultHugePagesSize, *v1Profile.Spec.HugePages.DefaultHugePagesSize)
+ }
+ }
+
+ if len(specHugePages.Pages) != len(v1Profile.Spec.HugePages.Pages) {
+ return fmt.Errorf("pages field is different [v2: %v, v1: %v]",
+ specHugePages.Pages, v1Profile.Spec.HugePages.Pages)
+ }
+
+ for i, v1alpha1Page := range specHugePages.Pages {
+ v1page := v1Profile.Spec.HugePages.Pages[i]
+ if string(v1alpha1Page.Size) != string(v1page.Size) ||
+ (v1alpha1Page.Node == nil) != (v1page.Node == nil) ||
+ (v1alpha1Page.Node != nil && *v1alpha1Page.Node != *v1page.Node) ||
+ v1alpha1Page.Count != v1page.Count {
+ return fmt.Errorf("pages field is different [v2: %v, v1: %v]",
+ specHugePages.Pages, v1Profile.Spec.HugePages.Pages)
+ }
+ }
+ }
+
+ if !reflect.DeepEqual(v2Profile.Spec.MachineConfigLabel, v1Profile.Spec.MachineConfigLabel) {
+ return fmt.Errorf("machineConfigLabel field is different [v2: %v, v1: %v]",
+ v2Profile.Spec.MachineConfigLabel, v1Profile.Spec.MachineConfigLabel)
+ }
+
+ if !reflect.DeepEqual(v2Profile.Spec.MachineConfigPoolSelector, v1Profile.Spec.MachineConfigPoolSelector) {
+ return fmt.Errorf("machineConfigPoolSelector field is different [v2: %v, v1: %v]",
+ v2Profile.Spec.MachineConfigPoolSelector, v1Profile.Spec.MachineConfigPoolSelector)
+ }
+
+ if !reflect.DeepEqual(v2Profile.Spec.NodeSelector, v1Profile.Spec.NodeSelector) {
+ return fmt.Errorf("nodeSelector field is different [v2: %v, v1: %v]",
+ v2Profile.Spec.NodeSelector, v1Profile.Spec.NodeSelector)
+ }
+
+ specRealTimeKernel := v2Profile.Spec.RealTimeKernel
+ if (specRealTimeKernel == nil) != (v1Profile.Spec.RealTimeKernel == nil) {
+ return fmt.Errorf("spec RealTimeKernel field is different")
+ }
+
+ if specRealTimeKernel != nil {
+ if (specRealTimeKernel.Enabled == nil) != (v1Profile.Spec.RealTimeKernel.Enabled == nil) {
+ return fmt.Errorf("spec RealTimeKernel.Enabled field is different")
+ }
+
+ if specRealTimeKernel.Enabled != nil {
+ if *specRealTimeKernel.Enabled != *v1Profile.Spec.RealTimeKernel.Enabled {
+ return fmt.Errorf("specRealTimeKernel field is different [v2: %t, v1: %t]",
+ *specRealTimeKernel.Enabled, *v1Profile.Spec.RealTimeKernel.Enabled)
+ }
+ }
+ }
+
+ if !reflect.DeepEqual(v2Profile.Spec.AdditionalKernelArgs, v1Profile.Spec.AdditionalKernelArgs) {
+ return fmt.Errorf("additionalKernelArgs field is different [v2: %v, v1: %v]",
+ v2Profile.Spec.AdditionalKernelArgs, v1Profile.Spec.AdditionalKernelArgs)
+ }
+
+ specNUMA := v2Profile.Spec.NUMA
+ if (specNUMA == nil) != (v1Profile.Spec.NUMA == nil) {
+ return fmt.Errorf("spec NUMA field is different")
+ }
+
+ if specNUMA != nil {
+ if (specNUMA.TopologyPolicy == nil) != (v1Profile.Spec.NUMA.TopologyPolicy == nil) {
+ return fmt.Errorf("spec NUMA topologyPolicy field is different")
+ }
+ if specNUMA.TopologyPolicy != nil {
+ if *specNUMA.TopologyPolicy != *v1Profile.Spec.NUMA.TopologyPolicy {
+ return fmt.Errorf("topologyPolicy field is different [v2: %s, v1: %s]",
+ *specNUMA.TopologyPolicy, *v1Profile.Spec.NUMA.TopologyPolicy)
+ }
+ }
+ }
+
+ for _, f := range v2Profile.GetObjectMeta().GetManagedFields() {
+ if f.APIVersion == performancev1alpha1.GroupVersion.String() ||
+ f.APIVersion == performancev1.GroupVersion.String() {
+ if v2Profile.Spec.GloballyDisableIrqLoadBalancing == nil {
+ return fmt.Errorf("globallyDisableIrqLoadBalancing field must be set to true")
+ }
+ }
+ }
+
+ return nil
+}
+
+func execSysctlOnWorkers(workerNodes []corev1.Node, sysctlMap map[string]string) {
+ var err error
+ var out []byte
+ for _, node := range workerNodes {
+ for param, expected := range sysctlMap {
+ By(fmt.Sprintf("executing the command \"sysctl -n %s\"", param))
+ out, err = nodes.ExecCommandOnMachineConfigDaemon(&node, []string{"sysctl", "-n", param})
+ Expect(err).ToNot(HaveOccurred())
+ Expect(strings.TrimSpace(string(out))).Should(Equal(expected), "parameter %s value is not %s.", param, expected)
+ }
+ }
+}
+
+// execute sysctl command inside container in a tuned pod
+func validateTunedActiveProfile(wrknodes []corev1.Node) {
+ var err error
+ var out []byte
+ activeProfileName := components.GetComponentName(testutils.PerformanceProfileName, components.ProfileNamePerformance)
+
+ // check if some another Tuned profile overwrites PAO profile
+ tunedList := &tunedv1.TunedList{}
+ err = testclient.Client.List(context.TODO(), tunedList)
+ Expect(err).NotTo(HaveOccurred())
+
+ for _, t := range tunedList.Items {
+ if len(t.Spec.Profile) > 0 && t.Spec.Profile[0].Data != nil && strings.Contains(*t.Spec.Profile[0].Data, fmt.Sprintf("include=%s", activeProfileName)) {
+ testlog.Warning(fmt.Sprintf("PAO tuned profile amended by '%s' profile, test may fail", t.Name))
+ if t.Spec.Profile[0].Name != nil {
+ activeProfileName = *t.Spec.Profile[0].Name
+ }
+ }
+ }
+
+ for _, node := range wrknodes {
+ tuned := nodes.TunedForNode(&node, RunningOnSingleNode)
+ tunedName := tuned.ObjectMeta.Name
+ By(fmt.Sprintf("executing the command cat /etc/tuned/active_profile inside the pod %s", tunedName))
+ Eventually(func() string {
+ out, err = pods.WaitForPodOutput(testclient.K8sClient, tuned, []string{"cat", "/etc/tuned/active_profile"})
+ return strings.TrimSpace(string(out))
+ }, cluster.ComputeTestTimeout(testTimeout*time.Second, RunningOnSingleNode), testPollInterval*time.Second).Should(Equal(activeProfileName),
+ fmt.Sprintf("active_profile is not set to %s. %v", activeProfileName, err))
+ }
+}
diff --git a/test/e2e/pao/functests/1_performance/rt-kernel.go b/test/e2e/pao/functests/1_performance/rt-kernel.go
new file mode 100644
index 000000000..9172ca498
--- /dev/null
+++ b/test/e2e/pao/functests/1_performance/rt-kernel.go
@@ -0,0 +1,74 @@
+package __performance
+
+import (
+ "fmt"
+
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+
+ performancev2 "github.com/openshift/cluster-node-tuning-operator/pkg/apis/pao/v2"
+ testutils "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils"
+ "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/discovery"
+ "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/nodes"
+)
+
+var _ = Describe("[performance]RT Kernel", func() {
+ var discoveryFailed bool
+ var profile *performancev2.PerformanceProfile
+ var err error
+
+ testutils.BeforeAll(func() {
+ profile, err = discovery.GetFilteredDiscoveryPerformanceProfile(
+ func(profile performancev2.PerformanceProfile) bool {
+ if profile.Spec.RealTimeKernel != nil &&
+ profile.Spec.RealTimeKernel.Enabled != nil &&
+ *profile.Spec.RealTimeKernel.Enabled == true {
+ return true
+ }
+ return false
+ })
+
+ if err == discovery.ErrProfileNotFound {
+ discoveryFailed = true
+ return
+ }
+ Expect(err).ToNot(HaveOccurred(), "failed to get a profile using a filter for RT kernel")
+ })
+
+ BeforeEach(func() {
+ if discoveryFailed {
+ Skip("Skipping RT Kernel tests since no profile found with RT kernel set")
+ }
+
+ })
+
+ It("[test_id:26861][crit:high][vendor:cnf-qe@redhat.com][level:acceptance] should have RT kernel enabled", func() {
+ workerRTNodes, err := nodes.GetByLabels(testutils.NodeSelectorLabels)
+ Expect(err).ToNot(HaveOccurred())
+ workerRTNodes, err = nodes.MatchingOptionalSelector(workerRTNodes)
+ Expect(err).ToNot(HaveOccurred(), fmt.Sprintf("error looking for the optional selector: %v", err))
+ Expect(workerRTNodes).ToNot(BeEmpty(), "No RT worker node found!")
+
+ err = nodes.HasPreemptRTKernel(&workerRTNodes[0])
+ Expect(err).ToNot(HaveOccurred())
+ })
+
+ It("[test_id:28526][crit:high][vendor:cnf-qe@redhat.com][level:acceptance] a node without performance profile applied should not have RT kernel installed", func() {
+
+ By("Skipping test if cluster does not have another available worker node")
+ nonPerformancesWorkers, err := nodes.GetNonPerformancesWorkers(profile.Spec.NodeSelector)
+ Expect(err).ToNot(HaveOccurred())
+
+ if len(nonPerformancesWorkers) == 0 {
+ Skip("Skipping test because there are no additional non-cnf worker nodes")
+ }
+
+ cmd := []string{"uname", "-a"}
+ kernel, err := nodes.ExecCommandOnNode(cmd, &nonPerformancesWorkers[0])
+ Expect(err).ToNot(HaveOccurred(), "failed to execute uname")
+ Expect(kernel).To(ContainSubstring("Linux"), "Node should have Linux string")
+
+ err = nodes.HasPreemptRTKernel(&nonPerformancesWorkers[0])
+ Expect(err).To(HaveOccurred(), "Node should have non-RT kernel")
+ })
+})
diff --git a/test/e2e/pao/functests/1_performance/test_suite_performance_test.go b/test/e2e/pao/functests/1_performance/test_suite_performance_test.go
new file mode 100644
index 000000000..6f344538e
--- /dev/null
+++ b/test/e2e/pao/functests/1_performance/test_suite_performance_test.go
@@ -0,0 +1,51 @@
+//go:build !unittests
+// +build !unittests
+
+package __performance_test
+
+import (
+ "context"
+ "testing"
+ "time"
+
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+
+ "k8s.io/apimachinery/pkg/api/errors"
+
+ ginkgo_reporters "kubevirt.io/qe-tools/pkg/ginkgo-reporters"
+
+ testutils "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils"
+ testclient "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/client"
+ "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/junit"
+ testlog "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/log"
+ "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/namespaces"
+)
+
+var _ = BeforeSuite(func() {
+ Expect(testclient.ClientsEnabled).To(BeTrue(), "package client not enabled")
+ // create test namespace
+ err := testclient.Client.Create(context.TODO(), namespaces.TestingNamespace)
+ if errors.IsAlreadyExists(err) {
+ testlog.Warning("test namespace already exists, that is unexpected")
+ return
+ }
+ Expect(err).ToNot(HaveOccurred())
+})
+
+var _ = AfterSuite(func() {
+ err := testclient.Client.Delete(context.TODO(), namespaces.TestingNamespace)
+ Expect(err).ToNot(HaveOccurred())
+ Expect(namespaces.WaitForDeletion(testutils.NamespaceTesting, 5*time.Minute)).To(Succeed())
+})
+
+func TestPerformance(t *testing.T) {
+ RegisterFailHandler(Fail)
+
+ rr := []Reporter{}
+ if ginkgo_reporters.Polarion.Run {
+ rr = append(rr, &ginkgo_reporters.Polarion)
+ }
+ rr = append(rr, junit.NewJUnitReporter("performance"))
+ RunSpecsWithDefaultAndCustomReporters(t, "Performance Addon Operator e2e tests", rr)
+}
diff --git a/test/e2e/pao/functests/1_performance/topology_manager.go b/test/e2e/pao/functests/1_performance/topology_manager.go
new file mode 100644
index 000000000..da9a7f4af
--- /dev/null
+++ b/test/e2e/pao/functests/1_performance/topology_manager.go
@@ -0,0 +1,47 @@
+package __performance
+
+import (
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+
+ performancev2 "github.com/openshift/cluster-node-tuning-operator/pkg/apis/pao/v2"
+ testutils "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils"
+ "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/discovery"
+ "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/nodes"
+ "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/profiles"
+
+ corev1 "k8s.io/api/core/v1"
+ kubeletconfigv1beta1 "k8s.io/kubelet/config/v1beta1"
+)
+
+var _ = Describe("[rfe_id:27350][performance]Topology Manager", func() {
+ var workerRTNodes []corev1.Node
+ var profile *performancev2.PerformanceProfile
+
+ BeforeEach(func() {
+ if discovery.Enabled() && testutils.ProfileNotFound {
+ Skip("Discovery mode enabled, performance profile not found")
+ }
+
+ var err error
+ workerRTNodes, err = nodes.GetByLabels(testutils.NodeSelectorLabels)
+ Expect(err).ToNot(HaveOccurred())
+ workerRTNodes, err = nodes.MatchingOptionalSelector(workerRTNodes)
+ Expect(err).ToNot(HaveOccurred(), "Error looking for the optional selector: %v", err)
+ Expect(workerRTNodes).ToNot(BeEmpty(), "No RT worker node found!")
+ profile, err = profiles.GetByNodeLabels(testutils.NodeSelectorLabels)
+ Expect(err).ToNot(HaveOccurred())
+ })
+
+ It("[test_id:26932][crit:high][vendor:cnf-qe@redhat.com][level:acceptance] should be enabled with the policy specified in profile", func() {
+ kubeletConfig, err := nodes.GetKubeletConfig(&workerRTNodes[0])
+ Expect(err).ToNot(HaveOccurred())
+
+ // verify topology manager policy
+ if profile.Spec.NUMA != nil && profile.Spec.NUMA.TopologyPolicy != nil {
+ Expect(kubeletConfig.TopologyManagerPolicy).To(Equal(*profile.Spec.NUMA.TopologyPolicy), "Topology Manager policy mismatch got %q expected %q", kubeletConfig.TopologyManagerPolicy, *profile.Spec.NUMA.TopologyPolicy)
+ } else {
+ Expect(kubeletConfig.TopologyManagerPolicy).To(Equal(kubeletconfigv1beta1.BestEffortTopologyManagerPolicy), "Topology Manager policy mismatch got %q expected %q", kubeletConfig.TopologyManagerPolicy, kubeletconfigv1beta1.BestEffortTopologyManagerPolicy)
+ }
+ })
+})
diff --git a/test/e2e/pao/functests/2_performance_update/kubelet.go b/test/e2e/pao/functests/2_performance_update/kubelet.go
new file mode 100644
index 000000000..9910b0e33
--- /dev/null
+++ b/test/e2e/pao/functests/2_performance_update/kubelet.go
@@ -0,0 +1,196 @@
+package __performance_update
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "strconv"
+ "strings"
+
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+
+ corev1 "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/api/resource"
+ "k8s.io/apimachinery/pkg/types"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+
+ performancev2 "github.com/openshift/cluster-node-tuning-operator/pkg/apis/pao/v2"
+ testutils "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils"
+ testclient "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/client"
+ "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/discovery"
+ "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/mcps"
+ "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/nodes"
+ "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/profiles"
+ machineconfigv1 "github.com/openshift/machine-config-operator/pkg/apis/machineconfiguration.openshift.io/v1"
+)
+
+var _ = Describe("[ref_id: 45487][performance]additional kubelet arguments", func() {
+ var profile *performancev2.PerformanceProfile
+ var workerRTNodes []corev1.Node
+ var performanceMCP string
+
+ testutils.BeforeAll(func() {
+ var err error; workerRTNodes, err = nodes.GetByLabels(testutils.NodeSelectorLabels) // ":=" here would shadow the outer workerRTNodes and leave it nil for the specs
+ Expect(err).ToNot(HaveOccurred())
+
+ workerRTNodes, err = nodes.MatchingOptionalSelector(workerRTNodes)
+ Expect(err).ToNot(HaveOccurred())
+
+ profile, err = profiles.GetByNodeLabels(testutils.NodeSelectorLabels)
+ Expect(err).ToNot(HaveOccurred())
+
+ performanceMCP, err = mcps.GetByProfile(profile)
+ Expect(err).ToNot(HaveOccurred())
+
+ // Verify that worker and performance MCP have updated state equals to true
+ for _, mcpName := range []string{testutils.RoleWorker, performanceMCP} {
+ mcps.WaitForCondition(mcpName, machineconfigv1.MachineConfigPoolUpdated, corev1.ConditionTrue)
+ }
+
+ })
+ BeforeEach(func() {
+ if discovery.Enabled() && testutils.ProfileNotFound {
+ Skip("Discovery mode enabled, performance profile not found")
+ }
+ })
+ Context("Additional kubelet arguments", func() {
+ It("[test_id:45488]Test performance profile annotation for changing multiple kubelet settings", func() {
+ profile.Annotations = map[string]string{
+ "kubeletconfig.experimental": "{\"allowedUnsafeSysctls\":[\"net.core.somaxconn\",\"kernel.msg*\"],\"systemReserved\":{\"memory\":\"300Mi\"},\"kubeReserved\":{\"memory\":\"768Mi\"},\"imageMinimumGCAge\":\"3m\"}",
+ }
+ annotations, err := json.Marshal(profile.Annotations)
+ Expect(err).ToNot(HaveOccurred())
+
+ By("Applying changes in performance profile and waiting until mcp will start updating")
+ Expect(testclient.Client.Patch(context.TODO(), profile,
+ client.RawPatch(
+ types.JSONPatchType,
+ []byte(fmt.Sprintf(`[{ "op": "replace", "path": "/metadata/annotations", "value": %s }]`, annotations)),
+ ),
+ )).ToNot(HaveOccurred())
+ mcps.WaitForCondition(performanceMCP, machineconfigv1.MachineConfigPoolUpdating, corev1.ConditionTrue)
+ By("Waiting when mcp finishes updates")
+ mcps.WaitForCondition(performanceMCP, machineconfigv1.MachineConfigPoolUpdated, corev1.ConditionTrue)
+ for _, node := range workerRTNodes {
+ kubeletConfig, err := nodes.GetKubeletConfig(&node)
+ Expect(err).ToNot(HaveOccurred())
+ sysctlsValue := kubeletConfig.AllowedUnsafeSysctls
+ Expect(sysctlsValue).Should(ContainElements("net.core.somaxconn", "kernel.msg*"))
+ Expect(kubeletConfig.KubeReserved["memory"]).To(Equal("768Mi"))
+ Expect(kubeletConfig.ImageMinimumGCAge.Seconds()).To(BeNumerically("==", 180))
+ }
+ kubeletArguments := []string{"/bin/bash", "-c", "ps -ef | grep kubelet | grep config"}
+ for _, node := range workerRTNodes {
+ stdout, err := nodes.ExecCommandOnNode(kubeletArguments, &node)
+ Expect(err).ToNot(HaveOccurred())
+ Expect(strings.Contains(stdout, "300Mi")).To(BeTrue())
+ }
+ })
+ Context("When setting cpu manager related parameters", func() {
+ It("[test_id:45493]Should not override performance-addon-operator values", func() {
+ cpuManagerAnnotation := map[string]string{
+ "kubeletconfig.experimental": "{\"cpuManagerPolicy\":\"static\",\"cpuManagerReconcilePeriod\":\"5s\"}",
+ }
+ profile.SetAnnotations(cpuManagerAnnotation)
+ By("Applying changes in performance profile and waiting until mcp will start updating")
+ profiles.UpdateWithRetry(profile)
+ mcps.WaitForCondition(performanceMCP, machineconfigv1.MachineConfigPoolUpdating, corev1.ConditionTrue)
+ By("Waiting when mcp finishes updates")
+ mcps.WaitForCondition(performanceMCP, machineconfigv1.MachineConfigPoolUpdated, corev1.ConditionTrue)
+ for _, node := range workerRTNodes {
+ kubeletConfig, err := nodes.GetKubeletConfig(&node)
+ Expect(err).ToNot(HaveOccurred())
+ Expect(kubeletConfig.CPUManagerPolicy).Should(Equal("static"))
+ Expect(kubeletConfig.CPUManagerReconcilePeriod.Seconds()).To(BeNumerically("==", 5))
+ }
+ })
+ })
+ It("[test_id:45490]Test memory reservation changes", func() {
+ // In this test case we are testing if after applying reserving memory for
+ // systemReserved and KubeReserved, the allocatable is reduced and Allocatable
+ // Verify that Allocatable = Node capacity - (kubereserved + systemReserved + EvictionMemory)
+ profile.Annotations = map[string]string{
+ "kubeletconfig.experimental": "{\"systemReserved\":{\"memory\":\"300Mi\"},\"kubeReserved\":{\"memory\":\"768Mi\"}}",
+ }
+ annotations, err := json.Marshal(profile.Annotations)
+ Expect(err).ToNot(HaveOccurred())
+
+ By("Applying changes in performance profile and waiting until mcp will start updating")
+ Expect(testclient.Client.Patch(context.TODO(), profile,
+ client.RawPatch(
+ types.JSONPatchType,
+ []byte(fmt.Sprintf(`[{ "op": "replace", "path": "/metadata/annotations", "value": %s }]`, annotations)),
+ ),
+ )).ToNot(HaveOccurred())
+ mcps.WaitForCondition(performanceMCP, machineconfigv1.MachineConfigPoolUpdating, corev1.ConditionTrue)
+
+ By("Waiting when mcp finishes updates")
+ mcps.WaitForCondition(performanceMCP, machineconfigv1.MachineConfigPoolUpdated, corev1.ConditionTrue)
+ for _, node := range workerRTNodes {
+ kubeletConfig, err := nodes.GetKubeletConfig(&node)
+ Expect(err).ToNot(HaveOccurred())
+ totalCapactity := node.Status.Capacity.Memory().MilliValue()
+ evictionMemory := kubeletConfig.EvictionHard["memory.available"]
+ kubeReserved := kubeletConfig.KubeReserved["memory"]
+ evictionMemoryInt, err := strconv.ParseInt(strings.TrimSuffix(evictionMemory, "Mi"), 10, 64); Expect(err).ToNot(HaveOccurred())
+ kubeReservedMemoryInt, err := strconv.ParseInt(strings.TrimSuffix(kubeReserved, "Mi"), 10, 64); Expect(err).ToNot(HaveOccurred())
+ systemReservedResource := resource.NewQuantity(300*1024*1024, resource.BinarySI)
+ kubeReservedMemoryResource := resource.NewQuantity(kubeReservedMemoryInt*1024*1024, resource.BinarySI)
+ evictionMemoryResource := resource.NewQuantity(evictionMemoryInt*1024*1024, resource.BinarySI)
+ totalKubeMemory := systemReservedResource.MilliValue() + kubeReservedMemoryResource.MilliValue() + evictionMemoryResource.MilliValue()
+ calculatedAllocatable := totalCapactity - totalKubeMemory
+ currentAllocatable := node.Status.Allocatable.Memory().MilliValue()
+ Expect(calculatedAllocatable).To(Equal(currentAllocatable))
+ }
+ })
+ It("[test_id:45495] Test setting PAO managed parameters", func() {
+ profile.Annotations = map[string]string{
+ "kubeletconfig.experimental": "{\"topologyManagerPolicy\":\"single-numa-node\"}",
+ }
+ annotations, err := json.Marshal(profile.Annotations)
+ Expect(err).ToNot(HaveOccurred())
+
+ By("Applying changes in performance profile and waiting until mcp will start updating")
+ Expect(testclient.Client.Patch(context.TODO(), profile,
+ client.RawPatch(
+ types.JSONPatchType,
+ []byte(fmt.Sprintf(`[{ "op": "replace", "path": "/metadata/annotations", "value": %s }]`, annotations)),
+ ),
+ )).ToNot(HaveOccurred())
+ mcps.WaitForCondition(performanceMCP, machineconfigv1.MachineConfigPoolUpdating, corev1.ConditionTrue)
+ By("Waiting when mcp finishes updates")
+ mcps.WaitForCondition(performanceMCP, machineconfigv1.MachineConfigPoolUpdated, corev1.ConditionTrue)
+ for _, node := range workerRTNodes {
+ kubeletConfig, err := nodes.GetKubeletConfig(&node)
+ Expect(err).ToNot(HaveOccurred())
+ Expect(kubeletConfig.TopologyManagerPolicy).To(Equal("single-numa-node"))
+ }
+ })
+ It("[test_id:45489] Verify settings are reverted to default profile", func() {
+ By("Reverting the Profile")
+ Expect(testclient.Client.Patch(context.TODO(), profile,
+ client.RawPatch(
+ types.JSONPatchType,
+ []byte(fmt.Sprintf(`[{ "op": "remove", "path": "/metadata/annotations/kubeletconfig.experimental"}]`)),
+ ),
+ )).ToNot(HaveOccurred())
+ mcps.WaitForCondition(performanceMCP, machineconfigv1.MachineConfigPoolUpdated, corev1.ConditionTrue)
+ kubeletArguments := []string{"/bin/bash", "-c", "ps -ef | grep kubelet | grep config"}
+ for _, node := range workerRTNodes {
+ kubeletConfig, err := nodes.GetKubeletConfig(&node)
+ Expect(err).ToNot(HaveOccurred())
+ Expect(kubeletConfig.AllowedUnsafeSysctls).To(BeEmpty())
+ Expect(kubeletConfig.KubeReserved["memory"]).ToNot(Equal("768Mi"))
+ Expect(kubeletConfig.ImageMinimumGCAge.Seconds()).ToNot(BeNumerically("==", 180))
+ }
+ for _, node := range workerRTNodes {
+ stdout, err := nodes.ExecCommandOnNode(kubeletArguments, &node)
+ Expect(err).ToNot(HaveOccurred())
+ Expect(strings.Contains(stdout, "300Mi")).To(BeTrue())
+ }
+
+ })
+
+ })
+})
diff --git a/test/e2e/pao/functests/2_performance_update/test_suite_performance_update_test.go b/test/e2e/pao/functests/2_performance_update/test_suite_performance_update_test.go
new file mode 100644
index 000000000..0972ae252
--- /dev/null
+++ b/test/e2e/pao/functests/2_performance_update/test_suite_performance_update_test.go
@@ -0,0 +1,51 @@
+//go:build !unittests
+// +build !unittests
+
+package __performance_update_test
+
+import (
+ "context"
+ "testing"
+ "time"
+
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+
+ "k8s.io/apimachinery/pkg/api/errors"
+
+ ginkgo_reporters "kubevirt.io/qe-tools/pkg/ginkgo-reporters"
+
+ testutils "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils"
+ testclient "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/client"
+ "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/junit"
+ testlog "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/log"
+ "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/namespaces"
+)
+
+var _ = BeforeSuite(func() {
+ Expect(testclient.ClientsEnabled).To(BeTrue())
+ // create test namespace
+ err := testclient.Client.Create(context.TODO(), namespaces.TestingNamespace)
+ if errors.IsAlreadyExists(err) {
+ testlog.Warning("test namespace already exists, that is unexpected")
+ return
+ }
+ Expect(err).ToNot(HaveOccurred())
+})
+
+var _ = AfterSuite(func() {
+ err := testclient.Client.Delete(context.TODO(), namespaces.TestingNamespace)
+ Expect(err).ToNot(HaveOccurred())
+ Expect(namespaces.WaitForDeletion(testutils.NamespaceTesting, 5*time.Minute)).To(Succeed())
+})
+
+func TestPerformanceUpdate(t *testing.T) {
+ RegisterFailHandler(Fail)
+
+ rr := []Reporter{}
+ if ginkgo_reporters.Polarion.Run {
+ rr = append(rr, &ginkgo_reporters.Polarion)
+ }
+ rr = append(rr, junit.NewJUnitReporter("performance_update"))
+ RunSpecsWithDefaultAndCustomReporters(t, "Performance Addon Operator Update e2e tests", rr)
+}
diff --git a/test/e2e/pao/functests/2_performance_update/updating_profile.go b/test/e2e/pao/functests/2_performance_update/updating_profile.go
new file mode 100644
index 000000000..f3a2b08a2
--- /dev/null
+++ b/test/e2e/pao/functests/2_performance_update/updating_profile.go
@@ -0,0 +1,589 @@
+package __performance_update
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "strconv"
+ "strings"
+ "time"
+
+ . "github.com/onsi/ginkgo"
+ "github.com/onsi/ginkgo/extensions/table"
+ . "github.com/onsi/gomega"
+ machineconfigv1 "github.com/openshift/machine-config-operator/pkg/apis/machineconfiguration.openshift.io/v1"
+ corev1 "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/types"
+ "k8s.io/kubernetes/pkg/kubelet/cm/cpuset"
+ "k8s.io/utils/pointer"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+
+ performancev2 "github.com/openshift/cluster-node-tuning-operator/pkg/apis/pao/v2"
+ "github.com/openshift/cluster-node-tuning-operator/pkg/pao/controller/performanceprofile/components"
+ testutils "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils"
+ testclient "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/client"
+ "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/discovery"
+ "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/mcps"
+ "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/nodes"
+ "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/profiles"
+)
+
+type checkFunction func(*corev1.Node) (string, error)
+
+var _ = Describe("[rfe_id:28761][performance] Updating parameters in performance profile", func() {
+ var workerRTNodes []corev1.Node
+ var profile, initialProfile *performancev2.PerformanceProfile
+ var performanceMCP string
+ var err error
+
+ chkCmdLine := []string{"cat", "/proc/cmdline"}
+ chkKubeletConfig := []string{"cat", "/rootfs/etc/kubernetes/kubelet.conf"}
+ chkIrqbalance := []string{"cat", "/rootfs/etc/sysconfig/irqbalance"}
+
+ chkCmdLineFn := func(node *corev1.Node) (string, error) {
+ return nodes.ExecCommandOnNode(chkCmdLine, node)
+ }
+ chkKubeletConfigFn := func(node *corev1.Node) (string, error) {
+ return nodes.ExecCommandOnNode(chkKubeletConfig, node)
+ }
+
+ chkHugepages2MFn := func(node *corev1.Node) (string, error) {
+ count, err := countHugepagesOnNode(node, 2)
+ if err != nil {
+ return "", err
+ }
+ return strconv.Itoa(count), nil
+ }
+
+ chkHugepages1GFn := func(node *corev1.Node) (string, error) {
+ count, err := countHugepagesOnNode(node, 1024)
+ if err != nil {
+ return "", err
+ }
+ return strconv.Itoa(count), nil
+ }
+
+ nodeLabel := testutils.NodeSelectorLabels
+
+ BeforeEach(func() {
+ if discovery.Enabled() && testutils.ProfileNotFound {
+ Skip("Discovery mode enabled, performance profile not found")
+ }
+
+ workerRTNodes, err = nodes.GetByLabels(testutils.NodeSelectorLabels)
+ Expect(err).ToNot(HaveOccurred())
+ workerRTNodes, err = nodes.MatchingOptionalSelector(workerRTNodes)
+ Expect(err).ToNot(HaveOccurred(), fmt.Sprintf("error looking for the optional selector: %v", err))
+ Expect(workerRTNodes).ToNot(BeEmpty(), "cannot find RT enabled worker nodes")
+ profile, err = profiles.GetByNodeLabels(nodeLabel)
+ Expect(err).ToNot(HaveOccurred())
+ performanceMCP, err = mcps.GetByProfile(profile)
+ Expect(err).ToNot(HaveOccurred())
+
+ // Verify that worker and performance MCP have updated state equals to true
+ for _, mcpName := range []string{testutils.RoleWorker, performanceMCP} {
+ mcps.WaitForCondition(mcpName, machineconfigv1.MachineConfigPoolUpdated, corev1.ConditionTrue)
+ }
+ })
+
+ Context("Verify GloballyDisableIrqLoadBalancing Spec field", func() {
+ It("[test_id:36150] Verify that IRQ load balancing is enabled/disabled correctly", func() {
+ irqLoadBalancingDisabled := profile.Spec.GloballyDisableIrqLoadBalancing != nil && *profile.Spec.GloballyDisableIrqLoadBalancing
+
+ Expect(profile.Spec.CPU.Isolated).NotTo(BeNil(), "expected isolated CPUs, found none")
+ isolatedCPUSet, err := cpuset.Parse(string(*profile.Spec.CPU.Isolated))
+ Expect(err).ToNot(HaveOccurred())
+
+ verifyNodes := func() error {
+ var expectedBannedCPUs cpuset.CPUSet
+ if irqLoadBalancingDisabled {
+ expectedBannedCPUs = isolatedCPUSet
+ } else {
+ expectedBannedCPUs = cpuset.NewCPUSet()
+ }
+
+ for _, node := range workerRTNodes {
+ By(fmt.Sprintf("verifying worker node %q", node.Name))
+
+ bannedCPUs, err := nodes.BannedCPUs(node)
+ Expect(err).ToNot(HaveOccurred(), "failed to extract the banned CPUs from node %s", node.Name)
+
+ if !bannedCPUs.Equals(expectedBannedCPUs) {
+ return fmt.Errorf("banned CPUs %v do not match the expected mask %v on node %s",
+ bannedCPUs, expectedBannedCPUs, node.Name)
+ }
+
+ smpAffinitySet, err := nodes.GetDefaultSmpAffinitySet(&node)
+ Expect(err).ToNot(HaveOccurred(), "failed to get default smp affinity")
+
+ onlineCPUsSet, err := nodes.GetOnlineCPUsSet(&node)
+ Expect(err).ToNot(HaveOccurred(), "failed to get Online CPUs list")
+
+ if irqLoadBalancingDisabled {
+ if !smpAffinitySet.Equals(onlineCPUsSet.Difference(isolatedCPUSet)) {
+ return fmt.Errorf("found default_smp_affinity %v, expected %v",
+ smpAffinitySet, onlineCPUsSet.Difference(isolatedCPUSet))
+ }
+ } else {
+ if !smpAffinitySet.Equals(onlineCPUsSet) {
+ return fmt.Errorf("found default_smp_affinity %v, expected %v",
+ smpAffinitySet, onlineCPUsSet)
+ }
+ }
+ }
+ return nil
+ }
+
+ err = verifyNodes()
+ Expect(err).ToNot(HaveOccurred())
+
+ By("Modifying profile")
+ initialProfile = profile.DeepCopy()
+
+ irqLoadBalancingDisabled = !irqLoadBalancingDisabled
+ profile.Spec.GloballyDisableIrqLoadBalancing = &irqLoadBalancingDisabled
+
+ spec, err := json.Marshal(profile.Spec)
+ Expect(err).ToNot(HaveOccurred())
+
+ By("Applying changes in performance profile and waiting until mcp will start updating")
+ Expect(testclient.Client.Patch(context.TODO(), profile,
+ client.RawPatch(
+ types.JSONPatchType,
+ []byte(fmt.Sprintf(`[{ "op": "replace", "path": "/spec", "value": %s }]`, spec)),
+ ),
+ )).ToNot(HaveOccurred())
+
+ defer func() { // return initial configuration
+ spec, err := json.Marshal(initialProfile.Spec)
+ Expect(err).ToNot(HaveOccurred())
+ Expect(testclient.Client.Patch(context.TODO(), profile,
+ client.RawPatch(
+ types.JSONPatchType,
+ []byte(fmt.Sprintf(`[{ "op": "replace", "path": "/spec", "value": %s }]`, spec)),
+ ),
+ )).ToNot(HaveOccurred())
+ }()
+
+ Eventually(verifyNodes, 1*time.Minute, 10*time.Second).ShouldNot(HaveOccurred())
+ })
+ })
+
+ Context("Verify hugepages count split on two NUMA nodes", func() {
+ hpSize2M := performancev2.HugePageSize("2M")
+
+ table.DescribeTable("Verify that profile parameters were updated", func(hpCntOnNuma0 int32, hpCntOnNuma1 int32) {
+ By("Verifying cluster configuration matches the requirement")
+ for _, node := range workerRTNodes {
+ numaInfo, err := nodes.GetNumaNodes(&node)
+ Expect(err).ToNot(HaveOccurred())
+ if len(numaInfo) < 2 {
+ Skip(fmt.Sprintf("This test needs 2 NUMA nodes. The number of NUMA nodes on node %s < 2", node.Name))
+ }
+ }
+ //have total of 4 cpus so VMs can handle running the configuration
+ numaInfo, _ := nodes.GetNumaNodes(&workerRTNodes[0])
+ cpuSlice := numaInfo[0][0:4]
+ isolated := performancev2.CPUSet(fmt.Sprintf("%d-%d", cpuSlice[2], cpuSlice[3]))
+ reserved := performancev2.CPUSet(fmt.Sprintf("%d-%d", cpuSlice[0], cpuSlice[1]))
+
+ By("Modifying profile")
+ initialProfile = profile.DeepCopy()
+ profile.Spec.CPU = &performancev2.CPU{
+ BalanceIsolated: pointer.BoolPtr(false),
+ Reserved: &reserved,
+ Isolated: &isolated,
+ }
+ profile.Spec.HugePages = &performancev2.HugePages{
+ DefaultHugePagesSize: &hpSize2M,
+ Pages: []performancev2.HugePage{
+ {
+ Count: hpCntOnNuma0,
+ Size: hpSize2M,
+ Node: pointer.Int32Ptr(0),
+ },
+ {
+ Count: hpCntOnNuma1,
+ Size: hpSize2M,
+ Node: pointer.Int32Ptr(1),
+ },
+ },
+ }
+ profile.Spec.RealTimeKernel = &performancev2.RealTimeKernel{
+ Enabled: pointer.BoolPtr(true),
+ }
+
+ By("Verifying that mcp is ready for update")
+ mcps.WaitForCondition(performanceMCP, machineconfigv1.MachineConfigPoolUpdated, corev1.ConditionTrue)
+
+ spec, err := json.Marshal(profile.Spec)
+ Expect(err).ToNot(HaveOccurred())
+
+ By("Applying changes in performance profile and waiting until mcp will start updating")
+ Expect(testclient.Client.Patch(context.TODO(), profile,
+ client.RawPatch(
+ types.JSONPatchType,
+ []byte(fmt.Sprintf(`[{ "op": "replace", "path": "/spec", "value": %s }]`, spec)),
+ ),
+ )).ToNot(HaveOccurred())
+ mcps.WaitForCondition(performanceMCP, machineconfigv1.MachineConfigPoolUpdating, corev1.ConditionTrue)
+
+ By("Waiting when mcp finishes updates")
+ mcps.WaitForCondition(performanceMCP, machineconfigv1.MachineConfigPoolUpdated, corev1.ConditionTrue)
+
+ for _, node := range workerRTNodes {
+ for i := 0; i < 2; i++ {
+ nodeCmd := []string{"cat", hugepagesPathForNode(i, 2)}
+ result, err := nodes.ExecCommandOnNode(nodeCmd, &node)
+ Expect(err).ToNot(HaveOccurred())
+
+ t, err := strconv.Atoi(result)
+ Expect(err).ToNot(HaveOccurred())
+
+ if i == 0 {
+ Expect(int32(t)).To(Equal(hpCntOnNuma0))
+ } else {
+ Expect(int32(t)).To(Equal(hpCntOnNuma1))
+ }
+ }
+ }
+ },
+ table.Entry("[test_id:45023] verify uneven split of hugepages between 2 numa nodes", int32(2), int32(1)),
+ table.Entry("[test_id:45024] verify even split between 2 numa nodes", int32(1), int32(1)),
+ )
+ })
+
+ Context("Verify that all performance profile parameters can be updated", func() {
+ var removedKernelArgs string
+
+ hpSize2M := performancev2.HugePageSize("2M")
+ hpSize1G := performancev2.HugePageSize("1G")
+ isolated := performancev2.CPUSet("1-2")
+ reserved := performancev2.CPUSet("0,3")
+ policy := "best-effort"
+
+ // Modify profile and verify that MCO successfully updated the node
+ testutils.BeforeAll(func() {
+ By("Modifying profile")
+ initialProfile = profile.DeepCopy()
+
+ profile.Spec.HugePages = &performancev2.HugePages{
+ DefaultHugePagesSize: &hpSize2M,
+ Pages: []performancev2.HugePage{
+ {
+ Count: 256,
+ Size: hpSize2M,
+ },
+ {
+ Count: 3,
+ Size: hpSize1G,
+ },
+ },
+ }
+ profile.Spec.CPU = &performancev2.CPU{
+ BalanceIsolated: pointer.BoolPtr(false),
+ Reserved: &reserved,
+ Isolated: &isolated,
+ }
+ profile.Spec.NUMA = &performancev2.NUMA{
+ TopologyPolicy: &policy,
+ }
+ profile.Spec.RealTimeKernel = &performancev2.RealTimeKernel{
+ Enabled: pointer.BoolPtr(false),
+ }
+
+ if profile.Spec.AdditionalKernelArgs == nil {
+ By("AdditionalKernelArgs is empty. Checking only adding new arguments")
+ profile.Spec.AdditionalKernelArgs = append(profile.Spec.AdditionalKernelArgs, "new-argument=test")
+ } else {
+ removedKernelArgs = profile.Spec.AdditionalKernelArgs[0]
+ profile.Spec.AdditionalKernelArgs = append(profile.Spec.AdditionalKernelArgs[1:], "new-argument=test")
+ }
+
+ By("Verifying that mcp is ready for update")
+ mcps.WaitForCondition(performanceMCP, machineconfigv1.MachineConfigPoolUpdated, corev1.ConditionTrue)
+
+ spec, err := json.Marshal(profile.Spec)
+ Expect(err).ToNot(HaveOccurred())
+
+ By("Applying changes in performance profile and waiting until mcp will start updating")
+ Expect(testclient.Client.Patch(context.TODO(), profile,
+ client.RawPatch(
+ types.JSONPatchType,
+ []byte(fmt.Sprintf(`[{ "op": "replace", "path": "/spec", "value": %s }]`, spec)),
+ ),
+ )).ToNot(HaveOccurred())
+ mcps.WaitForCondition(performanceMCP, machineconfigv1.MachineConfigPoolUpdating, corev1.ConditionTrue)
+
+ By("Waiting when mcp finishes updates")
+ mcps.WaitForCondition(performanceMCP, machineconfigv1.MachineConfigPoolUpdated, corev1.ConditionTrue)
+ })
+
+ table.DescribeTable("Verify that profile parameters were updated", func(cmdFn checkFunction, parameter []string, shouldContain bool, useRegex bool) {
+ for _, node := range workerRTNodes {
+ for _, param := range parameter {
+ result, err := cmdFn(&node)
+ Expect(err).ToNot(HaveOccurred())
+ matcher := ContainSubstring(param)
+ if useRegex {
+ matcher = MatchRegexp(param)
+ }
+
+ if shouldContain {
+ Expect(result).To(matcher)
+ } else {
+ Expect(result).NotTo(matcher)
+ }
+ }
+ }
+ },
+ table.Entry("[test_id:34081] verify that hugepages size and count updated", chkCmdLineFn, []string{"default_hugepagesz=2M", "hugepagesz=1G", "hugepages=3"}, true, false),
+ table.Entry("[test_id:28070] verify that hugepages updated (NUMA node unspecified)", chkCmdLineFn, []string{"hugepagesz=2M"}, true, false),
+ table.Entry("verify that the right number of hugepages 1G is available on the system", chkHugepages1GFn, []string{"3"}, true, false),
+ table.Entry("verify that the right number of hugepages 2M is available on the system", chkHugepages2MFn, []string{"256"}, true, false),
+ table.Entry("[test_id:28025] verify that cpu affinity mask was updated", chkCmdLineFn, []string{"tuned.non_isolcpus=.*9"}, true, true),
+ table.Entry("[test_id:28071] verify that cpu balancer disabled", chkCmdLineFn, []string{"isolcpus=domain,managed_irq,1-2"}, true, false),
+ table.Entry("[test_id:28071] verify that cpu balancer disabled", chkCmdLineFn, []string{"systemd.cpu_affinity=0,3"}, true, false),
+ // kubelet.conf changed formatting, there is a space after colons atm. Let's deal with both cases with a regex
+ table.Entry("[test_id:28935] verify that reservedSystemCPUs was updated", chkKubeletConfigFn, []string{`"reservedSystemCPUs": ?"0,3"`}, true, true),
+ table.Entry("[test_id:28760] verify that topologyManager was updated", chkKubeletConfigFn, []string{`"topologyManagerPolicy": ?"best-effort"`}, true, true),
+ )
+
+ It("[test_id:27738] should succeed to disable the RT kernel", func() {
+ for _, node := range workerRTNodes {
+ err := nodes.HasPreemptRTKernel(&node)
+ Expect(err).To(HaveOccurred())
+ }
+ })
+
+ It("[test_id:28612]Verify that Kernel arguments can be updated (added, removed) thru performance profile", func() {
+ for _, node := range workerRTNodes {
+ cmdline, err := nodes.ExecCommandOnNode(chkCmdLine, &node)
+ Expect(err).ToNot(HaveOccurred(), "failed to execute %s", chkCmdLine)
+
+ // Verifying that new argument was added
+ Expect(cmdline).To(ContainSubstring("new-argument=test"))
+
+ // Verifying that one of old arguments was removed
+ if removedKernelArgs != "" {
+ Expect(cmdline).NotTo(ContainSubstring(removedKernelArgs), "%s should be removed from /proc/cmdline", removedKernelArgs)
+ }
+ }
+ })
+
+ It("[test_id:22764] verify that by default RT kernel is disabled", func() {
+ conditionUpdating := machineconfigv1.MachineConfigPoolUpdating
+
+ if profile.Spec.RealTimeKernel == nil || *profile.Spec.RealTimeKernel.Enabled == true {
+ Skip("Skipping test - This test expects RT Kernel to be disabled. Found it to be enabled or nil.")
+ }
+
+ By("Applying changes in performance profile")
+ profile.Spec.RealTimeKernel = nil
+ spec, err := json.Marshal(profile.Spec)
+ Expect(err).ToNot(HaveOccurred())
+
+ By("Applying changes in performance profile and waiting until mcp will start updating")
+ Expect(testclient.Client.Patch(context.TODO(), profile,
+ client.RawPatch(
+ types.JSONPatchType,
+ []byte(fmt.Sprintf(`[{ "op": "replace", "path": "/spec", "value": %s }]`, spec)),
+ ),
+ )).ToNot(HaveOccurred())
+
+ Expect(profile.Spec.RealTimeKernel).To(BeNil(), "expected the real time kernel setting to be removed from the profile spec, but it is still present")
+ By("Checking that the updating MCP status will consistently stay false")
+ Consistently(func() corev1.ConditionStatus {
+ return mcps.GetConditionStatus(performanceMCP, conditionUpdating)
+ }, 30, 5).Should(Equal(corev1.ConditionFalse))
+
+ for _, node := range workerRTNodes {
+ err := nodes.HasPreemptRTKernel(&node)
+ Expect(err).To(HaveOccurred())
+ }
+ })
+
+ It("Reverts back all profile configuration", func() {
+ // return initial configuration
+ spec, err := json.Marshal(initialProfile.Spec)
+ Expect(err).ToNot(HaveOccurred())
+ Expect(testclient.Client.Patch(context.TODO(), profile,
+ client.RawPatch(
+ types.JSONPatchType,
+ []byte(fmt.Sprintf(`[{ "op": "replace", "path": "/spec", "value": %s }]`, spec)),
+ ),
+ )).ToNot(HaveOccurred())
+ mcps.WaitForCondition(performanceMCP, machineconfigv1.MachineConfigPoolUpdating, corev1.ConditionTrue)
+ mcps.WaitForCondition(performanceMCP, machineconfigv1.MachineConfigPoolUpdated, corev1.ConditionTrue)
+ })
+ })
+
+ // TODO: we have a dependency between tests(that in general bad practice, but saves us some tests run time),
+ // once we will want to run tests in the random order or without failFast we will need to refactor tests
+ Context("Updating of nodeSelector parameter and node labels", func() {
+ var mcp *machineconfigv1.MachineConfigPool
+ var newCnfNode *corev1.Node
+
+ newRole := "worker-test"
+ newLabel := fmt.Sprintf("%s/%s", testutils.LabelRole, newRole)
+ newNodeSelector := map[string]string{newLabel: ""}
+
+ testutils.BeforeAll(func() {
+ nonPerformancesWorkers, err := nodes.GetNonPerformancesWorkers(profile.Spec.NodeSelector)
+ Expect(err).ToNot(HaveOccurred())
+ if len(nonPerformancesWorkers) != 0 {
+ newCnfNode = &nonPerformancesWorkers[0]
+ }
+ })
+
+ JustBeforeEach(func() {
+ if newCnfNode == nil {
+ Skip("Skipping the test - cluster does not have another available worker node ")
+ }
+ })
+
+ It("[test_id:28440]Verifies that nodeSelector can be updated in performance profile", func() {
+ nodeLabel = newNodeSelector
+ newCnfNode.Labels[newLabel] = ""
+ Expect(testclient.Client.Update(context.TODO(), newCnfNode)).ToNot(HaveOccurred())
+
+ By("Creating new MachineConfigPool")
+ mcp = mcps.New(newRole, newNodeSelector)
+ err = testclient.Client.Create(context.TODO(), mcp)
+ Expect(err).ToNot(HaveOccurred())
+
+ By("Updating Node Selector performance profile")
+ profile.Spec.NodeSelector = newNodeSelector
+ spec, err := json.Marshal(profile.Spec)
+ Expect(err).ToNot(HaveOccurred())
+
+ By("Applying changes in performance profile and waiting until mcp will start updating")
+ Expect(testclient.Client.Patch(context.TODO(), profile,
+ client.RawPatch(
+ types.JSONPatchType,
+ []byte(fmt.Sprintf(`[{ "op": "replace", "path": "/spec", "value": %s }]`, spec)),
+ ),
+ )).ToNot(HaveOccurred())
+ mcps.WaitForCondition(newRole, machineconfigv1.MachineConfigPoolUpdating, corev1.ConditionTrue)
+
+ By("Waiting when MCP finishes updates and verifying new node has updated configuration")
+ mcps.WaitForCondition(newRole, machineconfigv1.MachineConfigPoolUpdated, corev1.ConditionTrue)
+
+ kblcfg, err := nodes.ExecCommandOnNode(chkKubeletConfig, newCnfNode)
+ Expect(err).ToNot(HaveOccurred(), "failed to execute %s", chkKubeletConfig)
+ Expect(kblcfg).To(ContainSubstring("topologyManagerPolicy"))
+
+ cmdline, err := nodes.ExecCommandOnNode(chkCmdLine, newCnfNode)
+ Expect(err).ToNot(HaveOccurred(), "failed to execute %s", chkCmdLine)
+ Expect(cmdline).To(ContainSubstring("tuned.non_isolcpus"))
+ })
+
+ It("[test_id:27484]Verifies that node is reverted to plain worker when the extra labels are removed", func() {
+ By("Deleting cnf labels from the node")
+ for l := range profile.Spec.NodeSelector {
+ delete(newCnfNode.Labels, l)
+ }
+ label, err := json.Marshal(newCnfNode.Labels)
+ Expect(err).ToNot(HaveOccurred())
+ Expect(testclient.Client.Patch(context.TODO(), newCnfNode,
+ client.RawPatch(
+ types.JSONPatchType,
+ []byte(fmt.Sprintf(`[{ "op": "replace", "path": "/metadata/labels", "value": %s }]`, label)),
+ ),
+ )).ToNot(HaveOccurred())
+ mcps.WaitForCondition(testutils.RoleWorker, machineconfigv1.MachineConfigPoolUpdating, corev1.ConditionTrue)
+
+ By("Waiting when MCP Worker complete updates and verifying that node reverted back configuration")
+ mcps.WaitForCondition(testutils.RoleWorker, machineconfigv1.MachineConfigPoolUpdated, corev1.ConditionTrue)
+
+ // Check if node is Ready
+ for i := range newCnfNode.Status.Conditions {
+ if newCnfNode.Status.Conditions[i].Type == corev1.NodeReady {
+ Expect(newCnfNode.Status.Conditions[i].Status).To(Equal(corev1.ConditionTrue))
+ }
+ }
+
+ // check that the configs reverted
+ err = nodes.HasPreemptRTKernel(newCnfNode)
+ Expect(err).To(HaveOccurred())
+
+ cmdline, err := nodes.ExecCommandOnNode(chkCmdLine, newCnfNode)
+ Expect(err).ToNot(HaveOccurred(), "failed to execute %s", chkCmdLine)
+ Expect(cmdline).NotTo(ContainSubstring("tuned.non_isolcpus"))
+
+ kblcfg, err := nodes.ExecCommandOnNode(chkKubeletConfig, newCnfNode)
+ Expect(err).ToNot(HaveOccurred(), "failed to execute %s", chkKubeletConfig)
+ Expect(kblcfg).NotTo(ContainSubstring("reservedSystemCPUs"))
+
+ Expect(profile.Spec.CPU.Reserved).NotTo(BeNil())
+ reservedCPU := string(*profile.Spec.CPU.Reserved)
+ cpuMask, err := components.CPUListToHexMask(reservedCPU)
+ Expect(err).ToNot(HaveOccurred(), "failed to list in Hex %s", reservedCPU)
+ irqBal, err := nodes.ExecCommandOnNode(chkIrqbalance, newCnfNode)
+ Expect(err).ToNot(HaveOccurred(), "failed to execute %s", chkIrqbalance)
+ Expect(irqBal).NotTo(ContainSubstring(cpuMask))
+ })
+
+ It("Reverts back nodeSelector and cleaning up leftovers", func() {
+ var selectorLabels []string
+ for k, v := range testutils.NodeSelectorLabels {
+ selectorLabels = append(selectorLabels, fmt.Sprintf(`"%s":"%s"`, k, v))
+ }
+ nodeSelector := strings.Join(selectorLabels, ",")
+ Expect(testclient.Client.Patch(context.TODO(), profile,
+ client.RawPatch(
+ types.JSONPatchType,
+ []byte(fmt.Sprintf(`[{ "op": "replace", "path": "/spec/nodeSelector", "value": {%s} }]`, nodeSelector)),
+ ),
+ )).ToNot(HaveOccurred())
+
+ updatedProfile := &performancev2.PerformanceProfile{}
+ Eventually(func() string {
+ key := types.NamespacedName{
+ Name: profile.Name,
+ Namespace: profile.Namespace,
+ }
+ Expect(testclient.Client.Get(context.TODO(), key, updatedProfile)).ToNot(HaveOccurred())
+ var updatedSelectorLabels []string
+ for k, v := range updatedProfile.Spec.NodeSelector {
+ updatedSelectorLabels = append(updatedSelectorLabels, fmt.Sprintf(`"%s":"%s"`, k, v))
+ }
+ updatedNodeSelector := strings.Join(updatedSelectorLabels, ",")
+ return updatedNodeSelector
+ }, 2*time.Minute, 15*time.Second).Should(Equal(nodeSelector))
+
+ performanceMCP, err = mcps.GetByProfile(updatedProfile)
+ Expect(err).ToNot(HaveOccurred())
+ Expect(testclient.Client.Delete(context.TODO(), mcp)).ToNot(HaveOccurred())
+ mcps.WaitForCondition(performanceMCP, machineconfigv1.MachineConfigPoolUpdated, corev1.ConditionTrue)
+
+ // revert node label to have the expected value
+ nodeLabel = testutils.NodeSelectorLabels
+ })
+ })
+})
+
+func hugepagesPathForNode(nodeID, sizeINMb int) string {
+ return fmt.Sprintf("/sys/devices/system/node/node%d/hugepages/hugepages-%dkB/nr_hugepages", nodeID, sizeINMb*1024)
+}
+
+func countHugepagesOnNode(node *corev1.Node, sizeInMb int) (int, error) {
+ numaInfo, err := nodes.GetNumaNodes(node)
+ if err != nil {
+ return 0, err
+ }
+ count := 0
+ for i := 0; i < len(numaInfo); i++ {
+ nodeCmd := []string{"cat", hugepagesPathForNode(i, sizeInMb)}
+ result, err := nodes.ExecCommandOnNode(nodeCmd, node)
+ if err != nil {
+ return 0, err
+ }
+ t, err := strconv.Atoi(result)
+ if err != nil {
+ return 0, err
+ }
+ count += t
+ }
+ return count, nil
+}
diff --git a/test/e2e/pao/functests/3_performance_status/status.go b/test/e2e/pao/functests/3_performance_status/status.go
new file mode 100644
index 000000000..bc6ba8b9b
--- /dev/null
+++ b/test/e2e/pao/functests/3_performance_status/status.go
@@ -0,0 +1,224 @@
+package __performance_status
+
+import (
+ "context"
+ "encoding/json"
+
+ ign2types "github.com/coreos/ignition/config/v2_2/types"
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+ tunedv1 "github.com/openshift/cluster-node-tuning-operator/pkg/apis/tuned/v1"
+ v1 "github.com/openshift/custom-resource-status/conditions/v1"
+ machineconfigv1 "github.com/openshift/machine-config-operator/pkg/apis/machineconfiguration.openshift.io/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+
+ "github.com/openshift/cluster-node-tuning-operator/pkg/pao/controller/performanceprofile/components"
+ testutils "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils"
+ testclient "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/client"
+ "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/discovery"
+ "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/mcps"
+ "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/nodes"
+ "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/profiles"
+
+ corev1 "k8s.io/api/core/v1"
+ nodev1beta1 "k8s.io/api/node/v1beta1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/types"
+ utilrand "k8s.io/apimachinery/pkg/util/rand"
+)
+
+var _ = Describe("Status testing of performance profile", func() {
+ var (
+ workerCNFNodes []corev1.Node
+ err error
+ clean func() error
+ )
+
+ BeforeEach(func() {
+ if discovery.Enabled() && testutils.ProfileNotFound {
+ Skip("Discovery mode enabled, performance profile not found")
+ }
+ workerCNFNodes, err = nodes.GetByLabels(testutils.NodeSelectorLabels)
+ Expect(err).ToNot(HaveOccurred())
+ workerCNFNodes, err = nodes.MatchingOptionalSelector(workerCNFNodes)
+ Expect(err).ToNot(HaveOccurred(), "error looking for the optional selector: %v", err)
+ Expect(workerCNFNodes).ToNot(BeEmpty())
+ // initialized clean function handler to be nil on every It execution
+ clean = nil
+ })
+
+ AfterEach(func() {
+ if clean != nil {
+ clean()
+ }
+
+ })
+
+ Context("[rfe_id:28881][performance] Performance Addons detailed status", func() {
+
+ It("[test_id:30894] Tuned status name tied to Performance Profile", func() {
+ profile, err := profiles.GetByNodeLabels(testutils.NodeSelectorLabels)
+ Expect(err).ToNot(HaveOccurred())
+ key := types.NamespacedName{
+ Name: components.GetComponentName(profile.Name, components.ProfileNamePerformance),
+ Namespace: components.NamespaceNodeTuningOperator,
+ }
+ tuned := &tunedv1.Tuned{}
+ err = testclient.GetWithRetry(context.TODO(), key, tuned)
+ Expect(err).ToNot(HaveOccurred(), "cannot find the Cluster Node Tuning Operator Tuned object "+key.String())
+ tunedNamespacedname := types.NamespacedName{
+ Name: components.GetComponentName(profile.Name, components.ProfileNamePerformance),
+ Namespace: components.NamespaceNodeTuningOperator,
+ }
+ tunedStatus := tunedNamespacedname.String()
+ Expect(profile.Status.Tuned).NotTo(BeNil())
+ Expect(*profile.Status.Tuned).To(Equal(tunedStatus))
+ })
+
+ It("[test_id:33791] Should include the generated runtime class name", func() {
+ profile, err := profiles.GetByNodeLabels(testutils.NodeSelectorLabels)
+ Expect(err).ToNot(HaveOccurred())
+
+ key := types.NamespacedName{
+ Name: components.GetComponentName(profile.Name, components.ComponentNamePrefix),
+ Namespace: metav1.NamespaceAll,
+ }
+ runtimeClass := &nodev1beta1.RuntimeClass{}
+ err = testclient.GetWithRetry(context.TODO(), key, runtimeClass)
+ Expect(err).ToNot(HaveOccurred(), "cannot find the RuntimeClass object "+key.String())
+
+ Expect(profile.Status.RuntimeClass).NotTo(BeNil())
+ Expect(*profile.Status.RuntimeClass).To(Equal(runtimeClass.Name))
+ })
+
+ It("[test_id:29673] Machine config pools status tied to Performance Profile", func() {
+ // Creating bad MC that leads to degraded state
+ By("Creating bad MachineConfig")
+ badMC := createBadMachineConfig("bad-mc")
+ err = testclient.Client.Create(context.TODO(), badMC)
+ Expect(err).ToNot(HaveOccurred())
+
+ By("Wait for MCP condition to be Degraded")
+ profile, err := profiles.GetByNodeLabels(testutils.NodeSelectorLabels)
+ Expect(err).ToNot(HaveOccurred())
+ performanceMCP, err := mcps.GetByProfile(profile)
+ Expect(err).ToNot(HaveOccurred())
+ mcps.WaitForCondition(performanceMCP, machineconfigv1.MachineConfigPoolDegraded, corev1.ConditionTrue)
+ mcpConditionReason := mcps.GetConditionReason(performanceMCP, machineconfigv1.MachineConfigPoolDegraded)
+ profileConditionMessage := profiles.GetConditionMessage(testutils.NodeSelectorLabels, v1.ConditionDegraded)
+ // Verify the status reason of performance profile
+ Expect(profileConditionMessage).To(ContainSubstring(mcpConditionReason))
+
+ By("Deleting bad MachineConfig and waiting when Degraded state is removed")
+ err = testclient.Client.Delete(context.TODO(), badMC)
+ Expect(err).ToNot(HaveOccurred())
+
+ mcps.WaitForCondition(performanceMCP, machineconfigv1.MachineConfigPoolUpdated, corev1.ConditionTrue)
+ })
+
+ It("[test_id:40402] Tuned profile status tied to Performance Profile", func() {
+ // During this test we're creating additional synthetic tuned CR by invoking the createBadTuned function.
+ // This synthetic tuned will look for a tuned profile which doesn't exist.
+ // This tuned CR will be applied on the profiles.tuned.openshift.io CR (there is such profile per node)
+ // which is associated with the node object with the same name.
+ // The connection between the node object and the tuned object is via the MachineConfigLabels, worker-cnf in our case.
+ ns := "openshift-cluster-node-tuning-operator"
+ tunedName := "openshift-cause-tuned-failure"
+
+ // Make sure to clean badTuned object even if the It threw an error
+ clean = func() error {
+ key := types.NamespacedName{
+ Name: tunedName,
+ Namespace: ns,
+ }
+ runtimeClass := &tunedv1.Tuned{}
+ err := testclient.Client.Get(context.TODO(), key, runtimeClass)
+ // if err != nil probably the resource were already deleted
+ if err == nil {
+ testclient.Client.Delete(context.TODO(), runtimeClass)
+ }
+ return err
+ }
+
+ // Creating bad Tuned object that leads to degraded state
+ badTuned := createBadTuned(tunedName, ns)
+ err = testclient.Client.Create(context.TODO(), badTuned)
+ Expect(err).ToNot(HaveOccurred())
+
+ By("Waiting for performance profile condition to be Degraded")
+ profiles.WaitForCondition(testutils.NodeSelectorLabels, v1.ConditionDegraded, corev1.ConditionTrue)
+
+ By("Deleting bad Tuned and waiting when Degraded state is removed")
+ err = testclient.Client.Delete(context.TODO(), badTuned)
+ profiles.WaitForCondition(testutils.NodeSelectorLabels, v1.ConditionAvailable, corev1.ConditionTrue)
+ })
+ })
+})
+
+func createBadMachineConfig(name string) *machineconfigv1.MachineConfig {
+ rawIgnition, _ := json.Marshal(
+ &ign2types.Config{
+ Ignition: ign2types.Ignition{
+ Version: ign2types.MaxVersion.String(),
+ },
+ Storage: ign2types.Storage{
+ Disks: []ign2types.Disk{
+ {
+ Device: "/one",
+ },
+ },
+ },
+ },
+ )
+
+ return &machineconfigv1.MachineConfig{
+ TypeMeta: metav1.TypeMeta{
+ APIVersion: machineconfigv1.SchemeGroupVersion.String(),
+ },
+ ObjectMeta: metav1.ObjectMeta{
+ Name: name,
+ Labels: map[string]string{"machineconfiguration.openshift.io/role": testutils.RoleWorkerCNF},
+ UID: types.UID(utilrand.String(5)),
+ },
+ Spec: machineconfigv1.MachineConfigSpec{
+ OSImageURL: "",
+ Config: runtime.RawExtension{
+ Raw: rawIgnition,
+ },
+ },
+ }
+}
+
+func createBadTuned(name, ns string) *tunedv1.Tuned {
+ priority := uint64(20)
+ // include=profile-does-not-exist
+ // points to tuned profile which doesn't exist
+ data := "[main]\nsummary=A Tuned daemon profile that does not exist\ninclude=profile-does-not-exist"
+
+ return &tunedv1.Tuned{
+ TypeMeta: metav1.TypeMeta{
+ APIVersion: tunedv1.SchemeGroupVersion.String(),
+ },
+ ObjectMeta: metav1.ObjectMeta{
+ Name: name,
+ Namespace: ns,
+ UID: types.UID(utilrand.String(5)),
+ },
+ Spec: tunedv1.TunedSpec{
+ Profile: []tunedv1.TunedProfile{
+ {
+ Name: &name,
+ Data: &data,
+ },
+ },
+ Recommend: []tunedv1.TunedRecommend{
+ {
+ MachineConfigLabels: map[string]string{"machineconfiguration.openshift.io/role": testutils.RoleWorkerCNF},
+ Priority: &priority,
+ Profile: &name,
+ },
+ },
+ },
+ }
+
+}
diff --git a/test/e2e/pao/functests/3_performance_status/test_suite_performance_status_test.go b/test/e2e/pao/functests/3_performance_status/test_suite_performance_status_test.go
new file mode 100644
index 000000000..a6ab9058c
--- /dev/null
+++ b/test/e2e/pao/functests/3_performance_status/test_suite_performance_status_test.go
@@ -0,0 +1,50 @@
+//go:build !unittests
+// +build !unittests
+
+package __performance_status_test
+
+import (
+ "context"
+ "testing"
+ "time"
+
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+
+ "k8s.io/apimachinery/pkg/api/errors"
+
+ ginkgo_reporters "kubevirt.io/qe-tools/pkg/ginkgo-reporters"
+
+ testutils "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils"
+ testclient "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/client"
+ "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/junit"
+ testlog "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/log"
+ "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/namespaces"
+)
+
+var _ = BeforeSuite(func() {
+ // create test namespace
+ err := testclient.Client.Create(context.TODO(), namespaces.TestingNamespace)
+ if errors.IsAlreadyExists(err) {
+ testlog.Warning("test namespace already exists, that is unexpected")
+ return
+ }
+ Expect(err).ToNot(HaveOccurred())
+})
+
+var _ = AfterSuite(func() {
+ err := testclient.Client.Delete(context.TODO(), namespaces.TestingNamespace)
+ Expect(err).ToNot(HaveOccurred())
+ err = namespaces.WaitForDeletion(testutils.NamespaceTesting, 5*time.Minute)
+})
+
+func TestPerformanceUpdate(t *testing.T) {
+ RegisterFailHandler(Fail)
+
+ rr := []Reporter{}
+ if ginkgo_reporters.Polarion.Run {
+ rr = append(rr, &ginkgo_reporters.Polarion)
+ }
+ rr = append(rr, junit.NewJUnitReporter("performance_status"))
+ RunSpecsWithDefaultAndCustomReporters(t, "Performance Addon Operator Status e2e tests", rr)
+}
diff --git a/test/e2e/pao/functests/4_latency/latency.go b/test/e2e/pao/functests/4_latency/latency.go
new file mode 100644
index 000000000..307ed08e9
--- /dev/null
+++ b/test/e2e/pao/functests/4_latency/latency.go
@@ -0,0 +1,511 @@
+package __latency
+
+import (
+ "context"
+ "fmt"
+ "math"
+ "os"
+ "path"
+ "regexp"
+ "strconv"
+ "strings"
+ "time"
+
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+
+ performancev2 "github.com/openshift/cluster-node-tuning-operator/pkg/apis/pao/v2"
+ "github.com/openshift/cluster-node-tuning-operator/pkg/pao/controller/performanceprofile/components"
+ testutils "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils"
+ testclient "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/client"
+ "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/discovery"
+ "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/events"
+ "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/images"
+ testlog "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/log"
+ "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/nodes"
+ "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/pods"
+ "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/profiles"
+
+ corev1 "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/api/resource"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/kubernetes/pkg/kubelet/cm/cpuset"
+ "k8s.io/utils/pointer"
+)
+
const (
	// tool binary names; also used as pod-name prefixes and to derive the
	// per-tool *_MAXIMUM_LATENCY environment variable names
	oslatTestName       = "oslat"
	cyclictestTestName  = "cyclictest"
	hwlatdetectTestName = "hwlatdetect"
	// defaults used when the corresponding LATENCY_TEST_* variable is unset
	defaultTestDelay   = 0
	defaultTestRun     = false
	defaultTestRuntime = "300"
	// -1 is a sentinel meaning "not requested / use the derived value"
	defaultMaxLatency = -1
	defaultTestCpus   = -1
	// oslat needs one CPU for its main thread plus at least one measured CPU
	minCpuAmountForOslat = 2
)
+
var (
	// effective values of the LATENCY_TEST_* / MAXIMUM_LATENCY knobs;
	// re-resolved from the environment in BeforeEach of the Describe below
	latencyTestDelay   = defaultTestDelay
	latencyTestRun     = defaultTestRun
	latencyTestRuntime = defaultTestRuntime
	maximumLatency     = defaultMaxLatency
	latencyTestCpus    = defaultTestCpus
)
+
// LATENCY_TEST_DELAY: delays the run of the binary; can be useful to give the CPU manager
// reconcile loop time to update the default CPU pool
// LATENCY_TEST_RUN: indicates if the latency test should run
// LATENCY_TEST_RUNTIME: the amount of time in seconds that the latency test should run
// LATENCY_TEST_CPUS: the amount of CPUs the pod which runs the latency test should request
+
// Latency measurement suite: runs the oslat, cyclictest and hwlatdetect
// binaries inside a guaranteed pod pinned to a performance-tuned worker node,
// and — when a *_MAXIMUM_LATENCY value was provided — compares the measured
// maxima against it.
var _ = Describe("[performance] Latency Test", func() {
	var workerRTNode *corev1.Node
	var profile *performancev2.PerformanceProfile
	var latencyTestPod *corev1.Pod
	var err error
	// timestamp-derived name of the log file the test pod writes on the node
	var logName string

	BeforeEach(func() {
		logName = time.Now().Format("20060102150405")

		// resolve all LATENCY_TEST_* environment knobs up front so that a
		// malformed value fails the spec before any cluster work is done
		latencyTestRun, err = getLatencyTestRun()
		Expect(err).ToNot(HaveOccurred())

		latencyTestDelay, err = getLatencyTestDelay()
		Expect(err).ToNot(HaveOccurred())

		latencyTestCpus, err = getLatencyTestCpus()
		Expect(err).ToNot(HaveOccurred())

		latencyTestRuntime, err = getLatencyTestRuntime()
		Expect(err).ToNot(HaveOccurred())

		if !latencyTestRun {
			Skip("Skip the latency test, the LATENCY_TEST_RUN set to false")
		}

		if discovery.Enabled() && testutils.ProfileNotFound {
			Skip("Discovery mode enabled, performance profile not found")
		}

		profile, err = profiles.GetByNodeLabels(testutils.NodeSelectorLabels)
		Expect(err).ToNot(HaveOccurred())

		workerRTNodes, err := nodes.GetByLabels(testutils.NodeSelectorLabels)
		Expect(err).ToNot(HaveOccurred())

		workerRTNodes, err = nodes.MatchingOptionalSelector(workerRTNodes)
		Expect(err).ToNot(HaveOccurred(), "error looking for the optional selector: %v", err)

		Expect(workerRTNodes).ToNot(BeEmpty())

		//At least one worker node should have cpu.Allocatable greater than the quantity requested by each test, else skip the test
		workerRTNodesWithSufficientCpu := nodes.GetByCpuAllocatable(workerRTNodes, latencyTestCpus)
		if len(workerRTNodesWithSufficientCpu) == 0 {
			Skip("Insufficient cpu to run the test")

		}
		// the pod is later pinned to this node via a hostname node selector
		workerRTNode = &workerRTNodesWithSufficientCpu[0]

	})

	AfterEach(func() {
		// best-effort cleanup: deletion failures are logged, not asserted,
		// so one leaked pod does not mask the actual spec result
		removeLogfile(workerRTNode, logName)
		err = testclient.Client.Delete(context.TODO(), latencyTestPod)
		if err != nil {
			testlog.Error(err)
		}

		err = pods.WaitForDeletion(latencyTestPod, pods.DefaultDeletionTimeout*time.Second)
		if err != nil {
			testlog.Error(err)
		}

		// reset the package-level maximum so the next Context re-reads its own env var
		maximumLatency = -1
	})

	Context("with the oslat image", func() {
		testName := oslatTestName

		BeforeEach(func() {
			maximumLatency, err = getMaximumLatency(testName)
			Expect(err).ToNot(HaveOccurred())

			if profile.Spec.CPU.Isolated == nil {
				Skip(fmt.Sprintf("Skip the oslat test, the profile %q does not have isolated CPUs", profile.Name))
			}

			isolatedCpus := cpuset.MustParse(string(*profile.Spec.CPU.Isolated))
			// we require at least two CPUs to run oslat test, because one CPU should be used to run the main oslat thread
			// we can not use all isolated CPUs, because if reserved and isolated include all node CPUs, and reserved CPUs
			// do not calculated into the Allocated, at least part of time of one of isolated CPUs will be used to run
			// other node containers
			// at least two isolated CPUs to run oslat + one isolated CPU used by other containers on the node = at least 3 isolated CPUs
			if isolatedCpus.Size() < (minCpuAmountForOslat + 1) {
				Skip(fmt.Sprintf("Skip the oslat test, the profile %q has less than %d isolated CPUs", profile.Name, minCpuAmountForOslat))
			}
			if latencyTestCpus < minCpuAmountForOslat && latencyTestCpus != defaultTestCpus {
				Skip(fmt.Sprintf("Skip the oslat test, LATENCY_TEST_CPUS is less than the minimum CPUs amount %d", minCpuAmountForOslat))
			}
		})

		It("should succeed", func() {
			oslatArgs := []string{
				fmt.Sprintf("-runtime=%s", latencyTestRuntime),
			}
			latencyTestPod = getLatencyTestPod(profile, workerRTNode, testName, oslatArgs, logName)
			createLatencyTestPod(latencyTestPod, workerRTNode, logName)
			logFileContent := getLogFile(workerRTNode, logName)

			// verify the maximum latency only when it requested, because this value can be very different
			// on different systems
			if maximumLatency == -1 {
				testlog.Info(logFileContent)
				Skip("no maximum latency value provided, skip buckets latency check")
			}

			// per-CPU maxima are space-separated on the oslat "Maximum:" line
			latencies := extractLatencyValues(logName, `Maximum:\t*([\s\d]*)\(us\)`, workerRTNode)
			latenciesList := strings.Split(latencies, " ")
			for _, lat := range latenciesList {
				if lat == "" {
					continue
				}
				curr, err := strconv.Atoi(lat)
				Expect(err).ToNot(HaveOccurred())

				Expect(curr < maximumLatency).To(BeTrue(), "The current latency %d is bigger than the expected one %d : \n %s", curr, maximumLatency, logFileContent)

			}
			//Use Println here so that this output will be displayed upon executing the test binary
			fmt.Println(logFileContent)
		})
	})

	Context("with the cyclictest image", func() {
		testName := cyclictestTestName

		BeforeEach(func() {
			maximumLatency, err = getMaximumLatency(testName)
			Expect(err).ToNot(HaveOccurred())

			if profile.Spec.CPU.Isolated == nil {
				Skip(fmt.Sprintf("Skip the cyclictest test, the profile %q does not have isolated CPUs", profile.Name))
			}
		})

		It("should succeed", func() {
			cyclictestArgs := []string{
				fmt.Sprintf("-duration=%s", latencyTestRuntime),
			}
			latencyTestPod = getLatencyTestPod(profile, workerRTNode, testName, cyclictestArgs, logName)
			createLatencyTestPod(latencyTestPod, workerRTNode, logName)
			logFileContent := getLogFile(workerRTNode, logName)

			// verify the maximum latency only when it requested, because this value can be very different
			// on different systems
			if maximumLatency == -1 {
				testlog.Info(logFileContent)
				Skip("no maximum latency value provided, skip buckets latency check")
			}
			// cyclictest prints all per-CPU maxima on its "# Max Latencies:" line
			latencies := extractLatencyValues(logName, `# Max Latencies:\t*\s*(.*)\s*\t*`, workerRTNode)
			for _, lat := range strings.Split(latencies, " ") {
				if lat == "" {
					continue
				}

				curr, err := strconv.Atoi(lat)
				Expect(err).ToNot(HaveOccurred())

				Expect(curr < maximumLatency).To(BeTrue(), "The current latency %d is bigger than the expected one %d : \n %s", curr, maximumLatency, logFileContent)

			}
			//Use Println here so that this output will be displayed upon executing the test binary
			fmt.Println(logFileContent)
		})
	})

	Context("with the hwlatdetect image", func() {
		testName := hwlatdetectTestName

		BeforeEach(func() {
			maximumLatency, err = getMaximumLatency(testName)
			Expect(err).ToNot(HaveOccurred())
		})

		It("should succeed", func() {
			hardLimit := maximumLatency
			if hardLimit == -1 {
				// This value should be > than max latency,
				// in order to prevent the hwlatdetect return with error 1 in case latency value is bigger than expected.
				// in case latency value is bigger than expected, it will be handled on different flow.
				hardLimit = 1000
			}

			hwlatdetectArgs := []string{
				fmt.Sprintf("-hardlimit=%d", hardLimit),
				fmt.Sprintf("-duration=%s", latencyTestRuntime),
			}

			// set the maximum latency for the test if needed
			if maximumLatency != -1 {
				hwlatdetectArgs = append(hwlatdetectArgs, fmt.Sprintf("-threshold=%d", maximumLatency))
			}

			latencyTestPod = getLatencyTestPod(profile, workerRTNode, testName, hwlatdetectArgs, logName)
			createLatencyTestPod(latencyTestPod, workerRTNode, logName)
			logFileContent := getLogFile(workerRTNode, logName)

			// here we don't need to parse the latency values.
			// hwlatdetect will do that for us and exit with error if needed.
			//Use Println here so that this output will be displayed upon executing the test binary
			fmt.Println(logFileContent)
		})
	})
})
+
+func getLatencyTestRun() (bool, error) {
+ if latencyTestRunEnv, ok := os.LookupEnv("LATENCY_TEST_RUN"); ok {
+ val, err := strconv.ParseBool(latencyTestRunEnv)
+ if err != nil {
+ return val, fmt.Errorf("the environment variable LATENCY_TEST_RUN has incorrect value %q: %w", latencyTestRunEnv, err)
+ }
+ return val, nil
+ }
+ return defaultTestRun, nil
+}
+
+func getLatencyTestRuntime() (string, error) {
+ if latencyTestRuntimeEnv, ok := os.LookupEnv("LATENCY_TEST_RUNTIME"); ok {
+ val, err := strconv.Atoi(latencyTestRuntimeEnv)
+ if err != nil {
+ return latencyTestRuntimeEnv, fmt.Errorf("the environment variable LATENCY_TEST_RUNTIME has incorrect value %q, it must be a positive integer with maximum value of %d", latencyTestRuntimeEnv, math.MaxInt32)
+ }
+ if val < 1 || val > math.MaxInt32 {
+ return "", fmt.Errorf("the environment variable LATENCY_TEST_RUNTIME has an invalid number %q, it must be a positive integer with maximum value of %d", latencyTestRuntimeEnv, math.MaxInt32)
+ }
+ return latencyTestRuntimeEnv, nil
+ }
+ return defaultTestRuntime, nil
+}
+
+func getLatencyTestDelay() (int, error) {
+ if latencyTestDelayEnv, ok := os.LookupEnv("LATENCY_TEST_DELAY"); ok {
+ val, err := strconv.Atoi(latencyTestDelayEnv)
+ if err != nil {
+ return val, fmt.Errorf("the environment variable LATENCY_TEST_DELAY has incorrect value %q, it must be a non-negative integer with maximum value of %d: %w", latencyTestDelayEnv, math.MaxInt32, err)
+ }
+ if val < 0 || val > math.MaxInt32 {
+ return val, fmt.Errorf("the environment variable LATENCY_TEST_DELAY has an invalid number %q, it must be a non-negative integer with maximum value of %d", latencyTestDelayEnv, math.MaxInt32)
+ }
+ return val, nil
+ }
+ return defaultTestDelay, nil
+}
+
+func getLatencyTestCpus() (int, error) {
+ if latencyTestCpusEnv, ok := os.LookupEnv("LATENCY_TEST_CPUS"); ok {
+ val, err := strconv.Atoi(latencyTestCpusEnv)
+ if err != nil {
+ return val, fmt.Errorf("the environment variable LATENCY_TEST_CPUS has incorrect value %q, it must be a positive integer with maximum value of %d: %w", latencyTestCpusEnv, math.MaxInt32, err)
+ }
+ if val < 0 || val > math.MaxInt32 {
+ return val, fmt.Errorf("the environment variable LATENCY_TEST_CPUS has an invalid number %q, it must be a positive integer with maximum value of %d", latencyTestCpusEnv, math.MaxInt32)
+ }
+ return val, nil
+ }
+ return defaultTestCpus, nil
+}
+
// getMaximumLatency should look for one of the following environment variables:
// OSLAT_MAXIMUM_LATENCY: the expected maximum latency for all buckets in us
// CYCLICTEST_MAXIMUM_LATENCY: the expected maximum latency for all buckets in us
// HWLATDETECT_MAXIMUM_LATENCY: the expected maximum latency for all buckets in us
// MAXIMUM_LATENCY: unified expected maximum latency for all tests
// The tool-specific variable (derived from testName) takes precedence over
// MAXIMUM_LATENCY; when neither is set, defaultMaxLatency (-1) is returned,
// which callers treat as "no latency check requested".
func getMaximumLatency(testName string) (int, error) {
	var err error
	val := defaultMaxLatency
	if unifiedMaxLatencyEnv, ok := os.LookupEnv("MAXIMUM_LATENCY"); ok {
		val, err = strconv.Atoi(unifiedMaxLatencyEnv)
		if err != nil {
			return val, fmt.Errorf("the environment variable MAXIMUM_LATENCY has incorrect value %q, it must be a non-negative integer with maximum value of %d: %w", unifiedMaxLatencyEnv, math.MaxInt32, err)
		}
		if val < 0 || val > math.MaxInt32 {
			return val, fmt.Errorf("the environment variable MAXIMUM_LATENCY has an invalid number %q, it must be a non-negative integer with maximum value of %d", unifiedMaxLatencyEnv, math.MaxInt32)
		}
	}

	// specific values will have precedence over the general one
	envVariableName := fmt.Sprintf("%s_MAXIMUM_LATENCY", strings.ToUpper(testName))
	if maximumLatencyEnv, ok := os.LookupEnv(envVariableName); ok {
		val, err = strconv.Atoi(maximumLatencyEnv)
		if err != nil {
			// note: errors here are recorded and returned together with the
			// (possibly zero) parsed value rather than returned immediately
			err = fmt.Errorf("the environment variable %q has incorrect value %q, it must be a non-negative integer with maximum value of %d: %w", envVariableName, maximumLatencyEnv, math.MaxInt32, err)
		}
		if val < 0 || val > math.MaxInt32 {
			err = fmt.Errorf("the environment variable %q has an invalid number %q, it must be a non-negative integer with maximum value of %d", envVariableName, maximumLatencyEnv, math.MaxInt32)
		}
	}
	return val, err
}
+
// getLatencyTestPod builds (without creating) the privileged, guaranteed-QoS
// pod that runs the given latency tool on the target node. The pod:
//   - uses the profile-derived runtime class and CRI-O load-balancing
//     annotations so its CPUs are exclusive and not interrupted;
//   - is pinned to the node via a kubernetes.io/hostname selector;
//   - mounts the host's /var/log so the tool's log survives pod deletion.
// Side effect: when LATENCY_TEST_CPUS was not set, the package-level
// latencyTestCpus is overwritten with (isolated CPU count - 1).
func getLatencyTestPod(profile *performancev2.PerformanceProfile, node *corev1.Node, testName string, testSpecificArgs []string, logName string) *corev1.Pod {
	runtimeClass := components.GetComponentName(profile.Name, components.ComponentNamePrefix)
	testNamePrefix := fmt.Sprintf("%s-", testName)
	runnerName := fmt.Sprintf("%srunner", testNamePrefix)
	// e.g. usr/bin/oslat-runner inside the test image
	runnerPath := path.Join("usr", "bin", runnerName)

	if latencyTestCpus == defaultTestCpus {
		// we can not use all isolated CPUs, because if reserved and isolated include all node CPUs, and reserved CPUs
		// do not calculated into the Allocated, at least part of time of one of isolated CPUs will be used to run
		// other node containers
		cpus := cpuset.MustParse(string(*profile.Spec.CPU.Isolated))
		latencyTestCpus = cpus.Size() - 1
	}

	latencyTestRunnerArgs := []string{
		"-logtostderr=false",
		"-alsologtostderr=true",
		// the runner writes its log under the hostPath mount (see below)
		fmt.Sprintf("-log_file=/host/%s.log", logName),
	}

	latencyTestRunnerArgs = append(latencyTestRunnerArgs, testSpecificArgs...)

	if latencyTestDelay > 0 {
		latencyTestRunnerArgs = append(latencyTestRunnerArgs, fmt.Sprintf("-%s-start-delay=%d", testName, latencyTestDelay))
	}

	volumeTypeDirectory := corev1.HostPathDirectory
	return &corev1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			GenerateName: testNamePrefix,
			// disable CRI-O IRQ/CPU load balancing and CFS quota on the
			// pod's exclusive CPUs to avoid measurement noise
			Annotations: map[string]string{
				"irq-load-balancing.crio.io": "disable",
				"cpu-load-balancing.crio.io": "disable",
				"cpu-quota.crio.io":          "disable",
			},
			Namespace: testutils.NamespaceTesting,
		},
		Spec: corev1.PodSpec{
			RestartPolicy:    corev1.RestartPolicyNever,
			RuntimeClassName: &runtimeClass,
			Containers: []corev1.Container{
				{
					Name:  runnerName,
					Image: images.Test(),
					Command: []string{
						runnerPath,
					},
					Args: latencyTestRunnerArgs,
					// limits only (requests default to limits) => guaranteed QoS,
					// required for exclusive CPU assignment
					Resources: corev1.ResourceRequirements{
						Limits: corev1.ResourceList{
							corev1.ResourceCPU:    resource.MustParse(strconv.Itoa(latencyTestCpus)),
							corev1.ResourceMemory: resource.MustParse("1Gi"),
						},
					},
					SecurityContext: &corev1.SecurityContext{
						Privileged: pointer.BoolPtr(true),
					},
					VolumeMounts: []corev1.VolumeMount{
						{
							Name:      "logs",
							MountPath: "/host",
						},
					},
				},
			},
			NodeSelector: map[string]string{
				"kubernetes.io/hostname": node.Labels["kubernetes.io/hostname"],
			},
			Volumes: []corev1.Volume{
				{
					Name: "logs",
					VolumeSource: corev1.VolumeSource{
						HostPath: &corev1.HostPathVolumeSource{
							Path: "/var/log",
							Type: &volumeTypeDirectory,
						},
					},
				},
			},
		},
	}
}
+
+func logEventsForPod(testPod *corev1.Pod) {
+ events, err := events.GetEventsForObject(testclient.Client, testPod.Namespace, testPod.Name, string(testPod.UID))
+ if err != nil {
+ testlog.Error(err)
+ }
+ for _, event := range events.Items {
+ testlog.Warningf("-> %s %s %s", event.Action, event.Reason, event.Message)
+ }
+}
+
+func createLatencyTestPod(testPod *corev1.Pod, node *corev1.Node, logName string) {
+ err := testclient.Client.Create(context.TODO(), testPod)
+ Expect(err).ToNot(HaveOccurred())
+
+ timeout, err := strconv.Atoi(latencyTestRuntime)
+ Expect(err).ToNot(HaveOccurred())
+
+ By("Waiting two minutes to download the latencyTest image")
+ err = pods.WaitForPhase(testPod, corev1.PodRunning, 2*time.Minute)
+ if err != nil {
+ testlog.Error(err)
+ logEventsForPod(testPod)
+ }
+ Expect(err).ToNot(HaveOccurred())
+
+ if runtime, _ := strconv.Atoi(latencyTestRuntime); runtime > 1 {
+ By("Checking actual CPUs number for the running pod")
+ limitsCpusQuantity := testPod.Spec.Containers[0].Resources.Limits.Cpu()
+ RequestsCpusQuantity := testPod.Spec.Containers[0].Resources.Requests.Cpu()
+ //latency pod is guaranteed
+ Expect(isEqual(limitsCpusQuantity, latencyTestCpus)).To(BeTrue(), fmt.Sprintf("actual limits of cpus number used for the latency pod is not as set in LATENCY_TEST_CPUS, actual number is: %s", limitsCpusQuantity))
+ Expect(isEqual(RequestsCpusQuantity, latencyTestCpus)).To(BeTrue(), fmt.Sprintf("actual requests of cpus number used for the latency pod is not as set in LATENCY_TEST_CPUS, actual number is: %s", RequestsCpusQuantity))
+ }
+
+ By("Waiting another two minutes to give enough time for the cluster to move the pod to Succeeded phase")
+ podTimeout := time.Duration(timeout + 120)
+ err = pods.WaitForPhase(testPod, corev1.PodSucceeded, podTimeout*time.Second)
+ if err != nil {
+ testlog.Error(err)
+ logEventsForPod(testPod)
+ }
+ Expect(err).ToNot(HaveOccurred(), getLogFile(node, logName))
+}
+
+func extractLatencyValues(logName string, exp string, node *corev1.Node) string {
+ out := getLogFile(node, logName)
+
+ maximumRegex, err := regexp.Compile(exp)
+ Expect(err).ToNot(HaveOccurred())
+
+ latencies := maximumRegex.FindStringSubmatch(out)
+ Expect(len(latencies)).To(Equal(2))
+
+ return latencies[1]
+}
+
+func getLogFile(node *corev1.Node, logName string) string {
+ cmd := []string{"cat", fmt.Sprintf("/rootfs/var/log/%s.log", logName)}
+ out, err := nodes.ExecCommandOnNode(cmd, node)
+ if err != nil {
+ testlog.Error(err)
+ }
+ return out
+}
+
+func removeLogfile(node *corev1.Node, logName string) {
+ cmd := []string{"rm", "-f", fmt.Sprintf("/rootfs/var/log/%s.log", logName)}
+ _, err := nodes.ExecCommandOnNode(cmd, node)
+ if err != nil {
+ testlog.Error(err)
+ }
+
+}
+
+func isEqual(qty *resource.Quantity, amount int) bool {
+ return qty.CmpInt64(int64(amount)) == 0
+}
diff --git a/test/e2e/pao/functests/4_latency/test_suite_latency_test.go b/test/e2e/pao/functests/4_latency/test_suite_latency_test.go
new file mode 100644
index 000000000..dfdb3c235
--- /dev/null
+++ b/test/e2e/pao/functests/4_latency/test_suite_latency_test.go
@@ -0,0 +1,53 @@
+//go:build !unittests
+// +build !unittests
+
+package __latency_test
+
+import (
+ "context"
+ "os"
+ "testing"
+ "time"
+
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+ testutils "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils"
+ testclient "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/client"
+ "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/junit"
+ testlog "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/log"
+ "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/namespaces"
+
+ "k8s.io/apimachinery/pkg/api/errors"
+
+ ginkgo_reporters "kubevirt.io/qe-tools/pkg/ginkgo-reporters"
+)
+
+var _ = BeforeSuite(func() {
+ Expect(testclient.ClientsEnabled).To(BeTrue())
+ // create test namespace
+ err := testclient.Client.Create(context.TODO(), namespaces.TestingNamespace)
+ if errors.IsAlreadyExists(err) {
+ testlog.Warning("test namespace already exists, that is unexpected")
+ return
+ }
+ Expect(err).ToNot(HaveOccurred())
+})
+
+var _ = AfterSuite(func() {
+ err := testclient.Client.Delete(context.TODO(), namespaces.TestingNamespace)
+ Expect(err).ToNot(HaveOccurred())
+ err = namespaces.WaitForDeletion(testutils.NamespaceTesting, 5*time.Minute)
+})
+
+func TestLatency(t *testing.T) {
+ RegisterFailHandler(Fail)
+
+ testlog.Infof("KUBECONFIG=%q", os.Getenv("KUBECONFIG"))
+
+ rr := []Reporter{}
+ if ginkgo_reporters.Polarion.Run {
+ rr = append(rr, &ginkgo_reporters.Polarion)
+ }
+ rr = append(rr, junit.NewJUnitReporter("latency"))
+ RunSpecsWithDefaultAndCustomReporters(t, "Performance Addon Operator latency e2e tests", rr)
+}
diff --git a/test/e2e/pao/functests/5_latency_testing/5_latency_testing_suite_test.go b/test/e2e/pao/functests/5_latency_testing/5_latency_testing_suite_test.go
new file mode 100644
index 000000000..5ae99685d
--- /dev/null
+++ b/test/e2e/pao/functests/5_latency_testing/5_latency_testing_suite_test.go
@@ -0,0 +1,72 @@
+package __latency_testing_test
+
+import (
+ "context"
+ "encoding/json"
+ "testing"
+ "time"
+
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+
+ corev1 "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/api/errors"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+ testclient "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/client"
+ "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/images"
+ "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/junit"
+ testlog "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/log"
+ "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/namespaces"
+ ginkgo_reporters "kubevirt.io/qe-tools/pkg/ginkgo-reporters"
+)
+
// prePullNamespace hosts the DaemonSet used to pre-pull the test image onto
// the nodes before the latency-tooling specs run (see Test5LatencyTesting).
var prePullNamespace = &corev1.Namespace{
	ObjectMeta: metav1.ObjectMeta{
		Name: "testing-prepull",
	},
}
+
+var _ = AfterSuite(func() {
+ prePullNamespaceName := prePullNamespace.Name
+ err := testclient.Client.Delete(context.TODO(), prePullNamespace)
+ testlog.Infof("deleted namespace %q err=%v", prePullNamespace.Name, err)
+ Expect(err).ToNot(HaveOccurred())
+ err = namespaces.WaitForDeletion(prePullNamespaceName, 5*time.Minute)
+})
+
+func Test5LatencyTesting(t *testing.T) {
+ RegisterFailHandler(Fail)
+
+ if !testclient.ClientsEnabled {
+ t.Fatalf("client not enabled")
+ }
+
+ if err := createNamespace(); err != nil {
+ t.Fatalf("cannot create the namespace: %v", err)
+ }
+
+ ds, err := images.PrePull(testclient.Client, images.Test(), prePullNamespace.Name, "cnf-tests")
+ if err != nil {
+ data, _ := json.Marshal(ds) // we can safely skip errors
+ testlog.Infof("DaemonSet %s/%s image=%q status:\n%s", ds.Namespace, ds.Name, images.Test(), string(data))
+ t.Fatalf("cannot prepull image %q: %v", images.Test(), err)
+ }
+
+ rr := []Reporter{}
+ if ginkgo_reporters.Polarion.Run {
+ rr = append(rr, &ginkgo_reporters.Polarion)
+ }
+ rr = append(rr, junit.NewJUnitReporter("latency_testing"))
+ RunSpecsWithDefaultAndCustomReporters(t, "Performance Addon Operator latency tools testing", rr)
+}
+
+func createNamespace() error {
+ err := testclient.Client.Create(context.TODO(), prePullNamespace)
+ if errors.IsAlreadyExists(err) {
+ testlog.Warningf("%q namespace already exists, that is unexpected", prePullNamespace.Name)
+ return nil
+ }
+ testlog.Infof("created namespace %q err=%v", prePullNamespace.Name, err)
+ return err
+}
diff --git a/test/e2e/pao/functests/5_latency_testing/latency_testing.go b/test/e2e/pao/functests/5_latency_testing/latency_testing.go
new file mode 100644
index 000000000..076f89345
--- /dev/null
+++ b/test/e2e/pao/functests/5_latency_testing/latency_testing.go
@@ -0,0 +1,276 @@
+package __latency_testing
+
+import (
+ "bytes"
+ "fmt"
+ "math"
+ "os"
+ "os/exec"
+ "regexp"
+
+ . "github.com/onsi/ginkgo"
+ "github.com/onsi/ginkgo/extensions/table"
+ . "github.com/onsi/gomega"
+ testlog "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/log"
+)
+
+const (
+	// names of the latency measurement tools under test
+	oslat       = "oslat"
+	cyclictest  = "cyclictest"
+	hwlatdetect = "hwlatdetect"
+	// environment variable names consumed by the latency test binary
+	latencyTestDelay   = "LATENCY_TEST_DELAY"
+	latencyTestRun     = "LATENCY_TEST_RUN"
+	latencyTestRuntime = "LATENCY_TEST_RUNTIME"
+	maximumLatency     = "MAXIMUM_LATENCY"
+	oslatMaxLatency    = "OSLAT_MAXIMUM_LATENCY"
+	// NOTE(review): identifier is missing a 't' ("hwlatdetec"); kept as-is
+	// because other code in this file references it by this name.
+	hwlatdetecMaxLatency = "HWLATDETECT_MAXIMUM_LATENCY"
+	cyclictestMaxLatency = "CYCLICTEST_MAXIMUM_LATENCY"
+	latencyTestCpus      = "LATENCY_TEST_CPUS"
+	// invalid values error messages
+	unexpectedError = "Unexpected error"
+	// building blocks for the "incorrect value" error-message regexes
+	incorrectMsgPart1                  = "the environment variable "
+	incorrectMsgPart2                  = " has incorrect value"
+	invalidNumber                      = " has an invalid number"
+	maxInt                             = "2147483647"
+	minimumCpuForOslat                 = "2"
+	mustBePositiveInt                  = ".*it must be a positive integer with maximum value of " + maxInt
+	mustBeNonNegativeInt               = ".*it must be a non-negative integer with maximum value of " + maxInt
+	incorrectCpuNumber                 = incorrectMsgPart1 + latencyTestCpus + incorrectMsgPart2 + mustBePositiveInt
+	invalidCpuNumber                   = incorrectMsgPart1 + latencyTestCpus + invalidNumber + mustBePositiveInt
+	incorrectDelay                     = incorrectMsgPart1 + latencyTestDelay + incorrectMsgPart2 + mustBeNonNegativeInt
+	invalidNumberDelay                 = incorrectMsgPart1 + latencyTestDelay + invalidNumber + mustBeNonNegativeInt
+	incorrectMaxLatency                = incorrectMsgPart1 + maximumLatency + incorrectMsgPart2 + mustBeNonNegativeInt
+	invalidNumberMaxLatency            = incorrectMsgPart1 + maximumLatency + invalidNumber + mustBeNonNegativeInt
+	incorrectOslatMaxLatency           = incorrectMsgPart1 + "\"" + oslatMaxLatency + "\"" + incorrectMsgPart2 + mustBeNonNegativeInt
+	invalidNumberOslatMaxLatency       = incorrectMsgPart1 + "\"" + oslatMaxLatency + "\"" + invalidNumber + mustBeNonNegativeInt
+	incorrectCyclictestMaxLatency      = incorrectMsgPart1 + "\"" + cyclictestMaxLatency + "\"" + incorrectMsgPart2 + mustBeNonNegativeInt
+	invalidNumberCyclictestMaxLatency  = incorrectMsgPart1 + "\"" + cyclictestMaxLatency + "\"" + invalidNumber + mustBeNonNegativeInt
+	incorrectHwlatdetectMaxLatency     = incorrectMsgPart1 + "\"" + hwlatdetecMaxLatency + "\"" + incorrectMsgPart2 + mustBeNonNegativeInt
+	invalidNumberHwlatdetectMaxLatency = incorrectMsgPart1 + "\"" + hwlatdetecMaxLatency + "\"" + invalidNumber + mustBeNonNegativeInt
+	incorrectTestRun                   = incorrectMsgPart1 + latencyTestRun + incorrectMsgPart2
+	incorrectRuntime                   = incorrectMsgPart1 + latencyTestRuntime + incorrectMsgPart2 + mustBePositiveInt
+	invalidNumberRuntime               = incorrectMsgPart1 + latencyTestRuntime + invalidNumber + mustBePositiveInt
+	// success messages regex (1 spec passed, 2 tools skipped)
+	success = `SUCCESS.*1 Passed.*0 Failed.*2 Skipped`
+	// failure messages regex
+	latencyFail = `The current latency .* is bigger than the expected one`
+	fail        = `FAIL.*0 Passed.*1 Failed.*2 Skipped`
+	// hwlatdetect fail message regex
+	hwlatdetectFail = `Samples exceeding threshold: [^0]`
+	// skip messages regex
+	skipTestRun        = `Skip the latency test, the LATENCY_TEST_RUN set to false`
+	skipMaxLatency     = `no maximum latency value provided, skip buckets latency check`
+	skipOslatCpuNumber = `Skip the oslat test, LATENCY_TEST_CPUS is less than the minimum CPUs amount ` + minimumCpuForOslat
+	skip               = `SUCCESS.*0 Passed.*0 Failed.*3 Skipped`
+	skipInsufficientCpu = `Insufficient cpu to run the test`
+
+	// used values parameters
+	guaranteedLatency = "20000"
+	negativeTesting   = false
+	positiveTesting   = true
+)
+
+// latencyTest holds the environment-variable values for a single test case
+// plus the regexes expected to appear in the tool's output. Empty string
+// fields mean "leave the variable unset".
+type latencyTest struct {
+	testDelay             string   // LATENCY_TEST_DELAY value
+	testRun               string   // LATENCY_TEST_RUN value
+	testRuntime           string   // LATENCY_TEST_RUNTIME value
+	testMaxLatency        string   // MAXIMUM_LATENCY value
+	oslatMaxLatency       string   // OSLAT_MAXIMUM_LATENCY value
+	cyclictestMaxLatency  string   // CYCLICTEST_MAXIMUM_LATENCY value
+	hwlatdetectMaxLatency string   // HWLATDETECT_MAXIMUM_LATENCY value
+	testCpus              string   // LATENCY_TEST_CPUS value
+	outputMsgs            []string // regexes that must all match the output
+	toolToTest            string   // oslat, cyclictest or hwlatdetect; "" = all
+}
+
+// For each case the runner clears the environment, exports the case's
+// variables, executes the prebuilt latency test binary focused on the tool
+// under test and matches its output against the expected regexes.
+var _ = table.DescribeTable("Test latency measurement tools tests", func(testGroup []latencyTest, isPositiveTest bool) {
+	for _, test := range testGroup {
+		clearEnv()
+		testDescription := setEnvAndGetDescription(test)
+		By(testDescription)
+		if _, err := os.Stat("../../build/_output/bin/latency-e2e.test"); os.IsNotExist(err) {
+			Skip("The executable test file does not exist , skipping the test.")
+		}
+		output, err := exec.Command("../../build/_output/bin/latency-e2e.test", "-ginkgo.focus", test.toolToTest).Output()
+		if err != nil {
+			//we don't log Error level here because the test might be a negative check
+			testlog.Info(err.Error())
+		}
+
+		// A cluster without enough CPUs cannot run the case at all; skip it.
+		ok, matchErr := regexp.MatchString(skipInsufficientCpu, string(output))
+		if matchErr != nil {
+			testlog.Error(matchErr.Error())
+		}
+		if ok {
+			testlog.Info(skipInsufficientCpu)
+			continue
+		}
+
+		if isPositiveTest {
+			if err != nil {
+				testlog.Error(err.Error())
+			}
+			// fixed typo in the failure message ("positve" -> "positive")
+			Expect(string(output)).NotTo(MatchRegexp(unexpectedError), "Unexpected error was detected in a positive test")
+			//Check runtime argument in the pod's log only if the tool is expected to be executed
+			ok, matchErr := regexp.MatchString(success, string(output))
+			if matchErr != nil {
+				testlog.Error(matchErr.Error())
+			}
+			if ok {
+				var commandRegex string
+				if test.toolToTest == oslat {
+					commandRegex = fmt.Sprintf("Running the oslat command with arguments .*--duration %s", test.testRuntime)
+				}
+				if test.toolToTest == cyclictest {
+					commandRegex = fmt.Sprintf("running the cyclictest command with arguments .*-D %s", test.testRuntime)
+				}
+				if test.toolToTest == hwlatdetect {
+					commandRegex = fmt.Sprintf("running the hwlatdetect command with arguments .*--duration %s", test.testRuntime)
+				}
+				Expect(string(output)).To(MatchRegexp(commandRegex), "The output of the executed tool is not as expected")
+			}
+		}
+		for _, msg := range test.outputMsgs {
+			Expect(string(output)).To(MatchRegexp(msg), "The output of the executed tool is not as expected")
+		}
+	}
+},
+	table.Entry("[test_id:42851] Latency tools shouldn't run with default environment variables values", []latencyTest{{outputMsgs: []string{skip, skipTestRun}}}, positiveTesting),
+	table.Entry("[test_id:42850] Oslat - Verify that the tool is working properly with valid environment variables values", getValidValuesTests(oslat), positiveTesting),
+	table.Entry("[test_id:42853] Oslat - Verify that the latency tool test should print an expected error message when passing invalid environment variables values", getNegativeTests(oslat), negativeTesting),
+	table.Entry("[test_id:42115] Cyclictest - Verify that the tool is working properly with valid environment variables values", getValidValuesTests(cyclictest), positiveTesting),
+	table.Entry("[test_id:42852] Cyclictest - Verify that the latency tool test should print an expected error message when passing invalid environment variables values", getNegativeTests(cyclictest), negativeTesting),
+	table.Entry("[test_id:42849] Hwlatdetect - Verify that the tool is working properly with valid environment variables values", getValidValuesTests(hwlatdetect), positiveTesting),
+	table.Entry("[test_id:42856] Hwlatdetect - Verify that the latency tool test should print an expected error message when passing invalid environment variables values", getNegativeTests(hwlatdetect), negativeTesting),
+)
+
+// setEnvAndGetDescription exports every non-empty field of tst as its
+// corresponding environment variable and returns a human-readable
+// description of the configured test case.
+func setEnvAndGetDescription(tst latencyTest) string {
+	sb := bytes.NewBufferString("")
+	testName := tst.toolToTest
+	if tst.toolToTest == "" {
+		testName = "latency tools"
+	}
+	fmt.Fprintf(sb, "Run %s test : \n", testName)
+	nonDefaultValues := false
+	if tst.testDelay != "" {
+		setEnvWriteDescription(latencyTestDelay, tst.testDelay, sb, &nonDefaultValues)
+	}
+	if tst.testRun != "" {
+		setEnvWriteDescription(latencyTestRun, tst.testRun, sb, &nonDefaultValues)
+	}
+	if tst.testRuntime != "" {
+		setEnvWriteDescription(latencyTestRuntime, tst.testRuntime, sb, &nonDefaultValues)
+	}
+	if tst.testMaxLatency != "" {
+		setEnvWriteDescription(maximumLatency, tst.testMaxLatency, sb, &nonDefaultValues)
+	}
+	if tst.oslatMaxLatency != "" {
+		setEnvWriteDescription(oslatMaxLatency, tst.oslatMaxLatency, sb, &nonDefaultValues)
+	}
+	if tst.cyclictestMaxLatency != "" {
+		setEnvWriteDescription(cyclictestMaxLatency, tst.cyclictestMaxLatency, sb, &nonDefaultValues)
+	}
+	if tst.hwlatdetectMaxLatency != "" {
+		setEnvWriteDescription(hwlatdetecMaxLatency, tst.hwlatdetectMaxLatency, sb, &nonDefaultValues)
+	}
+	if tst.testCpus != "" {
+		setEnvWriteDescription(latencyTestCpus, tst.testCpus, sb, &nonDefaultValues)
+	}
+	if !nonDefaultValues {
+		fmt.Fprint(sb, "With default values of the environment variables")
+	}
+
+	return sb.String()
+}
+
+// setEnvWriteDescription sets envVar=val, appends the pair to the
+// description buffer and flags that a non-default value is in use.
+func setEnvWriteDescription(envVar string, val string, sb *bytes.Buffer, flag *bool) {
+	os.Setenv(envVar, val)
+	fmt.Fprintf(sb, "%s = %s \n", envVar, val)
+	*flag = true
+}
+
+// clearEnv unsets every latency-test environment variable so each case
+// starts from the defaults.
+func clearEnv() {
+	os.Unsetenv(latencyTestDelay)
+	os.Unsetenv(latencyTestRun)
+	os.Unsetenv(latencyTestRuntime)
+	os.Unsetenv(maximumLatency)
+	os.Unsetenv(oslatMaxLatency)
+	os.Unsetenv(cyclictestMaxLatency)
+	os.Unsetenv(hwlatdetecMaxLatency)
+	os.Unsetenv(latencyTestCpus)
+}
+
+// getValidValuesTests returns the positive test cases (valid environment
+// variable combinations) for the given tool.
+func getValidValuesTests(toolToTest string) []latencyTest {
+	var testSet []latencyTest
+
+	//testRuntime: let runtime be 10 seconds for most of the tests and not less, that is to let the tools
+	//have their time to measure latency properly hence stabilizing the tests
+	//testCpus: for tests that expect a success output message, note that an even CPU number is needed, otherwise the test would fail with SMTAlignmentError
+	testSet = append(testSet, latencyTest{testDelay: "0", testRun: "true", testRuntime: "10", testMaxLatency: guaranteedLatency, testCpus: "2", outputMsgs: []string{success}, toolToTest: toolToTest})
+	testSet = append(testSet, latencyTest{testDelay: "0", testRun: "true", testRuntime: "10", testMaxLatency: guaranteedLatency, testCpus: "6", outputMsgs: []string{success}, toolToTest: toolToTest})
+	testSet = append(testSet, latencyTest{testDelay: "1", testRun: "true", testRuntime: "10", testMaxLatency: guaranteedLatency, outputMsgs: []string{success}, toolToTest: toolToTest})
+	testSet = append(testSet, latencyTest{testDelay: "60", testRun: "true", testRuntime: "2", testMaxLatency: guaranteedLatency, outputMsgs: []string{success}, toolToTest: toolToTest})
+
+	// hwlatdetect runs even without a maximum latency; the others skip
+	if toolToTest != hwlatdetect {
+		testSet = append(testSet, latencyTest{testRun: "true", testRuntime: "1", outputMsgs: []string{skip, skipMaxLatency}, toolToTest: toolToTest})
+	}
+	// tool-specific maximum-latency overrides take precedence over MAXIMUM_LATENCY
+	if toolToTest == oslat {
+		testSet = append(testSet, latencyTest{testRun: "true", testRuntime: "10", testMaxLatency: "1", oslatMaxLatency: guaranteedLatency, outputMsgs: []string{success}, toolToTest: toolToTest})
+		testSet = append(testSet, latencyTest{testRun: "true", testRuntime: "10", oslatMaxLatency: guaranteedLatency, outputMsgs: []string{success}, toolToTest: toolToTest})
+		testSet = append(testSet, latencyTest{testRun: "true", testRuntime: "10", testMaxLatency: guaranteedLatency, testCpus: "1", outputMsgs: []string{skip, skipOslatCpuNumber}, toolToTest: toolToTest})
+	}
+	if toolToTest == cyclictest {
+		testSet = append(testSet, latencyTest{testRun: "true", testRuntime: "10", testMaxLatency: "1", cyclictestMaxLatency: guaranteedLatency, outputMsgs: []string{success}, toolToTest: toolToTest})
+		testSet = append(testSet, latencyTest{testRun: "true", testRuntime: "10", cyclictestMaxLatency: guaranteedLatency, outputMsgs: []string{success}, toolToTest: toolToTest})
+
+	}
+	if toolToTest == hwlatdetect {
+		testSet = append(testSet, latencyTest{testRun: "true", testRuntime: "10", testMaxLatency: "1", hwlatdetectMaxLatency: guaranteedLatency, outputMsgs: []string{success}, toolToTest: toolToTest})
+		testSet = append(testSet, latencyTest{testRun: "true", testRuntime: "10", hwlatdetectMaxLatency: guaranteedLatency, outputMsgs: []string{success}, toolToTest: toolToTest})
+		testSet = append(testSet, latencyTest{testRun: "true", testRuntime: "10", outputMsgs: []string{success}, toolToTest: toolToTest})
+	}
+	return testSet
+}
+
+// getNegativeTests returns the negative test cases for the given tool:
+// values that should make the latency test fail with a specific message
+// (exceeded latency, non-boolean run flag, out-of-range or non-numeric
+// runtime/delay/latency/CPU values).
+func getNegativeTests(toolToTest string) []latencyTest {
+	var testSet []latencyTest
+	latencyFailureMsg := latencyFail
+	// hwlatdetect reports exceeded latency differently from oslat/cyclictest
+	if toolToTest == hwlatdetect {
+		latencyFailureMsg = hwlatdetectFail
+	}
+	//TODO: add test to check odd CPU request.
+	testSet = append(testSet, latencyTest{testDelay: "0", testRun: "true", testRuntime: "5", testMaxLatency: "1", outputMsgs: []string{latencyFailureMsg, fail}, toolToTest: toolToTest})
+	testSet = append(testSet, latencyTest{testRun: "yes", testRuntime: "5", testMaxLatency: "1", outputMsgs: []string{incorrectTestRun, fail}, toolToTest: toolToTest})
+	testSet = append(testSet, latencyTest{testRun: "true", testRuntime: fmt.Sprint(math.MaxInt32 + 1), outputMsgs: []string{invalidNumberRuntime, fail}, toolToTest: toolToTest})
+	testSet = append(testSet, latencyTest{testRun: "true", testRuntime: "-1", testMaxLatency: "1", outputMsgs: []string{invalidNumberRuntime, fail}, toolToTest: toolToTest})
+	testSet = append(testSet, latencyTest{testRun: "true", testRuntime: "5", testMaxLatency: "-2", outputMsgs: []string{invalidNumberMaxLatency, fail}, toolToTest: toolToTest})
+	testSet = append(testSet, latencyTest{testRun: "true", testRuntime: "1H", outputMsgs: []string{incorrectRuntime, fail}, toolToTest: toolToTest})
+	testSet = append(testSet, latencyTest{testRun: "true", testRuntime: "2", testMaxLatency: "&", outputMsgs: []string{incorrectMaxLatency, fail}, toolToTest: toolToTest})
+	testSet = append(testSet, latencyTest{testRun: "true", testRuntime: "2", testMaxLatency: fmt.Sprint(math.MaxInt32 + 1), outputMsgs: []string{invalidNumberMaxLatency, fail}, toolToTest: toolToTest})
+	testSet = append(testSet, latencyTest{testDelay: "J", testRun: "true", outputMsgs: []string{incorrectDelay, fail}, toolToTest: toolToTest})
+	testSet = append(testSet, latencyTest{testDelay: fmt.Sprint(math.MaxInt32 + 1), testRun: "true", outputMsgs: []string{invalidNumberDelay, fail}, toolToTest: toolToTest})
+	testSet = append(testSet, latencyTest{testDelay: "-5", testRun: "true", outputMsgs: []string{invalidNumberDelay, fail}, toolToTest: toolToTest})
+	testSet = append(testSet, latencyTest{testRun: "true", testRuntime: "2", testMaxLatency: "1", testCpus: "p", outputMsgs: []string{incorrectCpuNumber, fail}, toolToTest: toolToTest})
+	testSet = append(testSet, latencyTest{testRun: "true", testRuntime: "2", testMaxLatency: "1", testCpus: fmt.Sprint(math.MaxInt32 + 1), outputMsgs: []string{invalidCpuNumber, fail}, toolToTest: toolToTest})
+	testSet = append(testSet, latencyTest{testRun: "true", testRuntime: "2", testCpus: "-1", outputMsgs: []string{invalidCpuNumber, fail}, toolToTest: toolToTest})
+	if toolToTest == oslat {
+		testSet = append(testSet, latencyTest{testRun: "true", testRuntime: "2", oslatMaxLatency: "&", outputMsgs: []string{incorrectOslatMaxLatency, fail}, toolToTest: toolToTest})
+		testSet = append(testSet, latencyTest{testRun: "true", testRuntime: "2", oslatMaxLatency: fmt.Sprint(math.MaxInt32 + 1), outputMsgs: []string{invalidNumberOslatMaxLatency, fail}, toolToTest: toolToTest})
+		testSet = append(testSet, latencyTest{testRun: "true", testRuntime: "2", oslatMaxLatency: "-3", outputMsgs: []string{invalidNumberOslatMaxLatency, fail}, toolToTest: toolToTest})
+	}
+	if toolToTest == cyclictest {
+		testSet = append(testSet, latencyTest{testRun: "true", testRuntime: "2", cyclictestMaxLatency: "&", outputMsgs: []string{incorrectCyclictestMaxLatency, fail}, toolToTest: toolToTest})
+		testSet = append(testSet, latencyTest{testRun: "true", testRuntime: "2", cyclictestMaxLatency: fmt.Sprint(math.MaxInt32 + 1), outputMsgs: []string{invalidNumberCyclictestMaxLatency, fail}, toolToTest: toolToTest})
+		testSet = append(testSet, latencyTest{testRun: "true", testRuntime: "2", cyclictestMaxLatency: "-3", outputMsgs: []string{invalidNumberCyclictestMaxLatency, fail}, toolToTest: toolToTest})
+	}
+	if toolToTest == hwlatdetect {
+		testSet = append(testSet, latencyTest{testRun: "true", testRuntime: "2", hwlatdetectMaxLatency: "&", outputMsgs: []string{incorrectHwlatdetectMaxLatency, fail}, toolToTest: toolToTest})
+		testSet = append(testSet, latencyTest{testRun: "true", testRuntime: "2", hwlatdetectMaxLatency: fmt.Sprint(math.MaxInt32 + 1), outputMsgs: []string{invalidNumberHwlatdetectMaxLatency, fail}, toolToTest: toolToTest})
+		testSet = append(testSet, latencyTest{testRun: "true", testRuntime: "2", hwlatdetectMaxLatency: "-3", outputMsgs: []string{invalidNumberHwlatdetectMaxLatency, fail}, toolToTest: toolToTest})
+	}
+	return testSet
+}
diff --git a/test/e2e/pao/functests/README.txt b/test/e2e/pao/functests/README.txt
new file mode 100644
index 000000000..ec620be8a
--- /dev/null
+++ b/test/e2e/pao/functests/README.txt
@@ -0,0 +1,5 @@
+HEADS UP!
+
+We have 2 test suites here: one runs the "normal" performance tests, the other one runs the performance profile update tests.
+The latter should run AFTER the former, and tests are executed in order of filenames.
+So be careful with renaming existing or adding new suites.
\ No newline at end of file
diff --git a/test/e2e/pao/functests/test.go b/test/e2e/pao/functests/test.go
new file mode 100644
index 000000000..56e540407
--- /dev/null
+++ b/test/e2e/pao/functests/test.go
@@ -0,0 +1 @@
+package test
diff --git a/test/e2e/pao/functests/utils/clean/clean.go b/test/e2e/pao/functests/utils/clean/clean.go
new file mode 100644
index 000000000..c4270ab9e
--- /dev/null
+++ b/test/e2e/pao/functests/utils/clean/clean.go
@@ -0,0 +1,70 @@
+package clean
+
+import (
+ "context"
+ "fmt"
+ "os"
+ "time"
+
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+
+ performancev2 "github.com/openshift/cluster-node-tuning-operator/pkg/apis/pao/v2"
+ "github.com/openshift/cluster-node-tuning-operator/pkg/pao/controller/performanceprofile/components"
+ "github.com/openshift/cluster-node-tuning-operator/pkg/pao/controller/performanceprofile/components/profile"
+ "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils"
+ testclient "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/client"
+ testlog "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/log"
+ "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/mcps"
+ "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/profiles"
+ mcv1 "github.com/openshift/machine-config-operator/pkg/apis/machineconfiguration.openshift.io/v1"
+ corev1 "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/api/errors"
+ "k8s.io/apimachinery/pkg/types"
+)
+
+var cleanPerformance bool
+
+// init reads CLEAN_PERFORMANCE_PROFILE; cleaning is enabled unless the
+// variable is explicitly set to "false".
+func init() {
+	clean, found := os.LookupEnv("CLEAN_PERFORMANCE_PROFILE")
+	if !found || clean != "false" {
+		cleanPerformance = true
+	}
+}
+
+// All deletes any leftovers created when running the performance tests.
+func All() {
+ if !cleanPerformance {
+ testlog.Info("Performance cleaning disabled, skipping")
+ return
+ }
+
+ perfProfile := performancev2.PerformanceProfile{}
+ err := testclient.Client.Get(context.TODO(), types.NamespacedName{Name: utils.PerformanceProfileName}, &perfProfile)
+ if errors.IsNotFound(err) {
+ return
+ }
+ Expect(err).ToNot(HaveOccurred(), "Failed to find perf profile")
+ mcpLabel := profile.GetMachineConfigLabel(&perfProfile)
+ key, value := components.GetFirstKeyAndValue(mcpLabel)
+ mcpsByLabel, err := mcps.GetByLabel(key, value)
+ Expect(err).ToNot(HaveOccurred(), "Failed getting MCP")
+ Expect(len(mcpsByLabel)).To(Equal(1), fmt.Sprintf("Unexpected number of MCPs found: %v", len(mcpsByLabel)))
+
+ performanceMCP := &mcpsByLabel[0]
+
+ err = testclient.Client.Delete(context.TODO(), &perfProfile)
+ Expect(err).ToNot(HaveOccurred(), "Failed to delete perf profile")
+
+ By("Waiting for MCP starting to update")
+ mcps.WaitForCondition(performanceMCP.Name, mcv1.MachineConfigPoolUpdating, corev1.ConditionTrue)
+
+ By("Waiting for MCP being updated")
+ mcps.WaitForCondition(performanceMCP.Name, mcv1.MachineConfigPoolUpdated, corev1.ConditionTrue)
+ profileKey := types.NamespacedName{
+ Name: perfProfile.Name,
+ Namespace: perfProfile.Namespace,
+ }
+ err = profiles.WaitForDeletion(profileKey, 60*time.Second)
+ Expect(err).ToNot(HaveOccurred(), "Failed to wait for perf profile deletion")
+}
diff --git a/test/e2e/pao/functests/utils/client/clients.go b/test/e2e/pao/functests/utils/client/clients.go
new file mode 100644
index 000000000..87343fbbb
--- /dev/null
+++ b/test/e2e/pao/functests/utils/client/clients.go
@@ -0,0 +1,122 @@
+package client
+
+import (
+ "context"
+ "time"
+
+ . "github.com/onsi/gomega"
+
+ "k8s.io/client-go/kubernetes"
+ "k8s.io/client-go/kubernetes/scheme"
+ "k8s.io/klog"
+
+ "sigs.k8s.io/controller-runtime/pkg/client"
+ "sigs.k8s.io/controller-runtime/pkg/client/config"
+
+ configv1 "github.com/openshift/api/config/v1"
+ tunedv1 "github.com/openshift/cluster-node-tuning-operator/pkg/apis/tuned/v1"
+ mcov1 "github.com/openshift/machine-config-operator/pkg/apis/machineconfiguration.openshift.io/v1"
+ operatorsv1alpha1 "github.com/operator-framework/operator-lifecycle-manager/pkg/api/apis/operators/v1alpha1"
+ apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
+
+ performancev1 "github.com/openshift/cluster-node-tuning-operator/pkg/apis/pao/v1"
+ performancev1alpha1 "github.com/openshift/cluster-node-tuning-operator/pkg/apis/pao/v1alpha1"
+ performancev2 "github.com/openshift/cluster-node-tuning-operator/pkg/apis/pao/v2"
+ testlog "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/log"
+)
+
+var (
+ // Client defines the API client to run CRUD operations, that will be used for testing
+ Client client.Client
+ // K8sClient defines k8s client to run subresource operations, for example you should use it to get pod logs
+ K8sClient *kubernetes.Clientset
+ // ClientsEnabled tells if the client from the package can be used
+ ClientsEnabled bool
+)
+
+// init registers every API scheme the e2e tests rely on (performance
+// v1alpha1/v1/v2, OpenShift config, MCO, Tuned, apiextensions, OLM) and
+// then builds the shared clients. If either client fails to initialize,
+// ClientsEnabled stays false so suites can skip instead of panicking.
+func init() {
+	// Setup Scheme for all resources
+	if err := performancev2.AddToScheme(scheme.Scheme); err != nil {
+		klog.Exit(err.Error())
+	}
+
+	if err := performancev1.AddToScheme(scheme.Scheme); err != nil {
+		klog.Exit(err.Error())
+	}
+
+	if err := performancev1alpha1.AddToScheme(scheme.Scheme); err != nil {
+		klog.Exit(err.Error())
+	}
+
+	if err := configv1.AddToScheme(scheme.Scheme); err != nil {
+		klog.Exit(err.Error())
+	}
+
+	if err := mcov1.AddToScheme(scheme.Scheme); err != nil {
+		klog.Exit(err.Error())
+	}
+
+	if err := tunedv1.AddToScheme(scheme.Scheme); err != nil {
+		klog.Exit(err.Error())
+	}
+
+	if err := apiextensionsv1beta1.AddToScheme(scheme.Scheme); err != nil {
+		klog.Exit(err.Error())
+	}
+
+	if err := operatorsv1alpha1.AddToScheme(scheme.Scheme); err != nil {
+		klog.Exit(err.Error())
+	}
+
+	var err error
+	Client, err = New()
+	if err != nil {
+		testlog.Info("Failed to initialize client, check the KUBECONFIG env variable", err.Error())
+		ClientsEnabled = false
+		return
+	}
+	K8sClient, err = NewK8s()
+	if err != nil {
+		testlog.Info("Failed to initialize k8s client, check the KUBECONFIG env variable", err.Error())
+		ClientsEnabled = false
+		return
+	}
+	ClientsEnabled = true
+}
+
+// New returns a controller-runtime client.
+func New() (client.Client, error) {
+ cfg, err := config.GetConfig()
+ if err != nil {
+ return nil, err
+ }
+
+ c, err := client.New(cfg, client.Options{})
+ return c, err
+}
+
+// NewK8s returns a kubernetes clientset
+func NewK8s() (*kubernetes.Clientset, error) {
+ cfg, err := config.GetConfig()
+ if err != nil {
+ return nil, err
+ }
+
+ clientset, err := kubernetes.NewForConfig(cfg)
+ if err != nil {
+ klog.Exit(err.Error())
+ }
+ return clientset, nil
+}
+
+// GetWithRetry polls Client.Get for key/obj every 10 seconds for up to one
+// minute, logging each failed attempt. When retries are exhausted it fails
+// the surrounding test via gomega (offset 1 so the caller's line is
+// reported); it returns the last error observed.
+func GetWithRetry(ctx context.Context, key client.ObjectKey, obj client.Object) error {
+	var err error
+	EventuallyWithOffset(1, func() error {
+		err = Client.Get(ctx, key, obj)
+		if err != nil {
+			testlog.Infof("Getting %s failed, retrying: %v", key.Name, err)
+		}
+		return err
+	}, 1*time.Minute, 10*time.Second).ShouldNot(HaveOccurred(), "Max numbers of retries getting %v reached", key)
+	return err
+}
diff --git a/test/e2e/pao/functests/utils/cluster/cluster.go b/test/e2e/pao/functests/utils/cluster/cluster.go
new file mode 100644
index 000000000..9e0b091f0
--- /dev/null
+++ b/test/e2e/pao/functests/utils/cluster/cluster.go
@@ -0,0 +1,31 @@
+package cluster
+
+import (
+ "context"
+ "time"
+
+ corev1 "k8s.io/api/core/v1"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+
+ testclient "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/client"
+)
+
+// IsSingleNode validates if the environment is single node cluster
+func IsSingleNode() (bool, error) {
+ nodes := &corev1.NodeList{}
+ if err := testclient.Client.List(context.TODO(), nodes, &client.ListOptions{}); err != nil {
+ return false, err
+ }
+ return len(nodes.Items) == 1, nil
+}
+
+// ComputeTestTimeout returns the desired timeout for a test based on a given base timeout.
+// If the tested cluster is Single-Node it needs more time to react (due to being highly loaded),
+// so the given timeout is doubled.
+func ComputeTestTimeout(baseTimeout time.Duration, isSno bool) time.Duration {
+	testTimeout := baseTimeout
+	if isSno {
+		testTimeout += baseTimeout // i.e. 2 * baseTimeout
+	}
+
+	return testTimeout
+}
diff --git a/test/e2e/pao/functests/utils/consts.go b/test/e2e/pao/functests/utils/consts.go
new file mode 100644
index 000000000..ffeef99a9
--- /dev/null
+++ b/test/e2e/pao/functests/utils/consts.go
@@ -0,0 +1,99 @@
+package utils
+
+import (
+ "fmt"
+ "os"
+ "strings"
+
+ "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/discovery"
+)
+
+// RoleWorkerCNF contains role name of cnf worker nodes
+var RoleWorkerCNF string
+
+// NodeSelectorLabels contains the node labels the performance profile should match
+var NodeSelectorLabels map[string]string
+
+// PerformanceProfileName contains the name of the PerformanceProfile created for tests
+// or an existing profile when discovery mode is enabled
+var PerformanceProfileName string
+
+// NodesSelector represents the label selector used to filter impacted nodes.
+var NodesSelector string
+
+// ProfileNotFound is true when discovery mode is enabled and no valid profile was found
+var ProfileNotFound bool
+
+// init resolves the package variables from the environment:
+// ROLE_WORKER_CNF (default "worker-cnf"), PERF_TEST_PROFILE (default
+// "performance") and NODES_SELECTOR. When discovery mode is enabled, the
+// profile name and node selector labels are taken from an existing
+// PerformanceProfile instead; ProfileNotFound is set if none is found.
+func init() {
+	RoleWorkerCNF = os.Getenv("ROLE_WORKER_CNF")
+	if RoleWorkerCNF == "" {
+		RoleWorkerCNF = "worker-cnf"
+	}
+
+	PerformanceProfileName = os.Getenv("PERF_TEST_PROFILE")
+	if PerformanceProfileName == "" {
+		PerformanceProfileName = "performance"
+	}
+
+	NodesSelector = os.Getenv("NODES_SELECTOR")
+
+	// default selector: node-role.kubernetes.io/<RoleWorkerCNF>=""
+	NodeSelectorLabels = map[string]string{
+		fmt.Sprintf("%s/%s", LabelRole, RoleWorkerCNF): "",
+	}
+
+	if discovery.Enabled() {
+		profile, err := discovery.GetDiscoveryPerformanceProfile(NodesSelector)
+		if err == discovery.ErrProfileNotFound {
+			ProfileNotFound = true
+			return
+		}
+
+		if err != nil {
+			fmt.Println("Failed to find profile in discovery mode", err)
+			ProfileNotFound = true
+			return
+		}
+
+		PerformanceProfileName = profile.Name
+
+		NodeSelectorLabels = profile.Spec.NodeSelector
+		// merge NODES_SELECTOR ("key=value" or bare "key") into the labels
+		if NodesSelector != "" {
+			keyValue := strings.Split(NodesSelector, "=")
+			if len(keyValue) == 1 {
+				keyValue = append(keyValue, "")
+			}
+			NodeSelectorLabels[keyValue[0]] = keyValue[1]
+		}
+	}
+}
+
+const (
+	// RoleWorker contains the worker role
+	RoleWorker = "worker"
+	// RoleMaster contains the master role
+	RoleMaster = "master"
+)
+
+const (
+	// LabelRole contains the key for the role label
+	LabelRole = "node-role.kubernetes.io"
+	// LabelHostname contains the key for the hostname label
+	LabelHostname = "kubernetes.io/hostname"
+)
+
+const (
+	// NamespaceMachineConfigOperator contains the namespace of the machine-config-operator
+	NamespaceMachineConfigOperator = "openshift-machine-config-operator"
+	// NamespaceTesting contains the name of the testing namespace
+	NamespaceTesting = "performance-addon-operators-testing"
+)
+
+const (
+	// FilePathKubeletConfig contains the kubelet.conf file path
+	FilePathKubeletConfig = "/etc/kubernetes/kubelet.conf"
+)
+
+const (
+	// ContainerMachineConfigDaemon contains the name of the machine-config-daemon container
+	ContainerMachineConfigDaemon = "machine-config-daemon"
+)
diff --git a/test/e2e/pao/functests/utils/daemonset/daemonset.go b/test/e2e/pao/functests/utils/daemonset/daemonset.go
new file mode 100644
index 000000000..f0e83c803
--- /dev/null
+++ b/test/e2e/pao/functests/utils/daemonset/daemonset.go
@@ -0,0 +1,48 @@
+package daemonset
+
+import (
+ "context"
+ "time"
+
+ appsv1 "k8s.io/api/apps/v1"
+ k8serrors "k8s.io/apimachinery/pkg/api/errors"
+ "k8s.io/apimachinery/pkg/util/wait"
+
+ "sigs.k8s.io/controller-runtime/pkg/client"
+
+ testlog "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/log"
+)
+
+// WaitToBeRunning waits up to 5 minutes for the named daemonset to be
+// running (see WaitToBeRunningWithTimeout).
+func WaitToBeRunning(cli client.Client, namespace, name string) error {
+	return WaitToBeRunningWithTimeout(cli, namespace, name, 5*time.Minute)
+}
+
+// WaitToBeRunningWithTimeout polls IsRunning every 10 seconds until the
+// daemonset is running or the timeout expires.
+func WaitToBeRunningWithTimeout(cli client.Client, namespace, name string, timeout time.Duration) error {
+	testlog.Infof("wait for the daemonset %q %q to be running", namespace, name)
+	return wait.PollImmediate(10*time.Second, timeout, func() (bool, error) {
+		return IsRunning(cli, namespace, name)
+	})
+}
+
+// GetByName fetches the daemonset namespace/name; the (possibly zero-value)
+// object is returned together with the Get error.
+func GetByName(cli client.Client, namespace, name string) (*appsv1.DaemonSet, error) {
+	key := client.ObjectKey{
+		Namespace: namespace,
+		Name:      name,
+	}
+	var ds appsv1.DaemonSet
+	err := cli.Get(context.TODO(), key, &ds)
+	return &ds, err
+}
+
+// IsRunning reports whether the daemonset exists and all desired pods are
+// ready (DesiredNumberScheduled > 0 and equal to NumberReady). A not-found
+// daemonset yields (false, nil) so pollers keep retrying.
+func IsRunning(cli client.Client, namespace, name string) (bool, error) {
+	ds, err := GetByName(cli, namespace, name)
+	if err != nil {
+		if k8serrors.IsNotFound(err) {
+			testlog.Warningf("daemonset %q %q not found - retrying", namespace, name)
+			return false, nil
+		}
+		return false, err
+	}
+	testlog.Infof("daemonset %q %q desired %d scheduled %d ready %d", namespace, name, ds.Status.DesiredNumberScheduled, ds.Status.CurrentNumberScheduled, ds.Status.NumberReady)
+	return (ds.Status.DesiredNumberScheduled > 0 && ds.Status.DesiredNumberScheduled == ds.Status.NumberReady), nil
+}
diff --git a/test/e2e/pao/functests/utils/discovery/discovery.go b/test/e2e/pao/functests/utils/discovery/discovery.go
new file mode 100644
index 000000000..bbbd9f8e7
--- /dev/null
+++ b/test/e2e/pao/functests/utils/discovery/discovery.go
@@ -0,0 +1,87 @@
+package discovery
+
+import (
+ "context"
+ "fmt"
+ "os"
+ "strconv"
+
+ performancev2 "github.com/openshift/cluster-node-tuning-operator/pkg/apis/pao/v2"
+ testclient "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/client"
+ "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/profiles"
+ corev1 "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/labels"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+)
+
+var ErrProfileNotFound = fmt.Errorf("profile not found in discovery mode")
+
+// ConditionIterator is the function that accepts element of a PerformanceProfile and returns boolean
+type ConditionIterator func(performancev2.PerformanceProfile) bool
+
+// Enabled indicates whether test discovery mode is enabled.
+func Enabled() bool {
+ discoveryMode, _ := strconv.ParseBool(os.Getenv("DISCOVERY_MODE"))
+ return discoveryMode
+}
+
+// GetDiscoveryPerformanceProfile returns an existing profile matching nodesSelector, if nodesSelector is set.
+// Otherwise, it returns an existing profile with the most nodes using it.
+// In case no profile exists, ErrProfileNotFound is returned.
+func GetDiscoveryPerformanceProfile(nodesSelector string) (*performancev2.PerformanceProfile, error) {
+	performanceProfiles, err := profiles.All()
+	if err != nil {
+		return nil, err
+	}
+	return getDiscoveryPerformanceProfile(performanceProfiles.Items, nodesSelector)
+}
+
+// GetFilteredDiscoveryPerformanceProfile returns an existing profile in the cluster with the most nodes using it,
+// chosen from the profiles list filtered by the iterator function passed as an argument.
+// In case no profile exists, ErrProfileNotFound is returned.
+func GetFilteredDiscoveryPerformanceProfile(iterator ConditionIterator) (*performancev2.PerformanceProfile, error) {
+	performanceProfiles, err := profiles.All()
+	if err != nil {
+		return nil, err
+	}
+	return getDiscoveryPerformanceProfile(filter(performanceProfiles.Items, iterator), "")
+}
+
+func getDiscoveryPerformanceProfile(performanceProfiles []performancev2.PerformanceProfile, nodesSelector string) (*performancev2.PerformanceProfile, error) {
+	var currentProfile *performancev2.PerformanceProfile = nil
+	maxNodesNumber := 0
+	for i := range performanceProfiles {
+		selector := labels.SelectorFromSet(performanceProfiles[i].Spec.NodeSelector)
+
+		profileNodes := &corev1.NodeList{}
+		if err := testclient.Client.List(context.TODO(), profileNodes, &client.ListOptions{LabelSelector: selector}); err != nil {
+			return nil, err
+		}
+
+		if nodesSelector != "" {
+			if selector.String() == nodesSelector {
+				return &performanceProfiles[i], nil
+			}
+		}
+
+		if len(profileNodes.Items) > maxNodesNumber {
+			currentProfile = &performanceProfiles[i] // address of the slice element, not the loop variable, so it stays valid after the loop advances
+			maxNodesNumber = len(profileNodes.Items)
+		}
+	}
+
+	if currentProfile == nil {
+		return nil, ErrProfileNotFound
+	}
+	return currentProfile, nil
+}
+
+func filter(performanceProfiles []performancev2.PerformanceProfile, iterator ConditionIterator) []performancev2.PerformanceProfile {
+ var result = make([]performancev2.PerformanceProfile, 0)
+ for _, profile := range performanceProfiles {
+ if iterator(profile) {
+ result = append(result, profile)
+ }
+ }
+ return result
+}
diff --git a/test/e2e/pao/functests/utils/events/events.go b/test/e2e/pao/functests/utils/events/events.go
new file mode 100644
index 000000000..f3aee6a46
--- /dev/null
+++ b/test/e2e/pao/functests/utils/events/events.go
@@ -0,0 +1,19 @@
+package events
+
+import (
+ "context"
+
+ corev1 "k8s.io/api/core/v1"
+
+ "sigs.k8s.io/controller-runtime/pkg/client"
+)
+
+func GetEventsForObject(cli client.Client, namespace, name, uid string) (corev1.EventList, error) {
+ eventList := corev1.EventList{}
+ match := client.MatchingFields{
+ "involvedObject.name": name,
+ "involvedObject.uid": uid,
+ }
+ err := cli.List(context.TODO(), &eventList, &client.ListOptions{Namespace: namespace}, match)
+ return eventList, err
+}
diff --git a/test/e2e/pao/functests/utils/images/images.go b/test/e2e/pao/functests/utils/images/images.go
new file mode 100644
index 000000000..eb3241d3e
--- /dev/null
+++ b/test/e2e/pao/functests/utils/images/images.go
@@ -0,0 +1,27 @@
+package images
+
+import (
+ "fmt"
+ "os"
+)
+
+var registry string
+var cnfTestsImage string
+
+func init() {
+ registry = os.Getenv("IMAGE_REGISTRY")
+ cnfTestsImage = os.Getenv("CNF_TESTS_IMAGE")
+
+ if cnfTestsImage == "" {
+ cnfTestsImage = "cnf-tests:4.9"
+ }
+
+ if registry == "" {
+ registry = "quay.io/openshift-kni/"
+ }
+}
+
+// Test returns the image to be used for tests
+func Test() string {
+ return fmt.Sprintf("%s%s", registry, cnfTestsImage)
+}
diff --git a/test/e2e/pao/functests/utils/images/prepull.go b/test/e2e/pao/functests/utils/images/prepull.go
new file mode 100644
index 000000000..fa9f2db4c
--- /dev/null
+++ b/test/e2e/pao/functests/utils/images/prepull.go
@@ -0,0 +1,94 @@
+package images
+
+import (
+ "context"
+ "encoding/json"
+ "os"
+ "strconv"
+ "time"
+
+ appsv1 "k8s.io/api/apps/v1"
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+ "sigs.k8s.io/controller-runtime/pkg/client"
+
+ testclient "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/client"
+ testds "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/daemonset"
+ testlog "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/log"
+)
+
+const (
+ PrePullPrefix = "prepull"
+ PrePullDefaultTimeoutMinutes = "5"
+)
+
+// GetPullTimeout returns the pull timeout
+func GetPullTimeout() (time.Duration, error) {
+ prePullTimeoutMins, ok := os.LookupEnv("PREPULL_IMAGE_TIMEOUT_MINUTES")
+ if !ok {
+ prePullTimeoutMins = PrePullDefaultTimeoutMinutes
+ }
+ timeout, err := strconv.Atoi(prePullTimeoutMins)
+ return time.Duration(timeout) * time.Minute, err
+}
+
+// PrePull makes sure the image is pre-pulled on the relevant nodes.
+func PrePull(cli client.Client, pullSpec, namespace, tag string) (*appsv1.DaemonSet, error) {
+ name := PrePullPrefix + tag
+ ds := appsv1.DaemonSet{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: name,
+ Namespace: namespace,
+ },
+ Spec: appsv1.DaemonSetSpec{
+ Selector: &metav1.LabelSelector{
+ MatchLabels: map[string]string{
+ "name": "prepull-daemonset-" + tag,
+ },
+ },
+ Template: corev1.PodTemplateSpec{
+ ObjectMeta: metav1.ObjectMeta{
+ Labels: map[string]string{
+ "name": "prepull-daemonset-" + tag,
+ },
+ },
+ Spec: corev1.PodSpec{
+ Containers: []corev1.Container{
+ {
+ Name: "prepullcontainer",
+ Image: pullSpec,
+ Command: []string{"/bin/sleep"},
+ Args: []string{"inf"},
+ ImagePullPolicy: corev1.PullAlways,
+ },
+ },
+ },
+ },
+ },
+ }
+
+ prePullTimeout, err := GetPullTimeout()
+ if err != nil {
+ return &ds, err
+ }
+ testlog.Infof("pull timeout: %v", prePullTimeout)
+
+ testlog.Infof("creating daemonset %s/%s to prepull %q", namespace, name, pullSpec)
+ ts := time.Now()
+ err = cli.Create(context.TODO(), &ds)
+ if err != nil {
+ return &ds, err
+ }
+ data, _ := json.Marshal(ds)
+ testlog.Infof("created daemonset %s/%s to prepull %q:\n%s", namespace, name, pullSpec, string(data))
+
+ err = testds.WaitToBeRunningWithTimeout(testclient.Client, ds.Namespace, ds.Name, prePullTimeout)
+ if err != nil {
+ // if this fails, no big deal, we are just trying to make the troubleshooting easier
+ updatedDs, _ := testds.GetByName(testclient.Client, ds.Namespace, ds.Name)
+ return updatedDs, err
+ }
+ testlog.Infof("prepulled %q in %v", pullSpec, time.Since(ts))
+ return nil, nil
+}
diff --git a/test/e2e/pao/functests/utils/junit/reporter.go b/test/e2e/pao/functests/utils/junit/reporter.go
new file mode 100644
index 000000000..85b62af18
--- /dev/null
+++ b/test/e2e/pao/functests/utils/junit/reporter.go
@@ -0,0 +1,18 @@
+package junit
+
+import (
+ "flag"
+ "fmt"
+ "github.com/onsi/ginkgo/reporters"
+)
+
+var junitDir *string
+
+func init() {
+ junitDir = flag.String("junitDir", ".", "the directory for the junit format report")
+}
+
+// NewJUnitReporter with the given name. testSuiteName must be a valid filename part
+func NewJUnitReporter(testSuiteName string) *reporters.JUnitReporter {
+ return reporters.NewJUnitReporter(fmt.Sprintf("%s/%s_%s.xml", *junitDir, "unit_report", testSuiteName))
+}
diff --git a/test/e2e/pao/functests/utils/log/log.go b/test/e2e/pao/functests/utils/log/log.go
new file mode 100644
index 000000000..2df2061db
--- /dev/null
+++ b/test/e2e/pao/functests/utils/log/log.go
@@ -0,0 +1,52 @@
+package log
+
+import (
+ "fmt"
+ "time"
+
+ "github.com/onsi/ginkgo"
+)
+
+func nowStamp() string {
+ return time.Now().Format(time.StampMilli)
+}
+
+func logf(level string, format string, args ...interface{}) {
+ fmt.Fprintf(ginkgo.GinkgoWriter, nowStamp()+": "+level+": "+format+"\n", args...)
+}
+
+func log(level string, args ...interface{}) {
+ fmt.Fprint(ginkgo.GinkgoWriter, nowStamp()+": "+level+": ")
+ fmt.Fprint(ginkgo.GinkgoWriter, args...)
+ fmt.Fprint(ginkgo.GinkgoWriter, "\n")
+}
+
+// Info logs the info
+func Info(args ...interface{}) {
+ log("[INFO]", args...)
+}
+
+// Infof logs the info with arguments
+func Infof(format string, args ...interface{}) {
+ logf("[INFO]", format, args...)
+}
+
+// Warning logs the warning
+func Warning(args ...interface{}) {
+ log("[WARNING]", args...)
+}
+
+// Warningf logs the warning with arguments
+func Warningf(format string, args ...interface{}) {
+ logf("[WARNING]", format, args...)
+}
+
+// Error logs the error
+func Error(args ...interface{}) {
+	log("[ERROR]", args...)
+}
+
+// Errorf logs the error with arguments
+func Errorf(format string, args ...interface{}) {
+	logf("[ERROR]", format, args...)
+}
diff --git a/test/e2e/pao/functests/utils/mcps/mcps.go b/test/e2e/pao/functests/utils/mcps/mcps.go
new file mode 100644
index 000000000..b42f1ab0e
--- /dev/null
+++ b/test/e2e/pao/functests/utils/mcps/mcps.go
@@ -0,0 +1,235 @@
+package mcps
+
+import (
+ "context"
+ "time"
+
+ . "github.com/onsi/gomega"
+ "github.com/pkg/errors"
+
+ corev1 "k8s.io/api/core/v1"
+ apierrors "k8s.io/apimachinery/pkg/api/errors"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/labels"
+ "k8s.io/apimachinery/pkg/selection"
+ "k8s.io/apimachinery/pkg/types"
+
+ "sigs.k8s.io/controller-runtime/pkg/client"
+
+ performancev2 "github.com/openshift/cluster-node-tuning-operator/pkg/apis/pao/v2"
+ "github.com/openshift/cluster-node-tuning-operator/pkg/pao/controller/performanceprofile/components"
+ "github.com/openshift/cluster-node-tuning-operator/pkg/pao/controller/performanceprofile/components/machineconfig"
+ "github.com/openshift/cluster-node-tuning-operator/pkg/pao/controller/performanceprofile/components/profile"
+ testclient "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/client"
+ "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/cluster"
+ testlog "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/log"
+ "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/nodes"
+ machineconfigv1 "github.com/openshift/machine-config-operator/pkg/apis/machineconfiguration.openshift.io/v1"
+)
+
+const (
+ mcpUpdateTimeoutPerNode = 30
+)
+
+// GetByLabel returns all MCPs with the specified label
+func GetByLabel(key, value string) ([]machineconfigv1.MachineConfigPool, error) {
+	selector := labels.NewSelector()
+	req, err := labels.NewRequirement(key, selection.Equals, []string{value})
+	if err != nil {
+		return nil, err
+	}
+	selector = selector.Add(*req)
+	mcps := &machineconfigv1.MachineConfigPoolList{}
+	if err := testclient.Client.List(context.TODO(), mcps, &client.ListOptions{LabelSelector: selector}); err != nil {
+		return nil, err
+	}
+	if len(mcps.Items) > 0 {
+		return mcps.Items, nil
+	}
+	// fallback to look for a mcp with the same nodeselector.
+	// key value may come from a node selector, so looking for a mcp
+	// that targets the same nodes is legit
+	if err := testclient.Client.List(context.TODO(), mcps); err != nil {
+		return nil, err
+	}
+	res := []machineconfigv1.MachineConfigPool{}
+	nodeRoleKey := components.NodeRoleLabelPrefix + value
+	for _, item := range mcps.Items {
+		if item.Spec.NodeSelector.MatchLabels[key] == value {
+			res = append(res, item)
+			continue // don't append the same MCP twice when both checks match
+		}
+		if _, ok := item.Spec.NodeSelector.MatchLabels[nodeRoleKey]; ok {
+			res = append(res, item)
+		}
+	}
+	return res, nil
+}
+
+// GetByName returns the MCP with the specified name
+func GetByName(name string) (*machineconfigv1.MachineConfigPool, error) {
+ mcp := &machineconfigv1.MachineConfigPool{}
+ key := types.NamespacedName{
+ Name: name,
+ Namespace: metav1.NamespaceNone,
+ }
+ err := testclient.GetWithRetry(context.TODO(), key, mcp)
+ return mcp, err
+}
+
+// GetByNameNoRetry returns the MCP with the specified name without retrying to poke
+// the api server
+func GetByNameNoRetry(name string) (*machineconfigv1.MachineConfigPool, error) {
+ mcp := &machineconfigv1.MachineConfigPool{}
+ key := types.NamespacedName{
+ Name: name,
+ Namespace: metav1.NamespaceNone,
+ }
+ err := testclient.Client.Get(context.TODO(), key, mcp)
+ return mcp, err
+}
+
+// GetByProfile returns the MCP by a given performance profile
+func GetByProfile(performanceProfile *performancev2.PerformanceProfile) (string, error) {
+ mcpLabel := profile.GetMachineConfigLabel(performanceProfile)
+ key, value := components.GetFirstKeyAndValue(mcpLabel)
+ mcpsByLabel, err := GetByLabel(key, value)
+ if err != nil {
+ return "", err
+ }
+ performanceMCP := &mcpsByLabel[0]
+ return performanceMCP.Name, nil
+}
+
+// New creates a new MCP with the given name and node selector
+func New(mcpName string, nodeSelector map[string]string) *machineconfigv1.MachineConfigPool {
+ return &machineconfigv1.MachineConfigPool{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: mcpName,
+ Namespace: metav1.NamespaceNone,
+ Labels: map[string]string{components.MachineConfigRoleLabelKey: mcpName},
+ },
+ Spec: machineconfigv1.MachineConfigPoolSpec{
+ MachineConfigSelector: &metav1.LabelSelector{
+ MatchExpressions: []metav1.LabelSelectorRequirement{
+ {
+ Key: components.MachineConfigRoleLabelKey,
+ Operator: "In",
+ Values: []string{"worker", mcpName},
+ },
+ },
+ },
+ NodeSelector: &metav1.LabelSelector{
+ MatchLabels: nodeSelector,
+ },
+ },
+ }
+}
+
+// GetConditionStatus return the condition status of the given MCP and condition type
+func GetConditionStatus(mcpName string, conditionType machineconfigv1.MachineConfigPoolConditionType) corev1.ConditionStatus {
+ mcp, err := GetByNameNoRetry(mcpName)
+ if err != nil {
+ // In case of any error we just retry, as in case of single node cluster
+ // the only node may be rebooting
+ return corev1.ConditionUnknown
+ }
+ for _, condition := range mcp.Status.Conditions {
+ if condition.Type == conditionType {
+ return condition.Status
+ }
+ }
+ return corev1.ConditionUnknown
+}
+
+// GetConditionReason return the reason of the given MCP
+func GetConditionReason(mcpName string, conditionType machineconfigv1.MachineConfigPoolConditionType) string {
+ mcp, err := GetByName(mcpName)
+ ExpectWithOffset(1, err).ToNot(HaveOccurred(), "Failed getting MCP %q by name", mcpName)
+ for _, condition := range mcp.Status.Conditions {
+ if condition.Type == conditionType {
+ return condition.Reason
+ }
+ }
+ return ""
+}
+
+// WaitForCondition waits for the MCP with given name having a condition of given type with given status
+func WaitForCondition(mcpName string, conditionType machineconfigv1.MachineConfigPoolConditionType, conditionStatus corev1.ConditionStatus) {
+
+ var cnfNodes []corev1.Node
+ runningOnSingleNode, err := cluster.IsSingleNode()
+ ExpectWithOffset(1, err).ToNot(HaveOccurred())
+ // checking in eventually as in case of single node cluster the only node may
+ // be rebooting
+ EventuallyWithOffset(1, func() error {
+ mcp, err := GetByName(mcpName)
+ if err != nil {
+ return errors.Wrap(err, "Failed getting MCP by name")
+ }
+
+ nodeLabels := mcp.Spec.NodeSelector.MatchLabels
+ key, _ := components.GetFirstKeyAndValue(nodeLabels)
+ req, err := labels.NewRequirement(key, selection.Exists, []string{})
+ if err != nil {
+ return errors.Wrap(err, "Failed creating node selector")
+ }
+
+ selector := labels.NewSelector()
+ selector = selector.Add(*req)
+ cnfNodes, err = nodes.GetBySelector(selector)
+ if err != nil {
+ return errors.Wrap(err, "Failed getting nodes by selector")
+ }
+
+ testlog.Infof("MCP %q is targeting %v node(s)", mcp.Name, len(cnfNodes))
+ return nil
+ }, cluster.ComputeTestTimeout(10*time.Minute, runningOnSingleNode), 5*time.Second).ShouldNot(HaveOccurred(), "Failed to find CNF nodes by MCP %q", mcpName)
+
+ // timeout should be based on the number of worker-cnf nodes
+ timeout := time.Duration(len(cnfNodes)*mcpUpdateTimeoutPerNode) * time.Minute
+ if len(cnfNodes) == 0 {
+ timeout = 2 * time.Minute
+ }
+
+ EventuallyWithOffset(1, func() corev1.ConditionStatus {
+ return GetConditionStatus(mcpName, conditionType)
+ }, cluster.ComputeTestTimeout(timeout, runningOnSingleNode), 30*time.Second).Should(Equal(conditionStatus), "Failed to find condition status by MCP %q", mcpName)
+}
+
+// WaitForProfilePickedUp waits for the MCP with given name containing the MC created for the PerformanceProfile with the given name
+func WaitForProfilePickedUp(mcpName string, profile *performancev2.PerformanceProfile) {
+ runningOnSingleNode, err := cluster.IsSingleNode()
+ ExpectWithOffset(1, err).ToNot(HaveOccurred())
+ testlog.Infof("Waiting for profile %s to be picked up by the %s machine config pool", profile.Name, mcpName)
+ defer testlog.Infof("Profile %s picked up by the %s machine config pool", profile.Name, mcpName)
+ EventuallyWithOffset(1, func() bool {
+ mcp, err := GetByName(mcpName)
+ // we ignore the error and just retry in case of single node cluster
+ if err != nil {
+ return false
+ }
+ for _, source := range mcp.Spec.Configuration.Source {
+ if source.Name == machineconfig.GetMachineConfigName(profile) {
+ return true
+ }
+ }
+ return false
+ }, cluster.ComputeTestTimeout(10*time.Minute, runningOnSingleNode), 30*time.Second).Should(BeTrue(), "PerformanceProfile's %q MC was not picked up by MCP %q in time", profile.Name, mcpName)
+}
+
+func Delete(name string) error {
+ mcp := &machineconfigv1.MachineConfigPool{}
+ if err := testclient.Client.Get(context.TODO(), types.NamespacedName{Name: name}, mcp); err != nil {
+ if apierrors.IsNotFound(err) {
+ return nil
+ }
+ return err
+ }
+
+ if err := testclient.Client.Delete(context.TODO(), mcp); err != nil {
+ return err
+ }
+
+ return nil
+}
diff --git a/test/e2e/pao/functests/utils/namespaces/namespaces.go b/test/e2e/pao/functests/utils/namespaces/namespaces.go
new file mode 100644
index 000000000..46995a85c
--- /dev/null
+++ b/test/e2e/pao/functests/utils/namespaces/namespaces.go
@@ -0,0 +1,49 @@
+package namespaces
+
+import (
+ "context"
+ "os"
+ "time"
+
+ corev1 "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/api/errors"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/types"
+ "k8s.io/apimachinery/pkg/util/wait"
+
+ testutils "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils"
+ testclient "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/client"
+)
+
+// PerformanceOperator contains the name of the performance operator namespace
+// default as recommended in
+// https://docs.openshift.com/container-platform/4.6/scalability_and_performance/cnf-performance-addon-operator-for-low-latency-nodes.html#install-operator-cli_cnf-master
+var PerformanceOperator string = "openshift-cluster-node-tuning-operator"
+
+func init() {
+ if operatorNS, ok := os.LookupEnv("PERFORMANCE_OPERATOR_NAMESPACE"); ok {
+ PerformanceOperator = operatorNS
+ }
+}
+
+// TestingNamespace is the namespace the tests will use for running test pods
+var TestingNamespace = &corev1.Namespace{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: testutils.NamespaceTesting,
+ },
+}
+
+// WaitForDeletion waits until the namespace will be removed from the cluster
+func WaitForDeletion(name string, timeout time.Duration) error {
+ key := types.NamespacedName{
+ Name: name,
+ Namespace: metav1.NamespaceNone,
+ }
+ return wait.PollImmediate(time.Second, timeout, func() (bool, error) {
+ ns := &corev1.Namespace{}
+ if err := testclient.Client.Get(context.TODO(), key, ns); errors.IsNotFound(err) {
+ return true, nil
+ }
+ return false, nil
+ })
+}
diff --git a/test/e2e/pao/functests/utils/nodes/nodes.go b/test/e2e/pao/functests/utils/nodes/nodes.go
new file mode 100644
index 000000000..8b140babc
--- /dev/null
+++ b/test/e2e/pao/functests/utils/nodes/nodes.go
@@ -0,0 +1,342 @@
+package nodes
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "path"
+ "strconv"
+ "strings"
+ "time"
+
+ . "github.com/onsi/gomega"
+
+ "github.com/ghodss/yaml"
+
+ corev1 "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/fields"
+ "k8s.io/apimachinery/pkg/labels"
+ "k8s.io/apimachinery/pkg/types"
+ kubeletconfigv1beta1 "k8s.io/kubelet/config/v1beta1"
+ "k8s.io/kubernetes/pkg/kubelet/cm/cpuset"
+
+ "sigs.k8s.io/controller-runtime/pkg/client"
+
+ "github.com/openshift/cluster-node-tuning-operator/pkg/pao/controller/performanceprofile/components"
+ testutils "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils"
+ testclient "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/client"
+ "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/cluster"
+ testlog "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/log"
+ testpods "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/pods"
+)
+
+const (
+ testTimeout = 480
+ testPollInterval = 2
+)
+
+const (
+ sysDevicesOnlineCPUs = "/sys/devices/system/cpu/online"
+)
+
+// NumaNodes defines cpus in each numa node
+type NumaNodes struct {
+ Cpus []NodeCPU `json:"cpus"`
+}
+
+// NodeCPU Structure
+type NodeCPU struct {
+ CPU string `json:"cpu"`
+ Node string `json:"node"`
+}
+
+// GetByRole returns all nodes with the specified role
+func GetByRole(role string) ([]corev1.Node, error) {
+ selector, err := labels.Parse(fmt.Sprintf("%s/%s=", testutils.LabelRole, role))
+ if err != nil {
+ return nil, err
+ }
+ return GetBySelector(selector)
+}
+
+// GetBySelector returns all nodes with the specified selector
+func GetBySelector(selector labels.Selector) ([]corev1.Node, error) {
+ nodes := &corev1.NodeList{}
+ if err := testclient.Client.List(context.TODO(), nodes, &client.ListOptions{LabelSelector: selector}); err != nil {
+ return nil, err
+ }
+ return nodes.Items, nil
+}
+
+// GetByLabels returns all nodes with the specified labels
+func GetByLabels(nodeLabels map[string]string) ([]corev1.Node, error) {
+ selector := labels.SelectorFromSet(nodeLabels)
+ return GetBySelector(selector)
+}
+
+// GetByName returns a node object by for a node name
+func GetByName(nodeName string) (*corev1.Node, error) {
+ node := &corev1.Node{}
+ key := types.NamespacedName{
+ Name: nodeName,
+ }
+ if err := testclient.Client.Get(context.TODO(), key, node); err != nil {
+ return nil, fmt.Errorf("failed to get node for the node %q", node.Name)
+ }
+ return node, nil
+}
+
+// GetNonPerformancesWorkers returns the list of worker nodes missing at least one of the given performance profile node selector labels
+func GetNonPerformancesWorkers(nodeSelectorLabels map[string]string) ([]corev1.Node, error) {
+	nonPerformanceWorkerNodes := []corev1.Node{}
+	workerNodes, err := GetByRole(testutils.RoleWorker)
+	for _, node := range workerNodes {
+		for label := range nodeSelectorLabels {
+			if _, ok := node.Labels[label]; !ok {
+				nonPerformanceWorkerNodes = append(nonPerformanceWorkerNodes, node)
+				break
+			}
+		}
+	}
+	return nonPerformanceWorkerNodes, err
+}
+
+// GetMachineConfigDaemonByNode returns the machine-config-daemon pod that runs on the specified node
+func GetMachineConfigDaemonByNode(node *corev1.Node) (*corev1.Pod, error) {
+ listOptions := &client.ListOptions{
+ Namespace: testutils.NamespaceMachineConfigOperator,
+ FieldSelector: fields.SelectorFromSet(fields.Set{"spec.nodeName": node.Name}),
+ LabelSelector: labels.SelectorFromSet(labels.Set{"k8s-app": "machine-config-daemon"}),
+ }
+
+ mcds := &corev1.PodList{}
+ if err := testclient.Client.List(context.TODO(), mcds, listOptions); err != nil {
+ return nil, err
+ }
+
+ if len(mcds.Items) < 1 {
+ return nil, fmt.Errorf("failed to get machine-config-daemon pod for the node %q", node.Name)
+ }
+ return &mcds.Items[0], nil
+}
+
+// ExecCommandOnMachineConfigDaemon returns the output of the command execution on the machine-config-daemon pod that runs on the specified node
+func ExecCommandOnMachineConfigDaemon(node *corev1.Node, command []string) ([]byte, error) {
+ mcd, err := GetMachineConfigDaemonByNode(node)
+ if err != nil {
+ return nil, err
+ }
+ testlog.Infof("found mcd %s for node %s", mcd.Name, node.Name)
+
+ return testpods.WaitForPodOutput(testclient.K8sClient, mcd, command)
+}
+
+// ExecCommandOnNode executes given command on given node and returns the result
+func ExecCommandOnNode(cmd []string, node *corev1.Node) (string, error) {
+ out, err := ExecCommandOnMachineConfigDaemon(node, cmd)
+ if err != nil {
+ return "", err
+ }
+
+ trimmedString := strings.Trim(string(out), "\n")
+ return strings.ReplaceAll(trimmedString, "\r", ""), nil
+}
+
+// GetKubeletConfig returns KubeletConfiguration loaded from the node /etc/kubernetes/kubelet.conf
+func GetKubeletConfig(node *corev1.Node) (*kubeletconfigv1beta1.KubeletConfiguration, error) {
+ command := []string{"cat", path.Join("/rootfs", testutils.FilePathKubeletConfig)}
+ kubeletBytes, err := ExecCommandOnMachineConfigDaemon(node, command)
+ if err != nil {
+ return nil, err
+ }
+
+ testlog.Infof("command output: %s", string(kubeletBytes))
+ kubeletConfig := &kubeletconfigv1beta1.KubeletConfiguration{}
+ if err := yaml.Unmarshal(kubeletBytes, kubeletConfig); err != nil {
+ return nil, err
+ }
+ return kubeletConfig, err
+}
+
+// MatchingOptionalSelector filter the given slice with only the nodes matching the optional selector.
+// If no selector is set, it returns the same list.
+// The NODES_SELECTOR must be set with a labelselector expression.
+// For example: NODES_SELECTOR="sctp=true"
+// Inspired from: https://github.com/fedepaol/sriov-network-operator/blob/master/test/util/nodes/nodes.go
+func MatchingOptionalSelector(toFilter []corev1.Node) ([]corev1.Node, error) {
+ if testutils.NodesSelector == "" {
+ return toFilter, nil
+ }
+
+ selector, err := labels.Parse(testutils.NodesSelector)
+ if err != nil {
+ return nil, fmt.Errorf("Error parsing the %s label selector, %v", testutils.NodesSelector, err)
+ }
+
+ toMatch, err := GetBySelector(selector)
+ if err != nil {
+ return nil, fmt.Errorf("Error in getting nodes matching the %s label selector, %v", testutils.NodesSelector, err)
+ }
+ if len(toMatch) == 0 {
+ return nil, fmt.Errorf("Failed to get nodes matching %s label selector", testutils.NodesSelector)
+ }
+
+ res := make([]corev1.Node, 0)
+ for _, n := range toFilter {
+ for _, m := range toMatch {
+ if n.Name == m.Name {
+ res = append(res, n)
+ break
+ }
+ }
+ }
+
+ return res, nil
+}
+
+// HasPreemptRTKernel returns no error if the node booted with PREEMPT RT kernel
+func HasPreemptRTKernel(node *corev1.Node) error {
+	// verify that kernel-rt-core is installed; it also means that the machine booted with the RT kernel
+	// because the machine-config-daemon uninstalls the regular kernel once you install the RT one and
+	// on traditional yum systems, rpm -q kernel can be completely different from what you're booted
+	// because yum keeps multiple kernels but only one userspace;
+	// with rpm-ostree, rpm -q always tells you what you're booted into,
+	// because ostree binds together (kernel, userspace) as a single commit.
+	cmd := []string{"chroot", "/rootfs", "rpm", "-q", "kernel-rt-core"}
+	if _, err := ExecCommandOnNode(cmd, node); err != nil {
+		return err
+	}
+
+	cmd = []string{"/bin/bash", "-c", "cat /rootfs/sys/kernel/realtime"}
+	out, err := ExecCommandOnNode(cmd, node)
+	if err != nil {
+		return err
+	}
+
+	if out != "1" {
+		return fmt.Errorf("RT kernel disabled")
+	}
+
+	return nil
+}
+
+func BannedCPUs(node corev1.Node) (banned cpuset.CPUSet, err error) {
+ cmd := []string{"sed", "-n", "s/^IRQBALANCE_BANNED_CPUS=\\(.*\\)/\\1/p", "/rootfs/etc/sysconfig/irqbalance"}
+ bannedCPUs, err := ExecCommandOnNode(cmd, &node)
+ if err != nil {
+ return cpuset.NewCPUSet(), fmt.Errorf("failed to execute %v: %v", cmd, err)
+ }
+
+ if bannedCPUs == "" {
+ testlog.Infof("Banned CPUs on node %q returned empty set", node.Name)
+ return cpuset.NewCPUSet(), nil // TODO: should this be a error?
+ }
+
+ banned, err = components.CPUMaskToCPUSet(bannedCPUs)
+ if err != nil {
+ return cpuset.NewCPUSet(), fmt.Errorf("failed to parse the banned CPUs: %v", err)
+ }
+
+ return banned, nil
+}
+
+// GetDefaultSmpAffinitySet returns the default smp affinity mask for the node
+func GetDefaultSmpAffinitySet(node *corev1.Node) (cpuset.CPUSet, error) {
+ command := []string{"cat", "/proc/irq/default_smp_affinity"}
+ defaultSmpAffinity, err := ExecCommandOnNode(command, node)
+ if err != nil {
+ return cpuset.NewCPUSet(), err
+ }
+ return components.CPUMaskToCPUSet(defaultSmpAffinity)
+}
+
+// GetOnlineCPUsSet returns the list of online (being scheduled) CPUs on the node
+func GetOnlineCPUsSet(node *corev1.Node) (cpuset.CPUSet, error) {
+ command := []string{"cat", sysDevicesOnlineCPUs}
+ onlineCPUs, err := ExecCommandOnNode(command, node)
+ if err != nil {
+ return cpuset.NewCPUSet(), err
+ }
+ return cpuset.Parse(onlineCPUs)
+}
+
+// GetSMTLevel returns the SMT level on the node using the given cpuID as target
+// Use a random cpuID from the return value of GetOnlineCPUsSet if not sure
+func GetSMTLevel(cpuID int, node *corev1.Node) int {
+ cmd := []string{"/bin/sh", "-c", fmt.Sprintf("cat /sys/devices/system/cpu/cpu%d/topology/thread_siblings_list | tr -d \"\n\r\"", cpuID)}
+ threadSiblingsList, err := ExecCommandOnNode(cmd, node)
+ ExpectWithOffset(1, err).ToNot(HaveOccurred())
+ // how many thread sibling you have = SMT level
+ // example: 2-way SMT means 2 threads sibling for each thread
+ cpus, err := cpuset.Parse(strings.TrimSpace(string(threadSiblingsList)))
+ ExpectWithOffset(1, err).ToNot(HaveOccurred())
+ return cpus.Size()
+}
+
+// GetNumaNodes returns the number of numa nodes and the associated cpus as list on the node
+func GetNumaNodes(node *corev1.Node) (map[int][]int, error) {
+ lscpuCmd := []string{"lscpu", "-e=cpu,node", "-J"}
+ cmdout, err := ExecCommandOnNode(lscpuCmd, node)
+ var numaNode, cpu int
+ if err != nil {
+ return nil, err
+ }
+ numaCpus := make(map[int][]int)
+ var result NumaNodes
+ err = json.Unmarshal([]byte(cmdout), &result)
+ if err != nil {
+ return nil, err
+ }
+ for _, value := range result.Cpus {
+ if numaNode, err = strconv.Atoi(value.Node); err != nil {
+ break
+ }
+ if cpu, err = strconv.Atoi(value.CPU); err != nil {
+ break
+ }
+ numaCpus[numaNode] = append(numaCpus[numaNode], cpu)
+ }
+ return numaCpus, err
+}
+
+//TunedForNode find tuned pod for appropriate node
+func TunedForNode(node *corev1.Node, sno bool) *corev1.Pod {
+
+ listOptions := &client.ListOptions{
+ Namespace: components.NamespaceNodeTuningOperator,
+ FieldSelector: fields.SelectorFromSet(fields.Set{"spec.nodeName": node.Name}),
+ LabelSelector: labels.SelectorFromSet(labels.Set{"openshift-app": "tuned"}),
+ }
+
+ tunedList := &corev1.PodList{}
+ Eventually(func() bool {
+ if err := testclient.Client.List(context.TODO(), tunedList, listOptions); err != nil {
+ return false
+ }
+
+ if len(tunedList.Items) == 0 {
+ return false
+ }
+ for _, s := range tunedList.Items[0].Status.ContainerStatuses {
+ if s.Ready == false {
+ return false
+ }
+ }
+ return true
+
+ }, cluster.ComputeTestTimeout(testTimeout*time.Second, sno), testPollInterval*time.Second).Should(BeTrue(),
+ "there should be one tuned daemon per node")
+
+ return &tunedList.Items[0]
+}
+
+func GetByCpuAllocatable(nodesList []corev1.Node, cpuQty int) []corev1.Node {
+ nodesWithSufficientCpu := []corev1.Node{}
+ for _, node := range nodesList {
+ allocatableCPU, _ := node.Status.Allocatable.Cpu().AsInt64()
+ if allocatableCPU >= int64(cpuQty) {
+ nodesWithSufficientCpu = append(nodesWithSufficientCpu, node)
+ }
+ }
+ return nodesWithSufficientCpu
+}
diff --git a/test/e2e/pao/functests/utils/pods/pods.go b/test/e2e/pao/functests/utils/pods/pods.go
new file mode 100644
index 000000000..a813c823f
--- /dev/null
+++ b/test/e2e/pao/functests/utils/pods/pods.go
@@ -0,0 +1,219 @@
+package pods
+
+import (
+ "bytes"
+ "context"
+ "fmt"
+ "io"
+ "os"
+ "strings"
+ "time"
+
+ "sigs.k8s.io/controller-runtime/pkg/client/config"
+
+ corev1 "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/api/errors"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/types"
+ "k8s.io/apimachinery/pkg/util/wait"
+ "k8s.io/client-go/kubernetes"
+ "k8s.io/client-go/kubernetes/scheme"
+ "k8s.io/client-go/tools/remotecommand"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+
+ testclient "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/client"
+ "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/images"
+)
+
+// DefaultDeletionTimeout contains the default pod deletion timeout in seconds
+const DefaultDeletionTimeout = 120
+
+// GetTestPod returns a pod definition based on the test (busybox) image
+// that sleeps for a long time so the pod stays in the Running phase.
+func GetTestPod() *corev1.Pod {
+	pod := &corev1.Pod{}
+	pod.ObjectMeta = metav1.ObjectMeta{
+		GenerateName: "test-",
+		Labels: map[string]string{
+			"test": "",
+		},
+	}
+	pod.Spec.Containers = []corev1.Container{
+		{
+			Name:    "test",
+			Image:   images.Test(),
+			Command: []string{"sleep", "10h"},
+		},
+	}
+	return pod
+}
+
+// WaitForDeletion waits until the pod will be removed from the cluster
+func WaitForDeletion(pod *corev1.Pod, timeout time.Duration) error {
+	podKey := types.NamespacedName{
+		Name:      pod.Name,
+		Namespace: pod.Namespace,
+	}
+	return wait.PollImmediate(time.Second, timeout, func() (bool, error) {
+		current := &corev1.Pod{}
+		err := testclient.Client.Get(context.TODO(), podKey, current)
+		// Done once the API server no longer knows the pod; any other
+		// error (including transient ones) keeps the poll going.
+		return errors.IsNotFound(err), nil
+	})
+}
+
+// WaitForCondition polls once per second until the pod reports the given
+// condition type with the expected status, or the timeout expires.
+func WaitForCondition(pod *corev1.Pod, conditionType corev1.PodConditionType, conditionStatus corev1.ConditionStatus, timeout time.Duration) error {
+	key := types.NamespacedName{
+		Name:      pod.Name,
+		Namespace: pod.Namespace,
+	}
+	return wait.PollImmediate(time.Second, timeout, func() (bool, error) {
+		current := &corev1.Pod{}
+		if err := testclient.Client.Get(context.TODO(), key, current); err != nil {
+			// Transient get errors are retried until the timeout expires.
+			return false, nil
+		}
+
+		for i := range current.Status.Conditions {
+			cond := current.Status.Conditions[i]
+			if cond.Type == conditionType && cond.Status == conditionStatus {
+				return true, nil
+			}
+		}
+		return false, nil
+	})
+}
+
+// WaitForPredicate polls the pod until the given predicate returns true,
+// returns an error, or the timeout elapses. Get errors are retried silently.
+func WaitForPredicate(pod *corev1.Pod, timeout time.Duration, pred func(pod *corev1.Pod) (bool, error)) error {
+	return wait.PollImmediate(time.Second, timeout, func() (bool, error) {
+		current := &corev1.Pod{}
+		if err := testclient.Client.Get(context.TODO(), client.ObjectKeyFromObject(pod), current); err != nil {
+			return false, nil
+		}
+
+		done, err := pred(current)
+		if err != nil {
+			return false, err
+		}
+		return done, nil
+	})
+}
+
+// WaitForPhase polls once per second until the pod reaches the given phase
+// or the timeout expires.
+func WaitForPhase(pod *corev1.Pod, phase corev1.PodPhase, timeout time.Duration) error {
+	key := types.NamespacedName{
+		Name:      pod.Name,
+		Namespace: pod.Namespace,
+	}
+	return wait.PollImmediate(time.Second, timeout, func() (bool, error) {
+		current := &corev1.Pod{}
+		if err := testclient.Client.Get(context.TODO(), key, current); err != nil {
+			// Retry on transient get errors.
+			return false, nil
+		}
+		return current.Status.Phase == phase, nil
+	})
+}
+
+// GetLogs returns logs of the specified pod
+func GetLogs(c *kubernetes.Clientset, pod *corev1.Pod) (string, error) {
+ logStream, err := c.CoreV1().Pods(pod.Namespace).GetLogs(pod.Name, &corev1.PodLogOptions{}).Stream(context.TODO())
+ if err != nil {
+ return "", err
+ }
+ defer logStream.Close()
+
+ buf := new(bytes.Buffer)
+ if _, err := io.Copy(buf, logStream); err != nil {
+ return "", err
+ }
+
+ return buf.String(), nil
+}
+
+// ExecCommandOnPod runs command in the first container of the pod over a
+// SPDY exec session and returns the captured stdout bytes.
+//
+// An error is returned when the cluster config cannot be loaded, the
+// executor cannot be built, the stream fails, or anything was written to
+// stderr.
+//
+// NOTE(review): PodExecOptions enables both TTY and Stderr; with a TTY the
+// runtime typically merges stderr into stdout, so errorBuf is expected to
+// stay empty — confirm this combination behaves as intended against the
+// target API server.
+func ExecCommandOnPod(c *kubernetes.Clientset, pod *corev1.Pod, command []string) ([]byte, error) {
+	var outputBuf bytes.Buffer
+	var errorBuf bytes.Buffer
+
+	// Build the exec subresource request against the pod's first container.
+	req := c.CoreV1().RESTClient().
+		Post().
+		Namespace(pod.Namespace).
+		Resource("pods").
+		Name(pod.Name).
+		SubResource("exec").
+		VersionedParams(&corev1.PodExecOptions{
+			Container: pod.Spec.Containers[0].Name,
+			Command:   command,
+			Stdin:     true,
+			Stdout:    true,
+			Stderr:    true,
+			TTY:       true,
+		}, scheme.ParameterCodec)
+
+	// Load kubeconfig/in-cluster configuration for the SPDY connection.
+	cfg, err := config.GetConfig()
+	if err != nil {
+		return nil, err
+	}
+
+	exec, err := remotecommand.NewSPDYExecutor(cfg, "POST", req.URL())
+	if err != nil {
+		return nil, err
+	}
+
+	// Run the exec session; stdin is wired to the test process's own stdin.
+	err = exec.Stream(remotecommand.StreamOptions{
+		Stdin:  os.Stdin,
+		Stdout: &outputBuf,
+		Stderr: &errorBuf,
+		Tty:    true,
+	})
+	if err != nil {
+		return nil, fmt.Errorf("failed to run command %v: output %s; error %s", command, outputBuf.String(), errorBuf.String())
+	}
+
+	// Treat any stderr content as a command failure even if the stream
+	// itself succeeded.
+	if errorBuf.Len() != 0 {
+		return nil, fmt.Errorf("failed to run command %v: output %s; error %s", command, outputBuf.String(), errorBuf.String())
+	}
+
+	return outputBuf.Bytes(), nil
+}
+
+// WaitForPodOutput repeatedly executes command inside the pod until it
+// produces non-empty output, polling every 15 seconds for up to a minute.
+func WaitForPodOutput(c *kubernetes.Clientset, pod *corev1.Pod, command []string) ([]byte, error) {
+	var output []byte
+	pollErr := wait.PollImmediate(15*time.Second, time.Minute, func() (bool, error) {
+		var err error
+		if output, err = ExecCommandOnPod(c, pod, command); err != nil {
+			return false, err
+		}
+		return len(output) > 0, nil
+	})
+	if pollErr != nil {
+		return nil, pollErr
+	}
+	return output, nil
+}
+
+// GetContainerIDByName returns the container ID (with the runtime
+// "cri-o://" prefix removed) of the named container within the pod.
+func GetContainerIDByName(pod *corev1.Pod, containerName string) (string, error) {
+	updatedPod := &corev1.Pod{}
+	key := types.NamespacedName{
+		Name:      pod.Name,
+		Namespace: pod.Namespace,
+	}
+	if err := testclient.Client.Get(context.TODO(), key, updatedPod); err != nil {
+		return "", err
+	}
+	for _, containerStatus := range updatedPod.Status.ContainerStatuses {
+		if containerStatus.Name == containerName {
+			// strings.Trim treats "cri-o://" as a character SET and strips
+			// any of 'c','r','i','-','o',':','/' from BOTH ends of the ID,
+			// corrupting IDs that start or end with those characters.
+			// TrimPrefix removes only the exact runtime prefix.
+			return strings.TrimPrefix(containerStatus.ContainerID, "cri-o://"), nil
+		}
+	}
+	return "", fmt.Errorf("failed to find the container ID for the container %q under the pod %q", containerName, pod.Name)
+}
diff --git a/test/e2e/pao/functests/utils/profiles/profiles.go b/test/e2e/pao/functests/utils/profiles/profiles.go
new file mode 100644
index 000000000..7915dee7e
--- /dev/null
+++ b/test/e2e/pao/functests/utils/profiles/profiles.go
@@ -0,0 +1,147 @@
+package profiles
+
+import (
+ "context"
+ "fmt"
+ "reflect"
+ "time"
+
+ . "github.com/onsi/gomega"
+
+ corev1 "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/api/errors"
+ "k8s.io/apimachinery/pkg/types"
+ "k8s.io/apimachinery/pkg/util/wait"
+
+ performancev2 "github.com/openshift/cluster-node-tuning-operator/pkg/apis/pao/v2"
+ testclient "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/client"
+ testlog "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/log"
+ v1 "github.com/openshift/custom-resource-status/conditions/v1"
+)
+
+// GetByNodeLabels returns the single performance profile whose node
+// selector is exactly equal to the given node labels; it is an error when
+// zero or more than one profile matches.
+func GetByNodeLabels(nodeLabels map[string]string) (*performancev2.PerformanceProfile, error) {
+	profiles, err := All()
+	if err != nil {
+		return nil, err
+	}
+
+	var match *performancev2.PerformanceProfile
+	for i := range profiles.Items {
+		if !reflect.DeepEqual(profiles.Items[i].Spec.NodeSelector, nodeLabels) {
+			continue
+		}
+		if match != nil {
+			return nil, fmt.Errorf("found more than one performance profile with specified node selector %v", nodeLabels)
+		}
+		match = &profiles.Items[i]
+	}
+
+	if match == nil {
+		return nil, fmt.Errorf("failed to find performance profile with specified node selector %v", nodeLabels)
+	}
+
+	return match, nil
+}
+
+// WaitForDeletion waits until the performance profile identified by
+// profileKey is removed from the cluster, or the timeout expires.
+func WaitForDeletion(profileKey types.NamespacedName, timeout time.Duration) error {
+	return wait.PollImmediate(time.Second, timeout, func() (bool, error) {
+		profile := &performancev2.PerformanceProfile{}
+		err := testclient.Client.Get(context.TODO(), profileKey, profile)
+		// Only NotFound terminates the poll; other errors are retried.
+		return errors.IsNotFound(err), nil
+	})
+}
+
+// GetCondition returns a copy of the performance profile condition with the
+// given type for the profile matching nodeLabels, or nil when the condition
+// is absent. It fails the assertion when no matching profile exists.
+func GetCondition(nodeLabels map[string]string, conditionType v1.ConditionType) *v1.Condition {
+	profile, err := GetByNodeLabels(nodeLabels)
+	ExpectWithOffset(1, err).ToNot(HaveOccurred(), "Failed getting profile by nodelabels %v", nodeLabels)
+	for _, cond := range profile.Status.Conditions {
+		if cond.Type != conditionType {
+			continue
+		}
+		// Return the address of a per-iteration copy, not of the slice slot.
+		found := cond
+		return &found
+	}
+	return nil
+}
+
+// GetConditionMessage returns the message of the given condition type, or
+// the empty string when the condition is not present.
+func GetConditionMessage(nodeLabels map[string]string, conditionType v1.ConditionType) string {
+	if cond := GetCondition(nodeLabels, conditionType); cond != nil {
+		return cond.Message
+	}
+	return ""
+}
+
+// GetConditionWithStatus waits (up to 30s, polling every 5s) for the given
+// condition type to report status True and returns it, failing the
+// assertion on timeout.
+func GetConditionWithStatus(nodeLabels map[string]string, conditionType v1.ConditionType) *v1.Condition {
+	var cond *v1.Condition
+	EventuallyWithOffset(1, func() bool {
+		if cond = GetCondition(nodeLabels, conditionType); cond == nil {
+			return false
+		}
+		return cond.Status == corev1.ConditionTrue
+	}, 30, 5).Should(BeTrue(), "condition %q not matched: %#v", conditionType, cond)
+	return cond
+}
+
+// All lists every performance profile currently present in the cluster.
+func All() (*performancev2.PerformanceProfileList, error) {
+	list := &performancev2.PerformanceProfileList{}
+	err := testclient.Client.List(context.TODO(), list)
+	if err != nil {
+		return nil, err
+	}
+	return list, nil
+}
+
+// UpdateWithRetry applies the spec of the given performance profile to the
+// cluster, retrying on conflicts for up to a minute (polling every 5s) and
+// failing the surrounding assertion on timeout.
+func UpdateWithRetry(profile *performancev2.PerformanceProfile) {
+	EventuallyWithOffset(1, func() error {
+		updatedProfile := &performancev2.PerformanceProfile{}
+		key := types.NamespacedName{
+			Name:      profile.Name,
+			Namespace: profile.Namespace,
+		}
+		// We should get the updated version of the performance profile.
+		// Otherwise, we will always try to update the profile with the old
+		// resource version and will always get the conflict error.
+		if err := testclient.Client.Get(context.TODO(), key, updatedProfile); err != nil {
+			return err
+		}
+
+		updatedProfile.Spec = *profile.Spec.DeepCopy()
+		// Update the freshly fetched object; updating the stale `profile`
+		// argument would discard the Get above and keep hitting conflicts.
+		if err := testclient.Client.Update(context.TODO(), updatedProfile); err != nil {
+			if !errors.IsConflict(err) {
+				testlog.Errorf("failed to update the profile %q: %v", profile.Name, err)
+			}
+
+			return err
+		}
+
+		return nil
+	}, time.Minute, 5*time.Second).Should(BeNil())
+}
+
+// WaitForCondition waits (up to 15 minutes, polling every 30s) until the
+// performance profile matching nodeLabels reports the given condition type
+// with conditionStatus, failing the surrounding assertion on timeout.
+func WaitForCondition(nodeLabels map[string]string, conditionType v1.ConditionType, conditionStatus corev1.ConditionStatus) {
+	EventuallyWithOffset(1, func() corev1.ConditionStatus {
+		cond := GetCondition(nodeLabels, conditionType)
+		if cond == nil {
+			// GetCondition returns nil when the condition is absent;
+			// dereferencing it directly would panic instead of retrying.
+			return corev1.ConditionUnknown
+		}
+		return cond.Status
+	}, 15*time.Minute, 30*time.Second).Should(Equal(conditionStatus), "Failed to meet performance profile condition %v", conditionType)
+}
+
+// Delete removes the performance profile with the given name, treating a
+// profile that does not exist as success.
+func Delete(name string) error {
+	profile := &performancev2.PerformanceProfile{}
+	err := testclient.Client.Get(context.TODO(), types.NamespacedName{Name: name}, profile)
+	if err != nil {
+		if errors.IsNotFound(err) {
+			return nil
+		}
+		return err
+	}
+
+	return testclient.Client.Delete(context.TODO(), profile)
+}
diff --git a/test/e2e/pao/functests/utils/tuned/tuned.go b/test/e2e/pao/functests/utils/tuned/tuned.go
new file mode 100644
index 000000000..b13dd887f
--- /dev/null
+++ b/test/e2e/pao/functests/utils/tuned/tuned.go
@@ -0,0 +1,55 @@
+package tuned
+
+import (
+ "context"
+ "fmt"
+ "time"
+
+ tunedv1 "github.com/openshift/cluster-node-tuning-operator/pkg/apis/tuned/v1"
+ corev1 "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/types"
+ "k8s.io/apimachinery/pkg/util/wait"
+ "k8s.io/klog"
+
+ "github.com/openshift/cluster-node-tuning-operator/pkg/pao/controller/performanceprofile/components"
+ testclient "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/client"
+)
+
+// WaitForAppliedCondition polls once per second until every tuned profile
+// in tunedProfileNames reports the Applied condition with the expected
+// status, or the timeout expires.
+func WaitForAppliedCondition(tunedProfileNames []string, conditionStatus corev1.ConditionStatus, timeout time.Duration) error {
+	return wait.PollImmediate(time.Second, timeout, func() (bool, error) {
+		for _, name := range tunedProfileNames {
+			profile := &tunedv1.Profile{}
+			key := types.NamespacedName{
+				Name:      name,
+				Namespace: components.NamespaceNodeTuningOperator,
+			}
+
+			// Fetch/parse failures are logged and retried, not fatal.
+			if err := testclient.Client.Get(context.TODO(), key, profile); err != nil {
+				klog.Errorf("failed to get tuned profile %q: %v", name, err)
+				return false, nil
+			}
+
+			applied, err := GetConditionByType(profile.Status.Conditions, tunedv1.TunedProfileApplied)
+			if err != nil {
+				klog.Errorf("failed to get applied condition for profile %q: %v", name, err)
+				return false, nil
+			}
+
+			if applied.Status != conditionStatus {
+				return false, nil
+			}
+		}
+
+		return true, nil
+	})
+}
+
+// GetConditionByType returns a pointer to the condition with the given type
+// from conditions, or an error when no such condition exists.
+func GetConditionByType(conditions []tunedv1.ProfileStatusCondition, conditionType tunedv1.ProfileConditionType) (*tunedv1.ProfileStatusCondition, error) {
+	for i := range conditions {
+		c := &conditions[i]
+		if c.Type == conditionType {
+			return c, nil
+		}
+	}
+	// The previous message ("failed to found applied condition") was both
+	// ungrammatical and wrong for non-Applied lookups; report the actual
+	// condition type that was requested.
+	return nil, fmt.Errorf("failed to find condition %q under conditions %v", conditionType, conditions)
+}
diff --git a/test/e2e/pao/functests/utils/utils.go b/test/e2e/pao/functests/utils/utils.go
new file mode 100644
index 000000000..ce6858e77
--- /dev/null
+++ b/test/e2e/pao/functests/utils/utils.go
@@ -0,0 +1,59 @@
+package utils
+
+import (
+ "bytes"
+ "context"
+ "fmt"
+ "os/exec"
+ "time"
+
+ . "github.com/onsi/ginkgo"
+
+ testlog "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/log"
+)
+
+const defaultExecTimeout = 2 * time.Minute
+
+// BeforeAll runs fn exactly once, before the first spec of the container,
+// by wrapping it in a guarded ginkgo BeforeEach.
+func BeforeAll(fn func()) {
+	executed := false
+	BeforeEach(func() {
+		if executed {
+			return
+		}
+		fn()
+		executed = true
+	})
+}
+
+// ExecAndLogCommand runs the command with a timeout, logs the invocation
+// and its output, and returns stdout only.
+func ExecAndLogCommand(name string, arg ...string) ([]byte, error) {
+	stdout, _, err := ExecAndLogCommandWithStderr(name, arg...)
+	return stdout, err
+}
+
+// ExecAndLogCommandWithStderr runs name with arg under a 2-minute timeout,
+// logs the invocation together with both output streams, and returns
+// (stdout, stderr, err). Hitting the timeout is reported as an explicit
+// error (with nil outputs) instead of the OS-specific kill error.
+func ExecAndLogCommandWithStderr(name string, arg ...string) ([]byte, []byte, error) {
+	// Create a new context and add a timeout to it
+	ctx, cancel := context.WithTimeout(context.Background(), defaultExecTimeout)
+	defer cancel() // The cancel should be deferred so resources are cleaned up
+
+	var stdout bytes.Buffer
+	var stderr bytes.Buffer
+	cmd := exec.CommandContext(ctx, name, arg...)
+	cmd.Stdout = &stdout
+	cmd.Stderr = &stderr
+
+	err := cmd.Run()
+	outData := stdout.Bytes()
+	errData := stderr.Bytes()
+	// One log line carries the command, its error and both streams; the
+	// previous extra log on *exec.ExitError duplicated this information.
+	testlog.Infof("run command '%s %v' (err=%v):\n  stdout=%q\n  stderr=%q", name, arg, err, outData, errData)
+
+	// We want to check the context error to see if the timeout was executed.
+	// The error returned by cmd.Run() will be OS specific based on what
+	// happens when a process is killed.
+	if ctx.Err() == context.DeadlineExceeded {
+		return nil, nil, fmt.Errorf("command '%s %v' failed because of the timeout", name, arg)
+	}
+
+	return outData, errData, err
+}
diff --git a/test/e2e/pao/testdata/must-gather/must-gather.bare-metal/quay-io-openshift-kni-performance-addon-operator-must-gather-sha256-09d31edb2171e1fe385df1238ce5a6f7ca3556efa271550a7e98e26dce290128/cluster-scoped-resources/core/nodes/master1.yaml b/test/e2e/pao/testdata/must-gather/must-gather.bare-metal/quay-io-openshift-kni-performance-addon-operator-must-gather-sha256-09d31edb2171e1fe385df1238ce5a6f7ca3556efa271550a7e98e26dce290128/cluster-scoped-resources/core/nodes/master1.yaml
new file mode 100755
index 000000000..6b2569577
--- /dev/null
+++ b/test/e2e/pao/testdata/must-gather/must-gather.bare-metal/quay-io-openshift-kni-performance-addon-operator-must-gather-sha256-09d31edb2171e1fe385df1238ce5a6f7ca3556efa271550a7e98e26dce290128/cluster-scoped-resources/core/nodes/master1.yaml
@@ -0,0 +1,456 @@
+---
+apiVersion: v1
+kind: Node
+metadata:
+ annotations:
+ machineconfiguration.openshift.io/currentConfig: rendered-master-e92b311a0208749dbba5c4458afcc653
+ machineconfiguration.openshift.io/desiredConfig: rendered-master-e92b311a0208749dbba5c4458afcc653
+ machineconfiguration.openshift.io/reason: ""
+ machineconfiguration.openshift.io/state: Done
+ volumes.kubernetes.io/controller-managed-attach-detach: "true"
+ creationTimestamp: "2020-11-25T07:56:34Z"
+ finalizers:
+ - metal3.io/capbm
+ labels:
+ beta.kubernetes.io/arch: amd64
+ beta.kubernetes.io/os: linux
+ kubernetes.io/arch: amd64
+ kubernetes.io/hostname: master1
+ kubernetes.io/os: linux
+ node-role.kubernetes.io/master: ""
+ node-role.kubernetes.io/virtual: ""
+ node.openshift.io/os_id: rhcos
+ managedFields:
+ - apiVersion: v1
+ fieldsType: FieldsV1
+ fieldsV1:
+ f:metadata:
+ f:labels:
+ f:beta.kubernetes.io/arch: {}
+ f:beta.kubernetes.io/os: {}
+ f:spec:
+ f:podCIDR: {}
+ f:podCIDRs:
+ .: {}
+ v:"10.132.2.0/24": {}
+ f:taints: {}
+ manager: kube-controller-manager
+ operation: Update
+ time: "2020-11-25T07:59:04Z"
+ - apiVersion: v1
+ fieldsType: FieldsV1
+ fieldsV1:
+ f:metadata:
+ f:annotations:
+ f:machineconfiguration.openshift.io/currentConfig: {}
+ f:machineconfiguration.openshift.io/desiredConfig: {}
+ f:machineconfiguration.openshift.io/reason: {}
+ f:machineconfiguration.openshift.io/state: {}
+ manager: machine-config-daemon
+ operation: Update
+ time: "2020-11-25T08:01:08Z"
+ - apiVersion: v1
+ fieldsType: FieldsV1
+ fieldsV1:
+ f:metadata:
+ f:annotations:
+ f:machine.openshift.io/machine: {}
+ manager: nodelink-controller
+ operation: Update
+ time: "2020-11-25T08:14:20Z"
+ - apiVersion: v1
+ fieldsType: FieldsV1
+ fieldsV1:
+ f:metadata:
+ f:finalizers:
+ .: {}
+ v:"metal3.io/capbm": {}
+ f:spec:
+ f:providerID: {}
+ manager: machine-controller-manager
+ operation: Update
+ time: "2020-11-25T08:14:22Z"
+ - apiVersion: v1
+ fieldsType: FieldsV1
+ fieldsV1:
+ f:metadata:
+ f:labels:
+ f:node-role.kubernetes.io/virtual: {}
+ manager: kubectl-label
+ operation: Update
+ time: "2020-11-25T08:35:46Z"
+ - apiVersion: v1
+ fieldsType: FieldsV1
+ fieldsV1:
+ f:metadata:
+ f:annotations:
+ f:k8s.ovn.org/l3-gateway-config: {}
+ f:k8s.ovn.org/node-chassis-id: {}
+ f:k8s.ovn.org/node-join-subnets: {}
+ f:k8s.ovn.org/node-local-nat-ip: {}
+ f:k8s.ovn.org/node-mgmt-port-mac-address: {}
+ f:k8s.ovn.org/node-primary-ifaddr: {}
+ f:k8s.ovn.org/node-subnets: {}
+ manager: ovnkube
+ operation: Update
+ time: "2020-12-23T11:34:05Z"
+ - apiVersion: v1
+ fieldsType: FieldsV1
+ fieldsV1:
+ f:metadata:
+ f:annotations:
+ .: {}
+ f:volumes.kubernetes.io/controller-managed-attach-detach: {}
+ f:labels:
+ .: {}
+ f:kubernetes.io/arch: {}
+ f:kubernetes.io/hostname: {}
+ f:kubernetes.io/os: {}
+ f:node-role.kubernetes.io/master: {}
+ f:node.openshift.io/os_id: {}
+ f:status:
+ f:addresses:
+ .: {}
+ k:{"type":"Hostname"}:
+ .: {}
+ f:address: {}
+ f:type: {}
+ k:{"type":"InternalIP"}:
+ .: {}
+ f:address: {}
+ f:type: {}
+ f:allocatable:
+ .: {}
+ f:cpu: {}
+ f:ephemeral-storage: {}
+ f:hugepages-1Gi: {}
+ f:hugepages-2Mi: {}
+ f:memory: {}
+ f:pods: {}
+ f:capacity:
+ .: {}
+ f:cpu: {}
+ f:ephemeral-storage: {}
+ f:hugepages-1Gi: {}
+ f:hugepages-2Mi: {}
+ f:memory: {}
+ f:pods: {}
+ f:conditions:
+ .: {}
+ k:{"type":"DiskPressure"}:
+ .: {}
+ f:lastHeartbeatTime: {}
+ f:lastTransitionTime: {}
+ f:message: {}
+ f:reason: {}
+ f:status: {}
+ f:type: {}
+ k:{"type":"MemoryPressure"}:
+ .: {}
+ f:lastHeartbeatTime: {}
+ f:lastTransitionTime: {}
+ f:message: {}
+ f:reason: {}
+ f:status: {}
+ f:type: {}
+ k:{"type":"PIDPressure"}:
+ .: {}
+ f:lastHeartbeatTime: {}
+ f:lastTransitionTime: {}
+ f:message: {}
+ f:reason: {}
+ f:status: {}
+ f:type: {}
+ k:{"type":"Ready"}:
+ .: {}
+ f:lastHeartbeatTime: {}
+ f:lastTransitionTime: {}
+ f:message: {}
+ f:reason: {}
+ f:status: {}
+ f:type: {}
+ f:daemonEndpoints:
+ f:kubeletEndpoint:
+ f:Port: {}
+ f:images: {}
+ f:nodeInfo:
+ f:architecture: {}
+ f:bootID: {}
+ f:containerRuntimeVersion: {}
+ f:kernelVersion: {}
+ f:kubeProxyVersion: {}
+ f:kubeletVersion: {}
+ f:machineID: {}
+ f:operatingSystem: {}
+ f:osImage: {}
+ f:systemUUID: {}
+ manager: kubelet
+ operation: Update
+ time: "2021-02-19T00:31:02Z"
+ name: master1
+ resourceVersion: "38562668"
+ selfLink: /api/v1/nodes/master1
+ uid: 0feb1cf4-d396-4568-a79e-0fd7e771e966
+spec:
+ providerID: baremetalhost:///openshift-machine-api/cnfd1-master-1
+ taints:
+ - effect: NoSchedule
+ key: node-role.kubernetes.io/master
+status:
+ addresses:
+ - address: master1
+ type: Hostname
+ allocatable:
+ cpu: 7500m
+ ephemeral-storage: "94993016264"
+ hugepages-1Gi: "0"
+ hugepages-2Mi: "0"
+ memory: 31782368Ki
+ pods: "250"
+ capacity:
+ cpu: "8"
+ ephemeral-storage: 101796Mi
+ hugepages-1Gi: "0"
+ hugepages-2Mi: "0"
+ memory: 32933344Ki
+ pods: "250"
+ conditions:
+ - lastHeartbeatTime: "2021-02-19T00:31:02Z"
+ lastTransitionTime: "2020-11-25T07:56:34Z"
+ message: kubelet has sufficient memory available
+ reason: KubeletHasSufficientMemory
+ status: "False"
+ type: MemoryPressure
+ - lastHeartbeatTime: "2021-02-19T00:31:02Z"
+ lastTransitionTime: "2020-11-25T07:56:34Z"
+ message: kubelet has no disk pressure
+ reason: KubeletHasNoDiskPressure
+ status: "False"
+ type: DiskPressure
+ - lastHeartbeatTime: "2021-02-19T00:31:02Z"
+ lastTransitionTime: "2020-11-25T07:56:34Z"
+ message: kubelet has sufficient PID available
+ reason: KubeletHasSufficientPID
+ status: "False"
+ type: PIDPressure
+ - lastHeartbeatTime: "2021-02-19T00:31:02Z"
+ lastTransitionTime: "2020-11-25T07:59:04Z"
+ message: kubelet is posting ready status
+ reason: KubeletReady
+ status: "True"
+ type: Ready
+ daemonEndpoints:
+ kubeletEndpoint:
+ Port: 10250
+ images:
+ - names:
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f786db417224d3b4a5456f0a545f2a53b31ee9cc0f559a5738a93154a6367d0
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:
+ sizeBytes: 884336421
+ - names:
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f27a23cd9f23951711f8aa7d66d4a6a1fd68071fa98ac0d5077a160a5d05f922
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:
+ sizeBytes: 774713580
+ - names:
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d849673f6cc38712f0add9d478a6326f1f6c2d3e739f6b81574a403dabba0bd3
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:
+ sizeBytes: 687443805
+ - names:
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:29e3f55ba5be8cc2f8a431411fc75c8bf2f07a5b55f4ab9a81c603052c82c5dd
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:
+ sizeBytes: 505930943
+ - names:
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4cffc88a97ba39c1a6a9ce45cf406bb29011a72766dc6f4deb0d76f7cd6eb02a
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:
+ sizeBytes: 486536450
+ - names:
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e21960829179b702d31bb220f8b61b9715b8e0fd91d671b8615b0a8599cf1f0
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:
+ sizeBytes: 478316539
+ - names:
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c8f0b9a2852b15b45487c08c158e10f3b803d7a77538d6dbc1d991994f58bcee
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:
+ sizeBytes: 418066712
+ - names:
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:456dff747967eadbbfc6e9c53b180049bbba09e85cba7d77abe0e36bfc02817a
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:
+ sizeBytes: 375119644
+ - names:
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3b5f75e0bb4f4e2e74aee6016030bfcce9cf71e52244f6fa689a673e619b35a4
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:
+ sizeBytes: 372122608
+ - names:
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:326516b79a528dc627e5a5d84c986fd35e5f8ff5cbd74ff0ef802473efccd285
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:
+ sizeBytes: 342541880
+ - names:
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a02341afe177329a39e49836e4c49d194affc8c4754fb360f0f760806f3bc2f
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:
+ sizeBytes: 341937980
+ - names:
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a95399379b48916216c49b1e4d4786b19081378ccd5821b27d97b496edd56a86
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:
+ sizeBytes: 341611087
+ - names:
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b9d4f93a8f4c88792d7040fa3c0572150197cee01bbe97595068a778f99e5a1
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:
+ sizeBytes: 341519660
+ - names:
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e626fa44d64c4b9bf6dc66fafa7fa4e640eaeb15359d2f40bb0772c351b4dab5
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:
+ sizeBytes: 340736830
+ - names:
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bb2bd6e3755a5523c0ed2d27f159218501dac1c4978e9bf37de475caa7eb9279
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:
+ sizeBytes: 340548837
+ - names:
+ - quay.io/openshift/origin-sriov-dp-admission-controller@sha256:3a9e0e5bccf522e7e9537bf626dd01f9894228b7a16573d209bf4856798e8e57
+ - quay.io/openshift/origin-sriov-dp-admission-controller@sha256:
+ sizeBytes: 339726486
+ - names:
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7ea1a439cf30c216e0c201ceb5e6b51baf200e4df8353d8274449d682f5c82bc
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:
+ sizeBytes: 339650807
+ - names:
+ - quay.io/openshift/origin-sriov-dp-admission-controller@sha256:63c162756ed6b5e67daafbd34f636ca461a18ea12f1352ae6172d27c9c95aff8
+ - quay.io/openshift/origin-sriov-dp-admission-controller@sha256:
+ sizeBytes: 339116800
+ - names:
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c052581031d9cb44b7e5a571db1cea25854733a977584a67718100cac56e2160
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:
+ sizeBytes: 338045804
+ - names:
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c02fd4013a52b3d3047ae566f4e7e50c82c1087cb3acc59945cd01d718235e94
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:
+ sizeBytes: 337522552
+ - names:
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a501d66461b0f24d2d551ec141617577ed417fdc4bc69db39f919e4e668a2889
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:
+ sizeBytes: 331223794
+ - names:
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:02656bc5f5d78919453f86fa63c6531c6e0f1bbb4f3abcc662b6726b5619acec
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:
+ sizeBytes: 330508411
+ - names:
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:759062d6a339a6915d499fc875f365cc1d3e52ededb6249ac19047b98dac9771
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:
+ sizeBytes: 326516257
+ - names:
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98e66fae1973761fe4e11262a548f57495cea9db5279fb74be19e7debce21ada
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:
+ sizeBytes: 324987061
+ - names:
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a9e1f154dc3826cac4cffe8b6a0b5b7b3e4630f50e87cc93a8ff18d72917242e
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:
+ sizeBytes: 323395352
+ - names:
+ - registry.svc.ci.openshift.org/ocp/release@sha256:6681fc3f83dda0856b43cecd25f2d226c3f90e8a42c7144dbc499f6ee0a086fc
+ - registry.svc.ci.openshift.org/ocp/release@sha256:
+ sizeBytes: 322031372
+ - names:
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f6746db8ee59600c8c3936d035aa30ad81890de42814ec0fafd12809a0c8eb39
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:
+ sizeBytes: 321353407
+ - names:
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:15d31443dbc6830af67840c6a199e3b93b03168d4d993e453bbacde702d4c25e
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:
+ sizeBytes: 320374187
+ - names:
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:12531b40785d46fde636bedbe93f549c7a9bd5eab146468927ae8347fb9e4aac
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:
+ sizeBytes: 320369930
+ - names:
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8cecd313f1a3ae30002be126597012302b54f2ae7d89b96c8bccc3eca2a06422
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:
+ sizeBytes: 320158358
+ - names:
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cad230fbee655fa6a021a06d0a7e0888f7fec60127e467b18ec6ba93bcfc1d98
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:
+ sizeBytes: 319394632
+ - names:
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b93d895f9b0733c924651a7f2ab3d0bb3854f4202eb55cb2086f13a4ce7aae84
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:
+ sizeBytes: 318520120
+ - names:
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6aece72b8448aaf5533f64674acbddf8b51d21355336807e85e59f3bac25d3e7
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:
+ sizeBytes: 317658369
+ - names:
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dc542a6eba6c4f4660d1575b30c35bb567a1778cce74475e64ed433721774b10
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:
+ sizeBytes: 317575019
+ - names:
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9a2d75eb606e8cbf2fa0d203bfbc92e3db822286357c46d039ba74080c2dc08f
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:
+ sizeBytes: 317484263
+ - names:
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6004374d28dc981473c392e52ff4d9d8ea1a753d560c6a2876f0aa84522f310c
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:
+ sizeBytes: 317310170
+ - names:
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:60ff0a413ba64ee38c13f13902071fc7306f24eb46edcacc8778507cf78f15ef
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:
+ sizeBytes: 317018973
+ - names:
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fa21f4b6288e4665090901e4904f12b0eae1a23d24fefaa06ba951b2b4ce017f
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:
+ sizeBytes: 316357508
+ - names:
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:04b8d9018fdffac86149bdc49dcf68bc4bbd58ab784dffef1d6d4bb33b901fb3
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:
+ sizeBytes: 311142601
+ - names:
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8cc28a3ed35c28e3184f33e3a7e8f4755af57ea1c321b3874d18acba711a8104
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:
+ sizeBytes: 309676114
+ - names:
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:54e61ec737612af1eb38c01fb7829dcba44f4de3cbcb52f029a95c73b9c2d7fb
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:
+ sizeBytes: 305778268
+ - names:
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7cdb835bfea7798987ac6db71bdc8b9f771cc4bfff1e56fa51369667161b7e7c
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:
+ sizeBytes: 305316771
+ - names:
+ - quay.io/openshift/origin-sriov-network-device-plugin@sha256:cb260fd8bd6914e52c3f2899be14990cab15afdd3620194b68013bea7b0e2826
+ - quay.io/openshift/origin-sriov-network-device-plugin@sha256:
+ sizeBytes: 305304768
+ - names:
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b0bbfa15a10b4308f07490670fb94203a5f63d9ad6b40e55b3bb959e9bf731d2
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:
+ sizeBytes: 305027413
+ - names:
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7211fbc109efa51e20b4be2f5f327e00127076423ef384bde250d909da95257f
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:
+ sizeBytes: 304947009
+ - names:
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d8bdb0125b1287565b5464a6d6181fd9fe706641efd68063bdb317270bd11887
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:
+ sizeBytes: 304593198
+ - names:
+ - quay.io/openshift/origin-sriov-network-webhook@sha256:5981ec1e4592b082b0a3e20b95da65863505b602867b0550772bd8f28e1cfd10
+ - quay.io/openshift/origin-sriov-network-webhook@sha256:
+ sizeBytes: 297518232
+ - names:
+ - quay.io/openshift/origin-sriov-network-webhook@sha256:57a58e1b2d8d3bd34555375f8f06b805745010f77854fada89a8219ad0237635
+ - quay.io/openshift/origin-sriov-network-webhook@sha256:
+ sizeBytes: 295059596
+ - names:
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1855db32ca2d846c9ad9af104d2e27ffa41b1054af031ac3d19e412c330fc66e
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:
+ sizeBytes: 278561358
+ - names:
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f80375a7ad29fb23de302b0e82ae460580681d1805829c214bad13e84d94b784
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:
+ sizeBytes: 275648050
+ nodeInfo:
+ architecture: amd64
+ bootID: 49325cb9-9cdd-49cf-94c4-ef3e9e44a6bc
+ containerRuntimeVersion: cri-o://1.19.0-22.rhaos4.6.gitc0306f1.el8
+ kernelVersion: 4.18.0-193.29.1.el8_2.x86_64
+ kubeProxyVersion: v1.19.0+9f84db3
+ kubeletVersion: v1.19.0+9f84db3
+ machineID: 0d99e882a90649948d3a34973a6a2a50
+ operatingSystem: linux
+ osImage: Red Hat Enterprise Linux CoreOS 46.82.202011061621-0 (Ootpa)
+ systemUUID: 0d99e882-a906-4994-8d3a-34973a6a2a50
diff --git a/test/e2e/pao/testdata/must-gather/must-gather.bare-metal/quay-io-openshift-kni-performance-addon-operator-must-gather-sha256-09d31edb2171e1fe385df1238ce5a6f7ca3556efa271550a7e98e26dce290128/cluster-scoped-resources/core/nodes/master2.yaml b/test/e2e/pao/testdata/must-gather/must-gather.bare-metal/quay-io-openshift-kni-performance-addon-operator-must-gather-sha256-09d31edb2171e1fe385df1238ce5a6f7ca3556efa271550a7e98e26dce290128/cluster-scoped-resources/core/nodes/master2.yaml
new file mode 100755
index 000000000..64ecda005
--- /dev/null
+++ b/test/e2e/pao/testdata/must-gather/must-gather.bare-metal/quay-io-openshift-kni-performance-addon-operator-must-gather-sha256-09d31edb2171e1fe385df1238ce5a6f7ca3556efa271550a7e98e26dce290128/cluster-scoped-resources/core/nodes/master2.yaml
@@ -0,0 +1,457 @@
+---
+apiVersion: v1
+kind: Node
+metadata:
+ annotations:
+ machine.openshift.io/machine: master2
+ machineconfiguration.openshift.io/currentConfig: rendered-master-e92b311a0208749dbba5c4458afcc653
+ machineconfiguration.openshift.io/desiredConfig: rendered-master-e92b311a0208749dbba5c4458afcc653
+ machineconfiguration.openshift.io/reason: ""
+ machineconfiguration.openshift.io/state: Done
+ volumes.kubernetes.io/controller-managed-attach-detach: "true"
+ creationTimestamp: "2020-11-25T07:56:25Z"
+ finalizers:
+ - metal3.io/capbm
+ labels:
+ beta.kubernetes.io/arch: amd64
+ beta.kubernetes.io/os: linux
+ kubernetes.io/arch: amd64
+ kubernetes.io/hostname: master2
+ kubernetes.io/os: linux
+ node-role.kubernetes.io/master: ""
+ node-role.kubernetes.io/virtual: ""
+ node.openshift.io/os_id: rhcos
+ managedFields:
+ - apiVersion: v1
+ fieldsType: FieldsV1
+ fieldsV1:
+ f:metadata:
+ f:annotations:
+ f:k8s.ovn.org/l3-gateway-config: {}
+ f:k8s.ovn.org/node-chassis-id: {}
+ f:k8s.ovn.org/node-join-subnets: {}
+ f:k8s.ovn.org/node-local-nat-ip: {}
+ f:k8s.ovn.org/node-mgmt-port-mac-address: {}
+ f:k8s.ovn.org/node-primary-ifaddr: {}
+ f:k8s.ovn.org/node-subnets: {}
+ manager: ovnkube
+ operation: Update
+ time: "2020-11-25T07:59:01Z"
+ - apiVersion: v1
+ fieldsType: FieldsV1
+ fieldsV1:
+ f:metadata:
+ f:labels:
+ f:beta.kubernetes.io/arch: {}
+ f:beta.kubernetes.io/os: {}
+ f:spec:
+ f:podCIDR: {}
+ f:podCIDRs:
+ .: {}
+ v:"10.132.0.0/24": {}
+ f:taints: {}
+ manager: kube-controller-manager
+ operation: Update
+ time: "2020-11-25T07:59:15Z"
+ - apiVersion: v1
+ fieldsType: FieldsV1
+ fieldsV1:
+ f:metadata:
+ f:annotations:
+ f:machineconfiguration.openshift.io/currentConfig: {}
+ f:machineconfiguration.openshift.io/desiredConfig: {}
+ f:machineconfiguration.openshift.io/reason: {}
+ f:machineconfiguration.openshift.io/state: {}
+ manager: machine-config-daemon
+ operation: Update
+ time: "2020-11-25T08:01:34Z"
+ - apiVersion: v1
+ fieldsType: FieldsV1
+ fieldsV1:
+ f:metadata:
+ f:annotations:
+ f:machine.openshift.io/machine: {}
+ manager: nodelink-controller
+ operation: Update
+ time: "2020-11-25T08:14:28Z"
+ - apiVersion: v1
+ fieldsType: FieldsV1
+ fieldsV1:
+ f:metadata:
+ f:finalizers:
+ .: {}
+ v:"metal3.io/capbm": {}
+ f:spec:
+ f:providerID: {}
+ manager: machine-controller-manager
+ operation: Update
+ time: "2020-11-25T08:14:29Z"
+ - apiVersion: v1
+ fieldsType: FieldsV1
+ fieldsV1:
+ f:metadata:
+ f:labels:
+ f:node-role.kubernetes.io/virtual: {}
+ manager: kubectl-label
+ operation: Update
+ time: "2020-11-25T08:35:46Z"
+ - apiVersion: v1
+ fieldsType: FieldsV1
+ fieldsV1:
+ f:metadata:
+ f:annotations:
+ .: {}
+ f:volumes.kubernetes.io/controller-managed-attach-detach: {}
+ f:labels:
+ .: {}
+ f:kubernetes.io/arch: {}
+ f:kubernetes.io/hostname: {}
+ f:kubernetes.io/os: {}
+ f:node-role.kubernetes.io/master: {}
+ f:node.openshift.io/os_id: {}
+ f:status:
+ f:addresses:
+ .: {}
+ k:{"type":"Hostname"}:
+ .: {}
+ f:address: {}
+ f:type: {}
+ k:{"type":"InternalIP"}:
+ .: {}
+ f:address: {}
+ f:type: {}
+ f:allocatable:
+ .: {}
+ f:cpu: {}
+ f:ephemeral-storage: {}
+ f:hugepages-1Gi: {}
+ f:hugepages-2Mi: {}
+ f:memory: {}
+ f:pods: {}
+ f:capacity:
+ .: {}
+ f:cpu: {}
+ f:ephemeral-storage: {}
+ f:hugepages-1Gi: {}
+ f:hugepages-2Mi: {}
+ f:memory: {}
+ f:pods: {}
+ f:conditions:
+ .: {}
+ k:{"type":"DiskPressure"}:
+ .: {}
+ f:lastHeartbeatTime: {}
+ f:lastTransitionTime: {}
+ f:message: {}
+ f:reason: {}
+ f:status: {}
+ f:type: {}
+ k:{"type":"MemoryPressure"}:
+ .: {}
+ f:lastHeartbeatTime: {}
+ f:lastTransitionTime: {}
+ f:message: {}
+ f:reason: {}
+ f:status: {}
+ f:type: {}
+ k:{"type":"PIDPressure"}:
+ .: {}
+ f:lastHeartbeatTime: {}
+ f:lastTransitionTime: {}
+ f:message: {}
+ f:reason: {}
+ f:status: {}
+ f:type: {}
+ k:{"type":"Ready"}:
+ .: {}
+ f:lastHeartbeatTime: {}
+ f:lastTransitionTime: {}
+ f:message: {}
+ f:reason: {}
+ f:status: {}
+ f:type: {}
+ f:daemonEndpoints:
+ f:kubeletEndpoint:
+ f:Port: {}
+ f:images: {}
+ f:nodeInfo:
+ f:architecture: {}
+ f:bootID: {}
+ f:containerRuntimeVersion: {}
+ f:kernelVersion: {}
+ f:kubeProxyVersion: {}
+ f:kubeletVersion: {}
+ f:machineID: {}
+ f:operatingSystem: {}
+ f:osImage: {}
+ f:systemUUID: {}
+ manager: kubelet
+ operation: Update
+ time: "2021-02-19T00:30:04Z"
+ name: master2
+ resourceVersion: "38562354"
+ selfLink: /api/v1/nodes/master2
+ uid: 10695ccf-ea74-42ae-aef8-f1056bd428ef
+spec:
+ providerID: baremetalhost:///openshift-machine-api/cnfd1-master-2
+ taints:
+ - effect: NoSchedule
+ key: node-role.kubernetes.io/master
+status:
+ addresses:
+ - address: master2
+ type: Hostname
+ allocatable:
+ cpu: 7500m
+ ephemeral-storage: "94993016264"
+ hugepages-1Gi: "0"
+ hugepages-2Mi: "0"
+ memory: 31782572Ki
+ pods: "250"
+ capacity:
+ cpu: "8"
+ ephemeral-storage: 101796Mi
+ hugepages-1Gi: "0"
+ hugepages-2Mi: "0"
+ memory: 32933548Ki
+ pods: "250"
+ conditions:
+ - lastHeartbeatTime: "2021-02-19T00:30:04Z"
+ lastTransitionTime: "2020-11-25T07:56:25Z"
+ message: kubelet has sufficient memory available
+ reason: KubeletHasSufficientMemory
+ status: "False"
+ type: MemoryPressure
+ - lastHeartbeatTime: "2021-02-19T00:30:04Z"
+ lastTransitionTime: "2020-11-25T07:56:25Z"
+ message: kubelet has no disk pressure
+ reason: KubeletHasNoDiskPressure
+ status: "False"
+ type: DiskPressure
+ - lastHeartbeatTime: "2021-02-19T00:30:04Z"
+ lastTransitionTime: "2020-11-25T07:56:25Z"
+ message: kubelet has sufficient PID available
+ reason: KubeletHasSufficientPID
+ status: "False"
+ type: PIDPressure
+ - lastHeartbeatTime: "2021-02-19T00:30:04Z"
+ lastTransitionTime: "2020-11-25T07:59:15Z"
+ message: kubelet is posting ready status
+ reason: KubeletReady
+ status: "True"
+ type: Ready
+ daemonEndpoints:
+ kubeletEndpoint:
+ Port: 10250
+ images:
+ - names:
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f786db417224d3b4a5456f0a545f2a53b31ee9cc0f559a5738a93154a6367d0
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:
+ sizeBytes: 884336421
+ - names:
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ad6d87a8e1eee9fac58a6c85e34d8186509075f8ed2f1fe5efc9c9dda5138e00
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:
+ sizeBytes: 783126428
+ - names:
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d849673f6cc38712f0add9d478a6326f1f6c2d3e739f6b81574a403dabba0bd3
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:
+ sizeBytes: 687443805
+ - names:
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:796617722a915d79b478c9623b1d152a397478a7f6ba7ec71d39b9df2668cc80
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:
+ sizeBytes: 673038171
+ - names:
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:29e3f55ba5be8cc2f8a431411fc75c8bf2f07a5b55f4ab9a81c603052c82c5dd
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:
+ sizeBytes: 505930943
+ - names:
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4cffc88a97ba39c1a6a9ce45cf406bb29011a72766dc6f4deb0d76f7cd6eb02a
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:
+ sizeBytes: 486536450
+ - names:
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c8f0b9a2852b15b45487c08c158e10f3b803d7a77538d6dbc1d991994f58bcee
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:
+ sizeBytes: 418066712
+ - names:
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8b90545c9921788719f6653263fd8ba124b4545a9c0c078fdb8534b9ba5fa4f3
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:
+ sizeBytes: 410819926
+ - names:
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:456dff747967eadbbfc6e9c53b180049bbba09e85cba7d77abe0e36bfc02817a
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:
+ sizeBytes: 375119644
+ - names:
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3b5f75e0bb4f4e2e74aee6016030bfcce9cf71e52244f6fa689a673e619b35a4
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:
+ sizeBytes: 372122608
+ - names:
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f08231b8e9948d4894ff6e9a0f1b4aee1534ddb11bea8a9d9b53b2473e83a880
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:
+ sizeBytes: 363172829
+ - names:
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9df19f010a2d4369d31278a842488e11b3cd24d3134efe335cea5884f63c501e
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:
+ sizeBytes: 344855408
+ - names:
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:326516b79a528dc627e5a5d84c986fd35e5f8ff5cbd74ff0ef802473efccd285
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:
+ sizeBytes: 342541880
+ - names:
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a02341afe177329a39e49836e4c49d194affc8c4754fb360f0f760806f3bc2f
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:
+ sizeBytes: 341937980
+ - names:
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a95399379b48916216c49b1e4d4786b19081378ccd5821b27d97b496edd56a86
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:
+ sizeBytes: 341611087
+ - names:
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e626fa44d64c4b9bf6dc66fafa7fa4e640eaeb15359d2f40bb0772c351b4dab5
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:
+ sizeBytes: 340736830
+ - names:
+ - quay.io/openshift/origin-sriov-dp-admission-controller@sha256:3a9e0e5bccf522e7e9537bf626dd01f9894228b7a16573d209bf4856798e8e57
+ - quay.io/openshift/origin-sriov-dp-admission-controller@sha256:
+ sizeBytes: 339726486
+ - names:
+ - quay.io/openshift/origin-sriov-dp-admission-controller@sha256:63c162756ed6b5e67daafbd34f636ca461a18ea12f1352ae6172d27c9c95aff8
+ - quay.io/openshift/origin-sriov-dp-admission-controller@sha256:
+ sizeBytes: 339116800
+ - names:
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c052581031d9cb44b7e5a571db1cea25854733a977584a67718100cac56e2160
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:
+ sizeBytes: 338045804
+ - names:
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4aa2cfd65a6d1ae112f591eb59336a05df72b59f6e053c418bfd5424ed372608
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:
+ sizeBytes: 337811610
+ - names:
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a501d66461b0f24d2d551ec141617577ed417fdc4bc69db39f919e4e668a2889
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:
+ sizeBytes: 331223794
+ - names:
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:02656bc5f5d78919453f86fa63c6531c6e0f1bbb4f3abcc662b6726b5619acec
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:
+ sizeBytes: 330508411
+ - names:
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:759062d6a339a6915d499fc875f365cc1d3e52ededb6249ac19047b98dac9771
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:
+ sizeBytes: 326516257
+ - names:
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f6746db8ee59600c8c3936d035aa30ad81890de42814ec0fafd12809a0c8eb39
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:
+ sizeBytes: 321353407
+ - names:
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:15d31443dbc6830af67840c6a199e3b93b03168d4d993e453bbacde702d4c25e
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:
+ sizeBytes: 320374187
+ - names:
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:12531b40785d46fde636bedbe93f549c7a9bd5eab146468927ae8347fb9e4aac
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:
+ sizeBytes: 320369930
+ - names:
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cad230fbee655fa6a021a06d0a7e0888f7fec60127e467b18ec6ba93bcfc1d98
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:
+ sizeBytes: 319394632
+ - names:
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b93d895f9b0733c924651a7f2ab3d0bb3854f4202eb55cb2086f13a4ce7aae84
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:
+ sizeBytes: 318520120
+ - names:
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6aece72b8448aaf5533f64674acbddf8b51d21355336807e85e59f3bac25d3e7
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:
+ sizeBytes: 317658369
+ - names:
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4e580c9df1a09ab8e0647fc0e378d792a4c3078b4a06120264ab00917e71e783
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:
+ sizeBytes: 316367713
+ - names:
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fa21f4b6288e4665090901e4904f12b0eae1a23d24fefaa06ba951b2b4ce017f
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:
+ sizeBytes: 316357508
+ - names:
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8cc28a3ed35c28e3184f33e3a7e8f4755af57ea1c321b3874d18acba711a8104
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:
+ sizeBytes: 309676114
+ - names:
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:423e5b0624ed0bb736c5320c37611b72dcbb2094e785c2ab588f584f65157289
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:
+ sizeBytes: 308423009
+ - names:
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:54e61ec737612af1eb38c01fb7829dcba44f4de3cbcb52f029a95c73b9c2d7fb
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:
+ sizeBytes: 305778268
+ - names:
+ - quay.io/openshift/origin-sriov-network-device-plugin@sha256:cb260fd8bd6914e52c3f2899be14990cab15afdd3620194b68013bea7b0e2826
+ - quay.io/openshift/origin-sriov-network-device-plugin@sha256:
+ sizeBytes: 305304768
+ - names:
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b0bbfa15a10b4308f07490670fb94203a5f63d9ad6b40e55b3bb959e9bf731d2
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:
+ sizeBytes: 305027413
+ - names:
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7211fbc109efa51e20b4be2f5f327e00127076423ef384bde250d909da95257f
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:
+ sizeBytes: 304947009
+ - names:
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d8bdb0125b1287565b5464a6d6181fd9fe706641efd68063bdb317270bd11887
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:
+ sizeBytes: 304593198
+ - names:
+ - quay.io/openshift/origin-sriov-network-webhook@sha256:5981ec1e4592b082b0a3e20b95da65863505b602867b0550772bd8f28e1cfd10
+ - quay.io/openshift/origin-sriov-network-webhook@sha256:
+ sizeBytes: 297518232
+ - names:
+ - quay.io/openshift/origin-sriov-network-webhook@sha256:57a58e1b2d8d3bd34555375f8f06b805745010f77854fada89a8219ad0237635
+ - quay.io/openshift/origin-sriov-network-webhook@sha256:
+ sizeBytes: 295059596
+ - names:
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9f2296b282ae835a2345ca15bb2aa36d0a0178283cf76ebb2f3d26b34ac493bf
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:
+ sizeBytes: 291968312
+ - names:
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1855db32ca2d846c9ad9af104d2e27ffa41b1054af031ac3d19e412c330fc66e
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:
+ sizeBytes: 278561358
+ - names:
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f80375a7ad29fb23de302b0e82ae460580681d1805829c214bad13e84d94b784
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:
+ sizeBytes: 275648050
+ - names:
+ - quay.io/openshift/origin-sriov-infiniband-cni@sha256:1b2878bcf2834fc94311680c51be12b0035f843cfe17ce1a2cfeae6823e49d14
+ - quay.io/openshift/origin-sriov-infiniband-cni@sha256:
+ sizeBytes: 271428718
+ - names:
+ - quay.io/openshift/origin-sriov-cni@sha256:122413b37f91bfb890f50ad435f93b247ef3a8f6fabb441c634750567d1781b4
+ - quay.io/openshift/origin-sriov-cni@sha256:
+ sizeBytes: 269939282
+ - names:
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:00900d48c5796ecb8c0599ab6a0946347947dbcd2acc883665240c2ec9b33fd5
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:
+ sizeBytes: 269278836
+ - names:
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:931e5b5dd5e6e36ed70cd72a07574f74408dfd371e0b3f8d41f78b4d99790bc1
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:
+ sizeBytes: 257341738
+ - names:
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:db5160ca401c5e0a59d5488f41ab78177bacb4d0369a8c9c96149ef196d95852
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:
+ sizeBytes: 252463529
+ - names:
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:531dcce5496318f2f32e008bf6cd03e713a36e73ea6fa8bdbf560a9c6c7f5b14
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:
+ sizeBytes: 249896230
+ - names:
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cb639ce34790f2eb0bfae7bbe13806028d0d75f55d3eea63fd1f62677082c17c
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:
+ sizeBytes: 241441090
+ nodeInfo:
+ architecture: amd64
+ bootID: ff6ea3bb-8793-46f3-b95d-b2b631961ebb
+ containerRuntimeVersion: cri-o://1.19.0-22.rhaos4.6.gitc0306f1.el8
+ kernelVersion: 4.18.0-193.29.1.el8_2.x86_64
+ kubeProxyVersion: v1.19.0+9f84db3
+ kubeletVersion: v1.19.0+9f84db3
+ machineID: 3b1f2fe4d89b47789345c4f6bd8b0cf1
+ operatingSystem: linux
+ osImage: Red Hat Enterprise Linux CoreOS 46.82.202011061621-0 (Ootpa)
+ systemUUID: 3b1f2fe4-d89b-4778-9345-c4f6bd8b0cf1
diff --git a/test/e2e/pao/testdata/must-gather/must-gather.bare-metal/quay-io-openshift-kni-performance-addon-operator-must-gather-sha256-09d31edb2171e1fe385df1238ce5a6f7ca3556efa271550a7e98e26dce290128/cluster-scoped-resources/core/nodes/master3.yaml b/test/e2e/pao/testdata/must-gather/must-gather.bare-metal/quay-io-openshift-kni-performance-addon-operator-must-gather-sha256-09d31edb2171e1fe385df1238ce5a6f7ca3556efa271550a7e98e26dce290128/cluster-scoped-resources/core/nodes/master3.yaml
new file mode 100755
index 000000000..0251741ba
--- /dev/null
+++ b/test/e2e/pao/testdata/must-gather/must-gather.bare-metal/quay-io-openshift-kni-performance-addon-operator-must-gather-sha256-09d31edb2171e1fe385df1238ce5a6f7ca3556efa271550a7e98e26dce290128/cluster-scoped-resources/core/nodes/master3.yaml
@@ -0,0 +1,457 @@
+---
+apiVersion: v1
+kind: Node
+metadata:
+ annotations:
+ machine.openshift.io/machine: openshift-machine-api/master3
+ machineconfiguration.openshift.io/currentConfig: rendered-master-e92b311a0208749dbba5c4458afcc653
+ machineconfiguration.openshift.io/desiredConfig: rendered-master-e92b311a0208749dbba5c4458afcc653
+ machineconfiguration.openshift.io/reason: ""
+ machineconfiguration.openshift.io/state: Done
+ volumes.kubernetes.io/controller-managed-attach-detach: "true"
+ creationTimestamp: "2020-11-25T07:56:27Z"
+ finalizers:
+ - metal3.io/capbm
+ labels:
+ beta.kubernetes.io/arch: amd64
+ beta.kubernetes.io/os: linux
+ kubernetes.io/arch: amd64
+ kubernetes.io/hostname: master3
+ kubernetes.io/os: linux
+ node-role.kubernetes.io/master: ""
+ node-role.kubernetes.io/virtual: ""
+ node.openshift.io/os_id: rhcos
+ managedFields:
+ - apiVersion: v1
+ fieldsType: FieldsV1
+ fieldsV1:
+ f:metadata:
+ f:labels:
+ f:beta.kubernetes.io/arch: {}
+ f:beta.kubernetes.io/os: {}
+ f:spec:
+ f:podCIDR: {}
+ f:podCIDRs:
+ .: {}
+ v:"10.132.1.0/24": {}
+ f:taints: {}
+ manager: kube-controller-manager
+ operation: Update
+ time: "2020-11-25T07:59:08Z"
+ - apiVersion: v1
+ fieldsType: FieldsV1
+ fieldsV1:
+ f:metadata:
+ f:annotations:
+ f:machineconfiguration.openshift.io/currentConfig: {}
+ f:machineconfiguration.openshift.io/desiredConfig: {}
+ f:machineconfiguration.openshift.io/reason: {}
+ f:machineconfiguration.openshift.io/state: {}
+ manager: machine-config-daemon
+ operation: Update
+ time: "2020-11-25T08:01:36Z"
+ - apiVersion: v1
+ fieldsType: FieldsV1
+ fieldsV1:
+ f:metadata:
+ f:annotations:
+ f:machine.openshift.io/machine: {}
+ manager: nodelink-controller
+ operation: Update
+ time: "2020-11-25T08:14:20Z"
+ - apiVersion: v1
+ fieldsType: FieldsV1
+ fieldsV1:
+ f:metadata:
+ f:finalizers:
+ .: {}
+ v:"metal3.io/capbm": {}
+ f:spec:
+ f:providerID: {}
+ manager: machine-controller-manager
+ operation: Update
+ time: "2020-11-25T08:14:21Z"
+ - apiVersion: v1
+ fieldsType: FieldsV1
+ fieldsV1:
+ f:metadata:
+ f:labels:
+ f:node-role.kubernetes.io/virtual: {}
+ manager: kubectl-label
+ operation: Update
+ time: "2020-11-25T08:35:46Z"
+ - apiVersion: v1
+ fieldsType: FieldsV1
+ fieldsV1:
+ f:metadata:
+ f:annotations:
+ f:k8s.ovn.org/l3-gateway-config: {}
+ f:k8s.ovn.org/node-chassis-id: {}
+ f:k8s.ovn.org/node-join-subnets: {}
+ f:k8s.ovn.org/node-local-nat-ip: {}
+ f:k8s.ovn.org/node-mgmt-port-mac-address: {}
+ f:k8s.ovn.org/node-primary-ifaddr: {}
+ f:k8s.ovn.org/node-subnets: {}
+ manager: ovnkube
+ operation: Update
+ time: "2020-12-23T11:34:09Z"
+ - apiVersion: v1
+ fieldsType: FieldsV1
+ fieldsV1:
+ f:metadata:
+ f:annotations:
+ .: {}
+ f:volumes.kubernetes.io/controller-managed-attach-detach: {}
+ f:labels:
+ .: {}
+ f:kubernetes.io/arch: {}
+ f:kubernetes.io/hostname: {}
+ f:kubernetes.io/os: {}
+ f:node-role.kubernetes.io/master: {}
+ f:node.openshift.io/os_id: {}
+ f:status:
+ f:addresses:
+ .: {}
+ k:{"type":"Hostname"}:
+ .: {}
+ f:address: {}
+ f:type: {}
+ k:{"type":"InternalIP"}:
+ .: {}
+ f:address: {}
+ f:type: {}
+ f:allocatable:
+ .: {}
+ f:cpu: {}
+ f:ephemeral-storage: {}
+ f:hugepages-1Gi: {}
+ f:hugepages-2Mi: {}
+ f:memory: {}
+ f:pods: {}
+ f:capacity:
+ .: {}
+ f:cpu: {}
+ f:ephemeral-storage: {}
+ f:hugepages-1Gi: {}
+ f:hugepages-2Mi: {}
+ f:memory: {}
+ f:pods: {}
+ f:conditions:
+ .: {}
+ k:{"type":"DiskPressure"}:
+ .: {}
+ f:lastHeartbeatTime: {}
+ f:lastTransitionTime: {}
+ f:message: {}
+ f:reason: {}
+ f:status: {}
+ f:type: {}
+ k:{"type":"MemoryPressure"}:
+ .: {}
+ f:lastHeartbeatTime: {}
+ f:lastTransitionTime: {}
+ f:message: {}
+ f:reason: {}
+ f:status: {}
+ f:type: {}
+ k:{"type":"PIDPressure"}:
+ .: {}
+ f:lastHeartbeatTime: {}
+ f:lastTransitionTime: {}
+ f:message: {}
+ f:reason: {}
+ f:status: {}
+ f:type: {}
+ k:{"type":"Ready"}:
+ .: {}
+ f:lastHeartbeatTime: {}
+ f:lastTransitionTime: {}
+ f:message: {}
+ f:reason: {}
+ f:status: {}
+ f:type: {}
+ f:daemonEndpoints:
+ f:kubeletEndpoint:
+ f:Port: {}
+ f:images: {}
+ f:nodeInfo:
+ f:architecture: {}
+ f:bootID: {}
+ f:containerRuntimeVersion: {}
+ f:kernelVersion: {}
+ f:kubeProxyVersion: {}
+ f:kubeletVersion: {}
+ f:machineID: {}
+ f:operatingSystem: {}
+ f:osImage: {}
+ f:systemUUID: {}
+ manager: kubelet
+ operation: Update
+ time: "2021-02-19T00:31:12Z"
+ name: master3
+ resourceVersion: "38562754"
+ selfLink: /api/v1/nodes/master3
+ uid: 94aabdc5-8e6f-4965-896d-3d68c8043fbf
+spec:
+ providerID: baremetalhost:///openshift-machine-api/cnfd1-master-0
+ taints:
+ - effect: NoSchedule
+ key: node-role.kubernetes.io/master
+status:
+ addresses:
+ - address: master3
+ type: Hostname
+ allocatable:
+ cpu: 7500m
+ ephemeral-storage: "94993016264"
+ hugepages-1Gi: "0"
+ hugepages-2Mi: "0"
+ memory: 31782368Ki
+ pods: "250"
+ capacity:
+ cpu: "8"
+ ephemeral-storage: 101796Mi
+ hugepages-1Gi: "0"
+ hugepages-2Mi: "0"
+ memory: 32933344Ki
+ pods: "250"
+ conditions:
+ - lastHeartbeatTime: "2021-02-19T00:31:12Z"
+ lastTransitionTime: "2020-11-25T07:56:27Z"
+ message: kubelet has sufficient memory available
+ reason: KubeletHasSufficientMemory
+ status: "False"
+ type: MemoryPressure
+ - lastHeartbeatTime: "2021-02-19T00:31:12Z"
+ lastTransitionTime: "2020-11-25T07:56:27Z"
+ message: kubelet has no disk pressure
+ reason: KubeletHasNoDiskPressure
+ status: "False"
+ type: DiskPressure
+ - lastHeartbeatTime: "2021-02-19T00:31:12Z"
+ lastTransitionTime: "2020-11-25T07:56:27Z"
+ message: kubelet has sufficient PID available
+ reason: KubeletHasSufficientPID
+ status: "False"
+ type: PIDPressure
+ - lastHeartbeatTime: "2021-02-19T00:31:12Z"
+ lastTransitionTime: "2020-11-25T07:59:08Z"
+ message: kubelet is posting ready status
+ reason: KubeletReady
+ status: "True"
+ type: Ready
+ daemonEndpoints:
+ kubeletEndpoint:
+ Port: 10250
+ images:
+ - names:
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f786db417224d3b4a5456f0a545f2a53b31ee9cc0f559a5738a93154a6367d0
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:
+ sizeBytes: 884336421
+ - names:
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f27a23cd9f23951711f8aa7d66d4a6a1fd68071fa98ac0d5077a160a5d05f922
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:
+ sizeBytes: 774713580
+ - names:
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d849673f6cc38712f0add9d478a6326f1f6c2d3e739f6b81574a403dabba0bd3
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:
+ sizeBytes: 687443805
+ - names:
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:102e8cced32335144b5567ed3159d31aa267d0b1f2e8de8454d53b175e1df718
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:
+ sizeBytes: 519118014
+ - names:
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:29e3f55ba5be8cc2f8a431411fc75c8bf2f07a5b55f4ab9a81c603052c82c5dd
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:
+ sizeBytes: 505930943
+ - names:
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4cffc88a97ba39c1a6a9ce45cf406bb29011a72766dc6f4deb0d76f7cd6eb02a
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:
+ sizeBytes: 486536450
+ - names:
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e21960829179b702d31bb220f8b61b9715b8e0fd91d671b8615b0a8599cf1f0
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:
+ sizeBytes: 478316539
+ - names:
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c8f0b9a2852b15b45487c08c158e10f3b803d7a77538d6dbc1d991994f58bcee
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:
+ sizeBytes: 418066712
+ - names:
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:456dff747967eadbbfc6e9c53b180049bbba09e85cba7d77abe0e36bfc02817a
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:
+ sizeBytes: 375119644
+ - names:
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3b5f75e0bb4f4e2e74aee6016030bfcce9cf71e52244f6fa689a673e619b35a4
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:
+ sizeBytes: 372122608
+ - names:
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:248079028275bd57deb5c810fc91b5c4e9138f706084bfa953aa64c833652ef0
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:
+ sizeBytes: 348879632
+ - names:
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:326516b79a528dc627e5a5d84c986fd35e5f8ff5cbd74ff0ef802473efccd285
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:
+ sizeBytes: 342541880
+ - names:
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a02341afe177329a39e49836e4c49d194affc8c4754fb360f0f760806f3bc2f
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:
+ sizeBytes: 341937980
+ - names:
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47c2f751ab0d5ee88e2826749f1372e6a24db3d0c0c942136ae84db17cb7f086
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:
+ sizeBytes: 341659335
+ - names:
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a95399379b48916216c49b1e4d4786b19081378ccd5821b27d97b496edd56a86
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:
+ sizeBytes: 341611087
+ - names:
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e626fa44d64c4b9bf6dc66fafa7fa4e640eaeb15359d2f40bb0772c351b4dab5
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:
+ sizeBytes: 340736830
+ - names:
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:831f29043e6a2933169c6595281c58c3c7e31232866e1ffe1130845d7b7744af
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:
+ sizeBytes: 340684520
+ - names:
+ - quay.io/openshift/origin-sriov-dp-admission-controller@sha256:3a9e0e5bccf522e7e9537bf626dd01f9894228b7a16573d209bf4856798e8e57
+ - quay.io/openshift/origin-sriov-dp-admission-controller@sha256:
+ sizeBytes: 339726486
+ - names:
+ - quay.io/openshift/origin-sriov-dp-admission-controller@sha256:63c162756ed6b5e67daafbd34f636ca461a18ea12f1352ae6172d27c9c95aff8
+ - quay.io/openshift/origin-sriov-dp-admission-controller@sha256:
+ sizeBytes: 339116800
+ - names:
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c052581031d9cb44b7e5a571db1cea25854733a977584a67718100cac56e2160
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:
+ sizeBytes: 338045804
+ - names:
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4aa2cfd65a6d1ae112f591eb59336a05df72b59f6e053c418bfd5424ed372608
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:
+ sizeBytes: 337811610
+ - names:
+ - quay.io/openshift/origin-must-gather@sha256:c42733fdc4d028c582f745822cd5bc4cfb924ebba62e2a9fb410e7bc255fe1f9
+ - quay.io/openshift/origin-must-gather:latest
+ sizeBytes: 335337156
+ - names:
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a501d66461b0f24d2d551ec141617577ed417fdc4bc69db39f919e4e668a2889
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:
+ sizeBytes: 331223794
+ - names:
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:02656bc5f5d78919453f86fa63c6531c6e0f1bbb4f3abcc662b6726b5619acec
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:
+ sizeBytes: 330508411
+ - names:
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:759062d6a339a6915d499fc875f365cc1d3e52ededb6249ac19047b98dac9771
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:
+ sizeBytes: 326516257
+ - names:
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f6746db8ee59600c8c3936d035aa30ad81890de42814ec0fafd12809a0c8eb39
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:
+ sizeBytes: 321353407
+ - names:
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:15d31443dbc6830af67840c6a199e3b93b03168d4d993e453bbacde702d4c25e
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:
+ sizeBytes: 320374187
+ - names:
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:12531b40785d46fde636bedbe93f549c7a9bd5eab146468927ae8347fb9e4aac
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:
+ sizeBytes: 320369930
+ - names:
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cad230fbee655fa6a021a06d0a7e0888f7fec60127e467b18ec6ba93bcfc1d98
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:
+ sizeBytes: 319394632
+ - names:
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b93d895f9b0733c924651a7f2ab3d0bb3854f4202eb55cb2086f13a4ce7aae84
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:
+ sizeBytes: 318520120
+ - names:
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6aece72b8448aaf5533f64674acbddf8b51d21355336807e85e59f3bac25d3e7
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:
+ sizeBytes: 317658369
+ - names:
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4e580c9df1a09ab8e0647fc0e378d792a4c3078b4a06120264ab00917e71e783
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:
+ sizeBytes: 316367713
+ - names:
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fa21f4b6288e4665090901e4904f12b0eae1a23d24fefaa06ba951b2b4ce017f
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:
+ sizeBytes: 316357508
+ - names:
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:34fbc764dec54739ee9d466ef5f56cfdbd6d6e5784534e28da9eb0d1f011ef72
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:
+ sizeBytes: 314684850
+ - names:
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:73f54514498459ae65d33619ab9208248bf217b67c115b74aa6688662e9e111a
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:
+ sizeBytes: 311647548
+ - names:
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8cc28a3ed35c28e3184f33e3a7e8f4755af57ea1c321b3874d18acba711a8104
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:
+ sizeBytes: 309676114
+ - names:
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:904e22f8c8970422b10208295ce05cacd9dc15fa0433806cb1b2035c74db193e
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:
+ sizeBytes: 308789532
+ - names:
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:54e61ec737612af1eb38c01fb7829dcba44f4de3cbcb52f029a95c73b9c2d7fb
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:
+ sizeBytes: 305778268
+ - names:
+ - quay.io/openshift/origin-sriov-network-device-plugin@sha256:cb260fd8bd6914e52c3f2899be14990cab15afdd3620194b68013bea7b0e2826
+ - quay.io/openshift/origin-sriov-network-device-plugin@sha256:
+ sizeBytes: 305304768
+ - names:
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b0bbfa15a10b4308f07490670fb94203a5f63d9ad6b40e55b3bb959e9bf731d2
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:
+ sizeBytes: 305027413
+ - names:
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7211fbc109efa51e20b4be2f5f327e00127076423ef384bde250d909da95257f
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:
+ sizeBytes: 304947009
+ - names:
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d8bdb0125b1287565b5464a6d6181fd9fe706641efd68063bdb317270bd11887
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:
+ sizeBytes: 304593198
+ - names:
+ - quay.io/openshift/origin-sriov-network-operator@sha256:b125f8f9990b7341c704d365ad65b4ef17d3903c6be223a3301c7492b4e16b02
+ - quay.io/openshift/origin-sriov-network-operator@sha256:
+ sizeBytes: 299461624
+ - names:
+ - quay.io/openshift/origin-sriov-network-webhook@sha256:5981ec1e4592b082b0a3e20b95da65863505b602867b0550772bd8f28e1cfd10
+ - quay.io/openshift/origin-sriov-network-webhook@sha256:
+ sizeBytes: 297518232
+ - names:
+ - quay.io/openshift/origin-sriov-network-webhook@sha256:57a58e1b2d8d3bd34555375f8f06b805745010f77854fada89a8219ad0237635
+ - quay.io/openshift/origin-sriov-network-webhook@sha256:
+ sizeBytes: 295059596
+ - names:
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1855db32ca2d846c9ad9af104d2e27ffa41b1054af031ac3d19e412c330fc66e
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:
+ sizeBytes: 278561358
+ - names:
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f80375a7ad29fb23de302b0e82ae460580681d1805829c214bad13e84d94b784
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:
+ sizeBytes: 275648050
+ - names:
+ - quay.io/openshift/origin-sriov-infiniband-cni@sha256:1b2878bcf2834fc94311680c51be12b0035f843cfe17ce1a2cfeae6823e49d14
+ - quay.io/openshift/origin-sriov-infiniband-cni@sha256:
+ sizeBytes: 271428718
+ - names:
+ - quay.io/openshift/origin-sriov-cni@sha256:122413b37f91bfb890f50ad435f93b247ef3a8f6fabb441c634750567d1781b4
+ - quay.io/openshift/origin-sriov-cni@sha256:
+ sizeBytes: 269939282
+ - names:
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:00900d48c5796ecb8c0599ab6a0946347947dbcd2acc883665240c2ec9b33fd5
+ - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:
+ sizeBytes: 269278836
+ nodeInfo:
+ architecture: amd64
+ bootID: bd5c30ee-8db5-49fa-a85a-59f1c78d217e
+ containerRuntimeVersion: cri-o://1.19.0-22.rhaos4.6.gitc0306f1.el8
+ kernelVersion: 4.18.0-193.29.1.el8_2.x86_64
+ kubeProxyVersion: v1.19.0+9f84db3
+ kubeletVersion: v1.19.0+9f84db3
+ machineID: ec4898e8241e4438aabada39dcbb6568
+ operatingSystem: linux
+ osImage: Red Hat Enterprise Linux CoreOS 46.82.202011061621-0 (Ootpa)
+ systemUUID: ec4898e8-241e-4438-aaba-da39dcbb6568
diff --git a/test/e2e/pao/testdata/must-gather/must-gather.bare-metal/quay-io-openshift-kni-performance-addon-operator-must-gather-sha256-09d31edb2171e1fe385df1238ce5a6f7ca3556efa271550a7e98e26dce290128/cluster-scoped-resources/core/nodes/worker1.yaml b/test/e2e/pao/testdata/must-gather/must-gather.bare-metal/quay-io-openshift-kni-performance-addon-operator-must-gather-sha256-09d31edb2171e1fe385df1238ce5a6f7ca3556efa271550a7e98e26dce290128/cluster-scoped-resources/core/nodes/worker1.yaml
new file mode 100755
index 000000000..dcbb8b563
--- /dev/null
+++ b/test/e2e/pao/testdata/must-gather/must-gather.bare-metal/quay-io-openshift-kni-performance-addon-operator-must-gather-sha256-09d31edb2171e1fe385df1238ce5a6f7ca3556efa271550a7e98e26dce290128/cluster-scoped-resources/core/nodes/worker1.yaml
@@ -0,0 +1,490 @@
+apiVersion: v1
+kind: Node
+metadata:
+ annotations:
+ machine.openshift.io/machine: worker1
+ machineconfiguration.openshift.io/currentConfig: rendered-worker-cnf-7c30be8313249d4d85afa8cc3f538b3a
+ machineconfiguration.openshift.io/desiredConfig: rendered-worker-cnf-7c30be8313249d4d85afa8cc3f538b3a
+ machineconfiguration.openshift.io/reason: ""
+ machineconfiguration.openshift.io/state: Done
+ sriovnetwork.openshift.io/state: Idle
+ volumes.kubernetes.io/controller-managed-attach-detach: "true"
+ creationTimestamp: "2020-11-25T09:31:11Z"
+ finalizers:
+ - metal3.io/capbm
+ labels:
+ beta.kubernetes.io/arch: amd64
+ beta.kubernetes.io/os: linux
+ kubernetes.io/arch: amd64
+ kubernetes.io/hostname: worker1
+ kubernetes.io/os: linux
+ node-role.kubernetes.io/worker: ""
+ node-role.kubernetes.io/worker-cnf: ""
+ node.openshift.io/os_id: rhcos
+ sriov: "true"
+ sriov1: "true"
+ managedFields:
+ - apiVersion: v1
+ fieldsType: FieldsV1
+ fieldsV1:
+ f:metadata:
+ f:finalizers:
+ .: {}
+ v:"metal3.io/capbm": {}
+ f:spec:
+ f:providerID: {}
+ manager: machine-controller-manager
+ operation: Update
+ time: "2020-11-25T09:31:12Z"
+ - apiVersion: v1
+ fieldsType: FieldsV1
+ fieldsV1:
+ f:metadata:
+ f:annotations:
+ f:machine.openshift.io/machine: {}
+ manager: nodelink-controller
+ operation: Update
+ time: "2020-11-25T09:31:12Z"
+ - apiVersion: v1
+ fieldsType: FieldsV1
+ fieldsV1:
+ f:metadata:
+ f:annotations:
+ f:k8s.ovn.org/l3-gateway-config: {}
+ f:k8s.ovn.org/node-chassis-id: {}
+ f:k8s.ovn.org/node-join-subnets: {}
+ f:k8s.ovn.org/node-local-nat-ip: {}
+ f:k8s.ovn.org/node-mgmt-port-mac-address: {}
+ f:k8s.ovn.org/node-primary-ifaddr: {}
+ f:k8s.ovn.org/node-subnets: {}
+ manager: ovnkube
+ operation: Update
+ time: "2020-11-25T09:31:54Z"
+ - apiVersion: v1
+ fieldsType: FieldsV1
+ fieldsV1:
+ f:metadata:
+ f:labels:
+ f:sriov: {}
+ f:sriov1: {}
+ manager: oc
+ operation: Update
+ time: "2020-11-26T09:51:05Z"
+ - apiVersion: v1
+ fieldsType: FieldsV1
+ fieldsV1:
+ f:metadata:
+ f:labels:
+ f:node-role.kubernetes.io/worker-cnf: {}
+ manager: kubectl-label
+ operation: Update
+ time: "2021-01-14T13:44:04Z"
+ - apiVersion: v1
+ fieldsType: FieldsV1
+ fieldsV1:
+ f:metadata:
+ f:annotations:
+ f:machineconfiguration.openshift.io/desiredConfig: {}
+ manager: machine-config-controller
+ operation: Update
+ time: "2021-02-09T08:46:19Z"
+ - apiVersion: v1
+ fieldsType: FieldsV1
+ fieldsV1:
+ f:metadata:
+ f:annotations:
+ f:machineconfiguration.openshift.io/currentConfig: {}
+ f:machineconfiguration.openshift.io/reason: {}
+ f:machineconfiguration.openshift.io/state: {}
+ manager: machine-config-daemon
+ operation: Update
+ time: "2021-02-09T08:54:28Z"
+ - apiVersion: v1
+ fieldsType: FieldsV1
+ fieldsV1:
+ f:metadata:
+ f:labels:
+ f:beta.kubernetes.io/arch: {}
+ f:beta.kubernetes.io/os: {}
+ f:spec:
+ f:podCIDR: {}
+ f:podCIDRs:
+ .: {}
+ v:"10.132.4.0/24": {}
+ manager: kube-controller-manager
+ operation: Update
+ time: "2021-02-09T08:54:37Z"
+ - apiVersion: v1
+ fieldsType: FieldsV1
+ fieldsV1:
+ f:metadata:
+ f:annotations:
+ f:sriovnetwork.openshift.io/state: {}
+ manager: sriov-network-config-daemon
+ operation: Update
+ time: "2021-02-09T08:58:16Z"
+ - apiVersion: v1
+ fieldsType: FieldsV1
+ fieldsV1:
+ f:metadata:
+ f:annotations:
+ .: {}
+ f:volumes.kubernetes.io/controller-managed-attach-detach: {}
+ f:labels:
+ .: {}
+ f:kubernetes.io/arch: {}
+ f:kubernetes.io/hostname: {}
+ f:kubernetes.io/os: {}
+ f:node-role.kubernetes.io/worker: {}
+ f:node.openshift.io/os_id: {}
+ f:status:
+ f:addresses:
+ .: {}
+ k:{"type":"Hostname"}:
+ .: {}
+ f:address: {}
+ f:type: {}
+ k:{"type":"InternalIP"}:
+ .: {}
+ f:address: {}
+ f:type: {}
+ f:allocatable:
+ .: {}
+ f:cpu: {}
+ f:ephemeral-storage: {}
+ f:hugepages-1Gi: {}
+ f:hugepages-2Mi: {}
+ f:memory: {}
+ f:openshift.io/mainpfresource: {}
+ f:openshift.io/sriovresource: {}
+ f:pods: {}
+ f:capacity:
+ .: {}
+ f:cpu: {}
+ f:ephemeral-storage: {}
+ f:hugepages-1Gi: {}
+ f:hugepages-2Mi: {}
+ f:memory: {}
+ f:openshift.io/mainpfresource: {}
+ f:openshift.io/sriovresource: {}
+ f:pods: {}
+ f:conditions:
+ .: {}
+ k:{"type":"DiskPressure"}:
+ .: {}
+ f:lastHeartbeatTime: {}
+ f:lastTransitionTime: {}
+ f:message: {}
+ f:reason: {}
+ f:status: {}
+ f:type: {}
+ k:{"type":"MemoryPressure"}:
+ .: {}
+ f:lastHeartbeatTime: {}
+ f:lastTransitionTime: {}
+ f:message: {}
+ f:reason: {}
+ f:status: {}
+ f:type: {}
+ k:{"type":"PIDPressure"}:
+ .: {}
+ f:lastHeartbeatTime: {}
+ f:lastTransitionTime: {}
+ f:message: {}
+ f:reason: {}
+ f:status: {}
+ f:type: {}
+ k:{"type":"Ready"}:
+ .: {}
+ f:lastHeartbeatTime: {}
+ f:lastTransitionTime: {}
+ f:message: {}
+ f:reason: {}
+ f:status: {}
+ f:type: {}
+ f:daemonEndpoints:
+ f:kubeletEndpoint:
+ f:Port: {}
+ f:images: {}
+ f:nodeInfo:
+ f:architecture: {}
+ f:bootID: {}
+ f:containerRuntimeVersion: {}
+ f:kernelVersion: {}
+ f:kubeProxyVersion: {}
+ f:kubeletVersion: {}
+ f:machineID: {}
+ f:operatingSystem: {}
+ f:osImage: {}
+ f:systemUUID: {}
+ manager: kubelet
+ operation: Update
+ time: "2021-02-19T00:29:17Z"
+ name: worker1
+ resourceVersion: "38561991"
+ selfLink: /api/v1/nodes/worker1
+ uid: a81c784d-04ca-4674-b50a-fc34586f92b8
+spec:
+ providerID: baremetalhost:///openshift-machine-api/cnfd1-worker-1
+status:
+ addresses:
+ - address: worker1
+ type: Hostname
+ allocatable:
+ cpu: "75"
+ ephemeral-storage: "429960199479"
+ hugepages-1Gi: 1Gi
+ hugepages-2Mi: 256Mi
+ memory: 392289704Ki
+ openshift.io/mainpfresource: "0"
+ openshift.io/sriovresource: "16"
+ pods: "250"
+ capacity:
+ cpu: "80"
+ ephemeral-storage: 456740Mi
+ hugepages-1Gi: 1Gi
+ hugepages-2Mi: 256Mi
+ memory: 394726824Ki
+ openshift.io/mainpfresource: "0"
+ openshift.io/sriovresource: "16"
+ pods: "250"
+ conditions:
+ - lastHeartbeatTime: "2021-02-19T00:29:17Z"
+ lastTransitionTime: "2021-02-09T08:53:47Z"
+ message: kubelet has sufficient memory available
+ reason: KubeletHasSufficientMemory
+ status: "False"
+ type: MemoryPressure
+ - lastHeartbeatTime: "2021-02-19T00:29:17Z"
+ lastTransitionTime: "2021-02-09T08:53:47Z"
+ message: kubelet has no disk pressure
+ reason: KubeletHasNoDiskPressure
+ status: "False"
+ type: DiskPressure
+ - lastHeartbeatTime: "2021-02-19T00:29:17Z"
+ lastTransitionTime: "2021-02-09T08:53:47Z"
+ message: kubelet has sufficient PID available
+ reason: KubeletHasSufficientPID
+ status: "False"
+ type: PIDPressure
+ - lastHeartbeatTime: "2021-02-19T00:29:17Z"
+ lastTransitionTime: "2021-02-09T08:53:47Z"
+ message: kubelet is posting ready status
+ reason: KubeletReady
+ status: "True"
+ type: Ready
+ daemonEndpoints:
+ kubeletEndpoint:
+ Port: 10250
+ images:
+ - names:
+ - quay.io/openshift-kni/cnf-tests@sha256:23d5b605bd234802b4923a7fb45ca12ae1ebcebc965900a1903593f5e6f6e64e
+ - quay.io/openshift-kni/cnf-tests:4.5
+ sizeBytes: 1097313747
+ - names:
+ - @
+ - registry.redhat.io/redhat/certified-operator-index:
+ sizeBytes: 1053932844
+ - names:
+ - @
+ - registry.redhat.io/redhat/certified-operator-index:
+ sizeBytes: 1053932842
+ - names:
+ - @
+ - registry.redhat.io/redhat/certified-operator-index:
+ sizeBytes: 1053756716
+ - names:
+ - @
+ - registry.redhat.io/redhat/certified-operator-index:
+ sizeBytes: 1053629741
+ - names:
+ - @
+ - registry.redhat.io/redhat/certified-operator-index:
+ sizeBytes: 1053609261
+ - names:
+ - @
+ - registry.redhat.io/redhat/certified-operator-index:
+ sizeBytes: 1053601070
+ - names:
+ - @
+ - registry.redhat.io/redhat/certified-operator-index:
+ sizeBytes: 1053601069
+ - names:
+ - @
+ - registry.redhat.io/redhat/certified-operator-index:
+ sizeBytes: 1053601069
+ - names:
+ - @
+ - registry.redhat.io/redhat/certified-operator-index:
+ sizeBytes: 1053601069
+ - names:
+ - @
+ - registry.redhat.io/redhat/certified-operator-index:
+ sizeBytes: 1053601069
+ - names:
+ - @
+ - registry.redhat.io/redhat/certified-operator-index:
+ sizeBytes: 1053601069
+ - names:
+ - @
+ - registry.redhat.io/redhat/certified-operator-index:
+ sizeBytes: 1053601069
+ - names:
+ - @
+ - registry.redhat.io/redhat/certified-operator-index:
+ sizeBytes: 1053601068
+ - names:
+ - @
+ - registry.redhat.io/redhat/certified-operator-index:
+ sizeBytes: 1053601068
+ - names:
+ - @
+ - registry.redhat.io/redhat/certified-operator-index:
+ sizeBytes: 1053601068
+ - names:
+ - @
+ - registry.redhat.io/redhat/certified-operator-index:
+ sizeBytes: 1053601068
+ - names:
+ - @
+ - registry.redhat.io/redhat/certified-operator-index:
+ sizeBytes: 1053601065
+ - names:
+ - @
+ - registry.redhat.io/redhat/certified-operator-index:
+ sizeBytes: 1053531438
+ - names:
+ - @
+ - registry.redhat.io/redhat/certified-operator-index:
+ sizeBytes: 1053531438
+ - names:
+ - @
+ - registry.redhat.io/redhat/certified-operator-index:
+ sizeBytes: 1053531438
+ - names:
+ - @
+ - registry.redhat.io/redhat/certified-operator-index:
+ sizeBytes: 1053531438
+ - names:
+ - @
+ - registry.redhat.io/redhat/certified-operator-index:
+ sizeBytes: 1053531438
+ - names:
+ - @
+ - registry.redhat.io/redhat/certified-operator-index: