diff --git a/pkg/operator/controller.go b/pkg/operator/controller.go index 54aacdf97a..07ce83855b 100644 --- a/pkg/operator/controller.go +++ b/pkg/operator/controller.go @@ -5,7 +5,6 @@ import ( "fmt" "os" "reflect" - "strings" "time" corev1 "k8s.io/api/core/v1" @@ -60,9 +59,6 @@ const ( wqKindProfile = "profile" wqKindConfigMap = "configmap" wqKindMachineConfigPool = "machineconfigpool" - - tunedConfigMapLabel = "hypershift.openshift.io/tuned-config" - tunedConfigMapConfigKey = "tuned" ) // Controller is the controller implementation for Tuned resources @@ -304,7 +300,18 @@ func (c *Controller) sync(key wqKey) error { // This should only happen in HyperShift klog.V(2).Infof("sync(): wqKindConfigMap %s", key.name) err = c.syncHostedClusterTuneds() - return err + if err != nil { + return err + } + + // If NTO-generated ConfigMap for MachineConfig is deleted, + // we need to recreate it by syncing Profile. + err = c.enqueueProfileUpdates() + if err != nil { + return err + } + + return nil case key.kind == wqKindMachineConfigPool: klog.V(2).Infof("sync(): MachineConfigPool %s", key.name) @@ -402,7 +409,12 @@ func (c *Controller) sync(key wqKey) error { // Tuned CR change can also mean some MachineConfigs the operator created are no longer needed; // removal of these will also rollback host settings such as kernel boot parameters. - if !ntoconfig.InHyperShift() { + if ntoconfig.InHyperShift() { + err = c.pruneMachineConfigsHyperShift() + if err != nil { + return err + } + } else { err = c.pruneMachineConfigs() if err != nil { return err @@ -665,13 +677,24 @@ func (c *Controller) syncProfile(tuned *tunedv1.Tuned, nodeName string) error { } if ntoconfig.InHyperShift() { - // In HyperShift - if profile.Status.TunedProfile == tunedProfileName && profileApplied(profile) { - klog.V(2).Infof("MachineConfigs not yet supported in HyperShift. 
Skipping for profile %s on node %s for NodePool %s", tunedProfileName, nodeName, nodePoolName) + // nodePoolName is the name of the NodePool which the Node corresponding to this Profile + // is a part of. If nodePoolName is the empty string, it either means that Node label + // based matching was used, or we don't know the NodePool, so we should not sync the + // MachineConfigs. + if nodePoolName != "" { + // In HyperShift + if profile.Status.TunedProfile == tunedProfileName && profileApplied(profile) { + // Synchronize MachineConfig only once the (calculated) TuneD profile 'tunedProfileName' + // has been successfully applied. + err := c.syncMachineConfigHyperShift(nodePoolName, profile) + if err != nil { + return fmt.Errorf("failed to update Profile %s: %v", profile.Name, err) + } + } } } else { if mcLabels != nil { - // The Tuned daemon profile 'tunedProfileName' for nodeName matched with MachineConfig + // The TuneD daemon profile 'tunedProfileName' for nodeName matched with MachineConfig // labels set for additional machine configuration. Sync the operator-created // MachineConfig for MachineConfigPools 'pools'. 
if profile.Status.TunedProfile == tunedProfileName && profileApplied(profile) { @@ -730,26 +753,6 @@ func (c *Controller) syncMachineConfig(name string, labels map[string]string, pr } bootcmdline := profile.Status.Bootcmdline - logline := func(bIgn, bCmdline bool, bootcmdline string) string { - var ( - sb strings.Builder - ) - - if bIgn { - sb.WriteString(" ignition") - if bCmdline { - sb.WriteString(" and") - } - } - - if bCmdline { - sb.WriteString(" kernel parameters: [") - sb.WriteString(bootcmdline) - sb.WriteString("]") - } - - return sb.String() - } kernelArguments = util.SplitKernelArguments(bootcmdline) annotations := map[string]string{GeneratedByControllerVersionAnnotationKey: version.Version} @@ -769,7 +772,7 @@ func (c *Controller) syncMachineConfig(name string, labels map[string]string, pr if err != nil { return fmt.Errorf("failed to create MachineConfig %s: %v", mc.ObjectMeta.Name, err) } - klog.Infof("created MachineConfig %s with%s", mc.ObjectMeta.Name, logline(false, len(bootcmdline) != 0, bootcmdline)) + klog.Infof("created MachineConfig %s with%s", mc.ObjectMeta.Name, machineConfigGenerationLogLine(false, len(bootcmdline) != 0, bootcmdline)) return nil } return err @@ -788,7 +791,7 @@ func (c *Controller) syncMachineConfig(name string, labels map[string]string, pr mc.Spec.KernelArguments = kernelArguments mc.Spec.Config = mcNew.Spec.Config - l := logline(false, !kernelArgsEq, bootcmdline) + l := machineConfigGenerationLogLine(false, !kernelArgsEq, bootcmdline) klog.V(2).Infof("syncMachineConfig(): updating MachineConfig %s with%s", mc.ObjectMeta.Name, l) _, err = c.clients.MC.MachineconfigurationV1().MachineConfigs().Update(context.TODO(), mc, metav1.UpdateOptions{}) if err != nil { @@ -800,7 +803,113 @@ func (c *Controller) syncMachineConfig(name string, labels map[string]string, pr return nil } -// pruneMachineConfigs removes any MachineConfigs created by the operator that are not selected by any of the Tuned daemon profile. 
+func (c *Controller) syncMachineConfigHyperShift(nodePoolName string, profile *tunedv1.Profile) error { + var ( + kernelArguments []string + ) + + mcName := MachineConfigPrefix + "-" + nodePoolName + configMapName := mcConfigMapName(nodePoolName) + + if v := profile.ObjectMeta.Annotations[tunedv1.GeneratedByOperandVersionAnnotationKey]; v != os.Getenv("RELEASE_VERSION") { + // This looks like an update triggered by an old (not-yet-upgraded) operand. Ignore it. + klog.Infof("refusing to sync MachineConfig ConfigMap %q due to Profile %q change generated by operand version %q", configMapName, profile.Name, v) + return nil + } + + bootcmdline := profile.Status.Bootcmdline + kernelArguments = util.SplitKernelArguments(bootcmdline) + + annotations := map[string]string{GeneratedByControllerVersionAnnotationKey: version.Version} + + mcConfigMap, err := c.clients.ManagementKube.CoreV1().ConfigMaps(ntoconfig.OperatorNamespace()).Get(context.TODO(), configMapName, metav1.GetOptions{}) + if err != nil { + if errors.IsNotFound(err) { + klog.V(2).Infof("syncMachineConfigHyperShift(): ConfigMap %s not found, creating one", configMapName) + if len(bootcmdline) == 0 { + // Creating a new MachineConfig with empty kernelArguments/Ignition only causes unnecessary node + // reboots. 
+ klog.V(2).Infof("not creating a MachineConfig with empty kernelArguments") + return nil + } + mc := newMachineConfig(mcName, annotations, nil, kernelArguments, nil, nil) + + // put the MC into a ConfigMap and create that instead + mcConfigMap, err = newConfigMapForMachineConfig(configMapName, nodePoolName, mc) + if err != nil { + klog.Errorf("failed to generate ConfigMap %s for MachineConfig %s: %v", configMapName, mc.ObjectMeta.Name, err) + return nil + } + _, err = c.clients.ManagementKube.CoreV1().ConfigMaps(ntoconfig.OperatorNamespace()).Create(context.TODO(), mcConfigMap, metav1.CreateOptions{}) + if err != nil { + return fmt.Errorf("failed to create ConfigMap %s for MachineConfig %s: %v", configMapName, mc.ObjectMeta.Name, err) + } + klog.Infof("created ConfigMap %s for MachineConfig %s with%s", configMapName, mc.ObjectMeta.Name, machineConfigGenerationLogLine(false, len(bootcmdline) != 0, bootcmdline)) + return nil + } + return err + } + + // A ConfigMap with the same name was found + // we need to make sure the contents are up-to-date. 
+ mc, err := getMachineConfigFromConfigMap(mcConfigMap) + if err != nil { + klog.Errorf("failed to get MachineConfig from ConfigMap %s: %v", mcConfigMap.Name, err) + return nil + } + + mcNew := newMachineConfig(mcName, annotations, nil, kernelArguments, nil, nil) + + // Compare kargs between existing and new mcfg + kernelArgsEq := util.StringSlicesEqual(mc.Spec.KernelArguments, kernelArguments) + + // Check ConfigMap labels and annotations + neededLabels := generatedConfigMapLabels(nodePoolName) + cmLabels := mcConfigMap.GetLabels() + neededAnnotations := generatedConfigMapAnnotations(nodePoolName) + cmAnnotations := mcConfigMap.GetAnnotations() + cmLabelsAndAnnotationsCorrect := util.MapOfStringsContains(cmLabels, neededLabels) && util.MapOfStringsContains(cmAnnotations, neededAnnotations) + + // If mcfgs are equivalent don't update + if kernelArgsEq && cmLabelsAndAnnotationsCorrect { + // No update needed + klog.V(2).Infof("syncMachineConfigHyperShift(): MachineConfig %s doesn't need updating", mc.ObjectMeta.Name) + return nil + } + + // If mcfgs are not equivalent do update + mc = mc.DeepCopy() // never update the objects from cache + mc.ObjectMeta.Annotations = mcNew.ObjectMeta.Annotations + mc.Spec.KernelArguments = kernelArguments + mc.Spec.Config = mcNew.Spec.Config + + l := machineConfigGenerationLogLine(false, !kernelArgsEq, bootcmdline) + klog.V(2).Infof("syncMachineConfigHyperShift(): updating MachineConfig %s with%s", mc.ObjectMeta.Name, l) + + newData, err := serializeMachineConfig(mc) + if err != nil { + klog.Errorf("failed to serialize ConfigMap for MachineConfig %s: %v", mc.Name, err) + return nil + } + mcConfigMap.Data[mcConfigMapDataKey] = string(newData) + for k, v := range neededLabels { + mcConfigMap.Labels[k] = v + } + for k, v := range neededAnnotations { + mcConfigMap.Annotations[k] = v + } + + _, err = c.clients.ManagementKube.CoreV1().ConfigMaps(ntoconfig.OperatorNamespace()).Update(context.TODO(), mcConfigMap, metav1.UpdateOptions{}) + if err != nil { + 
return fmt.Errorf("failed to update ConfigMap for MachineConfig %s: %v", mcConfigMap.Name, err) + } + + klog.Infof("updated ConfigMap %s for MachineConfig %s with%s", mcConfigMap.Name, mc.ObjectMeta.Name, l) + + return nil +} + +// pruneMachineConfigs removes any MachineConfigs created by the operator that are not selected by any of the TuneD daemon profile. func (c *Controller) pruneMachineConfigs() error { mcList, err := c.listers.MachineConfigs.List(labels.Everything()) if err != nil { @@ -838,7 +947,47 @@ func (c *Controller) pruneMachineConfigs() error { return nil } -// Get all operator MachineConfig names for all Tuned daemon profiles. +// pruneMachineConfigs removes any MachineConfigs created by the operator that are not selected by any of the TuneD daemon profile. +func (c *Controller) pruneMachineConfigsHyperShift() error { + cmListOptions := metav1.ListOptions{ + LabelSelector: operatorGeneratedMachineConfig + "=true", + } + cmList, err := c.clients.ManagementKube.CoreV1().ConfigMaps(ntoconfig.OperatorNamespace()).List(context.TODO(), cmListOptions) + if err != nil { + return err + } + + mcNames, err := c.getConfigMapNamesForTuned() + if err != nil { + return err + } + + for _, cm := range cmList.Items { + if cm.ObjectMeta.Annotations != nil { + if _, ok := cm.ObjectMeta.Annotations[GeneratedByControllerVersionAnnotationKey]; !ok { + continue + } + // mc's annotations have the controller/operator key + if mcNames[cm.ObjectMeta.Name] { + continue + } + + // This ConfigMap has this operator's annotations and it is not currently used by any + // Tuned CR; remove it and let MCO roll-back any changes + klog.V(2).Infof("pruneMachineConfigsHyperShift(): deleting ConfigMap %s", cm.ObjectMeta.Name) + err = c.clients.ManagementKube.CoreV1().ConfigMaps(ntoconfig.OperatorNamespace()).Delete(context.TODO(), cm.ObjectMeta.Name, metav1.DeleteOptions{}) + if err != nil { + // Unable to delete the ConfigMap + return err + } + klog.Infof("deleted MachineConfig ConfigMap 
%s", cm.ObjectMeta.Name) + } + } + + return nil +} + +// Get all operator MachineConfig names for all TuneD daemon profiles. func (c *Controller) getMachineConfigNamesForTuned() (map[string]bool, error) { tunedList, err := c.listers.TunedResources.List(labels.Everything()) if err != nil { @@ -864,6 +1013,33 @@ func (c *Controller) getMachineConfigNamesForTuned() (map[string]bool, error) { return mcNames, nil } +// Get all operator ConfigMap names for all TuneD daemon profiles. +func (c *Controller) getConfigMapNamesForTuned() (map[string]bool, error) { + // In HyperShift, we only consider the default profile and + // the Tuned profiles from Tuneds referenced in this Nodes NodePool spec. + tunedList, err := c.listers.TunedResources.List(labels.Everything()) + if err != nil { + return nil, fmt.Errorf("failed to list Tuneds: %v", err) + } + + cmNames := map[string]bool{} + for _, tuned := range tunedList { + nodePoolName := tuned.Labels[hypershiftNodePoolNameLabel] + for _, recommend := range tuned.Spec.Recommend { + // nodePoolName may be an empty string in the case of the default profile + if recommend.Profile == nil || recommend.Match != nil || nodePoolName == "" { + continue + } + + // recommend.Profile not nil, recommend.Match is nil, and we have nodePoolName + cmName := mcConfigMapName(nodePoolName) + cmNames[cmName] = true + } + } + + return cmNames, nil +} + func getDefaultTunedRefs(tuned *tunedv1.Tuned) []metav1.OwnerReference { return []metav1.OwnerReference{ *metav1.NewControllerRef(tuned, tunedv1.SchemeGroupVersion.WithKind("Tuned")), @@ -1079,19 +1255,31 @@ func (c *Controller) run(ctx context.Context) { tpInformer.Informer().HasSynced, } - var configMapInformerFactory kubeinformers.SharedInformerFactory + var tunedConfigMapInformerFactory kubeinformers.SharedInformerFactory + var mcfgConfigMapInformerFactory kubeinformers.SharedInformerFactory var mcfgInformerFactory mcfginformers.SharedInformerFactory if ntoconfig.InHyperShift() { - labelOptions := 
kubeinformers.WithTweakListOptions(func(opts *metav1.ListOptions) { - opts.LabelSelector = tunedConfigMapLabel + "=true" - }) - configMapInformerFactory = kubeinformers.NewSharedInformerFactoryWithOptions(c.clients.ManagementKube, ntoconfig.ResyncPeriod(), kubeinformers.WithNamespace(ntoconfig.OperatorNamespace()), labelOptions) - - configMapInformer := configMapInformerFactory.Core().V1().ConfigMaps() - c.listers.ConfigMaps = configMapInformer.Lister().ConfigMaps(ntoconfig.OperatorNamespace()) - configMapInformer.Informer().AddEventHandler(c.informerEventHandler(wqKey{kind: wqKindConfigMap})) - InformerFuncs = append(InformerFuncs, configMapInformer.Informer().HasSynced) - + tunedConfigMapInformerFactory = kubeinformers.NewSharedInformerFactoryWithOptions(c.clients.ManagementKube, + ntoconfig.ResyncPeriod(), + kubeinformers.WithNamespace(ntoconfig.OperatorNamespace()), + kubeinformers.WithTweakListOptions(func(opts *metav1.ListOptions) { + opts.LabelSelector = tunedConfigMapLabel + "=true" + })) + tunedConfigMapInformer := tunedConfigMapInformerFactory.Core().V1().ConfigMaps() + c.listers.ConfigMaps = tunedConfigMapInformer.Lister().ConfigMaps(ntoconfig.OperatorNamespace()) + tunedConfigMapInformer.Informer().AddEventHandler(c.informerEventHandler(wqKey{kind: wqKindConfigMap})) + + mcfgConfigMapInformerFactory = kubeinformers.NewSharedInformerFactoryWithOptions(c.clients.ManagementKube, + ntoconfig.ResyncPeriod(), + kubeinformers.WithNamespace(ntoconfig.OperatorNamespace()), + kubeinformers.WithTweakListOptions(func(opts *metav1.ListOptions) { + opts.LabelSelector = operatorGeneratedMachineConfig + "=true" + })) + mcfgConfigMapInformer := mcfgConfigMapInformerFactory.Core().V1().ConfigMaps() + + mcfgConfigMapInformer.Informer().AddEventHandler(c.informerEventHandler(wqKey{kind: wqKindConfigMap})) + + InformerFuncs = append(InformerFuncs, 
tunedConfigMapInformer.Informer().HasSynced, mcfgConfigMapInformer.Informer().HasSynced) } else { mcfgInformerFactory = mcfginformers.NewSharedInformerFactory(c.clients.MC, ntoconfig.ResyncPeriod()) mcInformer := mcfgInformerFactory.Machineconfiguration().V1().MachineConfigs() @@ -1101,7 +1289,7 @@ func (c *Controller) run(ctx context.Context) { mcpInformer := mcfgInformerFactory.Machineconfiguration().V1().MachineConfigPools() c.listers.MachineConfigPools = mcpInformer.Lister() mcpInformer.Informer().AddEventHandler(c.informerEventHandler(wqKey{kind: wqKindMachineConfigPool})) - InformerFuncs = append(InformerFuncs, mcInformer.Informer().HasSynced, mcInformer.Informer().HasSynced) + InformerFuncs = append(InformerFuncs, mcInformer.Informer().HasSynced, mcpInformer.Informer().HasSynced) } configInformerFactory.Start(ctx.Done()) // ClusterOperator @@ -1109,7 +1297,8 @@ func (c *Controller) run(ctx context.Context) { tunedInformerFactory.Start(ctx.Done()) // Tuned/Profile if ntoconfig.InHyperShift() { - configMapInformerFactory.Start(ctx.Done()) + tunedConfigMapInformerFactory.Start(ctx.Done()) + mcfgConfigMapInformerFactory.Start(ctx.Done()) } else { mcfgInformerFactory.Start(ctx.Done()) // MachineConfig/MachineConfigPool } diff --git a/pkg/operator/hypershift.go b/pkg/operator/hypershift.go index 07d57a99ac..48b7066e0b 100644 --- a/pkg/operator/hypershift.go +++ b/pkg/operator/hypershift.go @@ -9,13 +9,17 @@ import ( "reflect" "strings" + tunedv1 "github.com/openshift/cluster-node-tuning-operator/pkg/apis/tuned/v1" + ntoconfig "github.com/openshift/cluster-node-tuning-operator/pkg/config" + "github.com/openshift/cluster-node-tuning-operator/version" + mcfgv1 "github.com/openshift/machine-config-operator/pkg/apis/machineconfiguration.openshift.io/v1" + corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + serializer "k8s.io/apimachinery/pkg/runtime/serializer/json" 
yamlutil "k8s.io/apimachinery/pkg/util/yaml" "k8s.io/klog/v2" - - tunedv1 "github.com/openshift/cluster-node-tuning-operator/pkg/apis/tuned/v1" - ntoconfig "github.com/openshift/cluster-node-tuning-operator/pkg/config" ) const ( @@ -23,6 +27,13 @@ const ( hypershiftNodeOwnerKindLabel = "cluster.x-k8s.io/owner-kind" hypershiftNodePoolLabel = "hypershift.openshift.io/nodePool" hypershiftNodePoolNameLabel = "hypershift.openshift.io/nodePoolName" + + tunedConfigMapLabel = "hypershift.openshift.io/tuned-config" + tunedConfigMapConfigKey = "tuned" + + operatorGeneratedMachineConfig = "hypershift.openshift.io/nto-generated-machine-config" + mcConfigMapDataKey = "config" + generatedConfigMapPrefix = "nto-mc-" ) // syncHostedClusterTuneds synchronizes Tuned objects embedded in ConfigMaps @@ -182,6 +193,10 @@ func parseTunedManifests(data []byte, nodePoolName string) ([]tunedv1.Tuned, err } } +func mcConfigMapName(name string) string { + return generatedConfigMapPrefix + name +} + func hashStruct(o interface{}) string { hash := fnv.New32a() hash.Write([]byte(fmt.Sprintf("%v", o))) @@ -199,3 +214,80 @@ func parseNamespacedName(namespacedName string) string { } return parts[0] } + +func getMachineConfigFromConfigMap(config *corev1.ConfigMap) (*mcfgv1.MachineConfig, error) { + scheme := runtime.NewScheme() + mcfgv1.Install(scheme) + + YamlSerializer := serializer.NewSerializerWithOptions( + serializer.DefaultMetaFactory, scheme, scheme, + serializer.SerializerOptions{Yaml: true, Pretty: true, Strict: true}, + ) + + manifest := []byte(config.Data[mcConfigMapDataKey]) + cr, _, err := YamlSerializer.Decode(manifest, nil, nil) + if err != nil { + return nil, fmt.Errorf("error decoding MachineConfig from ConfigMap: %s, %v", config.Name, err) + } + + mcObj, ok := cr.(*mcfgv1.MachineConfig) + if !ok { + return nil, fmt.Errorf("unexpected type in ConfigMap: %T, must be MachineConfig", cr) + } + return mcObj, nil +} + +func newConfigMapForMachineConfig(configMapName string, nodePoolName 
string, mc *mcfgv1.MachineConfig) (*corev1.ConfigMap, error) { + mcManifest, err := serializeMachineConfig(mc) + if err != nil { + return nil, fmt.Errorf("failed to serialize ConfigMap for MachineConfig %s: %v", mc.Name, err) + } + + ret := &corev1.ConfigMap{ + TypeMeta: metav1.TypeMeta{ + Kind: "ConfigMap", + APIVersion: corev1.SchemeGroupVersion.String(), + }, + ObjectMeta: metav1.ObjectMeta{ + Name: configMapName, + Namespace: ntoconfig.OperatorNamespace(), + Labels: generatedConfigMapLabels(nodePoolName), + Annotations: map[string]string{ + GeneratedByControllerVersionAnnotationKey: version.Version, + }, + }, + Data: map[string]string{ + mcConfigMapDataKey: string(mcManifest), + }, + } + + return ret, nil +} + +func serializeMachineConfig(mc *mcfgv1.MachineConfig) ([]byte, error) { + scheme := runtime.NewScheme() + mcfgv1.Install(scheme) + + YamlSerializer := serializer.NewSerializerWithOptions( + serializer.DefaultMetaFactory, scheme, scheme, + serializer.SerializerOptions{Yaml: true, Pretty: true, Strict: true}, + ) + buff := bytes.Buffer{} + if err := YamlSerializer.Encode(mc, &buff); err != nil { + return nil, fmt.Errorf("failed to encode ConfigMap for MachineConfig %s: %v", mc.Name, err) + } + return buff.Bytes(), nil +} + +func generatedConfigMapLabels(nodePoolName string) map[string]string { + return map[string]string{ + operatorGeneratedMachineConfig: "true", + hypershiftNodePoolLabel: nodePoolName, + } +} + +func generatedConfigMapAnnotations(nodePoolName string) map[string]string { + return map[string]string{ + hypershiftNodePoolLabel: nodePoolName, + } +} diff --git a/pkg/operator/mc.go b/pkg/operator/mc.go index d7c70bad72..d5befd2265 100644 --- a/pkg/operator/mc.go +++ b/pkg/operator/mc.go @@ -55,6 +55,7 @@ func newMachineConfig(name string, annotations map[string]string, labels map[str return &mcfgv1.MachineConfig{ TypeMeta: metav1.TypeMeta{ APIVersion: mcfgv1.SchemeGroupVersion.String(), + Kind: "MachineConfig", }, ObjectMeta: metav1.ObjectMeta{ 
Name: name, @@ -260,3 +261,24 @@ func (pc *ProfileCalculator) getPrimaryPoolForNode(node *corev1.Node) (*mcfgv1.M } return pools[0], nil } + +func machineConfigGenerationLogLine(bIgn, bCmdline bool, bootcmdline string) string { + var ( + sb strings.Builder + ) + + if bIgn { + sb.WriteString(" ignition") + if bCmdline { + sb.WriteString(" and") + } + } + + if bCmdline { + sb.WriteString(" kernel parameters: [") + sb.WriteString(bootcmdline) + sb.WriteString("]") + } + + return sb.String() +} diff --git a/pkg/operator/profilecalculator.go b/pkg/operator/profilecalculator.go index edb8d0f8c6..14c42a4cc6 100644 --- a/pkg/operator/profilecalculator.go +++ b/pkg/operator/profilecalculator.go @@ -249,14 +249,14 @@ func (pc *ProfileCalculator) calculateProfileHyperShift(nodeName string) (string for _, recommend := range tunedRecommend(tunedList) { // Start with node/pod label based matching if recommend.Match != nil && pc.profileMatches(recommend.Match, nodeName) { - klog.V(2).Infof("calculateProfileHyperShift: node / pod label matching used. node: %s, tunedProfileName: %s, nodePoolName: %s, operand: %v", nodeName, *recommend.Profile, "", recommend.Operand) + klog.V(3).Infof("calculateProfileHyperShift: node / pod label matching used for node: %s, tunedProfileName: %s, nodePoolName: %s, operand: %v", nodeName, *recommend.Profile, "", recommend.Operand) return *recommend.Profile, "", recommend.Operand, nil } // If recommend.Match is empty, NodePool based matching is assumed // or this is the default profile if recommend.Match == nil { - klog.V(2).Infof("calculateProfileHyperShift: NodePool based matching used. 
node: %s, tunedProfileName: %s, nodePoolName: %s", nodeName, *recommend.Profile, nodePoolName) + klog.V(3).Infof("calculateProfileHyperShift: NodePool based matching or default profile used for node: %s, tunedProfileName: %s, nodePoolName: %s", nodeName, *recommend.Profile, nodePoolName) return *recommend.Profile, nodePoolName, recommend.Operand, nil } } @@ -519,7 +519,7 @@ func (pc *ProfileCalculator) tunedsUsePodLabels(tunedSlice []*tunedv1.Tuned) boo // getNodePoolNameForNode returns the NodePool name from a label on the hosted cluster Node func (pc *ProfileCalculator) getNodePoolNameForNode(node *corev1.Node) (string, error) { nodePoolName := node.GetLabels()[hypershiftNodePoolLabel] - klog.Infof("calculated nodePoolName: %s for node %s", nodePoolName, node.Name) + klog.V(3).Infof("calculated nodePoolName: %s for node %s", nodePoolName, node.Name) return nodePoolName, nil } diff --git a/pkg/util/strings_map.go b/pkg/util/strings_map.go index 250ad0b0ce..8b0aaa0d6e 100644 --- a/pkg/util/strings_map.go +++ b/pkg/util/strings_map.go @@ -26,3 +26,20 @@ func MapOfStringsEqual(a, b map[string]string) bool { return true } + +// MapOfStringsContains returns true if map of strings a contains all +// entries of the map of strings b. Use MapOfStringsEqual for checking +// if two maps of strings are equal. +// For example +// MapOfStringsContains(map[string]string{"a": "a","b": "b"}, map[string]string{"a": "a"}) +// will return true, but +// MapOfStringsContains(map[string]string{"a": "a"}, map[string]string{"a": "a","b": "b"}) +// will return false. +func MapOfStringsContains(a, b map[string]string) bool { + for k, v := range b { + if w, ok := a[k]; !ok || v != w { + return false + } + } + return true +}