diff --git a/bootstrap/bootstrap-pod.yaml b/bootstrap/bootstrap-pod.yaml
index 328829f77..62af10306 100644
--- a/bootstrap/bootstrap-pod.yaml
+++ b/bootstrap/bootstrap-pod.yaml
@@ -16,7 +16,7 @@ spec:
     - "--enable-auto-update=false"
     - "--enable-default-cluster-version=false"
     - "--listen="
-    - "--v=5"
+    - "--v=2"
    - "--kubeconfig=/etc/kubernetes/kubeconfig"
    securityContext:
      privileged: true
diff --git a/install/0000_00_cluster-version-operator_03_deployment.yaml b/install/0000_00_cluster-version-operator_03_deployment.yaml
index f197ba956..570feb012 100644
--- a/install/0000_00_cluster-version-operator_03_deployment.yaml
+++ b/install/0000_00_cluster-version-operator_03_deployment.yaml
@@ -34,7 +34,7 @@ spec:
         - "--listen=0.0.0.0:9099"
         - "--serving-cert-file=/etc/tls/serving-cert/tls.crt"
         - "--serving-key-file=/etc/tls/serving-cert/tls.key"
-        - "--v=5"
+        - "--v=2"
        resources:
          requests:
            cpu: 20m
diff --git a/lib/resourceapply/apiext.go b/lib/resourceapply/apiext.go
index 93e0068df..13d6f6b9d 100644
--- a/lib/resourceapply/apiext.go
+++ b/lib/resourceapply/apiext.go
@@ -35,7 +35,7 @@ func ApplyCustomResourceDefinitionv1(ctx context.Context, client apiextclientv1.
     }
 
     if reconciling {
-        klog.V(4).Infof("Updating CRD %s due to diff: %v", required.Name, cmp.Diff(existing, required))
+        klog.V(2).Infof("Updating CRD %s due to diff: %v", required.Name, cmp.Diff(existing, required))
     }
 
     actual, err := client.CustomResourceDefinitions().Update(ctx, existing, metav1.UpdateOptions{})
diff --git a/lib/resourceapply/apps.go b/lib/resourceapply/apps.go
index a4ec9dc45..32fcd26eb 100644
--- a/lib/resourceapply/apps.go
+++ b/lib/resourceapply/apps.go
@@ -35,7 +35,7 @@ func ApplyDeploymentv1(ctx context.Context, client appsclientv1.DeploymentsGette
         return existing, false, nil
     }
     if reconciling {
-        klog.V(4).Infof("Updating Deployment %s/%s due to diff: %v", required.Namespace, required.Name, cmp.Diff(existing, required))
+        klog.V(2).Infof("Updating Deployment %s/%s due to diff: %v", required.Namespace, required.Name, cmp.Diff(existing, required))
     }
 
     actual, err := client.Deployments(required.Namespace).Update(ctx, existing, metav1.UpdateOptions{})
@@ -65,7 +65,7 @@ func ApplyDaemonSetv1(ctx context.Context, client appsclientv1.DaemonSetsGetter,
     }
 
     if reconciling {
-        klog.V(4).Infof("Updating DaemonSet %s/%s due to diff: %v", required.Namespace, required.Name, cmp.Diff(existing, required))
+        klog.V(2).Infof("Updating DaemonSet %s/%s due to diff: %v", required.Namespace, required.Name, cmp.Diff(existing, required))
     }
 
     actual, err := client.DaemonSets(required.Namespace).Update(ctx, existing, metav1.UpdateOptions{})
diff --git a/lib/resourceapply/batch.go b/lib/resourceapply/batch.go
index 16d378eec..b013c3cc5 100644
--- a/lib/resourceapply/batch.go
+++ b/lib/resourceapply/batch.go
@@ -36,7 +36,7 @@ func ApplyJobv1(ctx context.Context, client batchclientv1.JobsGetter, required *
     }
 
     if reconciling {
-        klog.V(4).Infof("Updating Job %s/%s due to diff: %v", required.Namespace, required.Name, cmp.Diff(existing, required))
+        klog.V(2).Infof("Updating Job %s/%s due to diff: %v", required.Namespace, required.Name, cmp.Diff(existing, required))
     }
 
     actual, err := client.Jobs(required.Namespace).Update(ctx, existing, metav1.UpdateOptions{})
diff --git a/lib/resourceapply/core.go b/lib/resourceapply/core.go
index 2ed75c31b..aad34657e 100644
--- a/lib/resourceapply/core.go
+++ b/lib/resourceapply/core.go
@@ -38,7 +38,7 @@ func ApplyNamespacev1(ctx context.Context, client coreclientv1.NamespacesGetter,
         return existing, false, nil
     }
     if reconciling {
-        klog.V(4).Infof("Updating Namespace %s due to diff: %v", required.Name, cmp.Diff(existing, required))
+        klog.V(2).Infof("Updating Namespace %s due to diff: %v", required.Name, cmp.Diff(existing, required))
     }
 
     actual, err := client.Namespaces().Update(ctx, existing, metav1.UpdateOptions{})
@@ -75,7 +75,7 @@ func ApplyServicev1(ctx context.Context, client coreclientv1.ServicesGetter, req
     existing.Spec.Selector = required.Spec.Selector
 
     if reconciling {
-        klog.V(4).Infof("Updating Service %s/%s due to diff: %v", required.Namespace, required.Name, cmp.Diff(existing, required))
+        klog.V(2).Infof("Updating Service %s/%s due to diff: %v", required.Namespace, required.Name, cmp.Diff(existing, required))
     }
 
     actual, err := client.Services(required.Namespace).Update(ctx, existing, metav1.UpdateOptions{})
@@ -105,7 +105,7 @@ func ApplyServiceAccountv1(ctx context.Context, client coreclientv1.ServiceAccou
     }
 
     if reconciling {
-        klog.V(4).Infof("Updating ServiceAccount %s/%s due to diff: %v", required.Namespace, required.Name, cmp.Diff(existing, required))
+        klog.V(2).Infof("Updating ServiceAccount %s/%s due to diff: %v", required.Namespace, required.Name, cmp.Diff(existing, required))
     }
 
     actual, err := client.ServiceAccounts(required.Namespace).Update(ctx, existing, metav1.UpdateOptions{})
@@ -135,7 +135,7 @@ func ApplyConfigMapv1(ctx context.Context, client coreclientv1.ConfigMapsGetter,
     }
 
     if reconciling {
-        klog.V(4).Infof("Updating ConfigMap %s/%s due to diff: %v", required.Namespace, required.Name, cmp.Diff(existing, required))
+        klog.V(2).Infof("Updating ConfigMap %s/%s due to diff: %v", required.Namespace, required.Name, cmp.Diff(existing, required))
     }
 
     actual, err := client.ConfigMaps(required.Namespace).Update(ctx, existing, metav1.UpdateOptions{})
diff --git a/lib/resourceapply/cv.go b/lib/resourceapply/cv.go
index 8f4edb6af..62a2a4c08 100644
--- a/lib/resourceapply/cv.go
+++ b/lib/resourceapply/cv.go
@@ -36,7 +36,7 @@ func ApplyClusterVersionFromCache(ctx context.Context, lister configlistersv1.Cl
         return existing, false, nil
     }
 
-    klog.V(4).Infof("Updating ClusterVersion %s due to diff: %v", required.Name, cmp.Diff(existing, required))
+    klog.V(2).Infof("Updating ClusterVersion %s due to diff: %v", required.Name, cmp.Diff(existing, required))
     actual, err := client.ClusterVersions().Update(ctx, existing, metav1.UpdateOptions{})
     return actual, true, err
diff --git a/lib/resourceapply/imagestream.go b/lib/resourceapply/imagestream.go
index 9a5113c07..a00edcfb0 100644
--- a/lib/resourceapply/imagestream.go
+++ b/lib/resourceapply/imagestream.go
@@ -34,7 +34,7 @@ func ApplyImageStreamv1(ctx context.Context, client imageclientv1.ImageStreamsGe
     }
 
     if reconciling {
-        klog.V(4).Infof("Updating Namespace %s due to diff: %v", required.Name, cmp.Diff(existing, required))
+        klog.V(2).Infof("Updating ImageStream %s due to diff: %v", required.Name, cmp.Diff(existing, required))
     }
     actual, err := client.ImageStreams(required.Namespace).Update(ctx, existing, metav1.UpdateOptions{})
     return actual, true, err
diff --git a/lib/resourceapply/rbac.go b/lib/resourceapply/rbac.go
index 4f7fd184f..2e1a141c5 100644
--- a/lib/resourceapply/rbac.go
+++ b/lib/resourceapply/rbac.go
@@ -35,7 +35,7 @@ func ApplyClusterRoleBindingv1(ctx context.Context, client rbacclientv1.ClusterR
         return existing, false, nil
     }
     if reconciling {
-        klog.V(4).Infof("Updating ClusterRoleBinding %s due to diff: %v", required.Name, cmp.Diff(existing, required))
+        klog.V(2).Infof("Updating ClusterRoleBinding %s due to diff: %v", required.Name, cmp.Diff(existing, required))
     }
 
     actual, err := client.ClusterRoleBindings().Update(ctx, existing, metav1.UpdateOptions{})
@@ -64,7 +64,7 @@ func ApplyClusterRolev1(ctx context.Context, client rbacclientv1.ClusterRolesGet
         return existing, false, nil
     }
     if reconciling {
-        klog.V(4).Infof("Updating ClusterRole %s due to diff: %v", required.Name, cmp.Diff(existing, required))
+        klog.V(2).Infof("Updating ClusterRole %s due to diff: %v", required.Name, cmp.Diff(existing, required))
     }
 
     actual, err := client.ClusterRoles().Update(ctx, existing, metav1.UpdateOptions{})
@@ -93,7 +93,7 @@ func ApplyRoleBindingv1(ctx context.Context, client rbacclientv1.RoleBindingsGet
         return existing, false, nil
     }
     if reconciling {
-        klog.V(4).Infof("Updating RoleBinding %s/%s due to diff: %v", required.Namespace, required.Name, cmp.Diff(existing, required))
+        klog.V(2).Infof("Updating RoleBinding %s/%s due to diff: %v", required.Namespace, required.Name, cmp.Diff(existing, required))
     }
 
     actual, err := client.RoleBindings(required.Namespace).Update(ctx, existing, metav1.UpdateOptions{})
@@ -122,7 +122,7 @@ func ApplyRolev1(ctx context.Context, client rbacclientv1.RolesGetter, required
         return existing, false, nil
     }
     if reconciling {
-        klog.V(4).Infof("Updating Role %s/%s due to diff: %v", required.Namespace, required.Name, cmp.Diff(existing, required))
+        klog.V(2).Infof("Updating Role %s/%s due to diff: %v", required.Namespace, required.Name, cmp.Diff(existing, required))
     }
 
     actual, err := client.Roles(required.Namespace).Update(ctx, existing, metav1.UpdateOptions{})
diff --git a/lib/resourceapply/security.go b/lib/resourceapply/security.go
index c4b2b54cc..ec839e6ba 100644
--- a/lib/resourceapply/security.go
+++ b/lib/resourceapply/security.go
@@ -36,7 +36,7 @@ func ApplySecurityContextConstraintsv1(ctx context.Context, client securityclien
     }
 
     if reconciling {
-        klog.V(4).Infof("Updating SCC %s due to diff: %v", required.Name, cmp.Diff(existing, required))
+        klog.V(2).Infof("Updating SCC %s due to diff: %v", required.Name, cmp.Diff(existing, required))
     }
 
     actual, err := client.SecurityContextConstraints().Update(ctx, existing, metav1.UpdateOptions{})
diff --git a/lib/resourcebuilder/batch.go b/lib/resourcebuilder/batch.go
index ee6c6a241..95ed3f42f 100644
--- a/lib/resourcebuilder/batch.go
+++ b/lib/resourcebuilder/batch.go
@@ -19,7 +19,7 @@ func WaitForJobCompletion(ctx context.Context, client batchclientv1.JobsGetter,
             klog.Error(err)
             return false, nil
         } else if !done {
-            klog.V(4).Infof("Job %s in namespace %s is not ready, continuing to wait.", job.ObjectMeta.Name, job.ObjectMeta.Namespace)
+            klog.V(2).Infof("Job %s in namespace %s is not ready, continuing to wait.", job.ObjectMeta.Name, job.ObjectMeta.Namespace)
             return false, nil
         }
         return true, nil
@@ -50,7 +50,7 @@ func checkJobHealth(ctx context.Context, client batchclientv1.JobsGetter, job *b
     // the Job will 'Active == 0' if and only if it exceeds the deadline or if the update image could not be pulled.
     // Failed jobs will be recreated in the next run.
     if j.Status.Active == 0 {
-        klog.V(4).Infof("No active pods for job %s in namespace %s", job.Name, job.Namespace)
+        klog.V(2).Infof("No active pods for job %s in namespace %s", job.Name, job.Namespace)
         failed, reason, message := hasJobFailed(job)
         // If there is more than one failed job pod then get the cause for failure
         if j.Status.Failed > 0 {
@@ -68,7 +68,7 @@ func checkJobHealth(ctx context.Context, client batchclientv1.JobsGetter, job *b
             if reason == "DeadlineExceeded" {
                 return false, fmt.Errorf("deadline exceeded, reason: %q, message: %q", reason, message)
             } else {
-                klog.V(4).Infof("Ignoring job %s in namespace %s with condition Failed=True because %s: %s", job.Name, job.Namespace, reason, message)
+                klog.V(2).Infof("Ignoring job %s in namespace %s with condition Failed=True because %s: %s", job.Name, job.Namespace, reason, message)
             }
         }
     }
diff --git a/lib/resourcedelete/helper.go b/lib/resourcedelete/helper.go
index a0c312f25..5508b67b7 100644
--- a/lib/resourcedelete/helper.go
+++ b/lib/resourcedelete/helper.go
@@ -62,7 +62,7 @@ func SetDeleteRequested(obj metav1.Object, resource Resource) {
     deletedResources.lock.Lock()
     deletedResources.m[resource] = times
     deletedResources.lock.Unlock()
-    klog.V(4).Infof("Delete requested for %s.", resource)
+    klog.V(2).Infof("Delete requested for %s.", resource)
 }
 
 // SetDeleteVerified updates map entry to indicate resource deletion has been completed.
@@ -73,7 +73,7 @@ func SetDeleteVerified(resource Resource) {
     deletedResources.lock.Lock()
     deletedResources.m[resource] = times
     deletedResources.lock.Unlock()
-    klog.V(4).Infof("Delete of %s completed.", resource)
+    klog.V(2).Infof("Delete of %s completed.", resource)
 }
 
 // getDeleteTimes returns map entry for given resource.
@@ -111,9 +111,9 @@ func GetDeleteProgress(resource Resource, getError error) (bool, error) {
             SetDeleteVerified(resource)
         } else {
             if deletionTimes.Expected != nil {
-                klog.V(4).Infof("Delete of %s is expected by %s.", resource, deletionTimes.Expected.String())
+                klog.V(2).Infof("Delete of %s is expected by %s.", resource, deletionTimes.Expected.String())
             } else {
-                klog.V(4).Infof("Delete of %s has already been requested.", resource)
+                klog.V(2).Infof("Delete of %s has already been requested.", resource)
             }
         }
     }
diff --git a/pkg/autoupdate/autoupdate.go b/pkg/autoupdate/autoupdate.go
index 5b752863b..2a8ce8cf2 100644
--- a/pkg/autoupdate/autoupdate.go
+++ b/pkg/autoupdate/autoupdate.go
@@ -153,9 +153,9 @@ func (ctrl *Controller) handleErr(err error, key interface{}) {
 
 func (ctrl *Controller) sync(ctx context.Context, key string) error {
     startTime := time.Now()
-    klog.V(4).Infof("Started syncing auto-updates %q (%v)", key, startTime)
+    klog.V(2).Infof("Started syncing auto-updates %q (%v)", key, startTime)
     defer func() {
-        klog.V(4).Infof("Finished syncing auto-updates %q (%v)", key, time.Since(startTime))
+        klog.V(2).Infof("Finished syncing auto-updates %q (%v)", key, time.Since(startTime))
     }()
 
     clusterversion, err := ctrl.cvLister.Get(ctrl.name)
diff --git a/pkg/cincinnati/cincinnati.go b/pkg/cincinnati/cincinnati.go
index fb3abfea9..53f43380d 100644
--- a/pkg/cincinnati/cincinnati.go
+++ b/pkg/cincinnati/cincinnati.go
@@ -84,16 +84,16 @@ func (c Client) GetUpdates(ctx context.Context, uri *url.URL, arch string, chann
     req.Header.Add("Accept", GraphMediaType)
     if c.transport != nil && c.transport.TLSClientConfig != nil {
         if c.transport.TLSClientConfig.ClientCAs == nil {
-            klog.V(5).Infof("Using a root CA pool with 0 root CA subjects to request updates from %s", uri)
+            klog.V(2).Infof("Using a root CA pool with 0 root CA subjects to request updates from %s", uri)
         } else {
-            klog.V(5).Infof("Using a root CA pool with %n root CA subjects to request updates from %s", len(c.transport.TLSClientConfig.RootCAs.Subjects()), uri)
+            klog.V(2).Infof("Using a root CA pool with %d root CA subjects to request updates from %s", len(c.transport.TLSClientConfig.RootCAs.Subjects()), uri)
         }
     }
 
     if c.transport != nil && c.transport.Proxy != nil {
         proxy, err := c.transport.Proxy(req)
         if err == nil && proxy != nil {
-            klog.V(5).Infof("Using proxy %s to request updates from %s", proxy.Host, uri)
+            klog.V(2).Infof("Using proxy %s to request updates from %s", proxy.Host, uri)
         }
     }
diff --git a/pkg/clusterconditions/cache/cache.go b/pkg/clusterconditions/cache/cache.go
index 393ef0659..ec772d2b2 100644
--- a/pkg/clusterconditions/cache/cache.go
+++ b/pkg/clusterconditions/cache/cache.go
@@ -116,7 +116,7 @@ func (c *Cache) Match(ctx context.Context, condition *configv1.ClusterCondition)
         if thiefResult, ok := c.MatchResults[thiefKey]; ok {
             detail = fmt.Sprintf(" (last evaluated on %s)", thiefResult.When)
         }
-        klog.V(4).Infof("%s is the most stale cached cluster-condition match entry, but it is too fresh%s. However, we don't have a cached evaluation for %s, so attempt to evaluate that now.", thiefKey, detail, key)
+        klog.V(2).Infof("%s is the most stale cached cluster-condition match entry, but it is too fresh%s. However, we don't have a cached evaluation for %s, so attempt to evaluate that now.", thiefKey, detail, key)
     }
 
     // if we ended up stealing this Match call, log that, to make contention more clear
@@ -127,7 +127,7 @@ func (c *Cache) Match(ctx context.Context, condition *configv1.ClusterCondition)
         } else {
             reason = fmt.Sprintf("its last evaluation completed %s ago", now.Sub(thiefResult.When))
         }
-        klog.V(4).Infof("%s is stealing this cluster-condition match call for %s, because %s", thiefKey, key, reason)
+        klog.V(2).Infof("%s is stealing this cluster-condition match call for %s, because %s", thiefKey, key, reason)
     }
 
     match, err := c.Condition.Match(ctx, targetCondition)
@@ -160,7 +160,7 @@ func (c *Cache) expireStaleMatchResults(ctx context.Context, now time.Time) {
             aspect = "queued request"
         }
         if age > c.Expiration {
-            klog.V(4).Infof("pruning %q from the condition cache, as the %s is %s old", key, aspect, age)
+            klog.V(2).Infof("pruning %q from the condition cache, as the %s is %s old", key, aspect, age)
             delete(c.MatchResults, key)
         }
     }
diff --git a/pkg/clusterconditions/clusterconditions.go b/pkg/clusterconditions/clusterconditions.go
index 4581ff052..e532547cb 100644
--- a/pkg/clusterconditions/clusterconditions.go
+++ b/pkg/clusterconditions/clusterconditions.go
@@ -69,7 +69,7 @@ func Match(ctx context.Context, matchingRules []configv1.ClusterCondition) (bool
     for _, config := range matchingRules {
         condition, ok := Registry[config.Type]
         if !ok {
-            klog.V(4).Infof("Skipping unrecognized cluster condition type %q", config.Type)
+            klog.V(2).Infof("Skipping unrecognized cluster condition type %q", config.Type)
             continue
         }
         match, err := condition.Match(ctx, &config)
diff --git a/pkg/clusterconditions/promql/promql.go b/pkg/clusterconditions/promql/promql.go
index 1e0359c48..b4c949d7f 100644
--- a/pkg/clusterconditions/promql/promql.go
+++ b/pkg/clusterconditions/promql/promql.go
@@ -79,7 +79,7 @@ func (p *PromQL) Match(ctx context.Context, condition *configv1.ClusterCondition)
     }
     v1api := prometheusv1.NewAPI(client)
 
-    klog.V(4).Infof("evaluate %s cluster condition: %q", condition.Type, condition.PromQL.PromQL)
+    klog.V(2).Infof("evaluate %s cluster condition: %q", condition.Type, condition.PromQL.PromQL)
     result, warnings, err := v1api.Query(ctx, condition.PromQL.PromQL, time.Now())
     if err != nil {
         return false, fmt.Errorf("executing PromQL query: %w", err)
     }
diff --git a/pkg/cvo/availableupdates.go b/pkg/cvo/availableupdates.go
index ecca74a07..d897e1bfd 100644
--- a/pkg/cvo/availableupdates.go
+++ b/pkg/cvo/availableupdates.go
@@ -41,16 +41,16 @@ func (optr *Operator) syncAvailableUpdates(ctx context.Context, config *configv1
     // updates are only checked at most once per minimumUpdateCheckInterval or if the generation changes
     u := optr.getAvailableUpdates()
     if u == nil {
-        klog.V(4).Info("First attempt to retrieve available updates")
+        klog.V(2).Info("First attempt to retrieve available updates")
     } else if !u.RecentlyChanged(optr.minimumUpdateCheckInterval) {
-        klog.V(4).Infof("Retrieving available updates again, because more than %s has elapsed since %s", optr.minimumUpdateCheckInterval, u.LastAttempt)
+        klog.V(2).Infof("Retrieving available updates again, because more than %s has elapsed since %s", optr.minimumUpdateCheckInterval, u.LastAttempt)
     } else if channel != u.Channel {
-        klog.V(4).Infof("Retrieving available updates again, because the channel has changed from %q to %q", u.Channel, channel)
+        klog.V(2).Infof("Retrieving available updates again, because the channel has changed from %q to %q", u.Channel, channel)
     } else if upstream == u.Upstream || (upstream == optr.defaultUpstreamServer && u.Upstream == "") {
-        klog.V(4).Infof("Available updates were recently retrieved, with less than %s elapsed since %s, will try later.", optr.minimumUpdateCheckInterval, u.LastAttempt)
+        klog.V(2).Infof("Available updates were recently retrieved, with less than %s elapsed since %s, will try later.", optr.minimumUpdateCheckInterval, u.LastAttempt)
         return nil
     } else {
-        klog.V(4).Infof("Retrieving available updates again, because the upstream has changed from %q to %q", u.Upstream, config.Spec.Upstream)
+        klog.V(2).Infof("Retrieving available updates again, because the upstream has changed from %q to %q", u.Upstream, config.Spec.Upstream)
     }
 
     transport, err := optr.getTransport()
diff --git a/pkg/cvo/cvo.go b/pkg/cvo/cvo.go
index ad5e1615c..6d7f0cfef 100644
--- a/pkg/cvo/cvo.go
+++ b/pkg/cvo/cvo.go
@@ -511,9 +511,9 @@ func handleErr(ctx context.Context, queue workqueue.RateLimitingInterface, err e
 // It returns an error if it could not update the cluster version object.
 func (optr *Operator) sync(ctx context.Context, key string) error {
     startTime := time.Now()
-    klog.V(4).Infof("Started syncing cluster version %q (%v)", key, startTime)
+    klog.V(2).Infof("Started syncing cluster version %q (%v)", key, startTime)
     defer func() {
-        klog.V(4).Infof("Finished syncing cluster version %q (%v)", key, time.Since(startTime))
+        klog.V(2).Infof("Finished syncing cluster version %q (%v)", key, time.Since(startTime))
     }()
 
     // ensure the cluster version exists, that the object is valid, and that
@@ -523,11 +523,11 @@ func (optr *Operator) sync(ctx context.Context, key string) error {
         return err
     }
     if changed {
-        klog.V(4).Infof("Cluster version changed, waiting for newer event")
+        klog.V(2).Infof("Cluster version changed, waiting for newer event")
         return nil
     }
     if original == nil {
-        klog.V(4).Infof("No ClusterVersion object and defaulting not enabled, waiting for one")
+        klog.V(2).Infof("No ClusterVersion object and defaulting not enabled, waiting for one")
         return nil
     }
 
@@ -542,14 +542,14 @@ func (optr *Operator) sync(ctx context.Context, key string) error {
     // identify the desired next version
     desired, ok := findUpdateFromConfig(config)
     if ok {
-        klog.V(4).Infof("Desired version from spec is %#v", desired)
+        klog.V(2).Infof("Desired version from spec is %#v", desired)
     } else {
         currentVersion := optr.currentVersion()
         desired = configv1.Update{
             Version: currentVersion.Version,
             Image:   currentVersion.Image,
         }
-        klog.V(4).Infof("Desired version from operator is %#v", desired)
+        klog.V(2).Infof("Desired version from operator is %#v", desired)
     }
 
     // handle the case of a misconfigured CVO by doing nothing
@@ -584,9 +584,9 @@ func (optr *Operator) sync(ctx context.Context, key string) error {
 // sync available updates. It only modifies cluster version.
 func (optr *Operator) availableUpdatesSync(ctx context.Context, key string) error {
     startTime := time.Now()
-    klog.V(4).Infof("Started syncing available updates %q (%v)", key, startTime)
+    klog.V(2).Infof("Started syncing available updates %q (%v)", key, startTime)
     defer func() {
-        klog.V(4).Infof("Finished syncing available updates %q (%v)", key, time.Since(startTime))
+        klog.V(2).Infof("Finished syncing available updates %q (%v)", key, time.Since(startTime))
     }()
 
     config, err := optr.cvLister.Get(optr.name)
@@ -603,9 +603,9 @@ func (optr *Operator) availableUpdatesSync(ctx context.Context, key string) erro
 // sync upgradeableCondition. It only modifies cluster version.
 func (optr *Operator) upgradeableSync(ctx context.Context, key string) error {
     startTime := time.Now()
-    klog.V(4).Infof("Started syncing upgradeable %q (%v)", key, startTime)
+    klog.V(2).Infof("Started syncing upgradeable %q (%v)", key, startTime)
     defer func() {
-        klog.V(4).Infof("Finished syncing upgradeable %q (%v)", key, time.Since(startTime))
+        klog.V(2).Infof("Finished syncing upgradeable %q (%v)", key, time.Since(startTime))
     }()
 
     config, err := optr.cvLister.Get(optr.name)
diff --git a/pkg/cvo/cvo_test.go b/pkg/cvo/cvo_test.go
index 9f7b287f4..6342326d5 100644
--- a/pkg/cvo/cvo_test.go
+++ b/pkg/cvo/cvo_test.go
@@ -4011,7 +4011,7 @@ func fakeClientsetWithUpdates(obj *configv1.ClusterVersion) *fake.Clientset {
             obj.Status = update.Status
             rv, _ := strconv.Atoi(update.ResourceVersion)
             obj.ResourceVersion = strconv.Itoa(rv + 1)
-            klog.V(5).Infof("updated object to %#v", obj)
+            klog.V(2).Infof("updated object to %#v", obj)
             return true, obj.DeepCopy(), nil
         }
         return false, nil, fmt.Errorf("unrecognized")
diff --git a/pkg/cvo/internal/generic.go b/pkg/cvo/internal/generic.go
index 177b3f19e..72e9e09a1 100644
--- a/pkg/cvo/internal/generic.go
+++ b/pkg/cvo/internal/generic.go
@@ -106,7 +106,7 @@ func applyUnstructured(ctx context.Context, client dynamic.ResourceInterface, re
     existing.SetOwnerReferences(required.GetOwnerReferences())
 
     if reconciling {
-        klog.V(4).Infof("Updating %s %s/%s due to diff: %v", required.GetKind(), required.GetNamespace(), required.GetName(), objDiff)
+        klog.V(2).Infof("Updating %s %s/%s due to diff: %v", required.GetKind(), required.GetNamespace(), required.GetName(), objDiff)
     }
 
     actual, err := client.Update(ctx, existing, metav1.UpdateOptions{})
diff --git a/pkg/cvo/metrics.go b/pkg/cvo/metrics.go
index 61413fbe0..e8780f345 100644
--- a/pkg/cvo/metrics.go
+++ b/pkg/cvo/metrics.go
@@ -323,7 +323,7 @@ func (m *operatorMetrics) Collect(ch chan<- prometheus.Metric) {
         for _, condition := range cv.Status.Conditions {
             if condition.Status != configv1.ConditionFalse && condition.Status != configv1.ConditionTrue {
-                klog.V(4).Infof("skipping metrics for ClusterVersion condition %s=%s (neither True nor False)", condition.Type, condition.Status)
+                klog.V(2).Infof("skipping metrics for ClusterVersion condition %s=%s (neither True nor False)", condition.Type, condition.Status)
                 continue
             }
             g := m.clusterOperatorConditions.WithLabelValues("version", string(condition.Type), string(condition.Reason))
@@ -355,7 +355,7 @@ func (m *operatorMetrics) Collect(ch chan<- prometheus.Metric) {
             }
         }
         if version == "" {
-            klog.V(4).Infof("ClusterOperator %s is not setting the 'operator' version", op.Name)
+            klog.V(2).Infof("ClusterOperator %s is not setting the 'operator' version", op.Name)
         }
         g := m.clusterOperatorUp.WithLabelValues(op.Name, version)
         if resourcemerge.IsOperatorStatusConditionTrue(op.Status.Conditions, configv1.OperatorAvailable) {
@@ -366,7 +366,7 @@ func (m *operatorMetrics) Collect(ch chan<- prometheus.Metric) {
         ch <- g
         for _, condition := range op.Status.Conditions {
             if condition.Status != configv1.ConditionFalse && condition.Status != configv1.ConditionTrue {
-                klog.V(4).Infof("skipping metrics for %s ClusterOperator condition %s=%s (neither True nor False)", op.Name, condition.Type, condition.Status)
+                klog.V(2).Infof("skipping metrics for %s ClusterOperator condition %s=%s (neither True nor False)", op.Name, condition.Type, condition.Status)
                 continue
             }
             g := m.clusterOperatorConditions.WithLabelValues(op.Name, string(condition.Type), string(condition.Reason))
diff --git a/pkg/cvo/status.go b/pkg/cvo/status.go
index e2b24dd9e..b96894edc 100644
--- a/pkg/cvo/status.go
+++ b/pkg/cvo/status.go
@@ -61,7 +61,7 @@ func mergeOperatorHistory(config *configv1.ClusterVersion, desired configv1.Rele
     }
 
     if len(config.Status.History) == 0 {
-        klog.V(5).Infof("initialize new history completed=%t desired=%#v", completed, desired)
+        klog.V(2).Infof("initialize new history completed=%t desired=%#v", completed, desired)
         config.Status.History = append(config.Status.History, configv1.UpdateHistory{
             Version: desired.Version,
             Image:   desired.Image,
@@ -78,7 +78,7 @@ func mergeOperatorHistory(config *configv1.ClusterVersion, desired configv1.Rele
     }
 
     if mergeEqualVersions(last, desired) {
-        klog.V(5).Infof("merge into existing history completed=%t desired=%#v last=%#v", completed, desired, last)
+        klog.V(2).Infof("merge into existing history completed=%t desired=%#v last=%#v", completed, desired, last)
         if completed {
             last.State = configv1.CompletedUpdate
             if last.CompletionTime == nil {
@@ -86,7 +86,7 @@ func mergeOperatorHistory(config *configv1.ClusterVersion, desired configv1.Rele
             }
         }
     } else {
-        klog.V(5).Infof("must add a new history entry completed=%t desired=%#v != last=%#v", completed, desired, last)
+        klog.V(2).Infof("must add a new history entry completed=%t desired=%#v != last=%#v", completed, desired, last)
         if last.CompletionTime == nil {
             last.CompletionTime = &now
         }
@@ -115,7 +115,7 @@ func mergeOperatorHistory(config *configv1.ClusterVersion, desired configv1.Rele
     }
 
     // leave this here in case we find other future history bugs and need to debug it
-    if klog.V(5).Enabled() && len(config.Status.History) > 1 {
+    if klog.V(2).Enabled() && len(config.Status.History) > 1 {
         if config.Status.History[0].Image == config.Status.History[1].Image && config.Status.History[0].Version == config.Status.History[1].Version {
             data, _ := json.MarshalIndent(config.Status.History, "", " ")
             panic(fmt.Errorf("tried to update cluster version history to contain duplicate image entries: %s", string(data)))
@@ -158,7 +158,7 @@ const ClusterVersionInvalid configv1.ClusterStatusConditionType = "Invalid"
 // syncStatus calculates the new status of the ClusterVersion based on the current sync state and any
 // validation errors found. We allow the caller to pass the original object to avoid DeepCopying twice.
 func (optr *Operator) syncStatus(ctx context.Context, original, config *configv1.ClusterVersion, status *SyncWorkerStatus, validationErrs field.ErrorList) error {
-    klog.V(5).Infof("Synchronizing errs=%#v status=%#v", validationErrs, status)
+    klog.V(2).Infof("Synchronizing errs=%#v status=%#v", validationErrs, status)
 
     cvUpdated := false
     // update the config with the latest available updates
diff --git a/pkg/cvo/sync_worker.go b/pkg/cvo/sync_worker.go
index 6e9f4a9bc..019f11049 100644
--- a/pkg/cvo/sync_worker.go
+++ b/pkg/cvo/sync_worker.go
@@ -221,12 +221,12 @@ func (w *SyncWorker) Update(generation int64, desired configv1.Update, overrides
     }
 
     if work.Empty() {
-        klog.V(5).Info("Update work has no release image; ignoring requested change")
+        klog.V(2).Info("Update work has no release image; ignoring requested change")
         return w.status.DeepCopy()
     }
 
     if equalSyncWork(w.work, work, state) {
-        klog.V(5).Info("Update work is equal to current target; no change required")
+        klog.V(2).Info("Update work is equal to current target; no change required")
         return w.status.DeepCopy()
     }
 
@@ -247,15 +247,15 @@ func (w *SyncWorker) Update(generation int64, desired configv1.Update, overrides
     // notify the sync loop that we changed config
     w.work = work
     if w.cancelFn != nil {
-        klog.V(5).Info("Cancel the sync worker's current loop")
+        klog.V(2).Info("Cancel the sync worker's current loop")
         w.cancelFn()
         w.cancelFn = nil
     }
     select {
     case w.notify <- struct{}{}:
-        klog.V(5).Info("Notify the sync worker that new work is available")
+        klog.V(2).Info("Notify the sync worker that new work is available")
     default:
-        klog.V(5).Info("The sync worker has already been notified that new work is available")
+        klog.V(2).Info("The sync worker has already been notified that new work is available")
     }
 
     return w.status.DeepCopy()
@@ -265,7 +265,7 @@ func (w *SyncWorker) Update(generation int64, desired configv1.Update, overrides
 // It is edge-triggered when Update() is invoked and level-driven after the
 // syncOnce() has succeeded for a given input (we are said to be "reconciling").
 func (w *SyncWorker) Start(ctx context.Context, maxWorkers int, cvoOptrName string, lister configlistersv1.ClusterVersionLister) {
-    klog.V(5).Infof("Starting sync worker")
+    klog.V(2).Infof("Starting sync worker")
 
     work := &SyncWork{}
 
@@ -278,26 +278,26 @@ func (w *SyncWorker) Start(ctx context.Context, maxWorkers int, cvoOptrName stri
         waitingToReconcile := work.State == payload.ReconcilingPayload
         select {
         case <-ctx.Done():
-            klog.V(5).Infof("Stopped worker")
+            klog.V(2).Infof("Stopped worker")
             return
         case <-next:
             waitingToReconcile = false
-            klog.V(5).Infof("Wait finished")
+            klog.V(2).Infof("Wait finished")
         case <-w.notify:
-            klog.V(5).Infof("Work updated")
+            klog.V(2).Infof("Work updated")
         }
 
         // determine whether we need to do work
         changed := w.calculateNext(work)
         if !changed && waitingToReconcile {
-            klog.V(5).Infof("No change, waiting")
+            klog.V(2).Infof("No change, waiting")
             continue
         }
 
         // until Update() has been called at least once, we do nothing
         if work.Empty() {
             next = time.After(w.minimumReconcileInterval)
-            klog.V(5).Infof("No work, waiting")
+            klog.V(2).Infof("No work, waiting")
             continue
         }
 
@@ -336,7 +336,7 @@ func (w *SyncWorker) Start(ctx context.Context, maxWorkers int, cvoOptrName stri
             // reporter hides status updates that occur earlier than the previous failure,
             // so that we don't fail, then immediately start reporting an earlier status
             reporter := &statusWrapper{w: w, previousStatus: w.Status()}
-            klog.V(5).Infof("Previous sync status: %#v", reporter.previousStatus)
+            klog.V(2).Infof("Previous sync status: %#v", reporter.previousStatus)
             return w.syncOnce(ctx, work, maxWorkers, reporter, config)
         }()
         if err != nil {
@@ -357,7 +357,7 @@ func (w *SyncWorker) Start(ctx context.Context, maxWorkers int, cvoOptrName stri
             continue
         }
         if work.State != payload.ReconcilingPayload {
-            klog.V(4).Infof("Sync succeeded, transitioning from %s to %s", work.State, payload.ReconcilingPayload)
+            klog.V(2).Infof("Sync succeeded, transitioning from %s to %s", work.State, payload.ReconcilingPayload)
         }
 
         work.Completed++
@@ -367,7 +367,7 @@ func (w *SyncWorker) Start(ctx context.Context, maxWorkers int, cvoOptrName stri
         }
     }, 10*time.Millisecond, ctx.Done())
 
-    klog.V(5).Infof("Worker shut down")
+    klog.V(2).Infof("Worker shut down")
 }
 
 // statusWrapper prevents a newer status update from overwriting a previous
@@ -390,7 +390,7 @@ func (w *statusWrapper) Report(status SyncWorkerStatus) {
     if p.Failure != nil && status.Failure == nil {
         if p.Actual.Image == status.Actual.Image {
             if fractionComplete < previousFractionComplete {
-                klog.V(5).Infof("Dropping status report from earlier in sync loop")
+                klog.V(2).Infof("Dropping status report from earlier in sync loop")
                 return
             }
         }
@@ -494,7 +494,7 @@ func equalSyncWork(a, b *SyncWork, state payload.State) bool {
             klog.Warningf("Ignoring detected %s during payload initialization", detected)
             return true
         }
-        klog.V(5).Infof("Detected %s", detected)
+        klog.V(2).Infof("Detected %s", detected)
         return false
     }
     return true
@@ -543,7 +543,7 @@ func (w *SyncWorker) syncOnce(ctx context.Context, work *SyncWork, maxWorkers in
         Version: work.Desired.Version,
         Image:   work.Desired.Image,
     }
-    klog.V(4).Infof("Running sync %s (force=%t) on generation %d in state %s at attempt %d", versionString(desired), work.Desired.Force, work.Generation, work.State, work.Attempt)
+    klog.V(2).Infof("Running sync %s (force=%t) on generation %d in state %s at attempt %d", versionString(desired), work.Desired.Force, work.Generation, work.State, work.Attempt)
     if work.Attempt == 0 {
         payload.InitCOUpdateStartTimes()
@@ -555,7 +555,7 @@ func (w *SyncWorker) syncOnce(ctx context.Context, work *SyncWork, maxWorkers in
         // possibly complain here if Version, etc. diverges from the payload content
         desired = validPayload.Release
     } else if validPayload == nil || !equalUpdate(configv1.Update{Image: validPayload.Release.Image}, configv1.Update{Image: desired.Image}) {
-        klog.V(4).Infof("Loading payload")
+        klog.V(2).Infof("Loading payload")
         cvoObjectRef := &corev1.ObjectReference{APIVersion: "config.openshift.io/v1", Kind: "ClusterVersion", Name: "version", Namespace: "openshift-cluster-version"}
         w.eventRecorder.Eventf(cvoObjectRef, corev1.EventTypeNormal, "RetrievePayload", "retrieving payload version=%q image=%q", desired.Version, desired.Image)
         reporter.Report(SyncWorkerStatus{
@@ -618,9 +618,9 @@ func (w *SyncWorker) syncOnce(ctx context.Context, work *SyncWork, maxWorkers in
         // need to make sure the payload is only set when the preconditions have been successful
         if len(w.preconditions) == 0 {
-            klog.V(4).Info("No preconditions configured.")
+            klog.V(2).Info("No preconditions configured.")
         } else if info.Local {
-            klog.V(4).Info("Skipping preconditions for a local operator image payload.")
+            klog.V(2).Info("Skipping preconditions for a local operator image payload.")
         } else {
             reporter.Report(SyncWorkerStatus{
                 Generation: work.Generation,
@@ -633,7 +633,7 @@ func (w *SyncWorker) syncOnce(ctx context.Context, work *SyncWork, maxWorkers in
             if block, err := precondition.Summarize(w.preconditions.RunAll(ctx, precondition.ReleaseContext{
                 DesiredVersion: payloadUpdate.Release.Version,
             }), work.Desired.Force); err != nil {
-                klog.V(4).Infof("Precondition error (force %t, block %t): %v", work.Desired.Force, block, err)
+                klog.V(2).Infof("Precondition error (force %t, block %t): %v", work.Desired.Force, block, err)
                 if block {
                     w.eventRecorder.Eventf(cvoObjectRef, corev1.EventTypeWarning, "PreconditionBlock", "preconditions failed for payload loaded version=%q image=%q: %v", desired.Version, desired.Image, err)
                     reporter.Report(SyncWorkerStatus{
@@ -655,7 +655,7 @@ func (w *SyncWorker) syncOnce(ctx context.Context, work *SyncWork, maxWorkers in
         w.payload = payloadUpdate
         w.eventRecorder.Eventf(cvoObjectRef, corev1.EventTypeNormal, "PayloadLoaded", "payload loaded version=%q image=%q", desired.Version, desired.Image)
-        klog.V(4).Infof("Payload loaded from %s with hash %s", desired.Image, payloadUpdate.ManifestHash)
+        klog.V(2).Infof("Payload loaded from %s with hash %s", desired.Image, payloadUpdate.ManifestHash)
     }
 
     return w.apply(ctx, w.payload, work, maxWorkers, reporter)
@@ -739,14 +739,14 @@ func (w *SyncWorker) apply(ctx context.Context, payloadUpdate *payload.Update, w
             }
             ov, ok := getOverrideForManifest(work.Overrides, task.Manifest)
             if ok && ov.Unmanaged {
-                klog.V(4).Infof("Skipping precreation of %s as unmanaged", task)
+                klog.V(2).Infof("Skipping precreation of %s as unmanaged", task)
                 continue
             }
             if err := w.builder.Apply(ctx, task.Manifest, payload.PrecreatingPayload); err != nil {
                 klog.V(2).Infof("Unable to precreate resource %s: %v", task, err)
                 continue
             }
-            klog.V(4).Infof("Precreated resource %s", task)
+            klog.V(2).Infof("Precreated resource %s", task)
         }
     }
 
@@ -756,11 +756,11 @@ func (w *SyncWorker) apply(ctx context.Context, payloadUpdate *payload.Update, w
         }
         cr.Update()
 
-        klog.V(4).Infof("Running sync for %s", task)
+        klog.V(2).Infof("Running sync for %s", task)
 
         ov, ok := getOverrideForManifest(work.Overrides, task.Manifest)
         if ok && ov.Unmanaged {
-            klog.V(4).Infof("Skipping %s as unmanaged", task)
+            klog.V(2).Infof("Skipping %s as unmanaged", task)
             continue
         }
 
@@ -768,7 +768,7 @@ func (w *SyncWorker) apply(ctx context.Context, payloadUpdate *payload.Update, w
             return err
         }
         cr.Inc()
-        klog.V(4).Infof("Done syncing for %s", task)
+        klog.V(2).Infof("Done syncing for %s", task)
     }
     return nil
 })
@@ -912,7 +912,7 @@ func summarizeTaskGraphErrors(errs []error) error {
     // server
     err := errors.FilterOut(errors.NewAggregate(errs), isContextError)
     if err == nil {
-        klog.V(4).Infof("All errors were context errors: %v", errs)
+        klog.V(2).Infof("All errors were context errors: %v", errs)
         return nil
     }
     agg, ok := err.(errors.Aggregate)
@@ -923,7 +923,7 @@ func summarizeTaskGraphErrors(errs []error) error {
     }
 
     // log the errors to assist in debugging future summarization
-    if klog.V(4).Enabled() {
+    if klog.V(2).Enabled() {
         klog.Infof("Summarizing %d errors", len(errs))
         for _, err := range errs {
             if uErr, ok := err.(*payload.UpdateError); ok {
diff --git a/pkg/cvo/updatepayload.go b/pkg/cvo/updatepayload.go
index f4adef48c..52c69925c 100644
--- a/pkg/cvo/updatepayload.go
+++ b/pkg/cvo/updatepayload.go
@@ -98,7 +98,7 @@ func (r *payloadRetriever) RetrievePayload(ctx context.Context, update configv1.
         if deadline, deadlineSet := ctx.Deadline(); deadlineSet {
             timeout = time.Until(deadline) / 2
         }
-        klog.V(4).Infof("Forced update so reducing payload signature verification timeout to %s", timeout)
+        klog.V(2).Infof("Forced update so reducing payload signature verification timeout to %s", timeout)
         var cancel context.CancelFunc
         verifyCtx, cancel = context.WithTimeout(ctx, timeout)
         defer cancel()
diff --git a/pkg/cvo/upgradeable.go b/pkg/cvo/upgradeable.go
index 831721ed0..465a875a2 100644
--- a/pkg/cvo/upgradeable.go
+++ b/pkg/cvo/upgradeable.go
@@ -39,7 +39,7 @@ func (optr *Operator) syncUpgradeable() error {
     // updates are only checked at most once per minimumUpdateCheckInterval or if the generation changes
     u := optr.getUpgradeable()
     if u != nil && u.RecentlyChanged(optr.minimumUpdateCheckInterval) {
-        klog.V(4).Infof("Upgradeable conditions were recently checked, will try later.")
+        klog.V(2).Infof("Upgradeable conditions were recently checked, will try later.")
         return nil
     }
     optr.setUpgradeableConditions()
@@ -245,7 +245,7 @@ func (check *clusterManifestDeleteInProgressUpgradeable) Check() *configv1.Clust
     }
     if deletes := resourcedelete.DeletesInProgress(); len(deletes) > 0 {
         resources := strings.Join(deletes, ",")
-        klog.V(4).Infof("Resource deletions in progress; resources=%s", resources)
+        klog.V(2).Infof("Resource deletions in progress; resources=%s", resources)
         cond.Reason = "ResourceDeletesInProgress"
         cond.Message = fmt.Sprintf("Cluster minor level upgrades are not allowed while resource deletions are in progress; resources=%s", resources)
         return cond
@@ -399,7 +399,7 @@ func (optr *Operator) defaultUpgradeableChecks() []upgradeableCheck {
 func (optr *Operator) addFunc(obj interface{}) {
     cm := obj.(*corev1.ConfigMap)
     if cm.Name == internal.AdminGatesConfigMap || cm.Name == internal.AdminAcksConfigMap {
-        klog.V(4).Infof("ConfigMap %s/%s added.", cm.Namespace, cm.Name)
+        klog.V(2).Infof("ConfigMap %s/%s added.", cm.Namespace, cm.Name)
         optr.setUpgradeableConditions()
     }
 }
@@ -409,7 +409,7 @@ func (optr *Operator) updateFunc(oldObj, newObj interface{}) {
     if cm.Name == internal.AdminGatesConfigMap || cm.Name == internal.AdminAcksConfigMap {
         oldCm := oldObj.(*corev1.ConfigMap)
         if !equality.Semantic.DeepEqual(cm, oldCm) {
-            klog.V(4).Infof("ConfigMap %s/%s updated.", cm.Namespace, cm.Name)
+            klog.V(2).Infof("ConfigMap %s/%s updated.", cm.Namespace, cm.Name)
             optr.setUpgradeableConditions()
         }
     }
@@ -418,7 +418,7 @@ func (optr *Operator) updateFunc(oldObj, newObj interface{}) {
 func (optr *Operator) deleteFunc(obj interface{}) {
     cm := obj.(*corev1.ConfigMap)
     if cm.Name == internal.AdminGatesConfigMap || cm.Name == internal.AdminAcksConfigMap {
-        klog.V(4).Infof("ConfigMap %s/%s deleted.", cm.Namespace, cm.Name)
+        klog.V(2).Infof("ConfigMap %s/%s deleted.", cm.Namespace, cm.Name)
         optr.setUpgradeableConditions()
     }
 }
diff --git a/pkg/payload/payload.go b/pkg/payload/payload.go
index 49104c4b4..3df062b89 100644
--- a/pkg/payload/payload.go
+++ b/pkg/payload/payload.go
@@ -266,7 +266,7 @@ type payloadTasks struct {
 }
 
 func loadUpdatePayloadMetadata(dir, releaseImage, clusterProfile string) (*Update, []payloadTasks, error) {
-    klog.V(4).Infof("Loading updatepayload from %q", dir)
+    klog.V(2).Infof("Loading updatepayload from %q", dir)
     if err := ValidateDirectory(dir); err != nil {
         return nil, nil, err
     }
diff --git a/pkg/payload/precondition/clusterversion/upgradeable.go b/pkg/payload/precondition/clusterversion/upgradeable.go
index 1d4422e5a..9df5ebbe3 100644
--- a/pkg/payload/precondition/clusterversion/upgradeable.go
+++ b/pkg/payload/precondition/clusterversion/upgradeable.go
@@ -65,31 +65,31 @@ func (pf *Upgradeable) Run(ctx context.Context, releaseContext precondition.Rele
     // if we are upgradeable==true we can always upgrade
     up := resourcemerge.FindOperatorStatusCondition(cv.Status.Conditions, configv1.OperatorUpgradeable)
     if up == nil {
-        klog.V(4).Infof("Precondition %s passed: no Upgradeable condition on ClusterVersion.", pf.Name())
+        klog.V(2).Infof("Precondition %s passed: no Upgradeable condition on ClusterVersion.", pf.Name())
         return nil
     }
     if up.Status != configv1.ConditionFalse {
-        klog.V(4).Infof("Precondition %s passed: Upgradeable %s since %v: %s: %s", pf.Name(), up.Status, up.LastTransitionTime, up.Reason, up.Message)
+        klog.V(2).Infof("Precondition %s passed: Upgradeable %s since %v: %s: %s", pf.Name(), up.Status, up.LastTransitionTime, up.Reason, up.Message)
         return nil
     }
 
     // we can always allow the upgrade if there isn't a version already installed
     if len(cv.Status.History) == 0 {
-        klog.V(4).Infof("Precondition %s passed: no release history.", pf.Name())
+        klog.V(2).Infof("Precondition %s passed: no release history.", pf.Name())
        return nil
     }
 
     currentVersion := GetCurrentVersion(cv.Status.History)
     currentMinor := GetEffectiveMinor(currentVersion)
     desiredMinor := GetEffectiveMinor(releaseContext.DesiredVersion)
-    klog.V(5).Infof("currentMinor %s releaseContext.DesiredVersion %s desiredMinor %s", currentMinor, releaseContext.DesiredVersion, desiredMinor)
+    klog.V(2).Infof("currentMinor %s releaseContext.DesiredVersion %s desiredMinor %s", currentMinor, releaseContext.DesiredVersion, desiredMinor)
 
     // if there is no difference in the minor version (4.y.z where 4.y is the same for current and desired), then we can still upgrade
     // if no cluster overrides have been set
     if currentMinor == desiredMinor {
-        klog.V(4).Infof("Precondition %q passed: minor from the current %s matches minor from the target %s (both %s).", pf.Name(), currentVersion, releaseContext.DesiredVersion, currentMinor)
+        klog.V(2).Infof("Precondition %q passed: minor from the current %s matches minor from the target %s (both %s).", pf.Name(), currentVersion, releaseContext.DesiredVersion, currentMinor)
         if condition := ClusterVersionOverridesCondition(cv); condition != nil {
-            klog.V(4).Infof("Update from %s to %s blocked by %s: %s", currentVersion, releaseContext.DesiredVersion, condition.Reason, condition.Message)
+            klog.V(2).Infof("Update from %s to %s blocked by %s: %s", currentVersion, releaseContext.DesiredVersion, condition.Reason, condition.Message)
 
             return &precondition.Error{
                 Reason:  condition.Reason,
@@ -119,7 +119,7 @@ func (pf *Upgradeable) Name() string { return "ClusterVersionUpgradeable" }
 func GetCurrentVersion(history []configv1.UpdateHistory) string {
     for _, h := range history {
         if h.State == configv1.CompletedUpdate {
-            klog.V(5).Infof("Cluster current version=%s", h.Version)
+            klog.V(2).Infof("Cluster current version=%s", h.Version)
             return h.Version
         }
     }
diff --git a/pkg/payload/task_graph.go b/pkg/payload/task_graph.go
index d31c0847e..baa895a15 100644
--- a/pkg/payload/task_graph.go
+++ b/pkg/payload/task_graph.go
@@ -471,10 +471,10 @@ func RunGraph(ctx context.Context, graph *TaskGraph, maxParallelism int, fn func
         for {
             select {
             case <-ctx.Done():
-                klog.V(4).Infof("Canceled worker %d while waiting for work", job)
+                klog.V(2).Infof("Canceled worker %d while waiting for work", job)
                 return
             case runTask := <-workCh:
-                klog.V(4).Infof("Running %d on worker %d", runTask.index, job)
+                klog.V(2).Infof("Running %d on worker %d", runTask.index, job)
                 err := fn(ctx, runTask.tasks)
                 resultCh <- taskStatus{index: runTask.index, error: err}
             }
@@ -520,7 +520,7 @@ func RunGraph(ctx context.Context, graph *TaskGraph, maxParallelism int, fn func
     cancelFn()
     wg.Wait()
-    klog.V(4).Infof("Workers finished")
+    klog.V(2).Infof("Workers finished")
 
     var errs []error
     var firstIncompleteNode *TaskNode
@@ -543,7 +543,7 @@ func RunGraph(ctx context.Context, graph *TaskGraph, maxParallelism int, fn func
         }
     }
 
-    klog.V(4).Infof("Result of work: %v", errs)
+    klog.V(2).Infof("Result of work: %v", errs)
     if len(errs) > 0 {
         return errs
     }
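For reference, a minimal standalone sketch of the verbosity gating these changes rely on, assuming standard k8s.io/klog/v2 behavior and not part of this patch: a message logged through klog.V(n) is emitted only when the process verbosity threshold (the -v/--v flag) is at least n, so moving these call sites from V(4)/V(5) down to V(2) keeps them visible under the --v=2 that the patch sets on the operator and bootstrap pod.

package main

// Illustrative only; demonstrates klog.V(n) gating against the configured --v threshold.
import (
    "flag"

    "k8s.io/klog/v2"
)

func main() {
    klog.InitFlags(nil)    // register klog's -v and related flags on the default FlagSet
    _ = flag.Set("v", "2") // equivalent to running the binary with --v=2
    defer klog.Flush()

    klog.V(2).Infof("emitted: V(2) is within the --v=2 threshold")
    klog.V(4).Infof("suppressed: V(4) exceeds the --v=2 threshold")
}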