Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 2 additions & 2 deletions pkg/clusterconditions/mock/mock.go
Original file line number Diff line number Diff line change
Expand Up @@ -44,7 +44,7 @@ type Mock struct {
}

// Valid returns an error popped from ValidQueue.
func (m *Mock) Valid(ctx context.Context, condition *configv1.ClusterCondition) error {
func (m *Mock) Valid(_ context.Context, condition *configv1.ClusterCondition) error {
m.Calls = append(m.Calls, Call{
When: time.Now(),
Method: "Valid",
Expand All @@ -61,7 +61,7 @@ func (m *Mock) Valid(ctx context.Context, condition *configv1.ClusterCondition)
}

// Match returns an error popped from MatchQueue.
func (m *Mock) Match(ctx context.Context, condition *configv1.ClusterCondition) (bool, error) {
func (m *Mock) Match(_ context.Context, condition *configv1.ClusterCondition) (bool, error) {
m.Calls = append(m.Calls, Call{
When: time.Now(),
Method: "Match",
Expand Down
34 changes: 23 additions & 11 deletions pkg/cvo/availableupdates.go
Original file line number Diff line number Diff line change
Expand Up @@ -45,20 +45,20 @@ func (optr *Operator) syncAvailableUpdates(ctx context.Context, config *configv1
}

// updates are only checked at most once per minimumUpdateCheckInterval or if the generation changes
u := optr.getAvailableUpdates()
if u == nil {
optrAvailableUpdates := optr.getAvailableUpdates()
if optrAvailableUpdates == nil {
klog.V(2).Info("First attempt to retrieve available updates")
} else if !u.RecentlyChanged(optr.minimumUpdateCheckInterval) {
klog.V(2).Infof("Retrieving available updates again, because more than %s has elapsed since %s", optr.minimumUpdateCheckInterval, u.LastAttempt.Format(time.RFC3339))
} else if channel != u.Channel {
klog.V(2).Infof("Retrieving available updates again, because the channel has changed from %q to %q", u.Channel, channel)
} else if desiredArch != u.Architecture {
klog.V(2).Infof("Retrieving available updates again, because the architecture has changed from %q to %q", u.Architecture, desiredArch)
} else if upstream == u.Upstream || (upstream == optr.defaultUpstreamServer && u.Upstream == "") {
klog.V(2).Infof("Available updates were recently retrieved, with less than %s elapsed since %s, will try later.", optr.minimumUpdateCheckInterval, u.LastAttempt.Format(time.RFC3339))
} else if !optrAvailableUpdates.RecentlyChanged(optr.minimumUpdateCheckInterval) {
klog.V(2).Infof("Retrieving available updates again, because more than %s has elapsed since %s", optr.minimumUpdateCheckInterval, optrAvailableUpdates.LastAttempt.Format(time.RFC3339))
} else if channel != optrAvailableUpdates.Channel {
klog.V(2).Infof("Retrieving available updates again, because the channel has changed from %q to %q", optrAvailableUpdates.Channel, channel)
} else if desiredArch != optrAvailableUpdates.Architecture {
klog.V(2).Infof("Retrieving available updates again, because the architecture has changed from %q to %q", optrAvailableUpdates.Architecture, desiredArch)
} else if upstream == optrAvailableUpdates.Upstream || (upstream == optr.defaultUpstreamServer && optrAvailableUpdates.Upstream == "") {
klog.V(2).Infof("Available updates were recently retrieved, with less than %s elapsed since %s, will try later.", optr.minimumUpdateCheckInterval, optrAvailableUpdates.LastAttempt.Format(time.RFC3339))
return nil
} else {
klog.V(2).Infof("Retrieving available updates again, because the upstream has changed from %q to %q", u.Upstream, config.Spec.Upstream)
klog.V(2).Infof("Retrieving available updates again, because the upstream has changed from %q to %q", optrAvailableUpdates.Upstream, config.Spec.Upstream)
}

transport, err := optr.getTransport()
Expand All @@ -71,6 +71,18 @@ func (optr *Operator) syncAvailableUpdates(ctx context.Context, config *configv1
current, updates, conditionalUpdates, condition := calculateAvailableUpdatesStatus(ctx, string(config.Spec.ClusterID),
transport, userAgent, upstream, desiredArch, currentArch, channel, optr.release.Version, optr.conditionRegistry)

// Populate conditions on conditional updates from operator state
if optrAvailableUpdates != nil {
for i := range optrAvailableUpdates.ConditionalUpdates {
for j := range conditionalUpdates {
if optrAvailableUpdates.ConditionalUpdates[i].Release.Image == conditionalUpdates[j].Release.Image {
conditionalUpdates[j].Conditions = optrAvailableUpdates.ConditionalUpdates[i].Conditions
break
}
}
}
}

if usedDefaultUpstream {
upstream = ""
}
Expand Down
226 changes: 226 additions & 0 deletions pkg/cvo/availableupdates_test.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,226 @@
package cvo

import (
"context"
"fmt"
"net/http"
"net/http/httptest"
"testing"
"time"

"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"

corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/client-go/util/workqueue"

configv1 "github.com/openshift/api/config/v1"
"github.com/openshift/cluster-version-operator/pkg/clusterconditions"
"github.com/openshift/cluster-version-operator/pkg/clusterconditions/always"
"github.com/openshift/cluster-version-operator/pkg/clusterconditions/mock"
)

// notFoundProxyLister is a ProxyLister stub: listing yields nothing and
// every Get answers with a Kubernetes NotFound error.
type notFoundProxyLister struct{}

// List reports an empty proxy list with no error.
func (n notFoundProxyLister) List(labels.Selector) ([]*configv1.Proxy, error) {
	var proxies []*configv1.Proxy
	return proxies, nil
}

// Get reports the named proxy as missing.
func (n notFoundProxyLister) Get(name string) (*configv1.Proxy, error) {
	gr := schema.GroupResource{Group: configv1.GroupName, Resource: "proxy"}
	return nil, errors.NewNotFound(gr, name)
}

// notFoundConfigMapLister is a ConfigMapLister stub: listing yields nothing
// and every Get answers with a Kubernetes NotFound error.
type notFoundConfigMapLister struct{}

// List reports an empty config-map list with no error.
func (n notFoundConfigMapLister) List(labels.Selector) ([]*corev1.ConfigMap, error) {
	var configMaps []*corev1.ConfigMap
	return configMaps, nil
}

// Get reports the named config map as missing.
func (n notFoundConfigMapLister) Get(name string) (*corev1.ConfigMap, error) {
	gr := schema.GroupResource{Group: "", Resource: "configmap"}
	return nil, errors.NewNotFound(gr, name)
}

// osusWithSingleConditionalEdge helper returns:
// 1. mock osus server that serves a simple conditional path between two versions.
// 2. mock condition that always evaluates to match
// 3. expected []ConditionalUpdate data after evaluation of the data served by mock osus server
// (assuming the mock condition (2) was used)
// 4. current version of the cluster that would issue the request to the mock osus server
func osusWithSingleConditionalEdge() (*httptest.Server, clusterconditions.Condition, []configv1.ConditionalUpdate, string) {
from := "4.5.5"
to := "4.5.6"
osus := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

nit: we could possibly simplify this test harness a bit if we pull this out into a new function, because the upstream defaulting, available-updates throttling, and proxy transport lookup don't seem as interesting to cover in tests. You could create a dummy client to pass through to calculateAvailableUpdatesStatus like:

type MockRoundTripper func(r *http.Request) *http.Response

func (f MockRoundTripper) RoundTrip(r *http.Request) (*http.Response, error) {
    return f(r), nil
}

and:

httpClient := &http.Client{
  Transport: MockRoundTripper(func(r *http.Request) *http.Response {
    return &http.Response{
      StatusCode: http.StatusOK,
      Body:       io.NopCloser(strings.NewReader(fmt.Sprintf(`{
  "nodes": [{"version": "%s", "payload": "payload/%s"}, {"version": "%s", "payload": "payload/%s"}],
  "conditionalEdges": [
    {
      "edges": [{"from": "%s", "to": "%s"}],
      "risks": [
        {
          "url": "https://example.com/%s",
          "name": "FourFiveSix",
          "message": "Four Five Five is just fine",
          "matchingRules": [{"type": "%s", "promql": { "promql": "%s"}}]
        }
      ]
    }
  ]
}
`, from, from, to, to, from, to, to, ruleType, promql
      ))),
    },
  },
}

or some such without needing to set up an HTTP server that needs to be closed later.

I'm not sure if it's worth the effort to pivot, now that you've already figured out all the plumbing to get this up to the existing level, but it might be worth poking at to see if the pivoting-this-pull cost seems like it might be worth reducing the onboarding-the-next-dev-who-needs-to-understand-this-test-suite cost.

Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I have considered doing something like that, but the tested functionality felt like deserving a higher-level integration test so I thought test HTTP servers are cheap in Go with httptest. The real complexity is in preparing all the data and expected fixtures.

But I'll reconsider, with further development and feedback the code now is different than when started. Thanks for the suggestion!

Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I gave the pivot a quick shot and I think we're not really saving that much complexity, the annoying bits with "set up the input/output" would need to stay, we'd just need different plumbing.

But I actually discovered that I like using httptest here. It allows testing the full method and it quite explicitly communicates that the important input comes from a server. The plumbing needed is basically identical to mocking it in the client transport. I liked that I could get rid of the annoying queue stub though :)

fmt.Fprintf(w, `{
"nodes": [{"version": "%s", "payload": "payload/%s"}, {"version": "%s", "payload": "payload/%s"}],
"conditionalEdges": [
{
"edges": [{"from": "%s", "to": "%s"}],
"risks": [
{
"url": "https://example.com/%s",
"name": "FourFiveSix",
"message": "Four Five Five is just fine",
"matchingRules": [{"type": "PromQL", "promql": { "promql": "this is a query"}}]
}
]
}
]
}
`, from, from, to, to, from, to, to)
}))

updates := []configv1.ConditionalUpdate{
{
Release: configv1.Release{Version: to, Image: "payload/" + to},
Risks: []configv1.ConditionalUpdateRisk{
{
URL: "https://example.com/" + to,
Name: "FourFiveSix",
Message: "Four Five Five is just fine",
MatchingRules: []configv1.ClusterCondition{
{
Type: "PromQL",
PromQL: &configv1.PromQLClusterCondition{PromQL: "this is a query"},
},
},
},
},
Conditions: []metav1.Condition{
{
Type: "Recommended",
Status: metav1.ConditionFalse,
Reason: "FourFiveSix",
Message: "Four Five Five is just fine https://example.com/" + to,
},
},
},
}
mockPromql := &mock.Mock{
ValidQueue: []error{nil},
MatchQueue: []mock.MatchResult{{Match: true, Error: nil}},
}

return osus, mockPromql, updates, from
}

// newOperator builds a minimal Operator wired to the given upstream URL,
// current cluster version, and PromQL condition implementation, together
// with an availableUpdates fixture matching the same architecture and
// current release (for use as an expected-state skeleton in tests).
func newOperator(url, version string, promqlMock clusterconditions.Condition) (*availableUpdates, *Operator) {
	rel := configv1.Release{Version: version, Image: "payload/" + version}

	reg := clusterconditions.NewConditionRegistry()
	reg.Register("Always", &always.Always{})
	reg.Register("PromQL", promqlMock)

	op := &Operator{
		defaultUpstreamServer: url,
		architecture:          "amd64",
		proxyLister:           notFoundProxyLister{},
		cmConfigManagedLister: notFoundConfigMapLister{},
		conditionRegistry:     reg,
		queue:                 workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()),
		release:               rel,
	}

	expected := &availableUpdates{
		Architecture: "amd64",
		Current:      rel,
	}
	return expected, op
}

// cvFixture is the ClusterVersion passed to syncAvailableUpdates in these
// tests: a fixed cluster ID and channel, with no explicit upstream so the
// operator falls back to its defaultUpstreamServer.
var cvFixture = &configv1.ClusterVersion{
	Spec: configv1.ClusterVersionSpec{
		ClusterID: "897f0a22-33ca-4106-a2c4-29b75250255a",
		Channel:   "channel",
	},
}

// availableUpdatesCmpOpts ignores fields that are volatile or have no
// meaningful equality when diffing availableUpdates structures: all
// time.Time values and the embedded ConditionRegistry interface.
var availableUpdatesCmpOpts = []cmp.Option{
	cmpopts.IgnoreTypes(time.Time{}),
	cmpopts.IgnoreInterfaces(struct {
		clusterconditions.ConditionRegistry
	}{}),
}

// TestSyncAvailableUpdates runs a full syncAvailableUpdates pass against a
// fake OSUS server and checks that the operator caches the expected
// conditional updates, channel, and a RetrievedUpdates=True condition.
func TestSyncAvailableUpdates(t *testing.T) {
	server, promql, wantConditional, version := osusWithSingleConditionalEdge()
	defer server.Close()

	want, optr := newOperator(server.URL, version, promql)
	want.ConditionalUpdates = wantConditional
	want.Channel = cvFixture.Spec.Channel
	want.Condition = configv1.ClusterOperatorStatusCondition{
		Type:   configv1.RetrievedUpdates,
		Status: configv1.ConditionTrue,
	}

	if err := optr.syncAvailableUpdates(context.Background(), cvFixture); err != nil {
		t.Fatalf("syncAvailableUpdates() unexpected error: %v", err)
	}
	if diff := cmp.Diff(want, optr.availableUpdates, availableUpdatesCmpOpts...); diff != "" {
		t.Fatalf("available updates differ from expected:\n%s", diff)
	}
}

// TestSyncAvailableUpdates_ConditionalUpdateRecommendedConditions verifies
// that conditions carried over from the previously-stored operator state keep
// their lastTransitionTime across a sync unless the condition status actually
// changes.
func TestSyncAvailableUpdates_ConditionalUpdateRecommendedConditions(t *testing.T) {
	testCases := []struct {
		name string
		// modifyOriginalState mutates the previously-stored condition
		// before the sync runs.
		modifyOriginalState func(condition *metav1.Condition)
		// expectTimeChange is true when the sync is expected to bump
		// lastTransitionTime.
		expectTimeChange bool
	}{
		{
			name:                "lastTransitionTime is not updated when nothing changes",
			modifyOriginalState: func(condition *metav1.Condition) {},
		},
		{
			name: "lastTransitionTime is not updated when changed but status is identical",
			modifyOriginalState: func(condition *metav1.Condition) {
				condition.Reason = "OldReason"
				condition.Message = "This message should be changed to something else"
			},
		},
		{
			name: "lastTransitionTime is updated when status changes",
			modifyOriginalState: func(condition *metav1.Condition) {
				condition.Status = metav1.ConditionUnknown
			},
			expectTimeChange: true,
		},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			fakeOsus, mockPromql, conditionalUpdates, version := osusWithSingleConditionalEdge()
			defer fakeOsus.Close()
			availableUpdates, optr := newOperator(fakeOsus.URL, version, mockPromql)
			optr.availableUpdates = availableUpdates
			optr.availableUpdates.ConditionalUpdates = conditionalUpdates
			// Snapshot the pristine condition before the per-case mutation so
			// we can compare against what the sync is expected to restore.
			expectedConditions := []metav1.Condition{{}}
			conditionalUpdates[0].Conditions[0].DeepCopyInto(&expectedConditions[0])
			tc.modifyOriginalState(&optr.availableUpdates.ConditionalUpdates[0].Conditions[0])

			err := optr.syncAvailableUpdates(context.Background(), cvFixture)

			if err != nil {
				t.Fatalf("syncAvailableUpdates() unexpected error: %v", err)
			}
			if optr.availableUpdates == nil || len(optr.availableUpdates.ConditionalUpdates) == 0 {
				t.Fatalf("syncAvailableUpdates() did not properly set available updates")
			}
			if diff := cmp.Diff(expectedConditions, optr.availableUpdates.ConditionalUpdates[0].Conditions, cmpopts.IgnoreTypes(time.Time{})); diff != "" {
				t.Errorf("conditions on conditional updates differ from expected:\n%s", diff)
			}
			timeBefore := expectedConditions[0].LastTransitionTime
			timeAfter := optr.availableUpdates.ConditionalUpdates[0].Conditions[0].LastTransitionTime

			// Compare with Equal rather than ==: metav1.Time wraps time.Time,
			// and struct equality is sensitive to monotonic-clock readings and
			// Location pointers that Equal correctly ignores.
			if tc.expectTimeChange && timeBefore.Equal(&timeAfter) {
				t.Errorf("lastTransitionTime was not updated as expected: before=%s after=%s", timeBefore, timeAfter)
			}
			if !tc.expectTimeChange && !timeBefore.Equal(&timeAfter) {
				t.Errorf("lastTransitionTime was updated but was not expected to: before=%s after=%s", timeBefore, timeAfter)
			}
		})
	}
}