4 changes: 3 additions & 1 deletion pkg/apis/upgrade/v1alpha1/upgradeconfig_types.go
@@ -26,7 +26,7 @@ type UpgradeConfigSpec struct {
UpgradeAt string `json:"upgradeAt"`

// +kubebuilder:validation:Minimum:=0
// The maximum grace period granted to a node whose drain is blocked by a Pod Disruption Budget, before that drain is forced. Measured in minutes. The minimum accepted value is 0; in that case, a forced drain is triggered once expectedNodeDrainTime has lapsed.
PDBForceDrainTimeout int32 `json:"PDBForceDrainTimeout"`

// +kubebuilder:validation:Enum={"OSD","ARO"}
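For orientation, a minimal sketch of how this field might be populated from Go. Only the two fields shown in this hunk are set; the schedule time and the 60-minute value are illustrative, not defaults taken from the operator:

package main

import (
	"fmt"

	upgradev1alpha1 "github.com/openshift/managed-upgrade-operator/pkg/apis/upgrade/v1alpha1"
)

func main() {
	// Only the fields shown in this hunk are set; the rest of the spec is left at zero values.
	spec := upgradev1alpha1.UpgradeConfigSpec{
		UpgradeAt:            "2023-05-01T06:00:00Z", // illustrative upgrade window
		PDBForceDrainTimeout: 60,                     // force a PDB-blocked drain 60 minutes after draining starts
	}
	fmt.Printf("force-drain timeout: %d minutes\n", spec.PDBForceDrainTimeout)
}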
@@ -127,6 +127,8 @@ const (
PostClusterHealthCheck UpgradeConditionType = "ClusterHealthyAfterUpgrade"
// SendCompletedNotification is an UpgradeConditionType
SendCompletedNotification UpgradeConditionType = "CompletedNotificationSent"
// IsClusterUpgradable is an UpgradeConditionType
IsClusterUpgradable UpgradeConditionType = "IsClusterUpgradable"
)

// UpgradePhase is a Go string type.
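The new IsClusterUpgradable constant gives the upgradeability check its own condition type; the pkg/eventmanager change below matches on it and forwards the condition's own message. A minimal sketch of that usage follows, with a hypothetical trimmed-down condition struct standing in for the operator's real one (only the Type and Message fields exercised in this diff are modelled):

package main

import (
	"fmt"

	upgradev1alpha1 "github.com/openshift/managed-upgrade-operator/pkg/apis/upgrade/v1alpha1"
)

// reportedCondition is a hypothetical stand-in for the operator's condition type,
// modelling only the fields used by the eventmanager change in this PR.
type reportedCondition struct {
	Type    upgradev1alpha1.UpgradeConditionType
	Message string
}

func main() {
	failed := reportedCondition{
		Type:    upgradev1alpha1.IsClusterUpgradable,
		Message: "ClusterVersion reports Upgradeable=False",
	}
	// The new constant can be matched like any other UpgradeConditionType.
	if failed.Type == upgradev1alpha1.IsClusterUpgradable {
		fmt.Println("upgradeability check failed:", failed.Message)
	}
}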
2 changes: 2 additions & 0 deletions pkg/eventmanager/eventmanager.go
@@ -164,6 +164,8 @@ func createFailureDescription(uc *v1alpha1.UpgradeConfig) string {
}

switch failedCondition.Type {
case v1alpha1.IsClusterUpgradable:
description = failedCondition.Message
case v1alpha1.UpgradePreHealthCheck:
description = fmt.Sprintf(UPGRADE_PREHEALTHCHECK_FAILED_DESC, uc.Spec.Desired.Version)
case v1alpha1.ExtDepAvailabilityCheck:
1 change: 1 addition & 0 deletions pkg/upgraders/osdupgrader.go
@@ -64,6 +64,7 @@ func NewOSDUpgrader(c client.Client, cfm configmanager.ConfigManager, mc metrics
steps := []upgradesteps.UpgradeStep{
upgradesteps.Action(string(upgradev1alpha1.SendStartedNotification), ou.SendStartedNotification),
upgradesteps.Action(string(upgradev1alpha1.SendStartedNotification), ou.UpgradeDelayedCheck),
upgradesteps.Action(string(upgradev1alpha1.IsClusterUpgradable), ou.IsUpgradeable),
upgradesteps.Action(string(upgradev1alpha1.UpgradePreHealthCheck), ou.PreUpgradeHealthCheck),
upgradesteps.Action(string(upgradev1alpha1.ExtDepAvailabilityCheck), ou.ExternalDependencyAvailabilityCheck),
upgradesteps.Action(string(upgradev1alpha1.UpgradeScaleUpExtraNodes), ou.EnsureExtraUpgradeWorkers),
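The new step is registered with the same function shape as the existing checks: each action takes a context and a logger and returns whether the upgrade may proceed, as IsUpgradeable does in pkg/upgraders/upgradeable.go below. The runner here is an illustrative sketch of that contract only, not the operator's actual upgradesteps implementation:

package main

import (
	"context"
	"fmt"

	"github.com/go-logr/logr"
)

// stepFn mirrors the signature shared by the step actions registered above:
// it reports whether the upgrade may proceed, or an error describing why not.
type stepFn func(ctx context.Context, logger logr.Logger) (bool, error)

// runSteps is an illustrative runner: it stops at the first step that errors
// or declines to proceed. The operator's real step machinery is more involved.
func runSteps(ctx context.Context, logger logr.Logger, names []string, steps map[string]stepFn) error {
	for _, name := range names {
		ok, err := steps[name](ctx, logger)
		if err != nil {
			return fmt.Errorf("step %s failed: %w", name, err)
		}
		if !ok {
			return fmt.Errorf("step %s did not complete", name)
		}
	}
	return nil
}

func main() {
	steps := map[string]stepFn{
		"IsClusterUpgradable": func(ctx context.Context, logger logr.Logger) (bool, error) {
			return true, nil // stand-in for (*clusterUpgrader).IsUpgradeable
		},
	}
	fmt.Println("result:", runSteps(context.Background(), logr.Discard(), []string{"IsClusterUpgradable"}, steps))
}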
51 changes: 51 additions & 0 deletions pkg/upgraders/upgradeable.go
@@ -0,0 +1,51 @@
package upgraders

import (
"context"
"fmt"

"github.com/blang/semver"
"github.com/go-logr/logr"
configv1 "github.com/openshift/api/config/v1"
upgradev1alpha1 "github.com/openshift/managed-upgrade-operator/pkg/apis/upgrade/v1alpha1"
cv "github.com/openshift/managed-upgrade-operator/pkg/clusterversion"
)

func (c *clusterUpgrader) IsUpgradeable(ctx context.Context, logger logr.Logger) (bool, error) {
upgradeCommenced, err := c.cvClient.HasUpgradeCommenced(c.upgradeConfig)
if err != nil {
return false, err
}
if upgradeCommenced {
logger.Info(fmt.Sprintf("Skipping upgrade step %s", upgradev1alpha1.IsClusterUpgradable))
return true, nil
}

clusterVersion, err := c.cvClient.GetClusterVersion()
if err != nil {
return false, err
}
currentVersion, err := cv.GetCurrentVersion(clusterVersion)
if err != nil {
return false, err
}
parsedCurrentVersion, err := semver.Parse(currentVersion)
if err != nil {
return false, err
}

desiredVersion := c.upgradeConfig.Spec.Desired.Version
parsedDesiredVersion, err := semver.Parse(desiredVersion)
if err != nil {
return false, err
}

// If the Upgradeable condition is false, compare the current and desired versions: a y-stream (minor version) upgrade must not proceed while Upgradeable is false.
for _, condition := range clusterVersion.Status.Conditions {
if condition.Type == configv1.OperatorUpgradeable && condition.Status == configv1.ConditionFalse && parsedDesiredVersion.Major >= parsedCurrentVersion.Major && parsedDesiredVersion.Minor > parsedCurrentVersion.Minor {
return false, fmt.Errorf("Cluster upgrade to version %s is canceled with the reason of %s containing message that %s Automated upgrades will be retried on their next scheduling cycle. If you have manually scheduled an upgrade instead, it must be rescheduled.", desiredVersion, condition.Reason, condition.Message)
}
}

return true, nil
}
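To make the y-stream gate concrete, here is a small standalone example of the same comparison, using only the blang/semver calls already imported above: while Upgradeable is False, a 4.8 to 4.9 (minor bump) upgrade is refused, whereas a 4.8.14 to 4.8.18 (z-stream) upgrade is not caught by this check.

package main

import (
	"fmt"

	"github.com/blang/semver"
)

// blockedByUpgradeableFalse mirrors the version comparison above: when the
// ClusterVersion Upgradeable condition is False, only upgrades that raise the
// minor (y-stream) version are refused.
func blockedByUpgradeableFalse(current, desired string) (bool, error) {
	cur, err := semver.Parse(current)
	if err != nil {
		return false, err
	}
	des, err := semver.Parse(desired)
	if err != nil {
		return false, err
	}
	return des.Major >= cur.Major && des.Minor > cur.Minor, nil
}

func main() {
	for _, pair := range [][2]string{
		{"4.8.14", "4.9.5"},  // y-stream bump: blocked while Upgradeable=False
		{"4.8.14", "4.8.18"}, // z-stream bump: not blocked by this check
	} {
		blocked, err := blockedByUpgradeableFalse(pair[0], pair[1])
		if err != nil {
			panic(err)
		}
		fmt.Printf("%s -> %s blocked: %v\n", pair[0], pair[1], blocked)
	}
}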
168 changes: 168 additions & 0 deletions pkg/upgraders/upgradeable_test.go
@@ -0,0 +1,168 @@
package upgraders

import (
"context"
"time"

. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"

"github.com/go-logr/logr"
"github.com/golang/mock/gomock"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
logf "sigs.k8s.io/controller-runtime/pkg/log"

configv1 "github.com/openshift/api/config/v1"
upgradev1alpha1 "github.com/openshift/managed-upgrade-operator/pkg/apis/upgrade/v1alpha1"
cvMocks "github.com/openshift/managed-upgrade-operator/pkg/clusterversion/mocks"
mockDrain "github.com/openshift/managed-upgrade-operator/pkg/drain/mocks"
emMocks "github.com/openshift/managed-upgrade-operator/pkg/eventmanager/mocks"
mockMachinery "github.com/openshift/managed-upgrade-operator/pkg/machinery/mocks"
mockMaintenance "github.com/openshift/managed-upgrade-operator/pkg/maintenance/mocks"
mockMetrics "github.com/openshift/managed-upgrade-operator/pkg/metrics/mocks"
mockScaler "github.com/openshift/managed-upgrade-operator/pkg/scaler/mocks"
"github.com/openshift/managed-upgrade-operator/util/mocks"
testStructs "github.com/openshift/managed-upgrade-operator/util/mocks/structs"
)

var _ = Describe("UpgradableCheckStep", func() {
var (
logger logr.Logger

// mocks
mockKubeClient *mocks.MockClient
mockCtrl *gomock.Controller
mockMaintClient *mockMaintenance.MockMaintenance
mockScalerClient *mockScaler.MockScaler
mockMachineryClient *mockMachinery.MockMachinery
mockMetricsClient *mockMetrics.MockMetrics
mockCVClient *cvMocks.MockClusterVersion
mockDrainStrategyBuilder *mockDrain.MockNodeDrainStrategyBuilder
mockEMClient *emMocks.MockEventManager

// upgradeconfig to be used during tests
upgradeConfigName types.NamespacedName
upgradeConfig *upgradev1alpha1.UpgradeConfig

// upgrader to be used during tests
config *upgraderConfig
upgrader *osdUpgrader

currentClusterVersion *configv1.ClusterVersion
)

BeforeEach(func() {
upgradeConfigName = types.NamespacedName{
Name: "test-upgradeconfig",
Namespace: "test-namespace",
}
upgradeConfig = testStructs.NewUpgradeConfigBuilder().WithNamespacedName(upgradeConfigName).GetUpgradeConfig()
mockCtrl = gomock.NewController(GinkgoT())
mockKubeClient = mocks.NewMockClient(mockCtrl)
mockMaintClient = mockMaintenance.NewMockMaintenance(mockCtrl)
mockMetricsClient = mockMetrics.NewMockMetrics(mockCtrl)
mockScalerClient = mockScaler.NewMockScaler(mockCtrl)
mockMachineryClient = mockMachinery.NewMockMachinery(mockCtrl)
mockCVClient = cvMocks.NewMockClusterVersion(mockCtrl)
mockDrainStrategyBuilder = mockDrain.NewMockNodeDrainStrategyBuilder(mockCtrl)
mockEMClient = emMocks.NewMockEventManager(mockCtrl)
logger = logf.Log.WithName("cluster upgrader test logger")
config = buildTestUpgraderConfig(90, 30, 8, 120, 30)
upgrader = &osdUpgrader{
clusterUpgrader: &clusterUpgrader{
client: mockKubeClient,
metrics: mockMetricsClient,
cvClient: mockCVClient,
notifier: mockEMClient,
config: config,
scaler: mockScalerClient,
drainstrategyBuilder: mockDrainStrategyBuilder,
maintenance: mockMaintClient,
machinery: mockMachineryClient,
upgradeConfig: upgradeConfig,
},
}
currentClusterVersion = &configv1.ClusterVersion{
Status: configv1.ClusterVersionStatus{
Conditions: []configv1.ClusterOperatorStatusCondition{
{
Type: configv1.OperatorUpgradeable,
Status: configv1.ConditionFalse,
Reason: "IsClusterUpgradable not done",
Message: "Kubernetes 1.22 and therefore OpenShift 4.9 remove several APIs which require admin consideration. Please see the knowledge article https://access.redhat.com/articles/6329921 for details and instructions.",
},
},
History: []configv1.UpdateHistory{
{
State: "fakeState",
StartedTime: v1.Time{
Time: time.Now().UTC(),
},
CompletionTime: &v1.Time{
Time: time.Now().UTC(),
},
Version: "fakeVersion",
Verified: false,
},
},
},
}
})

AfterEach(func() {
mockCtrl.Finish()
})

Context("When running the IsUpgradable check", func() {
Context("When current 'y' stream version is lower then desired version", func() {
BeforeEach(func() {
upgradeConfig.Spec.Desired.Version = "1.2.3"
currentClusterVersion.Status.History = []configv1.UpdateHistory{{State: configv1.CompletedUpdate, Version: "1.1.3"}}
})
It("will not perform upgrade", func() {
gomock.InOrder(
mockCVClient.EXPECT().HasUpgradeCommenced(gomock.Any()).Return(false, nil),
mockCVClient.EXPECT().GetClusterVersion().Return(currentClusterVersion, nil),
)
result, err := upgrader.IsUpgradeable(context.TODO(), logger)
Expect(err).To(HaveOccurred())
Expect(result).To(BeFalse())
})
})

Context("When Upgradeable condition exists and is set to true", func() {
BeforeEach(func() {
upgradeConfig.Spec.Desired.Version = "1.2.3"
currentClusterVersion.Status.History = []configv1.UpdateHistory{{State: configv1.CompletedUpdate, Version: "1.1.3"}}
currentClusterVersion.Status.Conditions = []configv1.ClusterOperatorStatusCondition{{Type: configv1.OperatorUpgradeable, Status: configv1.ConditionTrue}}
})
It("will perform upgrade", func() {
gomock.InOrder(
mockCVClient.EXPECT().HasUpgradeCommenced(gomock.Any()).Return(false, nil),
mockCVClient.EXPECT().GetClusterVersion().Return(currentClusterVersion, nil),
)
result, err := upgrader.IsUpgradeable(context.TODO(), logger)
Expect(err).ToNot(HaveOccurred())
Expect(result).To(BeTrue())
})
})

Context("When the clusterversion does not have Upgradeable condition", func() {
BeforeEach(func() {
upgradeConfig.Spec.Desired.Version = "1.2.3"
currentClusterVersion.Status.History = []configv1.UpdateHistory{{State: configv1.CompletedUpdate, Version: "1.1.3"}}
currentClusterVersion.Status.Conditions = []configv1.ClusterOperatorStatusCondition{{Type: configv1.OperatorDegraded}}
})
It("will perform upgrade", func() {
gomock.InOrder(
mockCVClient.EXPECT().HasUpgradeCommenced(gomock.Any()).Return(false, nil),
mockCVClient.EXPECT().GetClusterVersion().Return(currentClusterVersion, nil),
)
result, err := upgrader.IsUpgradeable(context.TODO(), logger)
Expect(err).ToNot(HaveOccurred())
Expect(result).To(BeTrue())
})
})
})
})
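The contexts above cover a blocked y-stream upgrade, an Upgradeable=True condition, and a missing Upgradeable condition. A z-stream scenario is not part of this file; the sketch below shows what such an extra Context could look like if slotted into the Describe block above, reusing only fixtures and mocks already defined there (it is not code from this PR and is not runnable on its own):

Context("When only the 'z' stream version differs and Upgradeable is false", func() {
	BeforeEach(func() {
		upgradeConfig.Spec.Desired.Version = "1.1.5"
		currentClusterVersion.Status.History = []configv1.UpdateHistory{{State: configv1.CompletedUpdate, Version: "1.1.3"}}
	})
	It("will perform upgrade", func() {
		gomock.InOrder(
			mockCVClient.EXPECT().HasUpgradeCommenced(gomock.Any()).Return(false, nil),
			mockCVClient.EXPECT().GetClusterVersion().Return(currentClusterVersion, nil),
		)
		result, err := upgrader.IsUpgradeable(context.TODO(), logger)
		// The minor version is unchanged, so the gate does not block the upgrade.
		Expect(err).ToNot(HaveOccurred())
		Expect(result).To(BeTrue())
	})
})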