Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
632 changes: 0 additions & 632 deletions .tekton/assisted-service-rhel8-acm-ds-2-15-pull-request.yaml

This file was deleted.

629 changes: 0 additions & 629 deletions .tekton/assisted-service-rhel8-acm-ds-2-15-push.yaml

This file was deleted.

632 changes: 0 additions & 632 deletions .tekton/assisted-service-rhel8-acm-ds-main-pull-request.yaml

This file was deleted.

629 changes: 0 additions & 629 deletions .tekton/assisted-service-rhel8-acm-ds-main-push.yaml

This file was deleted.

632 changes: 0 additions & 632 deletions .tekton/assisted-service-rhel9-acm-ds-2-15-pull-request.yaml

This file was deleted.

629 changes: 0 additions & 629 deletions .tekton/assisted-service-rhel9-acm-ds-2-15-push.yaml

This file was deleted.

568 changes: 0 additions & 568 deletions .tekton/assisted-service-saas-main-pull-request.yaml

This file was deleted.

565 changes: 0 additions & 565 deletions .tekton/assisted-service-saas-main-push.yaml

This file was deleted.

Large diffs are not rendered by default.

Large diffs are not rendered by default.

10 changes: 5 additions & 5 deletions internal/bminventory/inventory.go
Original file line number Diff line number Diff line change
Expand Up @@ -1324,7 +1324,7 @@ func (b *bareMetalInventory) InstallClusterInternal(ctx context.Context, params
}

// Refresh schedulable masters again after all roles are assigned
if internalErr := b.clusterApi.RefreshSchedulableMastersForcedTrue(ctx, *cluster.ID); internalErr != nil {
if internalErr := b.clusterApi.RefreshSchedulableMastersForcedTrue(ctx, cluster); internalErr != nil {
log.WithError(internalErr).Errorf("Failed to refresh SchedulableMastersForcedTrue while installing cluster <%s>", cluster.ID)
return internalErr
}
Expand Down Expand Up @@ -3488,7 +3488,7 @@ func (b *bareMetalInventory) V2DeregisterHostInternal(ctx context.Context, param
log.WithError(err).Warnf("Failed to refresh cluster after de-registerating host <%s>", params.HostID)
}
}
if err := b.clusterApi.RefreshSchedulableMastersForcedTrue(ctx, *h.ClusterID); err != nil {
if err := b.clusterApi.RefreshSchedulableMastersForcedTrueWithClusterID(ctx, *h.ClusterID); err != nil {
log.WithError(err).Errorf("Failed to refresh SchedulableMastersForcedTrue while de-registering host <%s> to cluster <%s>", h.ID, h.ClusterID)
return err
}
Expand Down Expand Up @@ -5678,7 +5678,7 @@ func (b *bareMetalInventory) V2RegisterHost(ctx context.Context, params installe
}

if host.ClusterID != nil {
if err = b.clusterApi.RefreshSchedulableMastersForcedTrue(ctx, *host.ClusterID); err != nil {
if err = b.clusterApi.RefreshSchedulableMastersForcedTrueWithClusterID(ctx, *host.ClusterID); err != nil {
log.WithError(err).Errorf("Failed to refresh SchedulableMastersForcedTrue while registering host <%s> to cluster <%s>", host.ID, host.ClusterID)
return installer.NewV2RegisterHostInternalServerError().
WithPayload(common.GenerateError(http.StatusInternalServerError, err))
Expand Down Expand Up @@ -5936,7 +5936,7 @@ func (b *bareMetalInventory) BindHostInternal(ctx context.Context, params instal
return nil, common.NewApiError(http.StatusInternalServerError, err)
}

if err = b.clusterApi.RefreshSchedulableMastersForcedTrue(ctx, *cluster.ID); err != nil {
if err = b.clusterApi.RefreshSchedulableMastersForcedTrue(ctx, cluster); err != nil {
log.WithError(err).Errorf("Failed to refresh SchedulableMastersForcedTrue while binding host <%s> to cluster <%s>", host.ID, host.ClusterID)
return nil, common.NewApiError(http.StatusInternalServerError, err)
}
Expand Down Expand Up @@ -5982,7 +5982,7 @@ func (b *bareMetalInventory) UnbindHostInternal(ctx context.Context, params inst
}
}

if err = b.clusterApi.RefreshSchedulableMastersForcedTrue(ctx, *host.ClusterID); err != nil {
if err = b.clusterApi.RefreshSchedulableMastersForcedTrueWithClusterID(ctx, *host.ClusterID); err != nil {
log.WithError(err).Errorf("Failed to refresh SchedulableMastersForcedTrue while unbinding host <%s> to cluster <%s>", host.ID, host.ClusterID)
return nil, common.NewApiError(http.StatusInternalServerError, err)
}
Expand Down
10 changes: 5 additions & 5 deletions internal/bminventory/inventory_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -621,7 +621,7 @@ var _ = Describe("RegisterHost", func() {
infraEnv := createInfraEnv(db, *cluster.ID, *cluster.ID)

mockClusterApi.EXPECT().AcceptRegistration(gomock.Any()).Return(nil).Times(1)
mockClusterApi.EXPECT().RefreshSchedulableMastersForcedTrue(gomock.Any(), gomock.Any()).Return(nil).Times(1)
mockClusterApi.EXPECT().RefreshSchedulableMastersForcedTrueWithClusterID(gomock.Any(), gomock.Any()).Return(nil).Times(1)
mockHostApi.EXPECT().RegisterHost(gomock.Any(), gomock.Any(), gomock.Any()).
DoAndReturn(func(ctx context.Context, h *models.Host, db *gorm.DB) error {
// validate that host is registered with auto-assign role
Expand Down Expand Up @@ -662,7 +662,7 @@ var _ = Describe("RegisterHost", func() {
infraEnv := createInfraEnv(db, *cluster.ID, *cluster.ID)

mockClusterApi.EXPECT().AcceptRegistration(gomock.Any()).Return(nil).Times(1)
mockClusterApi.EXPECT().RefreshSchedulableMastersForcedTrue(gomock.Any(), gomock.Any()).Return(nil).Times(1)
mockClusterApi.EXPECT().RefreshSchedulableMastersForcedTrueWithClusterID(gomock.Any(), gomock.Any()).Return(nil).Times(1)
mockHostApi.EXPECT().RegisterHost(gomock.Any(), gomock.Any(), gomock.Any()).
DoAndReturn(func(ctx context.Context, h *models.Host, db *gorm.DB) error {
// validate that host is registered with auto-assign role
Expand Down Expand Up @@ -781,7 +781,7 @@ var _ = Describe("RegisterHost", func() {
}).Times(1)
mockHostApi.EXPECT().GetStagesByRole(gomock.Any(), gomock.Any()).Return(nil).Times(1)
mockCRDUtils.EXPECT().CreateAgentCR(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil).Times(1)
mockClusterApi.EXPECT().RefreshSchedulableMastersForcedTrue(gomock.Any(), gomock.Any()).Return(nil).Times(1)
mockClusterApi.EXPECT().RefreshSchedulableMastersForcedTrueWithClusterID(gomock.Any(), gomock.Any()).Return(nil).Times(1)

By("trying to register a host bound to day2 cluster")
reply := bm.V2RegisterHost(ctx, installer.V2RegisterHostParams{
Expand Down Expand Up @@ -18227,7 +18227,7 @@ var _ = Describe("V2DeregisterHost", func() {
eventstest.WithInfraEnvIdMatcher(infraEnvID.String()),
eventstest.WithSeverityMatcher(models.EventSeverityInfo)))
mockClusterApi.EXPECT().RefreshStatus(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, fmt.Errorf("Bad Refresh Status"))
mockClusterApi.EXPECT().RefreshSchedulableMastersForcedTrue(gomock.Any(), gomock.Any()).Return(nil).Times(1)
mockClusterApi.EXPECT().RefreshSchedulableMastersForcedTrueWithClusterID(gomock.Any(), gomock.Any()).Return(nil).Times(1)
response := bm.V2DeregisterHost(ctx, params)
Expect(response).To(BeAssignableToTypeOf(&installer.V2DeregisterHostNoContent{}))
})
Expand Down Expand Up @@ -18272,7 +18272,7 @@ var _ = Describe("UnbindHost", func() {
eventstest.WithInfraEnvIdMatcher(infraEnvID.String()),
eventstest.WithSeverityMatcher(models.EventSeverityInfo)))
mockHostApi.EXPECT().UnbindHost(ctx, gomock.Any(), gomock.Any(), false)
mockClusterApi.EXPECT().RefreshSchedulableMastersForcedTrue(gomock.Any(), gomock.Any()).Return(nil).Times(1)
mockClusterApi.EXPECT().RefreshSchedulableMastersForcedTrueWithClusterID(gomock.Any(), gomock.Any()).Return(nil).Times(1)
response := bm.UnbindHost(ctx, params)
Expect(response).To(BeAssignableToTypeOf(&installer.UnbindHostOK{}))
})
Expand Down
27 changes: 17 additions & 10 deletions internal/cluster/cluster.go
Original file line number Diff line number Diff line change
Expand Up @@ -130,7 +130,8 @@ type API interface {
PermanentClustersDeletion(ctx context.Context, olderThan strfmt.DateTime, objectHandler s3wrapper.API) error
DeregisterInactiveCluster(ctx context.Context, maxDeregisterPerInterval int, inactiveSince strfmt.DateTime) error
TransformClusterToDay2(ctx context.Context, cluster *common.Cluster, db *gorm.DB) error
RefreshSchedulableMastersForcedTrue(ctx context.Context, clusterID strfmt.UUID) error
RefreshSchedulableMastersForcedTrue(ctx context.Context, cluster *common.Cluster) error
RefreshSchedulableMastersForcedTrueWithClusterID(ctx context.Context, clusterID strfmt.UUID) error
HandleVerifyVipsResponse(ctx context.Context, clusterID strfmt.UUID, stepReply string) error
UpdateFinalizingStage(ctx context.Context, clusterID strfmt.UUID, finalizingStage models.FinalizingStage) error
}
Expand Down Expand Up @@ -656,12 +657,12 @@ func (m *Manager) ClusterMonitoring() {
m.triggerLeaseTimeoutEvent(ctx, cluster)
}

if err := m.RefreshSchedulableMastersForcedTrue(ctx, *cluster.ID); err != nil {
if err := m.RefreshSchedulableMastersForcedTrue(ctx, cluster); err != nil {
log.WithError(err).Errorf("failed to refresh cluster with ID '%s' masters schedulability", string(*cluster.ID))
}
duration := float64(time.Since(startTime).Milliseconds())

m.metricAPI.MonitoredClustersDurationMs(duration)
m.metricAPI.MonitoredClustersDurationMs(ctx, *cluster.ID, duration)
}
}
offset += limit
Expand Down Expand Up @@ -1734,8 +1735,19 @@ func (m *Manager) TransformClusterToDay2(ctx context.Context, cluster *common.Cl
return nil
}

func (m *Manager) RefreshSchedulableMastersForcedTrue(ctx context.Context, clusterID strfmt.UUID) error {
// RefreshSchedulableMastersForcedTrue recomputes whether masters must be forced
// schedulable for the given cluster (the value depends on the number of hosts
// registered with the cluster) and persists it only when it differs from the
// currently stored value (or when no value has been stored yet).
func (m *Manager) RefreshSchedulableMastersForcedTrue(ctx context.Context, cluster *common.Cluster) error {
	shouldForce := common.ShouldMastersBeSchedulable(&cluster.Cluster)

	// No stored value yet, or the computed value changed: write it through.
	if cluster.SchedulableMastersForcedTrue == nil || *cluster.SchedulableMastersForcedTrue != shouldForce {
		return m.updateSchedulableMastersForcedTrue(ctx, *cluster.ID, shouldForce)
	}

	// Stored value is already up to date; nothing to persist.
	return nil
}

func (m *Manager) RefreshSchedulableMastersForcedTrueWithClusterID(ctx context.Context, clusterID strfmt.UUID) error {
log := logutil.FromContext(ctx, m.log)
var cluster *common.Cluster
var err error
Expand All @@ -1745,12 +1757,7 @@ func (m *Manager) RefreshSchedulableMastersForcedTrue(ctx context.Context, clust
return err
}

newSchedulableMastersForcedTrue := common.ShouldMastersBeSchedulable(&cluster.Cluster)
if cluster.SchedulableMastersForcedTrue == nil || newSchedulableMastersForcedTrue != *cluster.SchedulableMastersForcedTrue {
err = m.updateSchedulableMastersForcedTrue(ctx, clusterID, newSchedulableMastersForcedTrue)
}

return err
return m.RefreshSchedulableMastersForcedTrue(ctx, cluster)
}

func (m *Manager) updateSchedulableMastersForcedTrue(ctx context.Context, clusterID strfmt.UUID, newSchedulableMastersForcedTrue bool) error {
Expand Down
28 changes: 14 additions & 14 deletions internal/cluster/cluster_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -424,7 +424,7 @@ var _ = Describe("TestClusterMonitoring", func() {
It("finalizing -> installed (kubeconfig exist, operator status available)", func() {
shouldHaveUpdated = true
expectedState = models.ClusterStatusInstalled
mockMetric.EXPECT().MonitoredClustersDurationMs(gomock.Any()).Times(0)
mockMetric.EXPECT().MonitoredClustersDurationMs(gomock.Any(), gomock.Any(), gomock.Any()).Times(0)
mockS3Client.EXPECT().DoesObjectExist(gomock.Any(), gomock.Any()).Return(true, nil).AnyTimes()
mockMetric.EXPECT().ClusterInstallationFinished(gomock.Any(), expectedState, models.ClusterStatusFinalizing, gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes()
Expect(db.Model(c.MonitoredOperators[0]).Updates(map[string]interface{}{"status": models.OperatorStatusAvailable}).Error).To(Not(HaveOccurred()))
Expand All @@ -435,7 +435,7 @@ var _ = Describe("TestClusterMonitoring", func() {
createWorkerHost(id, models.HostStatusInstallingInProgress, db)
shouldHaveUpdated = false
expectedState = models.ClusterStatusFinalizing
mockMetric.EXPECT().MonitoredClustersDurationMs(gomock.Any()).Times(0)
mockMetric.EXPECT().MonitoredClustersDurationMs(gomock.Any(), gomock.Any(), gomock.Any()).Times(0)
mockS3Client.EXPECT().DoesObjectExist(gomock.Any(), gomock.Any()).Return(true, nil).AnyTimes()
mockMetric.EXPECT().ClusterInstallationFinished(gomock.Any(), expectedState, models.ClusterStatusFinalizing, gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes()
Expect(db.Model(c.MonitoredOperators[0]).Updates(map[string]interface{}{"status": models.OperatorStatusAvailable}).Error).To(Not(HaveOccurred()))
Expand All @@ -446,7 +446,7 @@ var _ = Describe("TestClusterMonitoring", func() {
createWorkerHost(id, models.HostStatusError, db)
shouldHaveUpdated = true
expectedState = models.ClusterStatusInstalled
mockMetric.EXPECT().MonitoredClustersDurationMs(gomock.Any()).Times(0)
mockMetric.EXPECT().MonitoredClustersDurationMs(gomock.Any(), gomock.Any(), gomock.Any()).Times(0)
mockS3Client.EXPECT().DoesObjectExist(gomock.Any(), gomock.Any()).Return(true, nil).AnyTimes()
mockMetric.EXPECT().ClusterInstallationFinished(gomock.Any(), expectedState, models.ClusterStatusFinalizing, gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes()
Expect(db.Model(c.MonitoredOperators[0]).Updates(map[string]interface{}{"status": models.OperatorStatusAvailable}).Error).To(Not(HaveOccurred()))
Expand All @@ -456,7 +456,7 @@ var _ = Describe("TestClusterMonitoring", func() {
Context("from installed state", func() {
BeforeEach(func() {
c = createCluster(&id, models.ClusterStatusInstalled, statusInfoInstalled)
mockMetric.EXPECT().MonitoredClustersDurationMs(gomock.Any()).AnyTimes()
mockMetric.EXPECT().MonitoredClustersDurationMs(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes()
mockMetric.EXPECT().ClusterInstallationFinished(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes()
})

Expand Down Expand Up @@ -640,7 +640,7 @@ var _ = Describe("TestClusterMonitoring", func() {
mockEvents.EXPECT().SendClusterEvent(gomock.Any(), eventstest.NewEventMatcher(
eventstest.WithClusterIdMatcher(c.ID.String()))).AnyTimes()
mockHostAPIIsRequireUserActionResetFalse()
mockMetric.EXPECT().MonitoredClustersDurationMs(gomock.Any()).AnyTimes()
mockMetric.EXPECT().MonitoredClustersDurationMs(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes()
clusterApi.ClusterMonitoring()
after := time.Now().Truncate(10 * time.Millisecond)
c = getClusterFromDB(id, db)
Expand Down Expand Up @@ -697,7 +697,7 @@ var _ = Describe("TestClusterMonitoring", func() {

}

mockMetric.EXPECT().MonitoredClustersDurationMs(gomock.Any()).Times(nClusters)
mockMetric.EXPECT().MonitoredClustersDurationMs(gomock.Any(), gomock.Any(), gomock.Any()).Times(nClusters)
clusterApi.ClusterMonitoring()

var count int64
Expand Down Expand Up @@ -734,7 +734,7 @@ var _ = Describe("TestClusterMonitoring", func() {

Expect(db.Create(&c).Error).ShouldNot(HaveOccurred())
Expect(err).ShouldNot(HaveOccurred())
mockMetric.EXPECT().MonitoredClustersDurationMs(gomock.Any()).AnyTimes()
mockMetric.EXPECT().MonitoredClustersDurationMs(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes()
mockEvents.EXPECT().SendClusterEvent(gomock.Any(), eventstest.NewEventMatcher(
eventstest.WithNameMatcher(eventgen.ClusterStatusUpdatedEventName))).Times(0)
mockHostAPI.EXPECT().IsRequireUserActionReset(gomock.Any()).Return(false).Times(0)
Expand Down Expand Up @@ -785,7 +785,7 @@ var _ = Describe("lease timeout event", func() {
clusterApi = NewManager(getDefaultConfig(), common.GetTestLog().WithField("pkg", "cluster-monitor"), db, commontesting.GetDummyNotificationStream(ctrl),
mockEvents, mockEventsUploader, mockHostAPI, mockMetric, nil, dummy, mockOperators, nil, nil, nil, nil, nil, false, nil)

mockMetric.EXPECT().MonitoredClustersDurationMs(gomock.Any()).Times(1)
mockMetric.EXPECT().MonitoredClustersDurationMs(gomock.Any(), gomock.Any(), gomock.Any()).Times(1)
mockMetric.EXPECT().Duration("ClusterMonitoring", gomock.Any()).AnyTimes()
mockNoChangeInOperatorDependencies(mockOperators)
mockOperators.EXPECT().ValidateCluster(gomock.Any(), gomock.Any()).AnyTimes().Return([]api.ValidationResult{
Expand Down Expand Up @@ -917,7 +917,7 @@ var _ = Describe("Auto assign machine CIDR", func() {
clusterApi = NewManager(getDefaultConfig(), common.GetTestLog().WithField("pkg", "cluster-monitor"), db, commontesting.GetDummyNotificationStream(ctrl),
mockEvents, mockEventsUploader, mockHostAPI, mockMetric, nil, dummy, mockOperators, nil, nil, nil, nil, nil, false, nil)

mockMetric.EXPECT().MonitoredClustersDurationMs(gomock.Any()).Times(1)
mockMetric.EXPECT().MonitoredClustersDurationMs(gomock.Any(), gomock.Any(), gomock.Any()).Times(1)
mockMetric.EXPECT().Duration("ClusterMonitoring", gomock.Any()).AnyTimes()
mockNoChangeInOperatorDependencies(mockOperators)
mockOperators.EXPECT().ValidateCluster(gomock.Any(), gomock.Any()).AnyTimes().Return([]api.ValidationResult{
Expand Down Expand Up @@ -2328,7 +2328,7 @@ var _ = Describe("Majority groups", func() {
}}
Expect(db.Create(&cluster).Error).ShouldNot(HaveOccurred())

mockMetricApi.EXPECT().MonitoredClustersDurationMs(gomock.Any()).AnyTimes()
mockMetricApi.EXPECT().MonitoredClustersDurationMs(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes()
mockMetricApi.EXPECT().Duration("ClusterMonitoring", gomock.Any()).AnyTimes()
mockNoChangeInOperatorDependencies(mockOperators)
mockOperators.EXPECT().ValidateCluster(gomock.Any(), gomock.Any()).AnyTimes().Return([]api.ValidationResult{
Expand Down Expand Up @@ -3930,7 +3930,7 @@ var _ = Describe("Test RefreshSchedulableMastersForcedTrue", func() {
createWorkerHost(*cluster.ID, "", db)
}

err := clusterApi.RefreshSchedulableMastersForcedTrue(ctx, *cluster.ID)
err := clusterApi.RefreshSchedulableMastersForcedTrueWithClusterID(ctx, *cluster.ID)
Expect(err).ToNot(HaveOccurred())

cluster = getClusterFromDB(*cluster.ID, db)
Expand All @@ -3943,7 +3943,7 @@ var _ = Describe("Test RefreshSchedulableMastersForcedTrue", func() {
createHost(*cluster.ID, "", db)
}

err := clusterApi.RefreshSchedulableMastersForcedTrue(ctx, *cluster.ID)
err := clusterApi.RefreshSchedulableMastersForcedTrueWithClusterID(ctx, *cluster.ID)
Expect(err).ToNot(HaveOccurred())

cluster = getClusterFromDB(*cluster.ID, db)
Expand All @@ -3953,7 +3953,7 @@ var _ = Describe("Test RefreshSchedulableMastersForcedTrue", func() {
It("schedulableMastersForcedTrue should set a value when the existing value is nil", func() {
cluster := createCluster(nil)

err := clusterApi.RefreshSchedulableMastersForcedTrue(ctx, *cluster.ID)
err := clusterApi.RefreshSchedulableMastersForcedTrueWithClusterID(ctx, *cluster.ID)
Expect(err).ToNot(HaveOccurred())

cluster = getClusterFromDB(*cluster.ID, db)
Expand All @@ -3962,7 +3962,7 @@ var _ = Describe("Test RefreshSchedulableMastersForcedTrue", func() {

It("schedulableMastersForcedTrue should return an error when the cluster does not exists", func() {
invalidClusterID := strfmt.UUID(uuid.New().String())
err := clusterApi.RefreshSchedulableMastersForcedTrue(ctx, invalidClusterID)
err := clusterApi.RefreshSchedulableMastersForcedTrueWithClusterID(ctx, invalidClusterID)
Expect(err).To(HaveOccurred())
})
})
Expand Down
22 changes: 18 additions & 4 deletions internal/cluster/mock_cluster_api.go

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

Loading