diff --git a/pkg/splunk/enterprise/clustermanager.go b/pkg/splunk/enterprise/clustermanager.go
index e25c13ea7..fad4c23b8 100644
--- a/pkg/splunk/enterprise/clustermanager.go
+++ b/pkg/splunk/enterprise/clustermanager.go
@@ -232,6 +232,12 @@ func ApplyClusterManager(ctx context.Context, client splcommon.ControllerClient,
 		finalResult := handleAppFrameworkActivity(ctx, client, cr, &cr.Status.AppContext, &cr.Spec.AppFrameworkConfig)
 		result = *finalResult
+
+		// trigger a MonitoringConsole reconcile by changing its splunk/image-tag annotation
+		err = changeMonitoringConsoleAnnotations(ctx, client, cr)
+		if err != nil {
+			return result, err
+		}
 	}
 	// RequeueAfter if greater than 0, tells the Controller to requeue the reconcile key after the Duration.
 	// Implies that Requeue is true, there is no need to set Requeue to true at the same time as RequeueAfter.
@@ -497,7 +503,8 @@ func isClusterManagerReadyForUpgrade(ctx context.Context, c splcommon.Controller
 		return false, err
 	}
 
-	// check if an image upgrade is happening and whether the ClusterManager is ready for the upgrade
+	// check if an image upgrade is in progress and whether the LicenseManager has finished updating; return false
+	// to pause further reconcile operations on the ClusterManager until the LicenseManager is ready
 	if (cr.Spec.Image != cmImage) && (licenseManager.Status.Phase != enterpriseApi.PhaseReady || lmImage != cr.Spec.Image) {
 		return false, nil
 	}
@@ -539,7 +546,6 @@ func changeClusterManagerAnnotations(ctx context.Context, c splcommon.Controller
 		}
 		return err
 	}
-
 	if len(objectList.Items) == 0 {
 		return nil
 	}
diff --git a/pkg/splunk/enterprise/clustermanager_test.go b/pkg/splunk/enterprise/clustermanager_test.go
index 63314c870..01be64670 100644
--- a/pkg/splunk/enterprise/clustermanager_test.go
+++ b/pkg/splunk/enterprise/clustermanager_test.go
@@ -531,9 +531,14 @@ func TestApplyClusterManagerWithSmartstore(t *testing.T) {
 		runtime.InNamespace("test"),
 		runtime.MatchingLabels(labels),
 	}
+	listOpts1 := []runtime.ListOption{
+		runtime.InNamespace("test"),
+	}
 	listmockCall := []spltest.MockFuncCall{
-		{ListOpts: listOpts}}
-	createCalls := map[string][]spltest.MockFuncCall{"Get": funcCalls, "Create": {funcCalls[7], funcCalls[10], funcCalls[12]}, "List": {listmockCall[0], listmockCall[0]}, "Update": {funcCalls[0], funcCalls[3], funcCalls[13]}}
+		{ListOpts: listOpts},
+		{ListOpts: listOpts1},
+	}
+	createCalls := map[string][]spltest.MockFuncCall{"Get": funcCalls, "Create": {funcCalls[7], funcCalls[10], funcCalls[12]}, "List": {listmockCall[0], listmockCall[0], listmockCall[1]}, "Update": {funcCalls[0], funcCalls[3], funcCalls[13]}}
 	updateCalls := map[string][]spltest.MockFuncCall{"Get": updateFuncCalls, "Update": {funcCalls[8]}, "List": {listmockCall[0]}}
 
 	current := enterpriseApi.ClusterManager{
diff --git a/pkg/splunk/enterprise/monitoringconsole.go b/pkg/splunk/enterprise/monitoringconsole.go
index 9b9b1f534..50c16891a 100644
--- a/pkg/splunk/enterprise/monitoringconsole.go
+++ b/pkg/splunk/enterprise/monitoringconsole.go
@@ -137,6 +137,12 @@ func ApplyMonitoringConsole(ctx context.Context, client splcommon.ControllerClie
 		return result, err
 	}
 
+	// check if the MonitoringConsole is ready for a version upgrade, if one is required
+	continueReconcile, err := isMonitoringConsoleReadyForUpgrade(ctx, client, cr)
+	if err != nil || !continueReconcile {
+		return result, err
+	}
+
 	mgr := splctrl.DefaultStatefulSetPodManager{}
 	phase, err := mgr.Update(ctx, client, statefulSet, 1)
 	if err != nil {
@@ -357,6 +363,65 @@ func DeleteURLsConfigMap(revised *corev1.ConfigMap, crName string, newURLs []cor
 	}
 }
 
+// isMonitoringConsoleReadyForUpgrade checks whether the MonitoringConsole can be upgraded while a version upgrade is in progress.
+// It is a no-operation otherwise, and returns (bool, error) accordingly.
+func isMonitoringConsoleReadyForUpgrade(ctx context.Context, c splcommon.ControllerClient, cr *enterpriseApi.MonitoringConsole) (bool, error) {
+	reqLogger := log.FromContext(ctx)
+	scopedLog := reqLogger.WithName("isMonitoringConsoleReadyForUpgrade").WithValues("name", cr.GetName(), "namespace", cr.GetNamespace())
+	eventPublisher, _ := newK8EventPublisher(c, cr)
+
+	// check if a ClusterManager is attached to the instance
+	clusterManagerRef := cr.Spec.ClusterManagerRef
+	if clusterManagerRef.Name == "" {
+		return true, nil
+	}
+
+	namespacedName := types.NamespacedName{
+		Namespace: cr.GetNamespace(),
+		Name:      GetSplunkStatefulsetName(SplunkMonitoringConsole, cr.GetName()),
+	}
+
+	// check if the statefulset for this instance has been created yet
+	statefulSet := &appsv1.StatefulSet{}
+	err := c.Get(ctx, namespacedName, statefulSet)
+	if err != nil && k8serrors.IsNotFound(err) {
+		return true, nil
+	}
+
+	namespacedName = types.NamespacedName{Namespace: cr.GetNamespace(), Name: clusterManagerRef.Name}
+	clusterManager := &enterpriseApi.ClusterManager{}
+
+	// get the ClusterManager referenced by the MonitoringConsole
+	err = c.Get(ctx, namespacedName, clusterManager)
+	if err != nil {
+		eventPublisher.Warning(ctx, "isMonitoringConsoleReadyForUpgrade", fmt.Sprintf("Could not find the Cluster Manager. Reason %v", err))
+		scopedLog.Error(err, "Unable to get clusterManager")
+		return true, err
+	}
+
+	cmImage, err := getCurrentImage(ctx, c, clusterManager, SplunkClusterManager)
+	if err != nil {
+		eventPublisher.Warning(ctx, "isMonitoringConsoleReadyForUpgrade", fmt.Sprintf("Could not get the Cluster Manager Image. Reason %v", err))
+		scopedLog.Error(err, "Unable to get clusterManager current image")
+		return false, err
+	}
+
+	mcImage, err := getCurrentImage(ctx, c, cr, SplunkMonitoringConsole)
+	if err != nil {
+		eventPublisher.Warning(ctx, "isMonitoringConsoleReadyForUpgrade", fmt.Sprintf("Could not get the Monitoring Console Image. Reason %v", err))
+		scopedLog.Error(err, "Unable to get monitoring console current image")
+		return false, err
+	}
+
+	// check if an image upgrade is in progress and whether the ClusterManager has finished updating; return false
+	// to pause further reconcile operations on the MonitoringConsole until the ClusterManager is ready
+	if (cr.Spec.Image != mcImage) && (clusterManager.Status.Phase != enterpriseApi.PhaseReady || cmImage != cr.Spec.Image) {
+		return false, nil
+	}
+
+	return true, nil
+}
+
 // changeMonitoringConsoleAnnotations updates the splunk/image-tag field of the MonitoringConsole annotations to trigger the reconcile loop
 // on update, and returns error if something is wrong.
 func changeMonitoringConsoleAnnotations(ctx context.Context, client splcommon.ControllerClient, cr *enterpriseApi.ClusterManager) error {
@@ -391,6 +456,9 @@ func changeMonitoringConsoleAnnotations(ctx context.Context, client splcommon.Co
 		}
 		return err
 	}
+	if len(objectList.Items) == 0 {
+		return nil
+	}
 
 	// check if instance has the required ClusterManagerRef
 	for _, mc := range objectList.Items {
diff --git a/pkg/splunk/enterprise/monitoringconsole_test.go b/pkg/splunk/enterprise/monitoringconsole_test.go
index 72efd15a7..e72750ec1 100644
--- a/pkg/splunk/enterprise/monitoringconsole_test.go
+++ b/pkg/splunk/enterprise/monitoringconsole_test.go
@@ -1100,3 +1100,173 @@ func TestGetMonitoringConsoleList(t *testing.T) {
 		t.Errorf("Got wrong number of IndexerCluster objects. Expected=%d, Got=%d", 1, numOfObjects)
 	}
 }
+
+func TestIsMonitoringConsoleReadyForUpgrade(t *testing.T) {
+	ctx := context.TODO()
+
+	builder := fake.NewClientBuilder()
+	client := builder.Build()
+	utilruntime.Must(enterpriseApi.AddToScheme(clientgoscheme.Scheme))
+
+	// Create Cluster Manager
+	cm := enterpriseApi.ClusterManager{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      "test",
+			Namespace: "test",
+		},
+		Spec: enterpriseApi.ClusterManagerSpec{
+			CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{
+				Spec: enterpriseApi.Spec{
+					ImagePullPolicy: "Always",
+					Image:           "splunk/splunk:latest",
+				},
+				Volumes: []corev1.Volume{},
+				MonitoringConsoleRef: corev1.ObjectReference{
+					Name: "test",
+				},
+			},
+		},
+	}
+
+	err := client.Create(ctx, &cm)
+	if err != nil {
+		t.Errorf("create should not have returned error; err=%v", err)
+	}
+	_, err = ApplyClusterManager(ctx, client, &cm)
+	if err != nil {
+		t.Errorf("applyClusterManager should not have returned error; err=%v", err)
+	}
+	cm.Status.Phase = enterpriseApi.PhaseReady
+	err = client.Status().Update(ctx, &cm)
+	if err != nil {
+		t.Errorf("Unexpected status update error %v", err)
+		debug.PrintStack()
+	}
+
+	// Create Monitoring Console
+	mc := enterpriseApi.MonitoringConsole{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      "test",
+			Namespace: "test",
+		},
+		Spec: enterpriseApi.MonitoringConsoleSpec{
+			CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{
+				Spec: enterpriseApi.Spec{
+					ImagePullPolicy: "Always",
+					Image:           "splunk/splunk:latest",
+				},
+				Volumes: []corev1.Volume{},
+				ClusterManagerRef: corev1.ObjectReference{
+					Name: "test",
+				},
+			},
+		},
+	}
+
+	err = client.Create(ctx, &mc)
+	if err != nil {
+		t.Errorf("create should not have returned error; err=%v", err)
+	}
+	_, err = ApplyMonitoringConsole(ctx, client, &mc)
+	if err != nil {
+		t.Errorf("applyMonitoringConsole should not have returned error; err=%v", err)
+	}
+
+	mc.Spec.Image = "splunk2"
+	cm.Spec.Image = "splunk2"
+	_, err = ApplyClusterManager(ctx, client, &cm)
+
+	monitoringConsole := &enterpriseApi.MonitoringConsole{}
+	namespacedName := types.NamespacedName{
+		Name:      cm.Name,
+		Namespace: cm.Namespace,
+	}
+	err = client.Get(ctx, namespacedName, monitoringConsole)
+	if err != nil {
+		t.Errorf("get monitoring console should not have returned error; err=%v", err)
+	}
+
+	check, err := isMonitoringConsoleReadyForUpgrade(ctx, client, monitoringConsole)
+	if err != nil {
+		t.Errorf("isMonitoringConsoleReadyForUpgrade should not have returned error; err=%v", err)
+	}
+	if !check {
+		t.Errorf("isMonitoringConsoleReadyForUpgrade: MC should be ready for upgrade")
+	}
+}
+
+func TestChangeMonitoringConsoleAnnotations(t *testing.T) {
+	ctx := context.TODO()
+
+	builder := fake.NewClientBuilder()
+	client := builder.Build()
+	utilruntime.Must(enterpriseApi.AddToScheme(clientgoscheme.Scheme))
+
+	// define CM and MC
+	cm := &enterpriseApi.ClusterManager{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      "test",
+			Namespace: "test",
+		},
+		Spec: enterpriseApi.ClusterManagerSpec{
+			CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{
+				Spec: enterpriseApi.Spec{
+					ImagePullPolicy: "Always",
+				},
+				Volumes: []corev1.Volume{},
+			},
+		},
+	}
+
+	mc := &enterpriseApi.MonitoringConsole{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      "test",
+			Namespace: "test",
+		},
+		Spec: enterpriseApi.MonitoringConsoleSpec{
+			CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{
+				Spec: enterpriseApi.Spec{
+					ImagePullPolicy: "Always",
+				},
+				Volumes: []corev1.Volume{},
+				ClusterManagerRef: corev1.ObjectReference{
+					Name: "test",
+				},
+			},
+		},
+	}
+	cm.Spec.Image = "splunk/splunk:latest"
+
+	// Create the instances
+	client.Create(ctx, cm)
+	_, err := ApplyClusterManager(ctx, client, cm)
+	if err != nil {
+		t.Errorf("applyClusterManager should not have returned error; err=%v", err)
+	}
+	cm.Status.Phase = enterpriseApi.PhaseReady
+	err = client.Status().Update(ctx, cm)
+	if err != nil {
+		t.Errorf("Unexpected status update error %v", err)
+		debug.PrintStack()
+	}
+	client.Create(ctx, mc)
+	_, err = ApplyMonitoringConsole(ctx, client, mc)
+	if err != nil {
+		t.Errorf("applyMonitoringConsole should not have returned error; err=%v", err)
+	}
+
+	err = changeMonitoringConsoleAnnotations(ctx, client, cm)
+	if err != nil {
+		t.Errorf("changeMonitoringConsoleAnnotations should not have returned error=%v", err)
+	}
+	monitoringConsole := &enterpriseApi.MonitoringConsole{}
+	namespacedName := types.NamespacedName{
+		Name:      cm.Name,
+		Namespace: cm.Namespace,
+	}
+	err = client.Get(ctx, namespacedName, monitoringConsole)
+	if err != nil {
+		t.Errorf("get monitoring console should not have returned error; err=%v", err)
+	}
+
+	annotations := monitoringConsole.GetAnnotations()
+	if annotations["splunk/image-tag"] != cm.Spec.Image {
+		t.Errorf("changeMonitoringConsoleAnnotations should have set the splunk/image-tag annotation to the current image")
+	}
+}
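
For reviewers, a minimal, self-contained sketch of the ordering gate this diff introduces. `Instance`, `Phase`, and `readyForUpgrade` below are illustrative stand-ins, not operator APIs: a CR whose spec image has changed is held back until its upstream peer (LicenseManager for ClusterManager, ClusterManager for MonitoringConsole) reports PhaseReady and is already running the new image.

```go
package main

import "fmt"

// Phase mirrors the operator's notion of a CR lifecycle phase.
type Phase string

const PhaseReady Phase = "Ready"

// Instance is a pared-down stand-in for a Splunk CR: the desired image
// from the spec, the image currently deployed, and the reported phase.
type Instance struct {
	SpecImage    string
	CurrentImage string
	Phase        Phase
}

// readyForUpgrade models the condition used by isClusterManagerReadyForUpgrade
// and isMonitoringConsoleReadyForUpgrade in this diff: while cr's image is
// being changed, hold its reconcile until the upstream peer is Ready and
// already running the new image.
func readyForUpgrade(cr, upstream Instance) bool {
	if cr.SpecImage != cr.CurrentImage &&
		(upstream.Phase != PhaseReady || upstream.CurrentImage != cr.SpecImage) {
		return false
	}
	return true
}

func main() {
	cm := Instance{SpecImage: "splunk/splunk:9.1", CurrentImage: "splunk/splunk:9.0", Phase: PhaseReady}
	mc := Instance{SpecImage: "splunk/splunk:9.1", CurrentImage: "splunk/splunk:9.0"}

	// CM is still running the old image, so the MC reconcile should pause.
	fmt.Println(readyForUpgrade(mc, cm)) // false

	// Once CM is Ready on the new image, MC may proceed.
	cm.CurrentImage = "splunk/splunk:9.1"
	fmt.Println(readyForUpgrade(mc, cm)) // true
}
```

In the operator itself the current images come from getCurrentImage against each StatefulSet and the phase from the CR status; the sketch only models the decision, not the lookups.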