diff --git a/controller/sync.go b/controller/sync.go index 783183c17fc7c..3643fb0965200 100644 --- a/controller/sync.go +++ b/controller/sync.go @@ -407,7 +407,7 @@ func normalizeTargetResources(cr *comparisonResult) ([]*unstructured.Unstructure } // generate a minimal patch that uses the fields from targetPatch (template) // with livePatch values - patch, err := compilePatch(targetPatch, livePatch) + patch, err := compilePatch(targetPatch, livePatch, live.Object) if err != nil { return nil, err } @@ -426,7 +426,7 @@ func normalizeTargetResources(cr *comparisonResult) ([]*unstructured.Unstructure // compilePatch will generate a patch using the fields from templatePatch with // the values from valuePatch; liveObjectMap is consulted when merging nested // maps and list entries. -func compilePatch(templatePatch, valuePatch []byte) ([]byte, error) { +func compilePatch(templatePatch, valuePatch []byte, liveObjectMap map[string]interface{}) ([]byte, error) { templateMap := make(map[string]interface{}) err := json.Unmarshal(templatePatch, &templateMap) if err != nil { @@ -437,49 +437,117 @@ func compilePatch(templatePatch, valuePatch []byte) ([]byte, error) { if err != nil { return nil, err } - resultMap := intersectMap(templateMap, valueMap) + resultMap := intersectMap(templateMap, valueMap, liveObjectMap) return json.Marshal(resultMap) } // intersectMap will return a map with the intersection of the fields from the // provided template and value maps, populated with the valueMap values; the // live map is consulted when deciding how nested maps and list entries merge. 
-func intersectMap(templateMap, valueMap map[string]interface{}) map[string]interface{} { +func intersectMap(templateMap, valueMap, liveMap map[string]interface{}) map[string]interface{} { result := make(map[string]interface{}) for k, v := range templateMap { if innerTMap, ok := v.(map[string]interface{}); ok { if innerVMap, ok := valueMap[k].(map[string]interface{}); ok { - result[k] = intersectMap(innerTMap, innerVMap) + if innerLMap, ok := liveMap[k].(map[string]interface{}); ok { + result[k] = intersectMap(innerTMap, innerVMap, innerLMap) + } + } else if _, ok := liveMap[k].(map[string]interface{}); !ok { + result[k] = innerTMap } } else if innerTSlice, ok := v.([]interface{}); ok { if innerVSlice, ok := valueMap[k].([]interface{}); ok { - items := []interface{}{} - for idx, innerTSliceValue := range innerTSlice { - if idx < len(innerVSlice) { + if innerLSlice, ok := liveMap[k].([]interface{}); ok { + items := []interface{}{} + mergeKeyFieldName := "name" + + for idx, innerTSliceValue := range innerTSlice { if tSliceValueMap, ok := innerTSliceValue.(map[string]interface{}); ok { - if vSliceValueMap, ok := innerVSlice[idx].(map[string]interface{}); ok { - item := intersectMap(tSliceValueMap, vSliceValueMap) - items = append(items, item) + if idx < len(innerVSlice) { + if vSliceValueMap, ok := innerVSlice[idx].(map[string]interface{}); ok { + if lSliceValueMap, ok := innerLSlice[idx].(map[string]interface{}); ok { + mergedMap := map[string]interface{}{} + if _, ok := tSliceValueMap[mergeKeyFieldName].(string); ok { + if _, ok := vSliceValueMap[mergeKeyFieldName].(string); ok { + for tKey, tItem := range tSliceValueMap { + if vItem, ok := vSliceValueMap[tKey].(map[string]interface{}); ok { + if lItem, ok := lSliceValueMap[tKey].(map[string]interface{}); ok { + item := intersectMap(tItem.(map[string]interface{}), vItem, lItem) + mergedMap[tKey] = item + } + } else if vItem, ok := vSliceValueMap[tKey].([]interface{}); ok { + mergeKeys := []string{} + innerItems := 
[]interface{}{} + mergeKeyItems := map[string]interface{}{} + + for _, tItemEl := range tItem.([]interface{}) { + if tItemElMap, ok := tItemEl.(map[string]interface{}); ok { + if tItemElName, ok := tItemElMap[mergeKeyFieldName].(string); ok { + if _, ok := mergeKeyItems[tItemElName]; !ok { + mergeKeys = append(mergeKeys, tItemElName) + } + mergeKeyItems[tItemElName] = tItemElMap + } + } + } + + for _, vItemEl := range vItem { + if vItemElMap, ok := vItemEl.(map[string]interface{}); ok { + if vItemElName, ok := vItemElMap[mergeKeyFieldName].(string); ok { + if _, ok := mergeKeyItems[vItemElName]; ok { + mergeKeyItems[vItemElName] = vItemElMap + } + } + } + } + + if len(mergeKeys) > 0 { + for _, key := range mergeKeys { + innerItems = append(innerItems, mergeKeyItems[key]) + } + mergedMap[tKey] = innerItems + } else { + // TODO: What should happen if there are no merge keys + // and there are array entries within both tItem and vItem? + mergedMap[tKey] = tItem + } + } else if vItem, ok := vSliceValueMap[tKey]; ok { + mergedMap[tKey] = vItem + } else { + mergedMap[tKey] = tItem + } + } + } + + items = append(items, mergedMap) + } + } else { + item := intersectMap(tSliceValueMap, vSliceValueMap, lSliceValueMap) + items = append(items, item) + } + } + } else { + items = append(items, tSliceValueMap) } } else { - items = append(items, innerVSlice[idx]) + items = append(items, tSliceValueMap) } } + if len(items) > 0 { + result[k] = items + } } - if len(items) > 0 { - result[k] = items - } - } - } else { - if _, ok := valueMap[k]; ok { - result[k] = valueMap[k] + } else { + result[k] = innerTSlice } + } else if _, ok := valueMap[k]; ok { + result[k] = valueMap[k] } } return result } // getMergePatch calculates and returns the patch between the original and the -// modified unstructures. +// modified unstructured. 
func getMergePatch(original, modified *unstructured.Unstructured) ([]byte, error) { originalJSON, err := original.MarshalJSON() if err != nil { @@ -493,7 +561,7 @@ func getMergePatch(original, modified *unstructured.Unstructured) ([]byte, error } // applyMergePatch will apply the given patch in the obj and return the patched -// unstructure. +// unstructured. func applyMergePatch(obj *unstructured.Unstructured, patch []byte) (*unstructured.Unstructured, error) { originalJSON, err := obj.MarshalJSON() if err != nil { @@ -513,7 +581,7 @@ func applyMergePatch(obj *unstructured.Unstructured, patch []byte) (*unstructure // hasSharedResourceCondition will check if the Application has any resource that has already // been synced by another Application. If the resource is found in another Application it returns -// true along with a human readable message of which specific resource has this condition. +// true along with a human-readable message of which specific resource has this condition. func hasSharedResourceCondition(app *v1alpha1.Application) (bool, string) { for _, condition := range app.Status.Conditions { if condition.Type == v1alpha1.ApplicationConditionSharedResourceWarning { diff --git a/controller/sync_test.go b/controller/sync_test.go index da68e5d9a3dfe..ef54e762d3298 100644 --- a/controller/sync_test.go +++ b/controller/sync_test.go @@ -361,7 +361,6 @@ func TestNormalizeTargetResources(t *testing.T) { assert.Equal(t, int64(4), replicas) }) t.Run("will keep new array entries not found in live state if not ignored", func(t *testing.T) { - t.Skip("limitation in the current implementation") // given ignores := []v1alpha1.ResourceIgnoreDifferences{ { @@ -385,4 +384,267 @@ func TestNormalizeTargetResources(t *testing.T) { require.True(t, ok) assert.Equal(t, 2, len(containers)) }) + t.Run("will correctly set array entries if new entries have been added", func(t *testing.T) { + // given + ignores := []v1alpha1.ResourceIgnoreDifferences{ + { + Group: "apps", + 
Kind: "Deployment", + JQPathExpressions: []string{".spec.template.spec.containers[].env[] | select(.name == \"SOME_ENV_VAR\")"}, + }, + } + f := setup(t, ignores) + live := test.YamlToUnstructured(testdata.LiveDeploymentEnvVarsYaml) + target := test.YamlToUnstructured(testdata.TargetDeploymentEnvVarsYaml) + f.comparisonResult.reconciliationResult.Live = []*unstructured.Unstructured{live} + f.comparisonResult.reconciliationResult.Target = []*unstructured.Unstructured{target} + + // when + targets, err := normalizeTargetResources(f.comparisonResult) + + // then + require.NoError(t, err) + require.Equal(t, 1, len(targets)) + containers, ok, err := unstructured.NestedSlice(targets[0].Object, "spec", "template", "spec", "containers") + require.NoError(t, err) + require.True(t, ok) + assert.Equal(t, 1, len(containers)) + + ports := containers[0].(map[string]interface{})["ports"].([]interface{}) + assert.Equal(t, 1, len(ports)) + + env := containers[0].(map[string]interface{})["env"].([]interface{}) + assert.Equal(t, 3, len(env)) + + first := env[0] + second := env[1] + third := env[2] + + // The resulting order matches the insertion order of the target manifest. 
+ assert.Equal(t, "SOME_OTHER_ENV_VAR", first.(map[string]interface{})["name"]) + assert.Equal(t, "some_other_value", first.(map[string]interface{})["value"]) + + assert.Equal(t, "YET_ANOTHER_ENV_VAR", second.(map[string]interface{})["name"]) + assert.Equal(t, "yet_another_value", second.(map[string]interface{})["value"]) + + assert.Equal(t, "SOME_ENV_VAR", third.(map[string]interface{})["name"]) + assert.Equal(t, "some_value", third.(map[string]interface{})["value"]) + }) + t.Run("mutating-webhook-config", func(t *testing.T) { + // given + + ignores := []v1alpha1.ResourceIgnoreDifferences{ + { + Group: "admissionregistration.k8s.io", + Kind: "MutatingWebhookConfiguration", + JQPathExpressions: []string{".webhooks[]?.clientConfig.caBundle"}, + }, + } + f := setup(t, ignores) + live := test.YamlToUnstructured(testdata.LiveMutatingWebhookConfigYaml) + target := test.YamlToUnstructured(testdata.TargetMutatingWebhookConfigYaml) + f.comparisonResult.reconciliationResult.Live = []*unstructured.Unstructured{live} + f.comparisonResult.reconciliationResult.Target = []*unstructured.Unstructured{target} + + // when + targets, err := normalizeTargetResources(f.comparisonResult) + + // then + require.NoError(t, err) + require.Equal(t, 1, len(targets)) + webhooks, ok, err := unstructured.NestedSlice(targets[0].Object, "webhooks") + require.NoError(t, err) + require.True(t, ok) + assert.Equal(t, 3, len(webhooks)) + + first := webhooks[0] + second := webhooks[1] + third := webhooks[2] + + assert.Equal(t, "something", (first.(map[string]interface{})["clientConfig"]).(map[string]interface{})["caBundle"]) + assert.Equal(t, "something", (second.(map[string]interface{})["clientConfig"]).(map[string]interface{})["caBundle"]) + assert.Equal(t, "something-new", (third.(map[string]interface{})["clientConfig"]).(map[string]interface{})["caBundle"]) + }) + t.Run("rollout-obj", func(t *testing.T) { + // given + + ignores := []v1alpha1.ResourceIgnoreDifferences{ + { + Group: "argoproj.io", + 
Kind: "Rollout", + JQPathExpressions: []string{".spec.template.spec.containers[] | select(.name == \"init\") | .image"}, + }, + } + f := setup(t, ignores) + live := test.YamlToUnstructured(testdata.LiveRolloutYaml) + target := test.YamlToUnstructured(testdata.TargetRolloutYaml) + f.comparisonResult.reconciliationResult.Live = []*unstructured.Unstructured{live} + f.comparisonResult.reconciliationResult.Target = []*unstructured.Unstructured{target} + + // when + targets, err := normalizeTargetResources(f.comparisonResult) + + // then + require.NoError(t, err) + require.Equal(t, 1, len(targets)) + containers, ok, err := unstructured.NestedSlice(targets[0].Object, "spec", "template", "spec", "containers") + require.NoError(t, err) + require.True(t, ok) + assert.Equal(t, 1, len(containers)) + + container := containers[0] + + assert.Equal(t, int64(15), (container.(map[string]interface{})["livenessProbe"]).(map[string]interface{})["initialDelaySeconds"]) + }) + t.Run("ignore-deployment-image-replicas-changes-additive", func(t *testing.T) { + // given + + ignores := []v1alpha1.ResourceIgnoreDifferences{ + { + Group: "apps", + Kind: "Deployment", + JSONPointers: []string{"/spec/replicas"}, + }, { + Group: "apps", + Kind: "Deployment", + JQPathExpressions: []string{".spec.template.spec.containers[].image"}, + }, + } + f := setup(t, ignores) + live := test.YamlToUnstructured(testdata.MinimalImageReplicaDeploymentYaml) + target := test.YamlToUnstructured(testdata.AdditionalImageReplicaDeploymentYaml) + f.comparisonResult.reconciliationResult.Live = []*unstructured.Unstructured{live} + f.comparisonResult.reconciliationResult.Target = []*unstructured.Unstructured{target} + + // when + targets, err := normalizeTargetResources(f.comparisonResult) + + // then + require.NoError(t, err) + require.Equal(t, 1, len(targets)) + metadata, ok, err := unstructured.NestedMap(targets[0].Object, "metadata") + require.NoError(t, err) + require.True(t, ok) + labels, ok := 
metadata["labels"].(map[string]interface{}) + require.True(t, ok) + assert.Equal(t, 2, len(labels)) + assert.Equal(t, "web", labels["appProcess"]) + + spec, ok, err := unstructured.NestedMap(targets[0].Object, "spec") + require.NoError(t, err) + require.True(t, ok) + + assert.Equal(t, int64(1), spec["replicas"]) + + template, ok := spec["template"].(map[string]interface{}) + require.True(t, ok) + + tMetadata, ok := template["metadata"].(map[string]interface{}) + require.True(t, ok) + tLabels, ok := tMetadata["labels"].(map[string]interface{}) + require.True(t, ok) + assert.Equal(t, 2, len(tLabels)) + assert.Equal(t, "web", tLabels["appProcess"]) + + tSpec, ok := template["spec"].(map[string]interface{}) + require.True(t, ok) + containers, ok, err := unstructured.NestedSlice(tSpec, "containers") + require.NoError(t, err) + require.True(t, ok) + assert.Equal(t, 1, len(containers)) + + first := containers[0].(map[string]interface{}) + assert.Equal(t, "alpine:3", first["image"]) + + resources, ok := first["resources"].(map[string]interface{}) + require.True(t, ok) + requests, ok := resources["requests"].(map[string]interface{}) + require.True(t, ok) + assert.Equal(t, "400m", requests["cpu"]) + + env, ok, err := unstructured.NestedSlice(first, "env") + require.NoError(t, err) + require.True(t, ok) + assert.Equal(t, 1, len(env)) + + env0 := env[0].(map[string]interface{}) + assert.Equal(t, "EV", env0["name"]) + assert.Equal(t, "here", env0["value"]) + }) + t.Run("ignore-deployment-image-replicas-changes-reductive", func(t *testing.T) { + // given + + ignores := []v1alpha1.ResourceIgnoreDifferences{ + { + Group: "apps", + Kind: "Deployment", + JSONPointers: []string{"/spec/replicas"}, + }, { + Group: "apps", + Kind: "Deployment", + JQPathExpressions: []string{".spec.template.spec.containers[].image"}, + }, + } + f := setup(t, ignores) + live := test.YamlToUnstructured(testdata.AdditionalImageReplicaDeploymentYaml) + target := 
test.YamlToUnstructured(testdata.MinimalImageReplicaDeploymentYaml) + f.comparisonResult.reconciliationResult.Live = []*unstructured.Unstructured{live} + f.comparisonResult.reconciliationResult.Target = []*unstructured.Unstructured{target} + + // when + targets, err := normalizeTargetResources(f.comparisonResult) + + // then + require.NoError(t, err) + require.Equal(t, 1, len(targets)) + metadata, ok, err := unstructured.NestedMap(targets[0].Object, "metadata") + require.NoError(t, err) + require.True(t, ok) + labels, ok := metadata["labels"].(map[string]interface{}) + require.True(t, ok) + assert.Equal(t, 1, len(labels)) + _, ok, err = unstructured.NestedMap(labels, "appProcess") + require.NoError(t, err) + require.False(t, ok) + + spec, ok, err := unstructured.NestedMap(targets[0].Object, "spec") + require.NoError(t, err) + require.True(t, ok) + + assert.Equal(t, int64(2), spec["replicas"]) + + template, ok := spec["template"].(map[string]interface{}) + require.True(t, ok) + + tMetadata, ok := template["metadata"].(map[string]interface{}) + require.True(t, ok) + tLabels, ok := tMetadata["labels"].(map[string]interface{}) + require.True(t, ok) + assert.Equal(t, 1, len(tLabels)) + _, ok, err = unstructured.NestedMap(tLabels, "appProcess") + require.NoError(t, err) + require.False(t, ok) + + tSpec, ok := template["spec"].(map[string]interface{}) + require.True(t, ok) + containers, ok, err := unstructured.NestedSlice(tSpec, "containers") + require.NoError(t, err) + require.True(t, ok) + assert.Equal(t, 1, len(containers)) + + first := containers[0].(map[string]interface{}) + assert.Equal(t, "alpine:2", first["image"]) + + resources, ok := first["resources"].(map[string]interface{}) + require.True(t, ok) + assert.Equal(t, 0, len(resources)) + _, ok, err = unstructured.NestedMap(resources, "requests") + require.NoError(t, err) + require.False(t, ok) + + _, ok, err = unstructured.NestedSlice(first, "env") + require.NoError(t, err) + require.False(t, ok) + + }) } diff 
--git a/controller/testdata/additional-image-replicas-deployment.yaml b/controller/testdata/additional-image-replicas-deployment.yaml new file mode 100644 index 0000000000000..0173314070338 --- /dev/null +++ b/controller/testdata/additional-image-replicas-deployment.yaml @@ -0,0 +1,28 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: client + appProcess: web + name: client +spec: + replicas: 2 + selector: + matchLabels: + app: client + strategy: {} + template: + metadata: + labels: + app: client + appProcess: web + spec: + containers: + - image: alpine:2 + name: alpine + resources: + requests: + cpu: 400m + env: + - name: EV + value: here diff --git a/controller/testdata/data.go b/controller/testdata/data.go index a53c6a8a88b35..552326c312697 100644 --- a/controller/testdata/data.go +++ b/controller/testdata/data.go @@ -9,6 +9,30 @@ var ( //go:embed target-deployment.yaml TargetDeploymentYaml string + //go:embed live-deployment-env-vars.yaml + LiveDeploymentEnvVarsYaml string + + //go:embed target-deployment-env-vars.yaml + TargetDeploymentEnvVarsYaml string + + //go:embed minimal-image-replicas-deployment.yaml + MinimalImageReplicaDeploymentYaml string + + //go:embed additional-image-replicas-deployment.yaml + AdditionalImageReplicaDeploymentYaml string + + //go:embed live-mutating-webhook-config.yaml + LiveMutatingWebhookConfigYaml string + + //go:embed target-mutating-webhook-config.yaml + TargetMutatingWebhookConfigYaml string + + //go:embed live-rollout.yaml + LiveRolloutYaml string + + //go:embed target-rollout.yaml + TargetRolloutYaml string + //go:embed target-deployment-new-entries.yaml TargetDeploymentNewEntries string ) diff --git a/controller/testdata/live-deployment-env-vars.yaml b/controller/testdata/live-deployment-env-vars.yaml new file mode 100644 index 0000000000000..731b5b720714c --- /dev/null +++ b/controller/testdata/live-deployment-env-vars.yaml @@ -0,0 +1,177 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + 
annotations: + argocd.argoproj.io/tracking-id: 'guestbook:apps/Deployment:default/kustomize-guestbook-ui' + deployment.kubernetes.io/revision: '9' + iksm-version: '2.0' + kubectl.kubernetes.io/last-applied-configuration: > + {"apiVersion":"apps/v1","kind":"Deployment","metadata":{"annotations":{"argocd.argoproj.io/tracking-id":"guestbook:apps/Deployment:default/kustomize-guestbook-ui","iksm-version":"2.0"},"name":"kustomize-guestbook-ui","namespace":"default"},"spec":{"replicas":4,"revisionHistoryLimit":3,"selector":{"matchLabels":{"app":"guestbook-ui"}},"template":{"metadata":{"labels":{"app":"guestbook-ui"}},"spec":{"containers":[{"env":[{"name":"SOME_ENV_VAR","value":"some_value"}],"image":"gcr.io/heptio-images/ks-guestbook-demo:0.1","name":"guestbook-ui","ports":[{"containerPort":80}],"resources":{"requests":{"cpu":"50m","memory":"100Mi"}}}]}}}} + creationTimestamp: '2022-01-05T15:45:21Z' + generation: 119 + managedFields: + - apiVersion: apps/v1 + fieldsType: FieldsV1 + fieldsV1: + 'f:metadata': + 'f:annotations': + 'f:iksm-version': {} + manager: janitor + operation: Apply + time: '2022-01-06T18:21:04Z' + - apiVersion: apps/v1 + fieldsType: FieldsV1 + fieldsV1: + 'f:metadata': + 'f:annotations': + .: {} + 'f:argocd.argoproj.io/tracking-id': {} + 'f:kubectl.kubernetes.io/last-applied-configuration': {} + 'f:spec': + 'f:progressDeadlineSeconds': {} + 'f:replicas': {} + 'f:revisionHistoryLimit': {} + 'f:selector': {} + 'f:strategy': + 'f:rollingUpdate': + .: {} + 'f:maxSurge': {} + 'f:maxUnavailable': {} + 'f:type': {} + 'f:template': + 'f:metadata': + 'f:labels': + .: {} + 'f:app': {} + 'f:spec': + 'f:containers': + 'k:{"name":"guestbook-ui"}': + .: {} + 'f:env': + .: {} + 'k:{"name":"SOME_ENV_VAR"}': + .: {} + 'f:name': {} + 'f:value': {} + 'f:image': {} + 'f:imagePullPolicy': {} + 'f:name': {} + 'f:ports': + .: {} + 'k:{"containerPort":80,"protocol":"TCP"}': + .: {} + 'f:containerPort': {} + 'f:protocol': {} + 'f:resources': + .: {} + 'f:requests': + .: {} + 
'f:cpu': {} + 'f:memory': {} + 'f:terminationMessagePath': {} + 'f:terminationMessagePolicy': {} + 'f:dnsPolicy': {} + 'f:restartPolicy': {} + 'f:schedulerName': {} + 'f:securityContext': {} + 'f:terminationGracePeriodSeconds': {} + manager: argocd + operation: Update + time: '2022-01-06T15:04:15Z' + - apiVersion: apps/v1 + fieldsType: FieldsV1 + fieldsV1: + 'f:metadata': + 'f:annotations': + 'f:deployment.kubernetes.io/revision': {} + 'f:status': + 'f:availableReplicas': {} + 'f:conditions': + .: {} + 'k:{"type":"Available"}': + .: {} + 'f:lastTransitionTime': {} + 'f:lastUpdateTime': {} + 'f:message': {} + 'f:reason': {} + 'f:status': {} + 'f:type': {} + 'k:{"type":"Progressing"}': + .: {} + 'f:lastTransitionTime': {} + 'f:lastUpdateTime': {} + 'f:message': {} + 'f:reason': {} + 'f:status': {} + 'f:type': {} + 'f:observedGeneration': {} + 'f:readyReplicas': {} + 'f:replicas': {} + 'f:updatedReplicas': {} + manager: kube-controller-manager + operation: Update + time: '2022-01-06T18:15:14Z' + name: kustomize-guestbook-ui + namespace: default + resourceVersion: '8289211' + uid: ef253575-ce44-4c5e-84ad-16e81d0df6eb +spec: + progressDeadlineSeconds: 600 + replicas: 4 + revisionHistoryLimit: 3 + selector: + matchLabels: + app: guestbook-ui + strategy: + rollingUpdate: + maxSurge: 25% + maxUnavailable: 25% + type: RollingUpdate + template: + metadata: + creationTimestamp: null + labels: + app: guestbook-ui + spec: + containers: + - env: + - name: SOME_ENV_VAR + value: some_value + image: 'gcr.io/heptio-images/ks-guestbook-demo:0.1' + imagePullPolicy: IfNotPresent + name: guestbook-ui + ports: + - containerPort: 80 + protocol: TCP + resources: + requests: + cpu: 50m + memory: 100Mi + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + dnsPolicy: ClusterFirst + restartPolicy: Always + schedulerName: default-scheduler + securityContext: {} + terminationGracePeriodSeconds: 30 +status: + availableReplicas: 4 + conditions: + - lastTransitionTime: 
'2022-01-05T22:20:37Z' + lastUpdateTime: '2022-01-05T22:43:47Z' + message: >- + ReplicaSet "kustomize-guestbook-ui-6549d54677" has successfully + progressed. + reason: NewReplicaSetAvailable + status: 'True' + type: Progressing + - lastTransitionTime: '2022-01-06T18:15:14Z' + lastUpdateTime: '2022-01-06T18:15:14Z' + message: Deployment has minimum availability. + reason: MinimumReplicasAvailable + status: 'True' + type: Available + observedGeneration: 119 + readyReplicas: 4 + replicas: 4 + updatedReplicas: 4 diff --git a/controller/testdata/live-mutating-webhook-config.yaml b/controller/testdata/live-mutating-webhook-config.yaml new file mode 100644 index 0000000000000..655c15c04d967 --- /dev/null +++ b/controller/testdata/live-mutating-webhook-config.yaml @@ -0,0 +1,85 @@ +apiVersion: admissionregistration.k8s.io/v1 +kind: MutatingWebhookConfiguration +metadata: + annotations: + kubectl.kubernetes.io/last-applied-configuration: > + {"apiVersion":"admissionregistration.k8s.io/v1","kind":"MutatingWebhookConfiguration","metadata":{"annotations":{},"labels":{"app.kubernetes.io/instance":"aws-lb-controller","app.kubernetes.io/managed-by":"Helm","app.kubernetes.io/name":"aws-load-balancer-controller","app.kubernetes.io/version":"v2.5.2","argocd.argoproj.io/instance":"my-aws-lb-controller","helm.sh/chart":"aws-load-balancer-controller-1.5.3"},"name":"aws-load-balancer-webhook"},"webhooks":[{"admissionReviewVersions":["v1beta1"],"clientConfig":{"caBundle":"something","service":{"name":"aws-load-balancer-webhook-service","namespace":"kube-system","path":"/mutate-v1-pod"}},"failurePolicy":"Fail","name":"mpod.elbv2.k8s.aws","namespaceSelector":{"matchExpressions":[{"key":"elbv2.k8s.aws/pod-readiness-gate-inject","operator":"In","values":["enabled"]}]},"objectSelector":{"matchExpressions":[{"key":"app.kubernetes.io/name","operator":"NotIn","values":["aws-load-balancer-controller"]}]},"rules":[{"apiGroups":[""],"apiVersions":["v1"],"operations":["CREATE"],"resources":["pods"]}]
,"sideEffects":"None"},{"admissionReviewVersions":["v1beta1"],"clientConfig":{"caBundle":"something","service":{"name":"aws-load-balancer-webhook-service","namespace":"kube-system","path":"/mutate-elbv2-k8s-aws-v1beta1-targetgroupbinding"}},"failurePolicy":"Fail","name":"mtargetgroupbinding.elbv2.k8s.aws","objectSelector":{},"rules":[{"apiGroups":["elbv2.k8s.aws"],"apiVersions":["v1beta1"],"operations":["CREATE"],"resources":["targetgroupbindings"]}],"sideEffects":"None"}]} + creationTimestamp: '2023-07-12T12:43:35Z' + generation: 2 + labels: + app.kubernetes.io/instance: aws-lb-controller + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: aws-load-balancer-controller + app.kubernetes.io/version: v2.5.2 + argocd.argoproj.io/instance: my-aws-lb-controller + helm.sh/chart: aws-load-balancer-controller-1.5.3 + name: aws-load-balancer-webhook + resourceVersion: '6491247' + uid: 773ae9e5-a9a0-497f-a366-17b4dae4a667 +webhooks: + - admissionReviewVersions: + - v1beta1 + clientConfig: + caBundle: >- + something + service: + name: aws-load-balancer-webhook-service + namespace: kube-system + path: /mutate-v1-pod + port: 443 + failurePolicy: Fail + matchPolicy: Equivalent + name: mpod.elbv2.k8s.aws + namespaceSelector: + matchExpressions: + - key: elbv2.k8s.aws/pod-readiness-gate-inject + operator: In + values: + - enabled + objectSelector: + matchExpressions: + - key: app.kubernetes.io/name + operator: NotIn + values: + - aws-load-balancer-controller + reinvocationPolicy: Never + rules: + - apiGroups: + - '' + apiVersions: + - v1 + operations: + - CREATE + resources: + - pods + scope: '*' + sideEffects: None + timeoutSeconds: 10 + - admissionReviewVersions: + - v1beta1 + clientConfig: + caBundle: >- + something + service: + name: aws-load-balancer-webhook-service + namespace: kube-system + path: /mutate-elbv2-k8s-aws-v1beta1-targetgroupbinding + port: 443 + failurePolicy: Fail + matchPolicy: Equivalent + name: mtargetgroupbinding.elbv2.k8s.aws + 
namespaceSelector: {} + objectSelector: {} + reinvocationPolicy: Never + rules: + - apiGroups: + - elbv2.k8s.aws + apiVersions: + - v1beta1 + operations: + - CREATE + resources: + - targetgroupbindings + scope: '*' + sideEffects: None + timeoutSeconds: 10 \ No newline at end of file diff --git a/controller/testdata/live-rollout.yaml b/controller/testdata/live-rollout.yaml new file mode 100644 index 0000000000000..7eb1b2d045430 --- /dev/null +++ b/controller/testdata/live-rollout.yaml @@ -0,0 +1,122 @@ +apiVersion: argoproj.io/v1alpha1 +kind: Rollout +metadata: + annotations: + kubectl.kubernetes.io/last-applied-configuration: > + {"apiVersion":"argoproj.io/v1alpha1","kind":"Rollout","metadata":{"annotations":{},"labels":{"app.kubernetes.io/component":"manual","app.kubernetes.io/instance":"init-manual","app.kubernetes.io/managed-by":"Helm","app.kubernetes.io/name":"init-manual","app.kubernetes.io/part-of":"init","helm.sh/chart":"init-0.1.0"},"name":"init-manual","namespace":"init"},"spec":{"replicas":1,"revisionHistoryLimit":4,"selector":{"matchLabels":{"app":"init-manual"}},"strategy":{"blueGreen":{"activeService":"manual","previewService":"manual-preview"}},"template":{"metadata":{"annotations":{"config.alpha.linkerd.io/proxy-wait-before-exit-seconds":"10","config.linkerd.io/proxy-await":"enabled","instrumentation.opentelemetry.io/container-names":"init","instrumentation.opentelemetry.io/inject-nodejs":"otel/init-instrumentation","linkerd.io/inject":"enabled"},"labels":{"app":"init-manual","linkerd.io/proxy-deployment":"init-manual"}},"spec":{"containers":[{"env":[{"name":"APP","value":"manual"},{"name":"HTTP_PORT","value":"8080"}],"image":"REDACTED","imagePullPolicy":"IfNotPresent","livenessProbe":{"failureThreshold":3,"grpc":{"port":8080,"service":"init"},"initialDelaySeconds":20,"periodSeconds":10},"name":"init","ports":[{"containerPort":8080,"protocol":"TCP"}],"readinessProbe":{"failureThreshold":1,"grpc":{"port":8080,"service":"init"},"initialDelaySeconds":20,
"periodSeconds":5},"resources":{"limits":{"ephemeral-storage":"512Mi"},"requests":{"cpu":"50m","ephemeral-storage":"256Mi","memory":"256Mi"}}}],"imagePullSecrets":[{"name":"REDACTED"}],"nodeSelector":{"cloud.google.com/gke-provisioning":"spot","iam.gke.io/gke-metadata-server-enabled":"true"},"serviceAccountName":"default"}}}} + rollout.argoproj.io/revision: '215' + creationTimestamp: '2023-04-26T14:16:22Z' + generation: 228 + labels: + app.kubernetes.io/component: manual + app.kubernetes.io/instance: init-manual + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: init-manual + app.kubernetes.io/part-of: init + helm.sh/chart: init-0.1.0 + k8slens-edit-resource-version: v1alpha1 + name: init-manual + namespace: init + resourceVersion: '97158718' + uid: ee833507-b862-47c4-ab1a-8c504ff1db71 +spec: + replicas: 1 + revisionHistoryLimit: 4 + selector: + matchLabels: + app: init-manual + strategy: + blueGreen: + activeService: manual + previewService: manual-preview + template: + metadata: + annotations: + config.alpha.linkerd.io/proxy-wait-before-exit-seconds: '10' + config.linkerd.io/proxy-await: enabled + instrumentation.opentelemetry.io/container-names: init + instrumentation.opentelemetry.io/inject-nodejs: otel/init-instrumentation + linkerd.io/inject: enabled + labels: + app: init-manual + linkerd.io/proxy-deployment: init-manual + spec: + containers: + - env: + - name: APP + value: manual + - name: HTTP_PORT + value: '8080' + image: 'REDACTED' + imagePullPolicy: IfNotPresent + livenessProbe: + failureThreshold: 3 + grpc: + port: 8080 + service: init + initialDelaySeconds: 20 + periodSeconds: 10 + name: init + ports: + - containerPort: 8080 + protocol: TCP + readinessProbe: + failureThreshold: 1 + grpc: + port: 8080 + service: init + initialDelaySeconds: 20 + periodSeconds: 5 + resources: + limits: + ephemeral-storage: 512Mi + requests: + cpu: 50m + ephemeral-storage: 256Mi + memory: 256Mi + imagePullSecrets: + - name: REDACTED + nodeSelector: + 
cloud.google.com/gke-provisioning: spot + iam.gke.io/gke-metadata-server-enabled: 'true' + serviceAccountName: default +status: + HPAReplicas: 1 + availableReplicas: 1 + blueGreen: + activeSelector: 6b6b5d8cd7 + previewSelector: 6b6b5d8cd7 + canary: {} + conditions: + - lastTransitionTime: '2023-08-30T09:45:41Z' + lastUpdateTime: '2023-08-30T09:45:41Z' + message: RolloutCompleted + reason: RolloutCompleted + status: 'True' + type: Completed + - lastTransitionTime: '2023-08-30T09:57:08Z' + lastUpdateTime: '2023-08-30T09:57:08Z' + message: Rollout is healthy + reason: RolloutHealthy + status: 'True' + type: Healthy + - lastTransitionTime: '2023-05-05T09:55:27Z' + lastUpdateTime: '2023-08-30T09:57:08Z' + message: ReplicaSet "init-manual-6b6b5d8cd7" has successfully progressed. + reason: NewReplicaSetAvailable + status: 'True' + type: Progressing + - lastTransitionTime: '2023-08-30T09:57:08Z' + lastUpdateTime: '2023-08-30T09:57:08Z' + message: Rollout has minimum availability + reason: AvailableReason + status: 'True' + type: Available + currentPodHash: 6b6b5d8cd7 + observedGeneration: '228' + phase: Healthy + readyReplicas: 1 + replicas: 1 + selector: 'app=init-manual,rollouts-pod-template-hash=6b6b5d8cd7' + stableRS: 6b6b5d8cd7 + updatedReplicas: 1 \ No newline at end of file diff --git a/controller/testdata/minimal-image-replicas-deployment.yaml b/controller/testdata/minimal-image-replicas-deployment.yaml new file mode 100644 index 0000000000000..51e38413bc6bd --- /dev/null +++ b/controller/testdata/minimal-image-replicas-deployment.yaml @@ -0,0 +1,21 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: client + name: client +spec: + replicas: 1 + selector: + matchLabels: + app: client + strategy: {} + template: + metadata: + labels: + app: client + spec: + containers: + - image: alpine:3 + name: alpine + resources: {} diff --git a/controller/testdata/target-deployment-env-vars.yaml b/controller/testdata/target-deployment-env-vars.yaml new file mode 
100644 index 0000000000000..714b0787463d8 --- /dev/null +++ b/controller/testdata/target-deployment-env-vars.yaml @@ -0,0 +1,35 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + argocd.argoproj.io/tracking-id: 'guestbook:apps/Deployment:default/kustomize-guestbook-ui' + iksm-version: '1.0' + name: kustomize-guestbook-ui + namespace: default +spec: + replicas: 1 + revisionHistoryLimit: 3 + selector: + matchLabels: + app: guestbook-ui + template: + metadata: + labels: + app: guestbook-ui + spec: + containers: + - env: + - name: SOME_OTHER_ENV_VAR + value: some_other_value + - name: YET_ANOTHER_ENV_VAR + value: yet_another_value + - name: SOME_ENV_VAR + value: different_value! + image: 'gcr.io/heptio-images/ks-guestbook-demo:0.1' + name: guestbook-ui + ports: + - containerPort: 80 + resources: + requests: + cpu: 50m + memory: 100Mi diff --git a/controller/testdata/target-mutating-webhook-config.yaml b/controller/testdata/target-mutating-webhook-config.yaml new file mode 100644 index 0000000000000..5fd494879d590 --- /dev/null +++ b/controller/testdata/target-mutating-webhook-config.yaml @@ -0,0 +1,94 @@ +apiVersion: admissionregistration.k8s.io/v1 +kind: MutatingWebhookConfiguration +metadata: + labels: + app.kubernetes.io/instance: aws-lb-controller + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: aws-load-balancer-controller + app.kubernetes.io/version: v2.5.2 + argocd.argoproj.io/instance: my-aws-lb-controller + helm.sh/chart: aws-load-balancer-controller-1.5.3 + name: aws-load-balancer-webhook +webhooks: + - admissionReviewVersions: + - v1beta1 + clientConfig: + caBundle: >- + something-new + service: + name: aws-load-balancer-webhook-service + namespace: kube-system + path: /mutate-v1-pod + failurePolicy: Fail + name: mpod.elbv2.k8s.aws + namespaceSelector: + matchExpressions: + - key: elbv2.k8s.aws/pod-readiness-gate-inject + operator: In + values: + - enabled + objectSelector: + matchExpressions: + - key: app.kubernetes.io/name 
+ operator: NotIn + values: + - aws-load-balancer-controller + rules: + - apiGroups: + - '' + apiVersions: + - v1 + operations: + - CREATE + resources: + - pods + sideEffects: None + - admissionReviewVersions: + - v1beta1 + clientConfig: + caBundle: >- + something-new + service: + name: aws-load-balancer-webhook-service + namespace: kube-system + path: /mutate-v1-service + failurePolicy: Fail + name: mservice.elbv2.k8s.aws + objectSelector: + matchExpressions: + - key: app.kubernetes.io/name + operator: NotIn + values: + - aws-load-balancer-controller + rules: + - apiGroups: + - '' + apiVersions: + - v1 + operations: + - CREATE + resources: + - services + sideEffects: None + - admissionReviewVersions: + - v1beta1 + clientConfig: + caBundle: >- + something-new + service: + name: aws-load-balancer-webhook-service + namespace: kube-system + path: /mutate-elbv2-k8s-aws-v1beta1-targetgroupbinding + failurePolicy: Fail + name: mtargetgroupbinding.elbv2.k8s.aws + rules: + - apiGroups: + - elbv2.k8s.aws + apiVersions: + - v1beta1 + operations: + - CREATE + - UPDATE + resources: + - targetgroupbindings + sideEffects: None \ No newline at end of file diff --git a/controller/testdata/target-rollout.yaml b/controller/testdata/target-rollout.yaml new file mode 100644 index 0000000000000..0a9fe6226bdfd --- /dev/null +++ b/controller/testdata/target-rollout.yaml @@ -0,0 +1,74 @@ +apiVersion: argoproj.io/v1alpha1 +kind: Rollout +metadata: + labels: + app.kubernetes.io/component: manual + app.kubernetes.io/instance: init-manual + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: init-manual + app.kubernetes.io/part-of: init + helm.sh/chart: init-0.1.0 + k8slens-edit-resource-version: v1alpha1 + name: init-manual + namespace: init +spec: + replicas: 1 + revisionHistoryLimit: 4 + selector: + matchLabels: + app: init-manual + strategy: + blueGreen: + activeService: manual + previewService: manual-preview + template: + metadata: + annotations: + 
config.alpha.linkerd.io/proxy-wait-before-exit-seconds: '10' + config.linkerd.io/proxy-await: enabled + instrumentation.opentelemetry.io/container-names: init + instrumentation.opentelemetry.io/inject-nodejs: otel/init-instrumentation + linkerd.io/inject: enabled + labels: + app: init-manual + linkerd.io/proxy-deployment: init-manual + spec: + containers: + - env: + - name: APP + value: manual + - name: HTTP_PORT + value: '8080' + image: 'REDACTED' + imagePullPolicy: IfNotPresent + livenessProbe: + failureThreshold: 3 + grpc: + port: 8080 + service: init + initialDelaySeconds: 15 + periodSeconds: 10 + name: init + ports: + - containerPort: 8080 + protocol: TCP + readinessProbe: + failureThreshold: 1 + grpc: + port: 8080 + service: init + initialDelaySeconds: 20 + periodSeconds: 5 + resources: + limits: + ephemeral-storage: 512Mi + requests: + cpu: 50m + ephemeral-storage: 256Mi + memory: 256Mi + imagePullSecrets: + - name: REDACTED + nodeSelector: + cloud.google.com/gke-provisioning: spot + iam.gke.io/gke-metadata-server-enabled: 'true' + serviceAccountName: default