From d541d3c01a2f388cf6666c5d482766f778d24b4a Mon Sep 17 00:00:00 2001 From: savitaashture Date: Thu, 30 Apr 2020 19:03:14 +0530 Subject: [PATCH 01/15] Add multi container changes to deploy PodSpec --- pkg/apis/serving/v1/revision_helpers.go | 9 +- .../serving/v1/revision_lifecycle_test.go | 22 +- .../serving/v1alpha1/revision_lifecycle.go | 9 +- .../v1alpha1/revision_lifecycle_test.go | 24 +- pkg/reconciler/revision/resources/deploy.go | 55 +- .../revision/resources/deploy_test.go | 520 +++++++++++++--- .../revision/resources/queue_test.go | 22 +- pkg/reconciler/revision/table_test.go | 570 ++++++++++++------ pkg/testing/v1/revision.go | 7 - pkg/webhook/podspec_dryrun.go | 4 +- 10 files changed, 942 insertions(+), 300 deletions(-) diff --git a/pkg/apis/serving/v1/revision_helpers.go b/pkg/apis/serving/v1/revision_helpers.go index 3edb3ade1aa7..7986e4a4598b 100644 --- a/pkg/apis/serving/v1/revision_helpers.go +++ b/pkg/apis/serving/v1/revision_helpers.go @@ -72,8 +72,15 @@ func (e LastPinnedParseError) Error() string { // It is never nil and should be exactly the specified container as guaranteed // by validation. func (rs *RevisionSpec) GetContainer() *corev1.Container { - if len(rs.Containers) > 0 { + switch { + case len(rs.Containers) == 1: return &rs.Containers[0] + case len(rs.Containers) > 1: + for i := range rs.Containers { + if len(rs.Containers[i].Ports) != 0 { + return &rs.Containers[i] + } + } } // Should be unreachable post-validation, but here to ease testing. 
return &corev1.Container{} diff --git a/pkg/apis/serving/v1/revision_lifecycle_test.go b/pkg/apis/serving/v1/revision_lifecycle_test.go index 06ba0c17a9b7..f2fcd0ce48b1 100644 --- a/pkg/apis/serving/v1/revision_lifecycle_test.go +++ b/pkg/apis/serving/v1/revision_lifecycle_test.go @@ -765,12 +765,15 @@ func TestGetContainer(t *testing.T) { Image: "foo", }, }, { - name: "get first container info even after passing multiple", + name: "get serving container info even if there are multiple containers", status: RevisionSpec{ PodSpec: corev1.PodSpec{ Containers: []corev1.Container{{ Name: "firstContainer", Image: "firstImage", + Ports: []corev1.ContainerPort{{ + ContainerPort: 8888, + }}, }, { Name: "secondContainer", Image: "secondImage", @@ -780,7 +783,24 @@ func TestGetContainer(t *testing.T) { want: &corev1.Container{ Name: "firstContainer", Image: "firstImage", + Ports: []corev1.ContainerPort{{ + ContainerPort: 8888, + }}, + }, + }, { + name: "get empty container when passed multiple containers without the container port", + status: RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Name: "firstContainer", + Image: "firstImage", + }, { + Name: "secondContainer", + Image: "secondImage", + }}, + }, }, + want: &corev1.Container{}, }} for _, tc := range cases { t.Run(tc.name, func(t *testing.T) { diff --git a/pkg/apis/serving/v1alpha1/revision_lifecycle.go b/pkg/apis/serving/v1alpha1/revision_lifecycle.go index c9437423a3d2..b95810079b66 100644 --- a/pkg/apis/serving/v1alpha1/revision_lifecycle.go +++ b/pkg/apis/serving/v1alpha1/revision_lifecycle.go @@ -76,8 +76,15 @@ func (rs *RevisionSpec) GetContainer() *corev1.Container { if rs.DeprecatedContainer != nil { return rs.DeprecatedContainer } - if len(rs.Containers) > 0 { + switch { + case len(rs.Containers) == 1: return &rs.Containers[0] + case len(rs.Containers) > 1: + for i := range rs.Containers { + if len(rs.Containers[i].Ports) != 0 { + return &rs.Containers[i] + } + } } // Should be 
unreachable post-validation, but is here to ease testing. return &corev1.Container{} diff --git a/pkg/apis/serving/v1alpha1/revision_lifecycle_test.go b/pkg/apis/serving/v1alpha1/revision_lifecycle_test.go index 06bed42c766c..a35ab74572ac 100644 --- a/pkg/apis/serving/v1alpha1/revision_lifecycle_test.go +++ b/pkg/apis/serving/v1alpha1/revision_lifecycle_test.go @@ -759,13 +759,16 @@ func TestGetContainer(t *testing.T) { Image: "foo", }, }, { - name: "get first container info even after passing multiple", + name: "get serving container info even if there are multiple containers", status: RevisionSpec{ RevisionSpec: v1.RevisionSpec{ PodSpec: corev1.PodSpec{ Containers: []corev1.Container{{ Name: "firstContainer", Image: "firstImage", + Ports: []corev1.ContainerPort{{ + ContainerPort: 8888, + }}, }, { Name: "secondContainer", Image: "secondImage", @@ -776,7 +779,26 @@ func TestGetContainer(t *testing.T) { want: &corev1.Container{ Name: "firstContainer", Image: "firstImage", + Ports: []corev1.ContainerPort{{ + ContainerPort: 8888, + }}, + }, + }, { + name: "get empty container when passed multiple containers without the container port", + status: RevisionSpec{ + RevisionSpec: v1.RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Name: "firstContainer", + Image: "firstImage", + }, { + Name: "secondContainer", + Image: "secondImage", + }}, + }, + }, }, + want: &corev1.Container{}, }} for _, tc := range cases { t.Run(tc.name, func(t *testing.T) { diff --git a/pkg/reconciler/revision/resources/deploy.go b/pkg/reconciler/revision/resources/deploy.go index fe4607931814..29342688d727 100644 --- a/pkg/reconciler/revision/resources/deploy.go +++ b/pkg/reconciler/revision/resources/deploy.go @@ -126,8 +126,8 @@ func makePodSpec(rev *v1.Revision, loggingConfig *logging.Config, tracingConfig return nil, fmt.Errorf("failed to create queue-proxy container: %w", err) } - userContainer := BuildUserContainer(rev) - podSpec := BuildPodSpec(rev, 
[]corev1.Container{*userContainer, *queueContainer}) + container := append(BuildUserContainer(rev), *queueContainer) + podSpec := BuildPodSpec(rev, container) if autoscalerConfig.EnableGracefulScaledown { podSpec.Volumes = append(podSpec.Volumes, labelVolume) @@ -137,8 +137,32 @@ func makePodSpec(rev *v1.Revision, loggingConfig *logging.Config, tracingConfig } // BuildUserContainer makes a container from the Revision template. -func BuildUserContainer(rev *v1.Revision) *corev1.Container { - userContainer := rev.Spec.GetContainer().DeepCopy() +func BuildUserContainer(rev *v1.Revision) []corev1.Container { + containers := []corev1.Container{} + + for i := range rev.Spec.PodSpec.Containers { + if len(rev.Spec.PodSpec.Containers[i].Ports) != 0 || len(rev.Spec.PodSpec.Containers) == 1 { + servingContainer := makeServingContainer(rev.Spec.GetContainer().DeepCopy(), rev) + updateImage(rev, &servingContainer) + containers = append(containers, servingContainer) + } else { + multiContainer := makeContainer(rev.Spec.PodSpec.Containers[i].DeepCopy(), rev) + updateImage(rev, &multiContainer) + containers = append(containers, multiContainer) + } + } + return containers +} + +func updateImage(rev *v1.Revision, container *corev1.Container) { + for _, digest := range rev.Status.ContainerStatuses { + if digest.Name == container.Name { + container.Image = digest.ImageDigest + } + } +} + +func makeContainer(container *corev1.Container, rev *v1.Revision) corev1.Container { // Adding or removing an overwritten corev1.Container field here? Don't forget to // update the fieldmasks / validations in pkg/apis/serving varLogMount := varLogVolumeMount.DeepCopy() @@ -146,6 +170,17 @@ func BuildUserContainer(rev *v1.Revision) *corev1.Container { userContainer.VolumeMounts = append(userContainer.VolumeMounts, *varLogMount) userContainer.Lifecycle = userLifecycle + container.Env = append(container.Env, getKnativeEnvVar(rev)...) 
+ // Explicitly disable stdin and tty allocation + container.Stdin = false + container.TTY = false + if container.TerminationMessagePolicy == "" { + container.TerminationMessagePolicy = corev1.TerminationMessageFallbackToLogsOnError + } + return *container +} + +func makeServingContainer(servingContainer *corev1.Container, rev *v1.Revision) corev1.Container { userPort := getUserPort(rev) userPortInt := int(userPort) userPortStr := strconv.Itoa(userPortInt) @@ -170,15 +205,19 @@ func BuildUserContainer(rev *v1.Revision) *corev1.Container { if userContainer.ReadinessProbe != nil { if userContainer.ReadinessProbe.HTTPGet != nil || userContainer.ReadinessProbe.TCPSocket != nil { + servingContainer.Ports = buildContainerPorts(userPort) + servingContainer.Env = append(servingContainer.Env, buildUserPortEnv(userPortStr)) + container := makeContainer(servingContainer, rev) + if container.ReadinessProbe != nil { + if container.ReadinessProbe.HTTPGet != nil || container.ReadinessProbe.TCPSocket != nil { // HTTP and TCP ReadinessProbes are executed by the queue-proxy directly against the // user-container instead of via kubelet. - userContainer.ReadinessProbe = nil + container.ReadinessProbe = nil } } - // If the client provides probes, we should fill in the port for them. - rewriteUserProbe(userContainer.LivenessProbe, userPortInt) - return userContainer + rewriteUserProbe(container.LivenessProbe, userPortInt) + return container } // BuildPodSpec creates a PodSpec from the given revision and containers. 
diff --git a/pkg/reconciler/revision/resources/deploy_test.go b/pkg/reconciler/revision/resources/deploy_test.go index 6a97ef2be742..e5c911e60c2d 100644 --- a/pkg/reconciler/revision/resources/deploy_test.go +++ b/pkg/reconciler/revision/resources/deploy_test.go @@ -43,12 +43,13 @@ import ( ) var ( - containerName = "my-container-name" + servingContainerName = "serving-container" + sidecarContainerName = "sidecar-container-1" sidecarIstioInjectAnnotation = "sidecar.istio.io/inject" - defaultUserContainer = &corev1.Container{ - Name: containerName, - Image: "busybox", - Ports: buildContainerPorts(v1.DefaultUserPort), + defaultServingContainer = &corev1.Container{ + Name: servingContainerName, + Image: "busybox", + Ports: buildContainerPorts(v1.DefaultUserPort), VolumeMounts: []corev1.VolumeMount{{ Name: varLogVolume.Name, MountPath: "/var/log", @@ -77,6 +78,24 @@ var ( }}, } + defaultSidecarContainer = &corev1.Container{ + Name: sidecarContainerName, + Image: "ubuntu", + VolumeMounts: []corev1.VolumeMount{varLogVolumeMount}, + Lifecycle: userLifecycle, + TerminationMessagePolicy: corev1.TerminationMessageFallbackToLogsOnError, + Stdin: false, + TTY: false, + Env: []corev1.EnvVar{{ + Name: "K_REVISION", + Value: "bar", + }, { + Name: "K_CONFIGURATION", + }, { + Name: "K_SERVICE", + }}, + } + defaultQueueContainer = &corev1.Container{ Name: QueueContainerName, Resources: createQueueResources(make(map[string]string), &corev1.Container{}), @@ -217,22 +236,18 @@ var ( }, }, } +) - defaultRevision = &v1.Revision{ +func defaultRevision() *v1.Revision { + return &v1.Revision{ ObjectMeta: metav1.ObjectMeta{ UID: "1234", }, Spec: v1.RevisionSpec{ TimeoutSeconds: ptr.Int64(45), - PodSpec: corev1.PodSpec{ - Containers: []corev1.Container{{ - Name: containerName, - Image: "busybox", - }}, - }, }, } -) +} func refInt64(num int64) *int64 { return &num @@ -250,8 +265,12 @@ func container(container *corev1.Container, opts ...containerOption) corev1.Cont return *container } -func 
userContainer(opts ...containerOption) corev1.Container { - return container(defaultUserContainer.DeepCopy(), opts...) +func servingContainer(opts ...containerOption) corev1.Container { + return container(defaultServingContainer.DeepCopy(), opts...) +} + +func sidecarContainer(opts ...containerOption) corev1.Container { + return container(defaultSidecarContainer.DeepCopy(), opts...) } func queueContainer(opts ...containerOption) corev1.Container { @@ -350,7 +369,7 @@ func appsv1deployment(opts ...deploymentOption) *appsv1.Deployment { } func revision(name, ns string, opts ...revisionOption) *v1.Revision { - revision := defaultRevision.DeepCopy() + revision := defaultRevision().DeepCopy() revision.ObjectMeta.Name = name revision.ObjectMeta.Namespace = ns for _, option := range opts { @@ -381,6 +400,42 @@ func withOwnerReference(name string) revisionOption { } } +func withPodSpec(containers []corev1.Container) revisionOption { + return func(revision *v1.Revision) { + revision.Spec = v1.RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: containers, + }, + TimeoutSeconds: ptr.Int64(45), + } + } +} + +func withContainer() containerOption { + return func(container *corev1.Container) { + container.Command = []string{"/bin/bash"} + container.Args = []string{"-c", "echo Hello world"} + container.Env = append([]corev1.EnvVar{{ + Name: "FOO", + Value: "bar", + }, { + Name: "BAZ", + Value: "blah", + }}, container.Env...) 
+ container.Resources = corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceMemory: resource.MustParse("666Mi"), + corev1.ResourceCPU: resource.MustParse("666m"), + }, + Limits: corev1.ResourceList{ + corev1.ResourceMemory: resource.MustParse("888Mi"), + corev1.ResourceCPU: resource.MustParse("888m"), + }, + } + container.TerminationMessagePolicy = corev1.TerminationMessageReadFile + } +} + func TestMakePodSpec(t *testing.T) { tests := []struct { name string @@ -391,6 +446,10 @@ func TestMakePodSpec(t *testing.T) { }{{ name: "user-defined user port, queue proxy have PORT env", rev: revision("bar", "foo", + withPodSpec([]corev1.Container{{ + Name: servingContainerName, + Image: "busybox", + }}), withContainerConcurrency(1), func(revision *v1.Revision) { revision.Spec.GetContainer().Ports = []corev1.ContainerPort{{ @@ -403,7 +462,7 @@ func TestMakePodSpec(t *testing.T) { ), want: podSpec( []corev1.Container{ - userContainer( + servingContainer( func(container *corev1.Container) { container.Ports[0].ContainerPort = 8888 }, @@ -418,6 +477,10 @@ func TestMakePodSpec(t *testing.T) { }, { name: "volumes passed through", rev: revision("bar", "foo", + withPodSpec([]corev1.Container{{ + Name: servingContainerName, + Image: "busybox", + }}), withContainerConcurrency(1), func(revision *v1.Revision) { revision.Spec.GetContainer().Ports = []corev1.ContainerPort{{ @@ -442,7 +505,7 @@ func TestMakePodSpec(t *testing.T) { ), want: podSpec( []corev1.Container{ - userContainer( + servingContainer( func(container *corev1.Container) { container.Ports[0].ContainerPort = 8888 }, @@ -468,6 +531,10 @@ func TestMakePodSpec(t *testing.T) { }, { name: "concurrency=1 no owner", rev: revision("bar", "foo", + withPodSpec([]corev1.Container{{ + Name: servingContainerName, + Image: "busybox", + }}), withContainerConcurrency(1), func(revision *v1.Revision) { container(revision.Spec.GetContainer(), @@ -477,7 +544,7 @@ func TestMakePodSpec(t *testing.T) { ), want: podSpec( 
[]corev1.Container{ - userContainer(), + servingContainer(), queueContainer( withEnvVar("CONTAINER_CONCURRENCY", "1"), ), @@ -485,10 +552,18 @@ func TestMakePodSpec(t *testing.T) { }, { name: "concurrency=1 no owner digest resolved", rev: revision("bar", "foo", + withPodSpec([]corev1.Container{{ + Name: servingContainerName, + Image: "busybox", + }}), withContainerConcurrency(1), func(revision *v1.Revision) { revision.Status = v1.RevisionStatus{ DeprecatedImageDigest: "busybox@sha256:deadbeef", + ContainerStatuses: []v1.ContainerStatuses{{ + Name: servingContainerName, + ImageDigest: "busybox@sha256:deadbeef", + }}, } container(revision.Spec.GetContainer(), withTCPReadinessProbe(), @@ -497,7 +572,7 @@ func TestMakePodSpec(t *testing.T) { ), want: podSpec( []corev1.Container{ - userContainer(func(container *corev1.Container) { + servingContainer(func(container *corev1.Container) { container.Image = "busybox@sha256:deadbeef" }), queueContainer( @@ -507,6 +582,10 @@ func TestMakePodSpec(t *testing.T) { }, { name: "concurrency=1 with owner", rev: revision("bar", "foo", + withPodSpec([]corev1.Container{{ + Name: servingContainerName, + Image: "busybox", + }}), withContainerConcurrency(1), withOwnerReference("parent-config"), func(revision *v1.Revision) { @@ -517,7 +596,7 @@ func TestMakePodSpec(t *testing.T) { ), want: podSpec( []corev1.Container{ - userContainer(), + servingContainer(), queueContainer( withEnvVar("SERVING_CONFIGURATION", "parent-config"), withEnvVar("CONTAINER_CONCURRENCY", "1"), @@ -525,14 +604,19 @@ func TestMakePodSpec(t *testing.T) { }), }, { name: "with http readiness probe", - rev: revision("bar", "foo", func(revision *v1.Revision) { - container(revision.Spec.GetContainer(), - withHTTPReadinessProbe(v1.DefaultUserPort), - ) - }), + rev: revision("bar", "foo", + withPodSpec([]corev1.Container{{ + Name: servingContainerName, + Image: "busybox", + }}), + func(revision *v1.Revision) { + container(revision.Spec.GetContainer(), + 
withHTTPReadinessProbe(v1.DefaultUserPort), + ) + }), want: podSpec( []corev1.Container{ - userContainer(), + servingContainer(), queueContainer( withEnvVar("CONTAINER_CONCURRENCY", "0"), withEnvVar("SERVING_READINESS_PROBE", `{"httpGet":{"path":"/","port":8080,"host":"127.0.0.1","scheme":"HTTP","httpHeaders":[{"name":"K-Kubelet-Probe","value":"queue"}]}}`), @@ -543,9 +627,24 @@ func TestMakePodSpec(t *testing.T) { rev: revision("bar", "foo", func(revision *v1.Revision) { container(revision.Spec.GetContainer(), withTCPReadinessProbe()) }), + rev: revision("bar", "foo", + withPodSpec([]corev1.Container{{ + Name: servingContainerName, + Image: "busybox", + }}), + func(revision *v1.Revision) { + container(revision.Spec.GetContainer(), + withReadinessProbe(corev1.Handler{ + TCPSocket: &corev1.TCPSocketAction{ + Host: "127.0.0.1", + Port: intstr.FromInt(12345), + }, + }), + ) + }), want: podSpec( []corev1.Container{ - userContainer(), + servingContainer(), queueContainer( withEnvVar("CONTAINER_CONCURRENCY", "0"), withEnvVar("SERVING_READINESS_PROBE", `{"tcpSocket":{"port":8080,"host":"127.0.0.1"}}`), @@ -553,14 +652,19 @@ func TestMakePodSpec(t *testing.T) { }), }, { name: "with shell readiness probe", - rev: revision("bar", "foo", func(revision *v1.Revision) { - container(revision.Spec.GetContainer(), - withExecReadinessProbe([]string{"echo", "hello"}), - ) - }), + rev: revision("bar", "foo", + withPodSpec([]corev1.Container{{ + Name: servingContainerName, + Image: "busybox", + }}), + func(revision *v1.Revision) { + container(revision.Spec.GetContainer(), + withExecReadinessProbe([]string{"echo", "hello"}), + ) + }), want: podSpec( []corev1.Container{ - userContainer( + servingContainer( withExecReadinessProbe([]string{"echo", "hello"})), queueContainer( withEnvVar("CONTAINER_CONCURRENCY", "0"), @@ -569,19 +673,24 @@ func TestMakePodSpec(t *testing.T) { }), }, { name: "with http liveness probe", - rev: revision("bar", "foo", func(revision *v1.Revision) { - 
container(revision.Spec.GetContainer(), - withTCPReadinessProbe(), - withLivenessProbe(corev1.Handler{ - HTTPGet: &corev1.HTTPGetAction{ - Path: "/", - }, - }), - ) - }), + rev: revision("bar", "foo", + withPodSpec([]corev1.Container{{ + Name: servingContainerName, + Image: "busybox", + }}), + func(revision *v1.Revision) { + container(revision.Spec.GetContainer(), + withTCPReadinessProbe(), + withLivenessProbe(corev1.Handler{ + HTTPGet: &corev1.HTTPGetAction{ + Path: "/", + }, + }), + ) + }), want: podSpec( []corev1.Container{ - userContainer( + servingContainer( withLivenessProbe(corev1.Handler{ HTTPGet: &corev1.HTTPGetAction{ Path: "/", @@ -599,17 +708,22 @@ func TestMakePodSpec(t *testing.T) { }), }, { name: "with tcp liveness probe", - rev: revision("bar", "foo", func(revision *v1.Revision) { - container(revision.Spec.GetContainer(), - withTCPReadinessProbe(), - withLivenessProbe(corev1.Handler{ - TCPSocket: &corev1.TCPSocketAction{}, - }), - ) - }), + rev: revision("bar", "foo", + withPodSpec([]corev1.Container{{ + Name: servingContainerName, + Image: "busybox", + }}), + func(revision *v1.Revision) { + container(revision.Spec.GetContainer(), + withTCPReadinessProbe(), + withLivenessProbe(corev1.Handler{ + TCPSocket: &corev1.TCPSocketAction{}, + }), + ) + }), want: podSpec( []corev1.Container{ - userContainer( + servingContainer( withLivenessProbe(corev1.Handler{ TCPSocket: &corev1.TCPSocketAction{ Port: intstr.FromInt(v1.DefaultUserPort), @@ -622,17 +736,56 @@ func TestMakePodSpec(t *testing.T) { }), }, { name: "with graceful scaledown enabled", - rev: revision("bar", "foo", func(revision *v1.Revision) { - container(revision.Spec.GetContainer(), - withTCPReadinessProbe(), - ) - }), + rev: revision("bar", "foo", + withPodSpec([]corev1.Container{{ + Name: servingContainerName, + Image: "busybox", + }}), + func(revision *v1.Revision) { + container(revision.Spec.GetContainer(), + withTCPReadinessProbe(), + ) + }), + ac: autoscalerconfig.Config{ + 
EnableGracefulScaledown: true, + }, + want: podSpec( + []corev1.Container{ + servingContainer(), + queueContainer( + withEnvVar("DOWNWARD_API_LABELS_PATH", fmt.Sprintf("%s/%s", podInfoVolumePath, metadataLabelsPath)), + withPodLabelsVolumeMount(), + ), + }, + func(podSpec *corev1.PodSpec) { + podSpec.Volumes = append(podSpec.Volumes, labelVolume) + }, + ), + }, { + name: "with graceful scaledown enabled for multi container", + rev: revision("bar", "foo", + withPodSpec([]corev1.Container{{ + Name: servingContainerName, + Image: "busybox", + Ports: []corev1.ContainerPort{{ + ContainerPort: 8080, + }}, + }, { + Name: sidecarContainerName, + Image: "ubuntu", + }}), + func(revision *v1.Revision) { + container(revision.Spec.GetContainer(), + withTCPReadinessProbe(), + ) + }), ac: autoscalerconfig.Config{ EnableGracefulScaledown: true, }, want: podSpec( []corev1.Container{ - userContainer(), + servingContainer(), + sidecarContainer(), queueContainer( withEnvVar("DOWNWARD_API_LABELS_PATH", fmt.Sprintf("%s/%s", podInfoVolumePath, metadataLabelsPath)), withPodLabelsVolumeMount(), @@ -645,6 +798,10 @@ func TestMakePodSpec(t *testing.T) { }, { name: "complex pod spec", rev: revision("bar", "foo", + withPodSpec([]corev1.Container{{ + Name: servingContainerName, + Image: "busybox", + }}), withContainerConcurrency(1), func(revision *v1.Revision) { revision.ObjectMeta.Labels = map[string]string{ @@ -673,28 +830,83 @@ func TestMakePodSpec(t *testing.T) { ), want: podSpec( []corev1.Container{ - userContainer( + servingContainer( + withContainer(), + withEnvVar("K_CONFIGURATION", "cfg"), + withEnvVar("K_SERVICE", "svc"), + ), + queueContainer( + withEnvVar("CONTAINER_CONCURRENCY", "1"), + withEnvVar("SERVING_SERVICE", "svc"), + ), + }), + }, { + name: "complex pod spec for multiple containers with container data to all containers", + rev: revision("bar", "foo", + withPodSpec([]corev1.Container{ + { + Name: servingContainerName, + Image: "busybox", + Ports: []corev1.ContainerPort{{ + 
ContainerPort: 8080, + }}, + }, { + Name: sidecarContainerName, + Image: "ubuntu", + }, { + Name: "sidecar-container-2", + Image: "alpine", + }, + }), + withContainerConcurrency(1), + func(revision *v1.Revision) { + revision.ObjectMeta.Labels = map[string]string{ + serving.ConfigurationLabelKey: "cfg", + serving.ServiceLabelKey: "svc", + } + // When there are multiple containers then only serving container will have readinessProbe. here + // container[0] acts as a serving container because of the provided containerPort so using withTCPReadinessProbe. + container(&revision.Spec.Containers[0], + withTCPReadinessProbe(), + ) + for i := range revision.Spec.Containers { + revision.Spec.Containers[i].Command = []string{"/bin/bash"} + revision.Spec.Containers[i].Args = []string{"-c", "echo Hello world"} + container(&revision.Spec.Containers[i], + withEnvVar("FOO", "bar"), + withEnvVar("BAZ", "blah"), + ) + revision.Spec.Containers[i].Resources = corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceMemory: resource.MustParse("666Mi"), + corev1.ResourceCPU: resource.MustParse("666m"), + }, + Limits: corev1.ResourceList{ + corev1.ResourceMemory: resource.MustParse("888Mi"), + corev1.ResourceCPU: resource.MustParse("888m"), + }, + } + revision.Spec.Containers[i].TerminationMessagePolicy = corev1.TerminationMessageReadFile + } + }, + ), + want: podSpec( + []corev1.Container{ + servingContainer( + withContainer(), + withEnvVar("K_CONFIGURATION", "cfg"), + withEnvVar("K_SERVICE", "svc"), + ), + sidecarContainer( + withContainer(), + withEnvVar("K_CONFIGURATION", "cfg"), + withEnvVar("K_SERVICE", "svc"), + ), + sidecarContainer( + withContainer(), func(container *corev1.Container) { - container.Command = []string{"/bin/bash"} - container.Args = []string{"-c", "echo Hello world"} - container.Env = append([]corev1.EnvVar{{ - Name: "FOO", - Value: "bar", - }, { - Name: "BAZ", - Value: "blah", - }}, container.Env...) 
- container.Resources = corev1.ResourceRequirements{ - Requests: corev1.ResourceList{ - corev1.ResourceMemory: resource.MustParse("666Mi"), - corev1.ResourceCPU: resource.MustParse("666m"), - }, - Limits: corev1.ResourceList{ - corev1.ResourceMemory: resource.MustParse("888Mi"), - corev1.ResourceCPU: resource.MustParse("888m"), - }, - } - container.TerminationMessagePolicy = corev1.TerminationMessageReadFile + container.Name = "sidecar-container-2" + container.Image = "alpine" }, withEnvVar("K_CONFIGURATION", "cfg"), withEnvVar("K_SERVICE", "svc"), @@ -704,6 +916,68 @@ func TestMakePodSpec(t *testing.T) { withEnvVar("SERVING_SERVICE", "svc"), ), }), + }, { + name: "complex pod spec for multiple containers with container data only to serving containers", + rev: revision("bar", "foo", + withPodSpec([]corev1.Container{ + { + Name: servingContainerName, + Image: "busybox", + Ports: []corev1.ContainerPort{{ + ContainerPort: 8888, + }}, + }, { + Name: sidecarContainerName, + Image: "ubuntu", + }}), + withContainerConcurrency(1), + func(revision *v1.Revision) { + revision.ObjectMeta.Labels = map[string]string{ + serving.ConfigurationLabelKey: "cfg", + serving.ServiceLabelKey: "svc", + } + revision.Spec.GetContainer().Command = []string{"/bin/bash"} + revision.Spec.GetContainer().Args = []string{"-c", "echo Hello world"} + container(revision.Spec.GetContainer(), + withTCPReadinessProbe(), + withEnvVar("FOO", "bar"), + withEnvVar("BAZ", "blah"), + ) + revision.Spec.GetContainer().Resources = corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceMemory: resource.MustParse("666Mi"), + corev1.ResourceCPU: resource.MustParse("666m"), + }, + Limits: corev1.ResourceList{ + corev1.ResourceMemory: resource.MustParse("888Mi"), + corev1.ResourceCPU: resource.MustParse("888m"), + }, + } + revision.Spec.GetContainer().TerminationMessagePolicy = corev1.TerminationMessageReadFile + }, + ), + want: podSpec( + []corev1.Container{ + servingContainer( + 
withContainer(), + func(container *corev1.Container) { + container.Ports[0].ContainerPort = 8888 + }, + withEnvVar("PORT", "8888"), + withEnvVar("K_CONFIGURATION", "cfg"), + withEnvVar("K_SERVICE", "svc"), + ), + sidecarContainer( + withEnvVar("K_CONFIGURATION", "cfg"), + withEnvVar("K_SERVICE", "svc"), + ), + queueContainer( + withEnvVar("CONTAINER_CONCURRENCY", "1"), + withEnvVar("SERVING_SERVICE", "svc"), + withEnvVar("USER_PORT", "8888"), + withEnvVar("SERVING_READINESS_PROBE", `{"tcpSocket":{"port":8888,"host":"127.0.0.1"}}`), + ), + }), }} for _, test := range tests { @@ -738,6 +1012,18 @@ func TestMakeDeployment(t *testing.T) { }{{ name: "with concurrency=1", rev: revision("bar", "foo", + withPodSpec([]corev1.Container{ + { + Name: servingContainerName, + Image: "busybox", + Ports: []corev1.ContainerPort{{ + ContainerPort: 8888, + }}, + }, { + Name: sidecarContainerName, + Image: "ubuntu", + }, + }), withoutLabels, withContainerConcurrency(1), func(revision *v1.Revision) { @@ -747,6 +1033,10 @@ func TestMakeDeployment(t *testing.T) { }, { name: "with owner", rev: revision("bar", "foo", + withPodSpec([]corev1.Container{{ + Name: servingContainerName, + Image: "ubuntu", + }}), withoutLabels, withOwnerReference("parent-config"), func(revision *v1.Revision) { @@ -762,6 +1052,25 @@ func TestMakeDeployment(t *testing.T) { container(revision.Spec.GetContainer(), withTCPReadinessProbe()) }), want: appsv1deployment(func(deploy *appsv1.Deployment) { + rev: revision("bar", "foo", + withPodSpec([]corev1.Container{{ + Name: servingContainerName, + Image: "ubuntu", + }}), + withoutLabels, func(revision *v1.Revision) { + revision.ObjectMeta.Annotations = map[string]string{ + sidecarIstioInjectAnnotation: "false", + } + container(revision.Spec.GetContainer(), + withReadinessProbe(corev1.Handler{ + TCPSocket: &corev1.TCPSocketAction{ + Host: "127.0.0.1", + Port: intstr.FromInt(12345), + }, + }), + ) + }), + want: makeDeployment(func(deploy *appsv1.Deployment) { 
deploy.ObjectMeta.Annotations[sidecarIstioInjectAnnotation] = "false" deploy.Spec.Template.ObjectMeta.Annotations[sidecarIstioInjectAnnotation] = "false" }), @@ -798,3 +1107,51 @@ } } } + +func TestProgressDeadlineOverride(t *testing.T) { + rev := revision("bar", "foo", + withPodSpec([]corev1.Container{ + { + Name: servingContainerName, + Image: "busybox", + Ports: []corev1.ContainerPort{{ + ContainerPort: 8888, + }}, + }, { + Name: sidecarContainerName, + Image: "ubuntu", + }, + }), + withoutLabels, + func(revision *v1.Revision) { + container(revision.Spec.GetContainer(), + withReadinessProbe(corev1.Handler{ + TCPSocket: &corev1.TCPSocketAction{ + Host: "127.0.0.1", + Port: intstr.FromInt(12345), + }, + }), + ) + }, + ) + want := makeDeployment(func(d *appsv1.Deployment) { + d.Spec.ProgressDeadlineSeconds = ptr.Int32(42) + }) + + dc := &deployment.Config{ + ProgressDeadline: 42 * time.Second, + } + podSpec, err := makePodSpec(rev, &logConfig, &traceConfig, &obsConfig, &asConfig, dc) + if err != nil { + t.Fatal("makePodSpec returned error:", err) + } + want.Spec.Template.Spec = *podSpec + got, err := MakeDeployment(rev, &logConfig, &traceConfig, + &network.Config{}, &obsConfig, &asConfig, dc) + if err != nil { + t.Fatal("MakeDeployment returned error:", err) + } + if !cmp.Equal(want, got, cmp.AllowUnexported(resource.Quantity{})) { + t.Error("MakeDeployment (-want, +got) =", cmp.Diff(want, got, cmp.AllowUnexported(resource.Quantity{}))) + } +} diff --git a/pkg/reconciler/revision/resources/queue_test.go b/pkg/reconciler/revision/resources/queue_test.go index 6d421c6c6647..334afc3297cd 100644 --- a/pkg/reconciler/revision/resources/queue_test.go +++ b/pkg/reconciler/revision/resources/queue_test.go @@ -93,7 +93,7 @@ func TestMakeQueueContainer(t *testing.T) { func(revision *v1.Revision) { revision.Spec.PodSpec = corev1.PodSpec{ Containers: []corev1.Container{{ - Name: 
containerName, + Name: servingContainerName, ReadinessProbe: testProbe, Ports: []corev1.ContainerPort{{ ContainerPort: 1955, @@ -250,7 +250,7 @@ func TestMakeQueueContainer(t *testing.T) { if len(test.rev.Spec.PodSpec.Containers) == 0 { test.rev.Spec.PodSpec = corev1.PodSpec{ Containers: []corev1.Container{{ - Name: containerName, + Name: servingContainerName, ReadinessProbe: testProbe, }}, } @@ -287,7 +287,7 @@ func TestMakeQueueContainerWithPercentageAnnotation(t *testing.T) { serving.QueueSideCarResourcePercentageAnnotation: "20", } revision.Spec.PodSpec.Containers = []corev1.Container{{ - Name: containerName, + Name: servingContainerName, ReadinessProbe: testProbe, Resources: corev1.ResourceRequirements{ Limits: corev1.ResourceList{ @@ -313,7 +313,7 @@ func TestMakeQueueContainerWithPercentageAnnotation(t *testing.T) { serving.QueueSideCarResourcePercentageAnnotation: "0.2", } revision.Spec.PodSpec.Containers = []corev1.Container{{ - Name: containerName, + Name: servingContainerName, ReadinessProbe: testProbe, Resources: corev1.ResourceRequirements{ Requests: corev1.ResourceList{ @@ -339,7 +339,7 @@ func TestMakeQueueContainerWithPercentageAnnotation(t *testing.T) { serving.QueueSideCarResourcePercentageAnnotation: "foo", } revision.Spec.PodSpec.Containers = []corev1.Container{{ - Name: containerName, + Name: servingContainerName, ReadinessProbe: testProbe, Resources: corev1.ResourceRequirements{ Requests: corev1.ResourceList{ @@ -364,7 +364,7 @@ func TestMakeQueueContainerWithPercentageAnnotation(t *testing.T) { serving.QueueSideCarResourcePercentageAnnotation: "100", } revision.Spec.PodSpec.Containers = []corev1.Container{{ - Name: containerName, + Name: servingContainerName, ReadinessProbe: testProbe, Resources: corev1.ResourceRequirements{ Requests: corev1.ResourceList{ @@ -406,7 +406,7 @@ func TestProbeGenerationHTTPDefaults(t *testing.T) { withContainerConcurrency(1), func(revision *v1.Revision) { revision.Spec.PodSpec.Containers = []corev1.Container{{ - 
Name: containerName, + Name: servingContainerName, ReadinessProbe: &corev1.Probe{ Handler: corev1.Handler{ HTTPGet: &corev1.HTTPGetAction{ @@ -477,7 +477,7 @@ func TestProbeGenerationHTTP(t *testing.T) { withContainerConcurrency(1), func(revision *v1.Revision) { revision.Spec.PodSpec.Containers = []corev1.Container{{ - Name: containerName, + Name: servingContainerName, Ports: []corev1.ContainerPort{{ ContainerPort: userPort, }}, @@ -565,7 +565,7 @@ func TestTCPProbeGeneration(t *testing.T) { TimeoutSeconds: ptr.Int64(45), PodSpec: corev1.PodSpec{ Containers: []corev1.Container{{ - Name: containerName, + Name: servingContainerName, Ports: []corev1.ContainerPort{{ ContainerPort: userPort, }}, @@ -601,7 +601,7 @@ func TestTCPProbeGeneration(t *testing.T) { TimeoutSeconds: ptr.Int64(45), PodSpec: corev1.PodSpec{ Containers: []corev1.Container{{ - Name: containerName, + Name: servingContainerName, ReadinessProbe: &corev1.Probe{ Handler: corev1.Handler{ TCPSocket: &corev1.TCPSocketAction{}, @@ -656,7 +656,7 @@ func TestTCPProbeGeneration(t *testing.T) { TimeoutSeconds: ptr.Int64(45), PodSpec: corev1.PodSpec{ Containers: []corev1.Container{{ - Name: containerName, + Name: servingContainerName, Ports: []corev1.ContainerPort{{ ContainerPort: userPort, }}, diff --git a/pkg/reconciler/revision/table_test.go b/pkg/reconciler/revision/table_test.go index 4a12c0599a0e..1fcb0ac818cc 100644 --- a/pkg/reconciler/revision/table_test.go +++ b/pkg/reconciler/revision/table_test.go @@ -74,19 +74,26 @@ func TestReconcile(t *testing.T) { // We feed in a well formed Revision where none of its sub-resources exist, // and we expect it to create them and initialize the Revision's status. 
Objects: []runtime.Object{ - rev("foo", "first-reconcile"), + rev("foo", "first-reconcile", withContainerStatuses([]v1.ContainerStatuses{ + {Name: "first-reconcile", + ImageDigest: "busybox@sha256:deadbeef"}, + })), }, WantCreates: []runtime.Object{ // The first reconciliation of a Revision creates the following resources. pa("foo", "first-reconcile"), - deploy(t, "foo", "first-reconcile"), + deploy(t, "foo", "first-reconcile", withContainerStatuses([]v1.ContainerStatuses{ + {Name: "first-reconcile", + ImageDigest: "busybox@sha256:deadbeef"}, + })), image("foo", "first-reconcile"), }, WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ Object: rev("foo", "first-reconcile", // The first reconciliation Populates the following status properties. - WithLogURL, AllUnknownConditions, MarkDeploying("Deploying"), WithContainerStatuses([]v1.ContainerStatuses{ - {Name: "first-reconcile"}, + WithLogURL, AllUnknownConditions, MarkDeploying("Deploying"), withContainerStatuses([]v1.ContainerStatuses{ + {Name: "first-reconcile", + ImageDigest: "busybox@sha256:deadbeef"}, })), }}, Key: "foo/first-reconcile", @@ -99,19 +106,26 @@ func TestReconcile(t *testing.T) { InduceFailure("update", "revisions"), }, Objects: []runtime.Object{ - rev("foo", "update-status-failure"), + rev("foo", "update-status-failure", withContainerStatuses([]v1.ContainerStatuses{ + {Name: "update-status-failure", + ImageDigest: "busybox@sha256:deadbeef"}, + })), pa("foo", "update-status-failure"), }, WantCreates: []runtime.Object{ // We still see the following creates before the failure is induced. 
- deploy(t, "foo", "update-status-failure"), + deploy(t, "foo", "update-status-failure", withContainerStatuses([]v1.ContainerStatuses{ + {Name: "update-status-failure", + ImageDigest: "busybox@sha256:deadbeef"}, + })), image("foo", "update-status-failure"), }, WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ Object: rev("foo", "update-status-failure", // Despite failure, the following status properties are set. - WithLogURL, AllUnknownConditions, MarkDeploying("Deploying"), WithContainerStatuses([]v1.ContainerStatuses{ - {Name: "update-status-failure"}, + WithLogURL, AllUnknownConditions, MarkDeploying("Deploying"), withContainerStatuses([]v1.ContainerStatuses{ + {Name: "update-status-failure", + ImageDigest: "busybox@sha256:deadbeef"}, })), }}, WantEvents: []string{ @@ -128,19 +142,28 @@ func TestReconcile(t *testing.T) { InduceFailure("create", "podautoscalers"), }, Objects: []runtime.Object{ - rev("foo", "create-pa-failure"), + rev("foo", "create-pa-failure", withContainerStatuses([]v1.ContainerStatuses{ + {Name: "create-pa-failure", + ImageDigest: "busybox@sha256:deadbeef"}, + })), }, WantCreates: []runtime.Object{ // We still see the following creates before the failure is induced. pa("foo", "create-pa-failure"), - deploy(t, "foo", "create-pa-failure"), + deploy(t, "foo", "create-pa-failure", withContainerStatuses([]v1.ContainerStatuses{ + {Name: "create-pa-failure", + ImageDigest: "busybox@sha256:deadbeef"}, + })), image("foo", "create-pa-failure"), }, WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ Object: rev("foo", "create-pa-failure", // Despite failure, the following status properties are set. 
WithLogURL, WithInitRevConditions, - MarkDeploying("Deploying"), WithContainerStatuses([]v1.ContainerStatuses{{Name: "create-pa-failure"}})), + MarkDeploying("Deploying"), withContainerStatuses([]v1.ContainerStatuses{ + {Name: "create-pa-failure", + ImageDigest: "busybox@sha256:deadbeef"}, + })), }}, WantEvents: []string{ Eventf(corev1.EventTypeWarning, "InternalError", `failed to create PA "create-pa-failure": inducing failure for create podautoscalers`), @@ -155,18 +178,27 @@ func TestReconcile(t *testing.T) { InduceFailure("create", "deployments"), }, Objects: []runtime.Object{ - rev("foo", "create-user-deploy-failure"), + rev("foo", "create-user-deploy-failure", withContainerStatuses([]v1.ContainerStatuses{ + {Name: "create-user-deploy-failure", + ImageDigest: "busybox@sha256:deadbeef"}, + })), pa("foo", "create-user-deploy-failure"), }, WantCreates: []runtime.Object{ // We still see the following creates before the failure is induced. - deploy(t, "foo", "create-user-deploy-failure"), + deploy(t, "foo", "create-user-deploy-failure", withContainerStatuses([]v1.ContainerStatuses{ + {Name: "create-user-deploy-failure", + ImageDigest: "busybox@sha256:deadbeef"}, + })), }, WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ Object: rev("foo", "create-user-deploy-failure", // Despite failure, the following status properties are set. WithLogURL, WithInitRevConditions, - MarkDeploying("Deploying"), WithContainerStatuses([]v1.ContainerStatuses{{Name: "create-user-deploy-failure"}})), + MarkDeploying("Deploying"), withContainerStatuses([]v1.ContainerStatuses{ + {Name: "create-user-deploy-failure", + ImageDigest: "busybox@sha256:deadbeef"}, + })), }}, WantEvents: []string{ Eventf(corev1.EventTypeWarning, "InternalError", @@ -181,10 +213,16 @@ func TestReconcile(t *testing.T) { // are necessary. 
Objects: []runtime.Object{ rev("foo", "stable-reconcile", WithLogURL, AllUnknownConditions, - WithContainerStatuses([]v1.ContainerStatuses{{Name: "stable-reconcile"}})), + withContainerStatuses([]v1.ContainerStatuses{ + {Name: "stable-reconcile", + ImageDigest: "busybox@sha256:deadbeef"}, + })), pa("foo", "stable-reconcile", WithReachability(asv1a1.ReachabilityUnknown)), - deploy(t, "foo", "stable-reconcile"), + deploy(t, "foo", "stable-reconcile", withContainerStatuses([]v1.ContainerStatuses{ + {Name: "stable-reconcile", + ImageDigest: "busybox@sha256:deadbeef"}, + })), image("foo", "stable-reconcile"), }, // No changes are made to any objects. @@ -195,13 +233,19 @@ func TestReconcile(t *testing.T) { // with our desired spec. Objects: []runtime.Object{ rev("foo", "fix-containers", - WithLogURL, AllUnknownConditions, WithContainerStatuses([]v1.ContainerStatuses{{Name: "fix-containers"}})), + WithLogURL, AllUnknownConditions, withContainerStatuses([]v1.ContainerStatuses{ + {Name: "fix-containers", + ImageDigest: "busybox@sha256:deadbeef"}, + })), pa("foo", "fix-containers", WithReachability(asv1a1.ReachabilityUnknown)), changeContainers(deploy(t, "foo", "fix-containers")), image("foo", "fix-containers"), }, WantUpdates: []clientgotesting.UpdateActionImpl{{ - Object: deploy(t, "foo", "fix-containers"), + Object: deploy(t, "foo", "fix-containers", withContainerStatuses([]v1.ContainerStatuses{ + {Name: "fix-containers", + ImageDigest: "busybox@sha256:deadbeef"}, + })), }}, Key: "foo/fix-containers", }, { @@ -213,13 +257,19 @@ func TestReconcile(t *testing.T) { }, Objects: []runtime.Object{ rev("foo", "failure-update-deploy", - withK8sServiceName("whateves"), WithLogURL, AllUnknownConditions, WithContainerStatuses([]v1.ContainerStatuses{{Name: "failure-update-deploy"}})), + withK8sServiceName("whateves"), WithLogURL, AllUnknownConditions, withContainerStatuses([]v1.ContainerStatuses{ + {Name: "failure-update-deploy", + ImageDigest: "busybox@sha256:deadbeef"}, + })), 
pa("foo", "failure-update-deploy"), changeContainers(deploy(t, "foo", "failure-update-deploy")), image("foo", "failure-update-deploy"), }, WantUpdates: []clientgotesting.UpdateActionImpl{{ - Object: deploy(t, "foo", "failure-update-deploy"), + Object: deploy(t, "foo", "failure-update-deploy", withContainerStatuses([]v1.ContainerStatuses{ + {Name: "failure-update-deploy", + ImageDigest: "busybox@sha256:deadbeef"}, + })), }}, WantEvents: []string{ Eventf(corev1.EventTypeWarning, "InternalError", @@ -234,10 +284,16 @@ func TestReconcile(t *testing.T) { Objects: []runtime.Object{ rev("foo", "stable-deactivation", WithLogURL, MarkRevisionReady, - MarkInactive("NoTraffic", "This thing is inactive."), WithContainerStatuses([]v1.ContainerStatuses{{Name: "stable-deactivation"}})), + MarkInactive("NoTraffic", "This thing is inactive."), withContainerStatuses([]v1.ContainerStatuses{ + {Name: "stable-deactivation", + ImageDigest: "busybox@sha256:deadbeef"}, + })), pa("foo", "stable-deactivation", WithNoTraffic("NoTraffic", "This thing is inactive.")), - deploy(t, "foo", "stable-deactivation"), + deploy(t, "foo", "stable-deactivation", withContainerStatuses([]v1.ContainerStatuses{ + {Name: "stable-deactivation", + ImageDigest: "busybox@sha256:deadbeef"}, + })), image("foo", "stable-deactivation"), }, Key: "foo/stable-deactivation", @@ -245,9 +301,15 @@ func TestReconcile(t *testing.T) { Name: "pa is ready", Objects: []runtime.Object{ rev("foo", "pa-ready", - withK8sServiceName("old-stuff"), WithLogURL, AllUnknownConditions), + withK8sServiceName("old-stuff"), WithLogURL, AllUnknownConditions, withContainerStatuses([]v1.ContainerStatuses{ + {Name: "pa-ready", + ImageDigest: "busybox@sha256:deadbeef"}, + })), pa("foo", "pa-ready", WithTraffic, WithPAStatusService("new-stuff"), WithReachability(asv1a1.ReachabilityUnknown)), - deploy(t, "foo", "pa-ready"), + deploy(t, "foo", "pa-ready", withContainerStatuses([]v1.ContainerStatuses{ + {Name: "pa-ready", + ImageDigest: 
"busybox@sha256:deadbeef"}, + })), image("foo", "pa-ready"), }, WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ @@ -255,7 +317,10 @@ func TestReconcile(t *testing.T) { WithLogURL, // When the endpoint and pa are ready, then we will see the // Revision become ready. - MarkRevisionReady, WithContainerStatuses([]v1.ContainerStatuses{{Name: "pa-ready"}})), + MarkRevisionReady, withContainerStatuses([]v1.ContainerStatuses{ + {Name: "pa-ready", + ImageDigest: "busybox@sha256:deadbeef"}, + })), }}, WantEvents: []string{ Eventf(corev1.EventTypeNormal, "RevisionReady", "Revision becomes ready upon all resources being ready"), @@ -267,16 +332,25 @@ func TestReconcile(t *testing.T) { Objects: []runtime.Object{ rev("foo", "pa-not-ready", withK8sServiceName("somebody-told-me"), WithLogURL, - MarkRevisionReady), + MarkRevisionReady, withContainerStatuses([]v1.ContainerStatuses{ + {Name: "pa-not-ready", + ImageDigest: "busybox@sha256:deadbeef"}, + })), pa("foo", "pa-not-ready", WithPAStatusService("its-not-confidential"), WithBufferedTraffic("Something", "This is something longer")), - readyDeploy(deploy(t, "foo", "pa-not-ready")), + readyDeploy(deploy(t, "foo", "pa-not-ready", withContainerStatuses([]v1.ContainerStatuses{ + {Name: "pa-not-ready", + ImageDigest: "busybox@sha256:deadbeef"}, + }))), image("foo", "pa-not-ready"), }, WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ Object: rev("foo", "pa-not-ready", - WithLogURL, MarkRevisionReady, WithContainerStatuses([]v1.ContainerStatuses{{Name: "pa-not-ready"}}), + WithLogURL, MarkRevisionReady, withContainerStatuses([]v1.ContainerStatuses{ + {Name: "pa-not-ready", + ImageDigest: "busybox@sha256:deadbeef"}, + }), withK8sServiceName("its-not-confidential"), // When we reconcile a ready state and our pa is in an activating // state, we should see the following mutation. @@ -289,15 +363,24 @@ func TestReconcile(t *testing.T) { // Test propagating the inactivity signal from the pa to the Revision. 
Objects: []runtime.Object{ rev("foo", "pa-inactive", - withK8sServiceName("something-in-the-way"), WithLogURL, MarkRevisionReady), + withK8sServiceName("something-in-the-way"), WithLogURL, MarkRevisionReady, withContainerStatuses([]v1.ContainerStatuses{ + {Name: "pa-inactive", + ImageDigest: "busybox@sha256:deadbeef"}, + })), pa("foo", "pa-inactive", WithNoTraffic("NoTraffic", "This thing is inactive.")), - readyDeploy(deploy(t, "foo", "pa-inactive")), + readyDeploy(deploy(t, "foo", "pa-inactive", withContainerStatuses([]v1.ContainerStatuses{ + {Name: "pa-inactive", + ImageDigest: "busybox@sha256:deadbeef"}, + }))), image("foo", "pa-inactive"), }, WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ Object: rev("foo", "pa-inactive", - WithLogURL, MarkRevisionReady, WithContainerStatuses([]v1.ContainerStatuses{{Name: "pa-inactive"}}), + WithLogURL, MarkRevisionReady, withContainerStatuses([]v1.ContainerStatuses{ + {Name: "pa-inactive", + ImageDigest: "busybox@sha256:deadbeef"}, + }), // When we reconcile an "all ready" revision when the PA // is inactive, we should see the following change. MarkInactive("NoTraffic", "This thing is inactive.")), @@ -308,12 +391,18 @@ func TestReconcile(t *testing.T) { // Test propagating the inactivity signal from the pa to the Revision. // But propagate the service name. 
Objects: []runtime.Object{ - rev("foo", "pa-inactive", + rev("foo", "pa-inactive", withContainerStatuses([]v1.ContainerStatuses{ + {Name: "pa-inactive", + ImageDigest: "busybox@sha256:deadbeef"}, + }), withK8sServiceName("here-comes-the-sun"), WithLogURL, MarkRevisionReady), pa("foo", "pa-inactive", WithNoTraffic("NoTraffic", "This thing is inactive."), WithPAStatusService("pa-inactive-svc")), - readyDeploy(deploy(t, "foo", "pa-inactive")), + readyDeploy(deploy(t, "foo", "pa-inactive", withContainerStatuses([]v1.ContainerStatuses{ + {Name: "pa-inactive", + ImageDigest: "busybox@sha256:deadbeef"}, + }))), image("foo", "pa-inactive"), }, WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ @@ -322,7 +411,10 @@ func TestReconcile(t *testing.T) { withK8sServiceName("pa-inactive-svc"), // When we reconcile an "all ready" revision when the PA // is inactive, we should see the following change. - MarkInactive("NoTraffic", "This thing is inactive."), WithContainerStatuses([]v1.ContainerStatuses{{Name: "pa-inactive"}})), + MarkInactive("NoTraffic", "This thing is inactive."), withContainerStatuses([]v1.ContainerStatuses{ + {Name: "pa-inactive", + ImageDigest: "busybox@sha256:deadbeef"}, + })), }}, Key: "foo/pa-inactive", }, { @@ -332,10 +424,16 @@ func TestReconcile(t *testing.T) { // Protocol type is the only thing that can be changed on PA Objects: []runtime.Object{ rev("foo", "fix-mutated-pa", - withK8sServiceName("ill-follow-the-sun"), WithLogURL, MarkRevisionReady), + withK8sServiceName("ill-follow-the-sun"), WithLogURL, MarkRevisionReady, withContainerStatuses([]v1.ContainerStatuses{ + {Name: "fix-mutated-pa", + ImageDigest: "busybox@sha256:deadbeef"}, + })), pa("foo", "fix-mutated-pa", WithProtocolType(networking.ProtocolH2C), WithTraffic, WithPAStatusService("fix-mutated-pa")), - deploy(t, "foo", "fix-mutated-pa"), + deploy(t, "foo", "fix-mutated-pa", withContainerStatuses([]v1.ContainerStatuses{ + {Name: "fix-mutated-pa", + ImageDigest: 
"busybox@sha256:deadbeef"}, + })), image("foo", "fix-mutated-pa"), }, WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ @@ -343,7 +441,10 @@ func TestReconcile(t *testing.T) { WithLogURL, AllUnknownConditions, // When our reconciliation has to change the service // we should see the following mutations to status. - withK8sServiceName("fix-mutated-pa"), WithLogURL, MarkRevisionReady, WithContainerStatuses([]v1.ContainerStatuses{{Name: "fix-mutated-pa"}})), + withK8sServiceName("fix-mutated-pa"), WithLogURL, MarkRevisionReady, withContainerStatuses([]v1.ContainerStatuses{ + {Name: "fix-mutated-pa", + ImageDigest: "busybox@sha256:deadbeef"}, + })), }}, WantUpdates: []clientgotesting.UpdateActionImpl{{ Object: pa("foo", "fix-mutated-pa", WithTraffic, @@ -356,9 +457,15 @@ func TestReconcile(t *testing.T) { Objects: []runtime.Object{ rev("foo", "fix-mutated-pa-fail", withK8sServiceName("some-old-stuff"), - WithLogURL, AllUnknownConditions, WithContainerStatuses([]v1.ContainerStatuses{{Name: "fix-mutated-pa-fail"}})), + WithLogURL, AllUnknownConditions, withContainerStatuses([]v1.ContainerStatuses{ + {Name: "fix-mutated-pa-fail", + ImageDigest: "busybox@sha256:deadbeef"}, + })), pa("foo", "fix-mutated-pa-fail", WithProtocolType(networking.ProtocolH2C), WithReachability(asv1a1.ReachabilityUnknown)), - deploy(t, "foo", "fix-mutated-pa-fail"), + deploy(t, "foo", "fix-mutated-pa-fail", withContainerStatuses([]v1.ContainerStatuses{ + {Name: "fix-mutated-pa-fail", + ImageDigest: "busybox@sha256:deadbeef"}, + })), image("foo", "fix-mutated-pa-fail"), }, WantErr: true, @@ -381,9 +488,15 @@ func TestReconcile(t *testing.T) { // status of the Revision. 
Objects: []runtime.Object{ rev("foo", "deploy-timeout", - withK8sServiceName("the-taxman"), WithLogURL, MarkActive), + withK8sServiceName("the-taxman"), WithLogURL, MarkActive, withContainerStatuses([]v1.ContainerStatuses{ + {Name: "deploy-timeout", + ImageDigest: "busybox@sha256:deadbeef"}, + })), pa("foo", "deploy-timeout"), // pa can't be ready since deployment times out. - timeoutDeploy(deploy(t, "foo", "deploy-timeout"), "I timed out!"), + timeoutDeploy(deploy(t, "foo", "deploy-timeout", withContainerStatuses([]v1.ContainerStatuses{ + {Name: "deploy-timeout", + ImageDigest: "busybox@sha256:deadbeef"}, + })), "I timed out!"), image("foo", "deploy-timeout"), }, WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ @@ -391,7 +504,10 @@ func TestReconcile(t *testing.T) { WithLogURL, AllUnknownConditions, // When the revision is reconciled after a Deployment has // timed out, we should see it marked with the PDE state. - MarkProgressDeadlineExceeded("I timed out!"), WithContainerStatuses([]v1.ContainerStatuses{{Name: "deploy-timeout"}})), + MarkProgressDeadlineExceeded("I timed out!"), withContainerStatuses([]v1.ContainerStatuses{ + {Name: "deploy-timeout", + ImageDigest: "busybox@sha256:deadbeef"}, + })), }}, Key: "foo/deploy-timeout", }, { @@ -402,9 +518,15 @@ func TestReconcile(t *testing.T) { // It then verifies that Reconcile propagates this into the status of the Revision. 
Objects: []runtime.Object{ rev("foo", "deploy-replica-failure", - withK8sServiceName("the-taxman"), WithLogURL, MarkActive), + withK8sServiceName("the-taxman"), WithLogURL, MarkActive, withContainerStatuses([]v1.ContainerStatuses{ + {Name: "deploy-replica-failure", + ImageDigest: "busybox@sha256:deadbeef"}, + })), pa("foo", "deploy-replica-failure"), - replicaFailureDeploy(deploy(t, "foo", "deploy-replica-failure"), "I replica failed!"), + replicaFailureDeploy(deploy(t, "foo", "deploy-replica-failure", withContainerStatuses([]v1.ContainerStatuses{ + {Name: "deploy-replica-failure", + ImageDigest: "busybox@sha256:deadbeef"}, + })), "I replica failed!"), image("foo", "deploy-replica-failure"), }, WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ @@ -412,150 +534,217 @@ func TestReconcile(t *testing.T) { WithLogURL, AllUnknownConditions, // When the revision is reconciled after a Deployment has // timed out, we should see it marked with the FailedCreate state. - MarkResourcesUnavailable("FailedCreate", "I replica failed!"), WithContainerStatuses([]v1.ContainerStatuses{{Name: "deploy-replica-failure"}})), + MarkResourcesUnavailable("FailedCreate", "I replica failed!"), withContainerStatuses([]v1.ContainerStatuses{ + {Name: "deploy-replica-failure", + ImageDigest: "busybox@sha256:deadbeef"}, + })), }}, Key: "foo/deploy-replica-failure", - }, - { - Name: "surface ImagePullBackoff", - // Test the propagation of ImagePullBackoff from user container. - Objects: []runtime.Object{ - rev("foo", "pull-backoff", - withK8sServiceName("the-taxman"), WithLogURL, MarkActivating("Deploying", ""), WithContainerStatuses([]v1.ContainerStatuses{{Name: "pull-backoff"}})), - pa("foo", "pull-backoff", WithReachability(asv1a1.ReachabilityUnknown)), // pa can't be ready since deployment times out. 
- pod(t, "foo", "pull-backoff", WithWaitingContainer("pull-backoff", "ImagePullBackoff", "can't pull it")), - timeoutDeploy(deploy(t, "foo", "pull-backoff"), "Timed out!"), - image("foo", "pull-backoff"), - }, - WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ - Object: rev("foo", "pull-backoff", - WithLogURL, AllUnknownConditions, - MarkResourcesUnavailable("ImagePullBackoff", "can't pull it"), WithContainerStatuses([]v1.ContainerStatuses{{Name: "pull-backoff"}})), - }}, - WantUpdates: []clientgotesting.UpdateActionImpl{{ - Object: pa("foo", "pull-backoff", WithReachability(asv1a1.ReachabilityUnreachable)), - }}, - Key: "foo/pull-backoff", - }, { - Name: "surface pod errors", - // Test the propagation of the termination state of a Pod into the revision. - // This initializes the world to the stable state after its first reconcile, - // but changes the user deployment to have a failing pod. It then verifies - // that Reconcile propagates this into the status of the Revision. - Objects: []runtime.Object{ - rev("foo", "pod-error", - withK8sServiceName("a-pod-error"), WithLogURL, AllUnknownConditions, MarkActive), - pa("foo", "pod-error"), // PA can't be ready, since no traffic. - pod(t, "foo", "pod-error", WithFailingContainer("pod-error", 5, "I failed man!")), - deploy(t, "foo", "pod-error"), - image("foo", "pod-error"), - }, - WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ - Object: rev("foo", "pod-error", - WithLogURL, AllUnknownConditions, MarkContainerExiting(5, - v1.RevisionContainerExitingMessage("I failed man!")), WithContainerStatuses([]v1.ContainerStatuses{{Name: "pod-error"}})), - }}, - Key: "foo/pod-error", - }, { - Name: "surface pod schedule errors", - // Test the propagation of the scheduling errors of Pod into the revision. - // This initializes the world to unschedule pod. It then verifies - // that Reconcile propagates this into the status of the Revision. 
- Objects: []runtime.Object{ - rev("foo", "pod-schedule-error", - withK8sServiceName("a-pod-schedule-error"), WithLogURL, AllUnknownConditions, MarkActive), - pa("foo", "pod-schedule-error"), // PA can't be ready, since no traffic. - pod(t, "foo", "pod-schedule-error", WithUnschedulableContainer("Insufficient energy", "Unschedulable")), - deploy(t, "foo", "pod-schedule-error"), - image("foo", "pod-schedule-error"), - }, - WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ - Object: rev("foo", "pod-schedule-error", - WithLogURL, AllUnknownConditions, MarkResourcesUnavailable("Insufficient energy", - "Unschedulable"), WithContainerStatuses([]v1.ContainerStatuses{{Name: "pod-schedule-error"}})), - }}, - Key: "foo/pod-schedule-error", - }, { - Name: "ready steady state", - // Test the transition that Reconcile makes when Endpoints become ready on the - // SKS owned services, which is signalled by pa having service name. - // This puts the world into the stable post-reconcile state for an Active - // Revision. It then creates an Endpoints resource with active subsets. - // This signal should make our Reconcile mark the Revision as Ready. 
- Objects: []runtime.Object{ - rev("foo", "steady-ready", withK8sServiceName("very-steady"), WithLogURL), - pa("foo", "steady-ready", WithTraffic, WithPAStatusService("steadier-even")), - deploy(t, "foo", "steady-ready"), - image("foo", "steady-ready"), - }, - WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ - Object: rev("foo", "steady-ready", withK8sServiceName("steadier-even"), WithLogURL, - // All resources are ready to go, we should see the revision being - // marked ready - MarkRevisionReady, WithContainerStatuses([]v1.ContainerStatuses{{Name: "steady-ready"}})), - }}, - WantEvents: []string{ - Eventf(corev1.EventTypeNormal, "RevisionReady", "Revision becomes ready upon all resources being ready"), - }, - Key: "foo/steady-ready", - }, { - Name: "lost pa owner ref", - WantErr: true, - Objects: []runtime.Object{ - rev("foo", "missing-owners", withK8sServiceName("lesser-revision"), WithLogURL, - MarkRevisionReady), - pa("foo", "missing-owners", WithTraffic, WithPodAutoscalerOwnersRemoved), - deploy(t, "foo", "missing-owners"), - image("foo", "missing-owners"), - }, - WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ - Object: rev("foo", "missing-owners", withK8sServiceName("lesser-revision"), WithLogURL, - MarkRevisionReady, - // When we're missing the OwnerRef for PodAutoscaler we see this update. 
- MarkResourceNotOwned("PodAutoscaler", "missing-owners"), WithContainerStatuses([]v1.ContainerStatuses{{Name: "missing-owners"}})), - }}, - WantEvents: []string{ - Eventf(corev1.EventTypeWarning, "InternalError", `revision: "missing-owners" does not own PodAutoscaler: "missing-owners"`), - }, - Key: "foo/missing-owners", - }, { - Name: "lost deployment owner ref", - WantErr: true, - Objects: []runtime.Object{ - rev("foo", "missing-owners", withK8sServiceName("youre-gonna-lose"), WithLogURL, - MarkRevisionReady, WithContainerStatuses([]v1.ContainerStatuses{{Name: "missing-owners"}})), - pa("foo", "missing-owners", WithTraffic), - noOwner(deploy(t, "foo", "missing-owners")), - image("foo", "missing-owners"), - }, - WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ - Object: rev("foo", "missing-owners", withK8sServiceName("youre-gonna-lose"), WithLogURL, - MarkRevisionReady, - // When we're missing the OwnerRef for Deployment we see this update. - MarkResourceNotOwned("Deployment", "missing-owners-deployment"), WithContainerStatuses([]v1.ContainerStatuses{{Name: "missing-owners"}})), - }}, - WantEvents: []string{ - Eventf(corev1.EventTypeWarning, "InternalError", `revision: "missing-owners" does not own Deployment: "missing-owners-deployment"`), - }, - Key: "foo/missing-owners", - }, { - Name: "image pull secrets", - // This test case tests that the image pull secrets from revision propagate to deployment and image - Objects: []runtime.Object{ - rev("foo", "image-pull-secrets", WithImagePullSecrets("foo-secret")), - }, - WantCreates: []runtime.Object{ - pa("foo", "image-pull-secrets"), - deployImagePullSecrets(deploy(t, "foo", "image-pull-secrets"), "foo-secret"), - imagePullSecrets(image("foo", "image-pull-secrets"), "foo-secret"), - }, - WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ - Object: rev("foo", "image-pull-secrets", - WithImagePullSecrets("foo-secret"), - WithLogURL, AllUnknownConditions, MarkDeploying("Deploying"), 
WithContainerStatuses([]v1.ContainerStatuses{{Name: "image-pull-secrets"}})), - }}, - Key: "foo/image-pull-secrets", - }} + }, { + Name: "surface ImagePullBackoff", + // Test the propagation of ImagePullBackoff from user container. + Objects: []runtime.Object{ + rev("foo", "pull-backoff", + withK8sServiceName("the-taxman"), WithLogURL, MarkActivating("Deploying", ""), withContainerStatuses([]v1.ContainerStatuses{ + {Name: "pull-backoff", + ImageDigest: "busybox@sha256:deadbeef"}, + })), + pa("foo", "pull-backoff", WithReachability(asv1a1.ReachabilityUnknown)), // pa can't be ready since deployment times out. + pod(t, "foo", "pull-backoff", WithWaitingContainer("pull-backoff", "ImagePullBackoff", "can't pull it")), + timeoutDeploy(deploy(t, "foo", "pull-backoff", withContainerStatuses([]v1.ContainerStatuses{ + {Name: "pull-backoff", + ImageDigest: "busybox@sha256:deadbeef"}, + })), "Timed out!"), + image("foo", "pull-backoff"), + }, + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: rev("foo", "pull-backoff", + WithLogURL, AllUnknownConditions, + MarkResourcesUnavailable("ImagePullBackoff", "can't pull it"), withContainerStatuses([]v1.ContainerStatuses{ + {Name: "pull-backoff", + ImageDigest: "busybox@sha256:deadbeef"}, + })), + }}, + WantUpdates: []clientgotesting.UpdateActionImpl{{ + Object: pa("foo", "pull-backoff", WithReachability(asv1a1.ReachabilityUnreachable)), + }}, + Key: "foo/pull-backoff", + }, { + Name: "surface pod errors", + // Test the propagation of the termination state of a Pod into the revision. + // This initializes the world to the stable state after its first reconcile, + // but changes the user deployment to have a failing pod. It then verifies + // that Reconcile propagates this into the status of the Revision. 
+ Objects: []runtime.Object{ + rev("foo", "pod-error", + withK8sServiceName("a-pod-error"), WithLogURL, AllUnknownConditions, MarkActive, withContainerStatuses([]v1.ContainerStatuses{ + {Name: "pod-error", + ImageDigest: "busybox@sha256:deadbeef"}, + })), + pa("foo", "pod-error"), // PA can't be ready, since no traffic. + pod(t, "foo", "pod-error", WithFailingContainer("pod-error", 5, "I failed man!")), + deploy(t, "foo", "pod-error", withContainerStatuses([]v1.ContainerStatuses{ + {Name: "pod-error", + ImageDigest: "busybox@sha256:deadbeef"}, + })), + image("foo", "pod-error"), + }, + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: rev("foo", "pod-error", + WithLogURL, AllUnknownConditions, MarkContainerExiting(5, + v1.RevisionContainerExitingMessage("I failed man!")), withContainerStatuses([]v1.ContainerStatuses{ + {Name: "pod-error", + ImageDigest: "busybox@sha256:deadbeef"}, + })), + }}, + Key: "foo/pod-error", + }, { + Name: "surface pod schedule errors", + // Test the propagation of the scheduling errors of Pod into the revision. + // This initializes the world to unschedule pod. It then verifies + // that Reconcile propagates this into the status of the Revision. + Objects: []runtime.Object{ + rev("foo", "pod-schedule-error", + withK8sServiceName("a-pod-schedule-error"), WithLogURL, AllUnknownConditions, MarkActive, withContainerStatuses([]v1.ContainerStatuses{ + {Name: "pod-schedule-error", + ImageDigest: "busybox@sha256:deadbeef"}, + })), + pa("foo", "pod-schedule-error"), // PA can't be ready, since no traffic. 
+ pod(t, "foo", "pod-schedule-error", WithUnschedulableContainer("Insufficient energy", "Unschedulable")), + deploy(t, "foo", "pod-schedule-error", withContainerStatuses([]v1.ContainerStatuses{ + {Name: "pod-schedule-error", + ImageDigest: "busybox@sha256:deadbeef"}, + })), + image("foo", "pod-schedule-error"), + }, + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: rev("foo", "pod-schedule-error", + WithLogURL, AllUnknownConditions, MarkResourcesUnavailable("Insufficient energy", + "Unschedulable"), withContainerStatuses([]v1.ContainerStatuses{ + {Name: "pod-schedule-error", + ImageDigest: "busybox@sha256:deadbeef"}, + })), + }}, + Key: "foo/pod-schedule-error", + }, { + Name: "ready steady state", + // Test the transition that Reconcile makes when Endpoints become ready on the + // SKS owned services, which is signalled by pa having service name. + // This puts the world into the stable post-reconcile state for an Active + // Revision. It then creates an Endpoints resource with active subsets. + // This signal should make our Reconcile mark the Revision as Ready. 
+ Objects: []runtime.Object{ + rev("foo", "steady-ready", withK8sServiceName("very-steady"), WithLogURL, + withContainerStatuses([]v1.ContainerStatuses{ + {Name: "steady-ready", + ImageDigest: "busybox@sha256:deadbeef"}, + })), + pa("foo", "steady-ready", WithTraffic, WithPAStatusService("steadier-even")), + deploy(t, "foo", "steady-ready", withContainerStatuses([]v1.ContainerStatuses{ + {Name: "steady-ready", + ImageDigest: "busybox@sha256:deadbeef"}, + })), + image("foo", "steady-ready"), + }, + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: rev("foo", "steady-ready", withK8sServiceName("steadier-even"), WithLogURL, + // All resources are ready to go, we should see the revision being + // marked ready + MarkRevisionReady, withContainerStatuses([]v1.ContainerStatuses{ + {Name: "steady-ready", + ImageDigest: "busybox@sha256:deadbeef"}, + })), + }}, + WantEvents: []string{ + Eventf(corev1.EventTypeNormal, "RevisionReady", "Revision becomes ready upon all resources being ready"), + }, + Key: "foo/steady-ready", + }, { + Name: "lost pa owner ref", + WantErr: true, + Objects: []runtime.Object{ + rev("foo", "missing-owners", withK8sServiceName("lesser-revision"), WithLogURL, + MarkRevisionReady, withContainerStatuses([]v1.ContainerStatuses{ + {Name: "missing-owners", + ImageDigest: "busybox@sha256:deadbeef"}, + })), + pa("foo", "missing-owners", WithTraffic, WithPodAutoscalerOwnersRemoved), + deploy(t, "foo", "missing-owners", withContainerStatuses([]v1.ContainerStatuses{ + {Name: "missing-owners", + ImageDigest: "busybox@sha256:deadbeef"}, + })), + image("foo", "missing-owners"), + }, + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: rev("foo", "missing-owners", withK8sServiceName("lesser-revision"), WithLogURL, + MarkRevisionReady, + // When we're missing the OwnerRef for PodAutoscaler we see this update. 
+ MarkResourceNotOwned("PodAutoscaler", "missing-owners"), withContainerStatuses([]v1.ContainerStatuses{ + {Name: "missing-owners", + ImageDigest: "busybox@sha256:deadbeef"}, + })), + }}, + WantEvents: []string{ + Eventf(corev1.EventTypeWarning, "InternalError", `revision: "missing-owners" does not own PodAutoscaler: "missing-owners"`), + }, + Key: "foo/missing-owners", + }, { + Name: "lost deployment owner ref", + WantErr: true, + Objects: []runtime.Object{ + rev("foo", "missing-owners", withK8sServiceName("youre-gonna-lose"), WithLogURL, + MarkRevisionReady, withContainerStatuses([]v1.ContainerStatuses{ + {Name: "missing-owners", + ImageDigest: "busybox@sha256:deadbeef"}, + })), + pa("foo", "missing-owners", WithTraffic), + noOwner(deploy(t, "foo", "missing-owners", withContainerStatuses([]v1.ContainerStatuses{ + {Name: "missing-owners", + ImageDigest: "busybox@sha256:deadbeef"}, + }))), + image("foo", "missing-owners"), + }, + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: rev("foo", "missing-owners", withK8sServiceName("youre-gonna-lose"), WithLogURL, + MarkRevisionReady, + // When we're missing the OwnerRef for Deployment we see this update. 
+ MarkResourceNotOwned("Deployment", "missing-owners-deployment"), withContainerStatuses([]v1.ContainerStatuses{ + {Name: "missing-owners", + ImageDigest: "busybox@sha256:deadbeef"}, + })), + }}, + WantEvents: []string{ + Eventf(corev1.EventTypeWarning, "InternalError", `revision: "missing-owners" does not own Deployment: "missing-owners-deployment"`), + }, + Key: "foo/missing-owners", + }, { + Name: "image pull secrets", + // This test case tests that the image pull secrets from revision propagate to deployment and image + Objects: []runtime.Object{ + rev("foo", "image-pull-secrets", WithImagePullSecrets("foo-secret"), + withContainerStatuses([]v1.ContainerStatuses{ + {Name: "image-pull-secrets", + ImageDigest: "busybox@sha256:deadbeef"}, + })), + }, + WantCreates: []runtime.Object{ + pa("foo", "image-pull-secrets"), + deployImagePullSecrets(deploy(t, "foo", "image-pull-secrets", withContainerStatuses([]v1.ContainerStatuses{ + {Name: "image-pull-secrets", + ImageDigest: "busybox@sha256:deadbeef"}, + })), "foo-secret"), + imagePullSecrets(image("foo", "image-pull-secrets"), "foo-secret"), + }, + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: rev("foo", "image-pull-secrets", + WithImagePullSecrets("foo-secret"), + WithLogURL, AllUnknownConditions, MarkDeploying("Deploying"), withContainerStatuses([]v1.ContainerStatuses{ + {Name: "image-pull-secrets", + ImageDigest: "busybox@sha256:deadbeef"}, + })), + }}, + Key: "foo/image-pull-secrets", + }} table.Test(t, MakeFactory(func(ctx context.Context, listers *Listers, cmw configmap.Watcher) controller.Reconciler { r := &Reconciler{ @@ -661,6 +850,13 @@ func withK8sServiceName(sn string) RevisionOption { } } +// withContainerStatuses sets the .Status.ContainerStatuses to the Revision. 
+func withContainerStatuses(containerStatus []v1.ContainerStatuses) RevisionOption { + return func(r *v1.Revision) { + r.Status.ContainerStatuses = containerStatus + } +} + // TODO(mattmoor): Come up with a better name for this. func AllUnknownConditions(r *v1.Revision) { WithInitRevConditions(r) @@ -706,7 +902,7 @@ func image(namespace, name string, co ...configOption) *caching.Image { opt(config) } - return resources.MakeImageCache(rev(namespace, name), name, "") + return resources.MakeImageCache(rev(namespace, name), name, "busybox@sha256:deadbeef") } func pa(namespace, name string, ko ...PodAutoscalerOption) *asv1a1.PodAutoscaler { diff --git a/pkg/testing/v1/revision.go b/pkg/testing/v1/revision.go index 5e2efc9101a8..572e99079587 100644 --- a/pkg/testing/v1/revision.go +++ b/pkg/testing/v1/revision.go @@ -182,10 +182,3 @@ func WithRevisionLabel(key, value string) RevisionOption { config.Labels[key] = value } } - -// WithContainerStatuses sets the .Status.ContainerStatuses to the Revision. 
-func WithContainerStatuses(containerStatus []v1.ContainerStatuses) RevisionOption { - return func(r *v1.Revision) { - r.Status.ContainerStatuses = containerStatus - } -} diff --git a/pkg/webhook/podspec_dryrun.go b/pkg/webhook/podspec_dryrun.go index 7e8234782a12..9cae284127cf 100644 --- a/pkg/webhook/podspec_dryrun.go +++ b/pkg/webhook/podspec_dryrun.go @@ -58,9 +58,9 @@ func validatePodSpec(ctx context.Context, ps v1.RevisionSpec, namespace string, } rev.SetDefaults(ctx) userContainer := resources.BuildUserContainer(rev) - podSpec := resources.BuildPodSpec(rev, []corev1.Container{*userContainer}) + podSpec := resources.BuildPodSpec(rev, userContainer) - // Make a dummy pod with the template Revions & PodSpec and dryrun call to API-server + // Make a dummy pod with the template Revisions & PodSpec and dryrun call to API-server pod := &corev1.Pod{ ObjectMeta: om, Spec: *podSpec, From 8ac6e95262a56a021f3261533d997c1da5377441 Mon Sep 17 00:00:00 2001 From: savitaashture Date: Sat, 2 May 2020 22:04:57 +0530 Subject: [PATCH 02/15] code refactor and added lock to make structure thread safe --- pkg/reconciler/revision/resources/deploy.go | 22 ++++++++++++--------- pkg/reconciler/revision/revision.go | 2 +- 2 files changed, 14 insertions(+), 10 deletions(-) diff --git a/pkg/reconciler/revision/resources/deploy.go b/pkg/reconciler/revision/resources/deploy.go index 29342688d727..e74488785166 100644 --- a/pkg/reconciler/revision/resources/deploy.go +++ b/pkg/reconciler/revision/resources/deploy.go @@ -138,26 +138,31 @@ func makePodSpec(rev *v1.Revision, loggingConfig *logging.Config, tracingConfig // BuildUserContainer makes a container from the Revision template. 
func BuildUserContainer(rev *v1.Revision) []corev1.Container { - containers := []corev1.Container{} + containers := make([]corev1.Container, 0, len(rev.Spec.PodSpec.Containers)) for i := range rev.Spec.PodSpec.Containers { if len(rev.Spec.PodSpec.Containers[i].Ports) != 0 || len(rev.Spec.PodSpec.Containers) == 1 { servingContainer := makeServingContainer(rev.Spec.GetContainer().DeepCopy(), rev) - updateImage(rev, &servingContainer) - containers = append(containers, servingContainer) + containers = appendContainer(rev, containers, servingContainer) } else { - multiContainer := makeContainer(rev.Spec.PodSpec.Containers[i].DeepCopy(), rev) - updateImage(rev, &multiContainer) - containers = append(containers, multiContainer) + nonServingContainer := makeContainer(rev.Spec.PodSpec.Containers[i].DeepCopy(), rev) + containers = appendContainer(rev, containers, nonServingContainer) } } return containers } +func appendContainer(rev *v1.Revision, containers []corev1.Container, container corev1.Container) []corev1.Container { + updateImage(rev, &container) + containers = append(containers, container) + return containers +} + func updateImage(rev *v1.Revision, container *corev1.Container) { for _, digest := range rev.Status.ContainerStatuses { if digest.Name == container.Name { container.Image = digest.ImageDigest + return } } } @@ -182,8 +187,7 @@ func makeContainer(container *corev1.Container, rev *v1.Revision) corev1.Contain func makeServingContainer(servingContainer *corev1.Container, rev *v1.Revision) corev1.Container { userPort := getUserPort(rev) - userPortInt := int(userPort) - userPortStr := strconv.Itoa(userPortInt) + userPortStr := strconv.Itoa(int(userPort)) // Replacement is safe as only up to a single port is allowed on the Revision userContainer.Ports = buildContainerPorts(userPort) userContainer.Env = append(userContainer.Env, buildUserPortEnv(userPortStr)) @@ -216,7 +220,7 @@ func makeServingContainer(servingContainer *corev1.Container, rev *v1.Revision) } } 
// If the client provides probes, we should fill in the port for them. - rewriteUserProbe(container.LivenessProbe, userPortInt) + rewriteUserProbe(container.LivenessProbe, int(userPort)) return container } diff --git a/pkg/reconciler/revision/revision.go b/pkg/reconciler/revision/revision.go index 47546b435ddb..b8b16993bd6c 100644 --- a/pkg/reconciler/revision/revision.go +++ b/pkg/reconciler/revision/revision.go @@ -21,10 +21,10 @@ import ( "errors" "fmt" "strings" + "sync" "github.com/google/go-containerregistry/pkg/authn/k8schain" "golang.org/x/sync/errgroup" - corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/client-go/kubernetes" From de804ef63e74afa71624afe6fa2a3764c4dcd88d Mon Sep 17 00:00:00 2001 From: savitaashture Date: Sun, 3 May 2020 00:37:37 +0530 Subject: [PATCH 03/15] fix review comment --- pkg/reconciler/revision/resources/deploy.go | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/pkg/reconciler/revision/resources/deploy.go b/pkg/reconciler/revision/resources/deploy.go index e74488785166..534866f1d675 100644 --- a/pkg/reconciler/revision/resources/deploy.go +++ b/pkg/reconciler/revision/resources/deploy.go @@ -142,10 +142,10 @@ func BuildUserContainer(rev *v1.Revision) []corev1.Container { for i := range rev.Spec.PodSpec.Containers { if len(rev.Spec.PodSpec.Containers[i].Ports) != 0 || len(rev.Spec.PodSpec.Containers) == 1 { - servingContainer := makeServingContainer(rev.Spec.GetContainer().DeepCopy(), rev) + servingContainer := makeServingContainer(*rev.Spec.GetContainer(), rev) containers = appendContainer(rev, containers, servingContainer) } else { - nonServingContainer := makeContainer(rev.Spec.PodSpec.Containers[i].DeepCopy(), rev) + nonServingContainer := makeContainer(rev.Spec.PodSpec.Containers[i], rev) containers = appendContainer(rev, containers, nonServingContainer) } } @@ -167,7 +167,7 @@ func updateImage(rev *v1.Revision, container *corev1.Container) { } } -func makeContainer(container 
*corev1.Container, rev *v1.Revision) corev1.Container { +func makeContainer(container corev1.Container, rev *v1.Revision) corev1.Container { // Adding or removing an overwritten corev1.Container field here? Don't forget to // update the fieldmasks / validations in pkg/apis/serving varLogMount := varLogVolumeMount.DeepCopy() @@ -182,10 +182,10 @@ func makeContainer(container *corev1.Container, rev *v1.Revision) corev1.Contain if container.TerminationMessagePolicy == "" { container.TerminationMessagePolicy = corev1.TerminationMessageFallbackToLogsOnError } - return *container + return container } -func makeServingContainer(servingContainer *corev1.Container, rev *v1.Revision) corev1.Container { +func makeServingContainer(servingContainer corev1.Container, rev *v1.Revision) corev1.Container { userPort := getUserPort(rev) userPortStr := strconv.Itoa(int(userPort)) // Replacement is safe as only up to a single port is allowed on the Revision From e96a3aaefec59ce34c24ff2a1ef75a9aefb330f0 Mon Sep 17 00:00:00 2001 From: savitaashture Date: Tue, 5 May 2020 19:10:48 +0530 Subject: [PATCH 04/15] fix review comments --- pkg/apis/serving/v1/revision_helpers.go | 5 +- .../serving/v1alpha1/revision_lifecycle.go | 5 +- pkg/reconciler/revision/resources/deploy.go | 16 ++--- .../revision/resources/deploy_test.go | 63 ++++++++++--------- pkg/reconciler/revision/revision.go | 2 +- pkg/webhook/podspec_dryrun.go | 2 +- 6 files changed, 48 insertions(+), 45 deletions(-) diff --git a/pkg/apis/serving/v1/revision_helpers.go b/pkg/apis/serving/v1/revision_helpers.go index 7986e4a4598b..2a9d178b6ddb 100644 --- a/pkg/apis/serving/v1/revision_helpers.go +++ b/pkg/apis/serving/v1/revision_helpers.go @@ -69,8 +69,9 @@ func (e LastPinnedParseError) Error() string { } // GetContainer returns a pointer to the relevant corev1.Container field. -// It is never nil and should be exactly the specified container as guaranteed -// by validation. 
+// It is never nil and should be exactly the specified container if len(containers) == 1 or +// If there are multiple containers it returns the container which has Ports +// as guaranteed by validation. func (rs *RevisionSpec) GetContainer() *corev1.Container { switch { case len(rs.Containers) == 1: diff --git a/pkg/apis/serving/v1alpha1/revision_lifecycle.go b/pkg/apis/serving/v1alpha1/revision_lifecycle.go index b95810079b66..a18c0022195f 100644 --- a/pkg/apis/serving/v1alpha1/revision_lifecycle.go +++ b/pkg/apis/serving/v1alpha1/revision_lifecycle.go @@ -70,8 +70,9 @@ func (r *Revision) GetGroupVersionKind() schema.GroupVersionKind { } // GetContainer returns a pointer to the relevant corev1.Container field. -// It is never nil and should be exactly the specified container as guaranteed -// by validation. +// It is never nil and should be exactly the specified container if len(containers) == 1 or +// If there are multiple containers it returns the container which has Ports +// as guaranteed by validation. func (rs *RevisionSpec) GetContainer() *corev1.Container { if rs.DeprecatedContainer != nil { return rs.DeprecatedContainer diff --git a/pkg/reconciler/revision/resources/deploy.go b/pkg/reconciler/revision/resources/deploy.go index 534866f1d675..49224b165de7 100644 --- a/pkg/reconciler/revision/resources/deploy.go +++ b/pkg/reconciler/revision/resources/deploy.go @@ -126,7 +126,7 @@ func makePodSpec(rev *v1.Revision, loggingConfig *logging.Config, tracingConfig return nil, fmt.Errorf("failed to create queue-proxy container: %w", err) } - container := append(BuildUserContainer(rev), *queueContainer) + container := append(BuildUserContainers(rev), *queueContainer) podSpec := BuildPodSpec(rev, container) if autoscalerConfig.EnableGracefulScaledown { @@ -136,23 +136,23 @@ func makePodSpec(rev *v1.Revision, loggingConfig *logging.Config, tracingConfig return podSpec, nil } -// BuildUserContainer makes a container from the Revision template. 
-func BuildUserContainer(rev *v1.Revision) []corev1.Container { +// BuildUserContainers makes a container from the Revision template. +func BuildUserContainers(rev *v1.Revision) []corev1.Container { containers := make([]corev1.Container, 0, len(rev.Spec.PodSpec.Containers)) + var container corev1.Container for i := range rev.Spec.PodSpec.Containers { if len(rev.Spec.PodSpec.Containers[i].Ports) != 0 || len(rev.Spec.PodSpec.Containers) == 1 { - servingContainer := makeServingContainer(*rev.Spec.GetContainer(), rev) - containers = appendContainer(rev, containers, servingContainer) + container = makeServingContainer(*rev.Spec.GetContainer(), rev) } else { - nonServingContainer := makeContainer(rev.Spec.PodSpec.Containers[i], rev) - containers = appendContainer(rev, containers, nonServingContainer) + container = makeContainer(rev.Spec.PodSpec.Containers[i], rev) } + containers = appendContainers(rev, containers, container) } return containers } -func appendContainer(rev *v1.Revision, containers []corev1.Container, container corev1.Container) []corev1.Container { +func appendContainers(rev *v1.Revision, containers []corev1.Container, container corev1.Container) []corev1.Container { updateImage(rev, &container) containers = append(containers, container) return containers diff --git a/pkg/reconciler/revision/resources/deploy_test.go b/pkg/reconciler/revision/resources/deploy_test.go index e5c911e60c2d..038ea84eeb00 100644 --- a/pkg/reconciler/revision/resources/deploy_test.go +++ b/pkg/reconciler/revision/resources/deploy_test.go @@ -369,7 +369,7 @@ func appsv1deployment(opts ...deploymentOption) *appsv1.Deployment { } func revision(name, ns string, opts ...revisionOption) *v1.Revision { - revision := defaultRevision().DeepCopy() + revision := defaultRevision() revision.ObjectMeta.Name = name revision.ObjectMeta.Namespace = ns for _, option := range opts { @@ -400,7 +400,8 @@ func withOwnerReference(name string) revisionOption { } } -func withPodSpec(containers 
[]corev1.Container) revisionOption { +// withContainers function helps to provide a podSpec to the revision for both single and multiple containers +func withContainers(containers []corev1.Container) revisionOption { return func(revision *v1.Revision) { revision.Spec = v1.RevisionSpec{ PodSpec: corev1.PodSpec{ @@ -446,15 +447,15 @@ func TestMakePodSpec(t *testing.T) { }{{ name: "user-defined user port, queue proxy have PORT env", rev: revision("bar", "foo", - withPodSpec([]corev1.Container{{ + withContainers([]corev1.Container{{ Name: servingContainerName, Image: "busybox", + Ports: []corev1.ContainerPort{{ + ContainerPort: 8888, + }}, }}), withContainerConcurrency(1), func(revision *v1.Revision) { - revision.Spec.GetContainer().Ports = []corev1.ContainerPort{{ - ContainerPort: 8888, - }} container(revision.Spec.GetContainer(), withTCPReadinessProbe(), ) @@ -477,19 +478,19 @@ func TestMakePodSpec(t *testing.T) { }, { name: "volumes passed through", rev: revision("bar", "foo", - withPodSpec([]corev1.Container{{ + withContainers([]corev1.Container{{ Name: servingContainerName, Image: "busybox", - }}), - withContainerConcurrency(1), - func(revision *v1.Revision) { - revision.Spec.GetContainer().Ports = []corev1.ContainerPort{{ + Ports: []corev1.ContainerPort{{ ContainerPort: 8888, - }} - revision.Spec.GetContainer().VolumeMounts = []corev1.VolumeMount{{ + }}, + VolumeMounts: []corev1.VolumeMount{{ Name: "asdf", MountPath: "/asdf", - }} + }}, + }}), + withContainerConcurrency(1), + func(revision *v1.Revision) { container(revision.Spec.GetContainer(), withTCPReadinessProbe(), ) @@ -531,7 +532,7 @@ func TestMakePodSpec(t *testing.T) { }, { name: "concurrency=1 no owner", rev: revision("bar", "foo", - withPodSpec([]corev1.Container{{ + withContainers([]corev1.Container{{ Name: servingContainerName, Image: "busybox", }}), @@ -552,7 +553,7 @@ func TestMakePodSpec(t *testing.T) { }, { name: "concurrency=1 no owner digest resolved", rev: revision("bar", "foo", - 
withPodSpec([]corev1.Container{{ + withContainers([]corev1.Container{{ Name: servingContainerName, Image: "busybox", }}), @@ -582,7 +583,7 @@ func TestMakePodSpec(t *testing.T) { }, { name: "concurrency=1 with owner", rev: revision("bar", "foo", - withPodSpec([]corev1.Container{{ + withContainers([]corev1.Container{{ Name: servingContainerName, Image: "busybox", }}), @@ -605,7 +606,7 @@ func TestMakePodSpec(t *testing.T) { }, { name: "with http readiness probe", rev: revision("bar", "foo", - withPodSpec([]corev1.Container{{ + withContainers([]corev1.Container{{ Name: servingContainerName, Image: "busybox", }}), @@ -628,7 +629,7 @@ func TestMakePodSpec(t *testing.T) { container(revision.Spec.GetContainer(), withTCPReadinessProbe()) }), rev: revision("bar", "foo", - withPodSpec([]corev1.Container{{ + withContainers([]corev1.Container{{ Name: servingContainerName, Image: "busybox", }}), @@ -653,7 +654,7 @@ func TestMakePodSpec(t *testing.T) { }, { name: "with shell readiness probe", rev: revision("bar", "foo", - withPodSpec([]corev1.Container{{ + withContainers([]corev1.Container{{ Name: servingContainerName, Image: "busybox", }}), @@ -674,7 +675,7 @@ func TestMakePodSpec(t *testing.T) { }, { name: "with http liveness probe", rev: revision("bar", "foo", - withPodSpec([]corev1.Container{{ + withContainers([]corev1.Container{{ Name: servingContainerName, Image: "busybox", }}), @@ -709,7 +710,7 @@ func TestMakePodSpec(t *testing.T) { }, { name: "with tcp liveness probe", rev: revision("bar", "foo", - withPodSpec([]corev1.Container{{ + withContainers([]corev1.Container{{ Name: servingContainerName, Image: "busybox", }}), @@ -737,7 +738,7 @@ func TestMakePodSpec(t *testing.T) { }, { name: "with graceful scaledown enabled", rev: revision("bar", "foo", - withPodSpec([]corev1.Container{{ + withContainers([]corev1.Container{{ Name: servingContainerName, Image: "busybox", }}), @@ -764,7 +765,7 @@ func TestMakePodSpec(t *testing.T) { }, { name: "with graceful scaledown enabled 
for multi container", rev: revision("bar", "foo", - withPodSpec([]corev1.Container{{ + withContainers([]corev1.Container{{ Name: servingContainerName, Image: "busybox", Ports: []corev1.ContainerPort{{ @@ -798,7 +799,7 @@ func TestMakePodSpec(t *testing.T) { }, { name: "complex pod spec", rev: revision("bar", "foo", - withPodSpec([]corev1.Container{{ + withContainers([]corev1.Container{{ Name: servingContainerName, Image: "busybox", }}), @@ -843,7 +844,7 @@ func TestMakePodSpec(t *testing.T) { }, { name: "complex pod spec for multiple containers with container data to all containers", rev: revision("bar", "foo", - withPodSpec([]corev1.Container{ + withContainers([]corev1.Container{ { Name: servingContainerName, Image: "busybox", @@ -919,7 +920,7 @@ func TestMakePodSpec(t *testing.T) { }, { name: "complex pod spec for multiple containers with container data only to serving containers", rev: revision("bar", "foo", - withPodSpec([]corev1.Container{ + withContainers([]corev1.Container{ { Name: servingContainerName, Image: "busybox", @@ -1012,7 +1013,7 @@ func TestMakeDeployment(t *testing.T) { }{{ name: "with concurrency=1", rev: revision("bar", "foo", - withPodSpec([]corev1.Container{ + withContainers([]corev1.Container{ { Name: servingContainerName, Image: "busybox", @@ -1033,7 +1034,7 @@ func TestMakeDeployment(t *testing.T) { }, { name: "with owner", rev: revision("bar", "foo", - withPodSpec([]corev1.Container{{ + withContainers([]corev1.Container{{ Name: servingContainerName, Image: "ubuntu", }}), @@ -1053,7 +1054,7 @@ func TestMakeDeployment(t *testing.T) { }), want: appsv1deployment(func(deploy *appsv1.Deployment) { rev: revision("bar", "foo", - withPodSpec([]corev1.Container{{ + withContainers([]corev1.Container{{ Name: servingContainerName, Image: "ubuntu", }}), @@ -1110,7 +1111,7 @@ func TestMakeDeployment(t *testing.T) { func TestProgressDeadlineOverride(t *testing.T) { rev := revision("bar", "foo", - withPodSpec([]corev1.Container{ + 
withContainers([]corev1.Container{ { Name: servingContainerName, Image: "busybox", diff --git a/pkg/reconciler/revision/revision.go b/pkg/reconciler/revision/revision.go index b8b16993bd6c..47546b435ddb 100644 --- a/pkg/reconciler/revision/revision.go +++ b/pkg/reconciler/revision/revision.go @@ -21,10 +21,10 @@ import ( "errors" "fmt" "strings" - "sync" "github.com/google/go-containerregistry/pkg/authn/k8schain" "golang.org/x/sync/errgroup" + corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/client-go/kubernetes" diff --git a/pkg/webhook/podspec_dryrun.go b/pkg/webhook/podspec_dryrun.go index 9cae284127cf..0d680863fa2c 100644 --- a/pkg/webhook/podspec_dryrun.go +++ b/pkg/webhook/podspec_dryrun.go @@ -57,7 +57,7 @@ func validatePodSpec(ctx context.Context, ps v1.RevisionSpec, namespace string, Spec: ps, } rev.SetDefaults(ctx) - userContainer := resources.BuildUserContainer(rev) + userContainer := resources.BuildUserContainers(rev) podSpec := resources.BuildPodSpec(rev, userContainer) // Make a dummy pod with the template Revisions & PodSpec and dryrun call to API-server From 693b3d90e9f33fb744057ff98943c42aceefcce4 Mon Sep 17 00:00:00 2001 From: savitaashture Date: Wed, 6 May 2020 23:45:31 +0530 Subject: [PATCH 05/15] fix some nits --- pkg/apis/serving/v1/revision_helpers.go | 2 +- pkg/apis/serving/v1alpha1/revision_lifecycle.go | 2 +- pkg/reconciler/revision/resources/deploy.go | 11 +++-------- 3 files changed, 5 insertions(+), 10 deletions(-) diff --git a/pkg/apis/serving/v1/revision_helpers.go b/pkg/apis/serving/v1/revision_helpers.go index 2a9d178b6ddb..e3dd2e6b62b3 100644 --- a/pkg/apis/serving/v1/revision_helpers.go +++ b/pkg/apis/serving/v1/revision_helpers.go @@ -70,7 +70,7 @@ func (e LastPinnedParseError) Error() string { // GetContainer returns a pointer to the relevant corev1.Container field. 
// It is never nil and should be exactly the specified container if len(containers) == 1 or -// If there are multiple containers it returns the container which has Ports +// if there are multiple containers it returns the container which has Ports // as guaranteed by validation. func (rs *RevisionSpec) GetContainer() *corev1.Container { switch { diff --git a/pkg/apis/serving/v1alpha1/revision_lifecycle.go b/pkg/apis/serving/v1alpha1/revision_lifecycle.go index a18c0022195f..46de3402d14a 100644 --- a/pkg/apis/serving/v1alpha1/revision_lifecycle.go +++ b/pkg/apis/serving/v1alpha1/revision_lifecycle.go @@ -71,7 +71,7 @@ func (r *Revision) GetGroupVersionKind() schema.GroupVersionKind { // GetContainer returns a pointer to the relevant corev1.Container field. // It is never nil and should be exactly the specified container if len(containers) == 1 or -// If there are multiple containers it returns the container which has Ports +// if there are multiple containers it returns the container which has Ports // as guaranteed by validation. 
func (rs *RevisionSpec) GetContainer() *corev1.Container { if rs.DeprecatedContainer != nil { diff --git a/pkg/reconciler/revision/resources/deploy.go b/pkg/reconciler/revision/resources/deploy.go index 49224b165de7..7fcb491a1e12 100644 --- a/pkg/reconciler/revision/resources/deploy.go +++ b/pkg/reconciler/revision/resources/deploy.go @@ -140,24 +140,19 @@ func makePodSpec(rev *v1.Revision, loggingConfig *logging.Config, tracingConfig func BuildUserContainers(rev *v1.Revision) []corev1.Container { containers := make([]corev1.Container, 0, len(rev.Spec.PodSpec.Containers)) - var container corev1.Container for i := range rev.Spec.PodSpec.Containers { + var container corev1.Container if len(rev.Spec.PodSpec.Containers[i].Ports) != 0 || len(rev.Spec.PodSpec.Containers) == 1 { container = makeServingContainer(*rev.Spec.GetContainer(), rev) } else { container = makeContainer(rev.Spec.PodSpec.Containers[i], rev) } - containers = appendContainers(rev, containers, container) + updateImage(rev, &container) + containers = append(containers, container) } return containers } -func appendContainers(rev *v1.Revision, containers []corev1.Container, container corev1.Container) []corev1.Container { - updateImage(rev, &container) - containers = append(containers, container) - return containers -} - func updateImage(rev *v1.Revision, container *corev1.Container) { for _, digest := range rev.Status.ContainerStatuses { if digest.Name == container.Name { From 4cfb1b2e1fa9cc3183e9fd107963f81d9636f379 Mon Sep 17 00:00:00 2001 From: savitaashture Date: Sun, 10 May 2020 23:29:11 +0530 Subject: [PATCH 06/15] Fix review comments --- pkg/reconciler/revision/resources/deploy.go | 41 +-- .../revision/resources/deploy_test.go | 213 +++++++++++--- pkg/reconciler/revision/table_test.go | 267 ++++-------------- pkg/testing/v1/revision.go | 7 + 4 files changed, 241 insertions(+), 287 deletions(-) diff --git a/pkg/reconciler/revision/resources/deploy.go b/pkg/reconciler/revision/resources/deploy.go 
index 7fcb491a1e12..33e1f1c527d9 100644 --- a/pkg/reconciler/revision/resources/deploy.go +++ b/pkg/reconciler/revision/resources/deploy.go @@ -139,7 +139,6 @@ func makePodSpec(rev *v1.Revision, loggingConfig *logging.Config, tracingConfig // BuildUserContainers makes a container from the Revision template. func BuildUserContainers(rev *v1.Revision) []corev1.Container { containers := make([]corev1.Container, 0, len(rev.Spec.PodSpec.Containers)) - for i := range rev.Spec.PodSpec.Containers { var container corev1.Container if len(rev.Spec.PodSpec.Containers[i].Ports) != 0 || len(rev.Spec.PodSpec.Containers) == 1 { @@ -147,30 +146,24 @@ func BuildUserContainers(rev *v1.Revision) []corev1.Container { } else { container = makeContainer(rev.Spec.PodSpec.Containers[i], rev) } - updateImage(rev, &container) + if rev.Status.ContainerStatuses != nil { + container.Image = rev.Status.ContainerStatuses[i].ImageDigest + } containers = append(containers, container) } return containers } -func updateImage(rev *v1.Revision, container *corev1.Container) { - for _, digest := range rev.Status.ContainerStatuses { - if digest.Name == container.Name { - container.Image = digest.ImageDigest - return - } - } -} - func makeContainer(container corev1.Container, rev *v1.Revision) corev1.Container { // Adding or removing an overwritten corev1.Container field here? Don't forget to // update the fieldmasks / validations in pkg/apis/serving varLogMount := varLogVolumeMount.DeepCopy() - varLogMount.SubPathExpr += userContainer.Name + varLogMount.SubPathExpr += container.Name - userContainer.VolumeMounts = append(userContainer.VolumeMounts, *varLogMount) - userContainer.Lifecycle = userLifecycle + container.VolumeMounts = append(container.VolumeMounts, *varLogMount) + container.Lifecycle = userLifecycle container.Env = append(container.Env, getKnativeEnvVar(rev)...) + container.Env = append(container.Env, buildVarLogSubpathEnvs()...) 
// Explicitly disable stdin and tty allocation container.Stdin = false container.TTY = false @@ -184,26 +177,6 @@ func makeServingContainer(servingContainer corev1.Container, rev *v1.Revision) c userPort := getUserPort(rev) userPortStr := strconv.Itoa(int(userPort)) // Replacement is safe as only up to a single port is allowed on the Revision - userContainer.Ports = buildContainerPorts(userPort) - userContainer.Env = append(userContainer.Env, buildUserPortEnv(userPortStr)) - userContainer.Env = append(userContainer.Env, getKnativeEnvVar(rev)...) - userContainer.Env = append(userContainer.Env, buildVarLogSubpathEnvs()...) - - // Explicitly disable stdin and tty allocation - userContainer.Stdin = false - userContainer.TTY = false - - // Prefer imageDigest from revision if available - if rev.Status.DeprecatedImageDigest != "" { - userContainer.Image = rev.Status.DeprecatedImageDigest - } - - if userContainer.TerminationMessagePolicy == "" { - userContainer.TerminationMessagePolicy = corev1.TerminationMessageFallbackToLogsOnError - } - - if userContainer.ReadinessProbe != nil { - if userContainer.ReadinessProbe.HTTPGet != nil || userContainer.ReadinessProbe.TCPSocket != nil { servingContainer.Ports = buildContainerPorts(userPort) servingContainer.Env = append(servingContainer.Env, buildUserPortEnv(userPortStr)) container := makeContainer(servingContainer, rev) diff --git a/pkg/reconciler/revision/resources/deploy_test.go b/pkg/reconciler/revision/resources/deploy_test.go index 038ea84eeb00..934295e24a0b 100644 --- a/pkg/reconciler/revision/resources/deploy_test.go +++ b/pkg/reconciler/revision/resources/deploy_test.go @@ -40,20 +40,23 @@ import ( autoscalerconfig "knative.dev/serving/pkg/autoscaler/config" "knative.dev/serving/pkg/deployment" "knative.dev/serving/pkg/network" + + . 
"knative.dev/serving/pkg/testing/v1" ) var ( servingContainerName = "serving-container" sidecarContainerName = "sidecar-container-1" + sidecarContainerName2 = "sidecar-container-2" sidecarIstioInjectAnnotation = "sidecar.istio.io/inject" defaultServingContainer = &corev1.Container{ - Name: servingContainerName, - Image: "busybox", - Ports: buildContainerPorts(v1.DefaultUserPort), + Name: servingContainerName, + Image: "busybox", + Ports: buildContainerPorts(v1.DefaultUserPort), VolumeMounts: []corev1.VolumeMount{{ Name: varLogVolume.Name, MountPath: "/var/log", - SubPathExpr: "$(K_INTERNAL_POD_NAMESPACE)_$(K_INTERNAL_POD_NAME)_my-container-name", + SubPathExpr: "$(K_INTERNAL_POD_NAMESPACE)_$(K_INTERNAL_POD_NAME)_" + servingContainerName, }}, Lifecycle: userLifecycle, TerminationMessagePolicy: corev1.TerminationMessageFallbackToLogsOnError, @@ -78,24 +81,7 @@ var ( }}, } - defaultSidecarContainer = &corev1.Container{ - Name: sidecarContainerName, - Image: "ubuntu", - VolumeMounts: []corev1.VolumeMount{varLogVolumeMount}, - Lifecycle: userLifecycle, - TerminationMessagePolicy: corev1.TerminationMessageFallbackToLogsOnError, - Stdin: false, - TTY: false, - Env: []corev1.EnvVar{{ - Name: "K_REVISION", - Value: "bar", - }, { - Name: "K_CONFIGURATION", - }, { - Name: "K_SERVICE", - }}, - } - + //defaultSidecarContainer = defaultQueueContainer = &corev1.Container{ Name: QueueContainerName, Resources: createQueueResources(make(map[string]string), &corev1.Container{}), @@ -238,6 +224,36 @@ var ( } ) +func defaultSidecarContainer(containerName string) *corev1.Container { + return &corev1.Container{ + Name: containerName, + Image: "ubuntu", + VolumeMounts: []corev1.VolumeMount{{ + Name: varLogVolume.Name, + MountPath: "/var/log", + SubPathExpr: "$(K_INTERNAL_POD_NAMESPACE)_$(K_INTERNAL_POD_NAME)_" + containerName, + }}, + Lifecycle: userLifecycle, + TerminationMessagePolicy: corev1.TerminationMessageFallbackToLogsOnError, + Stdin: false, + TTY: false, + Env: []corev1.EnvVar{{ 
+ Name: "K_REVISION", + Value: "bar", + }, { + Name: "K_CONFIGURATION", + }, { + Name: "K_SERVICE", + }, { + Name: "K_INTERNAL_POD_NAME", + ValueFrom: &corev1.EnvVarSource{FieldRef: &corev1.ObjectFieldSelector{FieldPath: "metadata.name"}}, + }, { + Name: "K_INTERNAL_POD_NAMESPACE", + ValueFrom: &corev1.EnvVarSource{FieldRef: &corev1.ObjectFieldSelector{FieldPath: "metadata.namespace"}}, + }}, + } +} + func defaultRevision() *v1.Revision { return &v1.Revision{ ObjectMeta: metav1.ObjectMeta{ @@ -256,7 +272,8 @@ func refInt64(num int64) *int64 { type containerOption func(*corev1.Container) type podSpecOption func(*corev1.PodSpec) type deploymentOption func(*appsv1.Deployment) -type revisionOption func(*v1.Revision) + +//type revisionOption func(*v1.Revision) func container(container *corev1.Container, opts ...containerOption) corev1.Container { for _, option := range opts { @@ -269,8 +286,8 @@ func servingContainer(opts ...containerOption) corev1.Container { return container(defaultServingContainer.DeepCopy(), opts...) } -func sidecarContainer(opts ...containerOption) corev1.Container { - return container(defaultSidecarContainer.DeepCopy(), opts...) +func sidecarContainer(containerName string, opts ...containerOption) corev1.Container { + return container(defaultSidecarContainer(containerName).DeepCopy(), opts...) 
} func queueContainer(opts ...containerOption) corev1.Container { @@ -368,7 +385,7 @@ func appsv1deployment(opts ...deploymentOption) *appsv1.Deployment { return deploy } -func revision(name, ns string, opts ...revisionOption) *v1.Revision { +func revision(name, ns string, opts ...RevisionOption) *v1.Revision { revision := defaultRevision() revision.ObjectMeta.Name = name revision.ObjectMeta.Namespace = ns @@ -378,7 +395,7 @@ func revision(name, ns string, opts ...revisionOption) *v1.Revision { return revision } -func withContainerConcurrency(cc int64) revisionOption { +func withContainerConcurrency(cc int64) RevisionOption { return func(revision *v1.Revision) { revision.Spec.ContainerConcurrency = &cc } @@ -388,7 +405,7 @@ func withoutLabels(revision *v1.Revision) { revision.ObjectMeta.Labels = map[string]string{} } -func withOwnerReference(name string) revisionOption { +func withOwnerReference(name string) RevisionOption { return func(revision *v1.Revision) { revision.ObjectMeta.OwnerReferences = []metav1.OwnerReference{{ APIVersion: v1.SchemeGroupVersion.String(), @@ -401,7 +418,7 @@ func withOwnerReference(name string) revisionOption { } // withContainers function helps to provide a podSpec to the revision for both single and multiple containers -func withContainers(containers []corev1.Container) revisionOption { +func withContainers(containers []corev1.Container) RevisionOption { return func(revision *v1.Revision) { revision.Spec = v1.RevisionSpec{ PodSpec: corev1.PodSpec{ @@ -455,6 +472,9 @@ func TestMakePodSpec(t *testing.T) { }}, }}), withContainerConcurrency(1), + WithContainerStatuses([]v1.ContainerStatuses{{ + ImageDigest: "busybox@sha256:deadbeef", + }}), func(revision *v1.Revision) { container(revision.Spec.GetContainer(), withTCPReadinessProbe(), @@ -466,6 +486,7 @@ func TestMakePodSpec(t *testing.T) { servingContainer( func(container *corev1.Container) { container.Ports[0].ContainerPort = 8888 + container.Image = "busybox@sha256:deadbeef" }, 
withEnvVar("PORT", "8888"), ), @@ -490,6 +511,9 @@ func TestMakePodSpec(t *testing.T) { }}, }}), withContainerConcurrency(1), + WithContainerStatuses([]v1.ContainerStatuses{{ + ImageDigest: "busybox@sha256:deadbeef", + }}), func(revision *v1.Revision) { container(revision.Spec.GetContainer(), withTCPReadinessProbe(), @@ -509,6 +533,7 @@ func TestMakePodSpec(t *testing.T) { servingContainer( func(container *corev1.Container) { container.Ports[0].ContainerPort = 8888 + container.Image = "busybox@sha256:deadbeef" }, withEnvVar("PORT", "8888"), withPrependedVolumeMounts(corev1.VolumeMount{ @@ -537,6 +562,9 @@ func TestMakePodSpec(t *testing.T) { Image: "busybox", }}), withContainerConcurrency(1), + WithContainerStatuses([]v1.ContainerStatuses{{ + ImageDigest: "busybox@sha256:deadbeef", + }}), func(revision *v1.Revision) { container(revision.Spec.GetContainer(), withTCPReadinessProbe(), @@ -545,7 +573,9 @@ func TestMakePodSpec(t *testing.T) { ), want: podSpec( []corev1.Container{ - servingContainer(), + servingContainer(func(container *corev1.Container) { + container.Image = "busybox@sha256:deadbeef" + }), queueContainer( withEnvVar("CONTAINER_CONCURRENCY", "1"), ), @@ -558,14 +588,10 @@ func TestMakePodSpec(t *testing.T) { Image: "busybox", }}), withContainerConcurrency(1), + WithContainerStatuses([]v1.ContainerStatuses{{ + ImageDigest: "busybox@sha256:deadbeef", + }}), func(revision *v1.Revision) { - revision.Status = v1.RevisionStatus{ - DeprecatedImageDigest: "busybox@sha256:deadbeef", - ContainerStatuses: []v1.ContainerStatuses{{ - Name: servingContainerName, - ImageDigest: "busybox@sha256:deadbeef", - }}, - } container(revision.Spec.GetContainer(), withTCPReadinessProbe(), ) @@ -589,6 +615,9 @@ func TestMakePodSpec(t *testing.T) { }}), withContainerConcurrency(1), withOwnerReference("parent-config"), + WithContainerStatuses([]v1.ContainerStatuses{{ + ImageDigest: "busybox@sha256:deadbeef", + }}), func(revision *v1.Revision) { 
container(revision.Spec.GetContainer(), withTCPReadinessProbe(), @@ -597,7 +626,9 @@ func TestMakePodSpec(t *testing.T) { ), want: podSpec( []corev1.Container{ - servingContainer(), + servingContainer(func(container *corev1.Container) { + container.Image = "busybox@sha256:deadbeef" + }), queueContainer( withEnvVar("SERVING_CONFIGURATION", "parent-config"), withEnvVar("CONTAINER_CONCURRENCY", "1"), @@ -610,6 +641,9 @@ func TestMakePodSpec(t *testing.T) { Name: servingContainerName, Image: "busybox", }}), + WithContainerStatuses([]v1.ContainerStatuses{{ + ImageDigest: "busybox@sha256:deadbeef", + }}), func(revision *v1.Revision) { container(revision.Spec.GetContainer(), withHTTPReadinessProbe(v1.DefaultUserPort), @@ -617,7 +651,9 @@ func TestMakePodSpec(t *testing.T) { }), want: podSpec( []corev1.Container{ - servingContainer(), + servingContainer(func(container *corev1.Container) { + container.Image = "busybox@sha256:deadbeef" + }), queueContainer( withEnvVar("CONTAINER_CONCURRENCY", "0"), withEnvVar("SERVING_READINESS_PROBE", `{"httpGet":{"path":"/","port":8080,"host":"127.0.0.1","scheme":"HTTP","httpHeaders":[{"name":"K-Kubelet-Probe","value":"queue"}]}}`), @@ -633,6 +669,9 @@ func TestMakePodSpec(t *testing.T) { Name: servingContainerName, Image: "busybox", }}), + WithContainerStatuses([]v1.ContainerStatuses{{ + ImageDigest: "busybox@sha256:deadbeef", + }}), func(revision *v1.Revision) { container(revision.Spec.GetContainer(), withReadinessProbe(corev1.Handler{ @@ -645,7 +684,9 @@ func TestMakePodSpec(t *testing.T) { }), want: podSpec( []corev1.Container{ - servingContainer(), + servingContainer(func(container *corev1.Container) { + container.Image = "busybox@sha256:deadbeef" + }), queueContainer( withEnvVar("CONTAINER_CONCURRENCY", "0"), withEnvVar("SERVING_READINESS_PROBE", `{"tcpSocket":{"port":8080,"host":"127.0.0.1"}}`), @@ -658,6 +699,9 @@ func TestMakePodSpec(t *testing.T) { Name: servingContainerName, Image: "busybox", }}), + 
WithContainerStatuses([]v1.ContainerStatuses{{ + ImageDigest: "busybox@sha256:deadbeef", + }}), func(revision *v1.Revision) { container(revision.Spec.GetContainer(), withExecReadinessProbe([]string{"echo", "hello"}), @@ -666,6 +710,9 @@ func TestMakePodSpec(t *testing.T) { want: podSpec( []corev1.Container{ servingContainer( + func(container *corev1.Container) { + container.Image = "busybox@sha256:deadbeef" + }, withExecReadinessProbe([]string{"echo", "hello"})), queueContainer( withEnvVar("CONTAINER_CONCURRENCY", "0"), @@ -679,6 +726,9 @@ func TestMakePodSpec(t *testing.T) { Name: servingContainerName, Image: "busybox", }}), + WithContainerStatuses([]v1.ContainerStatuses{{ + ImageDigest: "busybox@sha256:deadbeef", + }}), func(revision *v1.Revision) { container(revision.Spec.GetContainer(), withTCPReadinessProbe(), @@ -692,6 +742,9 @@ func TestMakePodSpec(t *testing.T) { want: podSpec( []corev1.Container{ servingContainer( + func(container *corev1.Container) { + container.Image = "busybox@sha256:deadbeef" + }, withLivenessProbe(corev1.Handler{ HTTPGet: &corev1.HTTPGetAction{ Path: "/", @@ -714,6 +767,9 @@ func TestMakePodSpec(t *testing.T) { Name: servingContainerName, Image: "busybox", }}), + WithContainerStatuses([]v1.ContainerStatuses{{ + ImageDigest: "busybox@sha256:deadbeef", + }}), func(revision *v1.Revision) { container(revision.Spec.GetContainer(), withTCPReadinessProbe(), @@ -725,6 +781,9 @@ func TestMakePodSpec(t *testing.T) { want: podSpec( []corev1.Container{ servingContainer( + func(container *corev1.Container) { + container.Image = "busybox@sha256:deadbeef" + }, withLivenessProbe(corev1.Handler{ TCPSocket: &corev1.TCPSocketAction{ Port: intstr.FromInt(v1.DefaultUserPort), @@ -742,6 +801,9 @@ func TestMakePodSpec(t *testing.T) { Name: servingContainerName, Image: "busybox", }}), + WithContainerStatuses([]v1.ContainerStatuses{{ + ImageDigest: "busybox@sha256:deadbeef", + }}), func(revision *v1.Revision) { container(revision.Spec.GetContainer(), 
withTCPReadinessProbe(), @@ -752,7 +814,9 @@ func TestMakePodSpec(t *testing.T) { }, want: podSpec( []corev1.Container{ - servingContainer(), + servingContainer(func(container *corev1.Container) { + container.Image = "busybox@sha256:deadbeef" + }), queueContainer( withEnvVar("DOWNWARD_API_LABELS_PATH", fmt.Sprintf("%s/%s", podInfoVolumePath, metadataLabelsPath)), withPodLabelsVolumeMount(), @@ -775,6 +839,11 @@ func TestMakePodSpec(t *testing.T) { Name: sidecarContainerName, Image: "ubuntu", }}), + WithContainerStatuses([]v1.ContainerStatuses{{ + ImageDigest: "busybox@sha256:deadbeef", + }, { + ImageDigest: "ubuntu@sha256:deadbffe", + }}), func(revision *v1.Revision) { container(revision.Spec.GetContainer(), withTCPReadinessProbe(), @@ -785,8 +854,12 @@ func TestMakePodSpec(t *testing.T) { }, want: podSpec( []corev1.Container{ - servingContainer(), - sidecarContainer(), + servingContainer(func(container *corev1.Container) { + container.Image = "busybox@sha256:deadbeef" + }), + sidecarContainer(sidecarContainerName, func(container *corev1.Container) { + container.Image = "ubuntu@sha256:deadbffe" + }), queueContainer( withEnvVar("DOWNWARD_API_LABELS_PATH", fmt.Sprintf("%s/%s", podInfoVolumePath, metadataLabelsPath)), withPodLabelsVolumeMount(), @@ -804,6 +877,9 @@ func TestMakePodSpec(t *testing.T) { Image: "busybox", }}), withContainerConcurrency(1), + WithContainerStatuses([]v1.ContainerStatuses{{ + ImageDigest: "busybox@sha256:deadbeef", + }}), func(revision *v1.Revision) { revision.ObjectMeta.Labels = map[string]string{ serving.ConfigurationLabelKey: "cfg", @@ -832,6 +908,9 @@ func TestMakePodSpec(t *testing.T) { want: podSpec( []corev1.Container{ servingContainer( + func(container *corev1.Container) { + container.Image = "busybox@sha256:deadbeef" + }, withContainer(), withEnvVar("K_CONFIGURATION", "cfg"), withEnvVar("K_SERVICE", "svc"), @@ -860,6 +939,13 @@ func TestMakePodSpec(t *testing.T) { }, }), withContainerConcurrency(1), + 
WithContainerStatuses([]v1.ContainerStatuses{{ + ImageDigest: "busybox@sha256:deadbeef", + }, { + ImageDigest: "ubuntu@sha256:deadbffe", + }, { + ImageDigest: "alpine@sha256:deadbfff", + }}), func(revision *v1.Revision) { revision.ObjectMeta.Labels = map[string]string{ serving.ConfigurationLabelKey: "cfg", @@ -894,20 +980,26 @@ func TestMakePodSpec(t *testing.T) { want: podSpec( []corev1.Container{ servingContainer( + func(container *corev1.Container) { + container.Image = "busybox@sha256:deadbeef" + }, withContainer(), withEnvVar("K_CONFIGURATION", "cfg"), withEnvVar("K_SERVICE", "svc"), ), - sidecarContainer( + sidecarContainer(sidecarContainerName, + func(container *corev1.Container) { + container.Image = "ubuntu@sha256:deadbffe" + }, withContainer(), withEnvVar("K_CONFIGURATION", "cfg"), withEnvVar("K_SERVICE", "svc"), ), - sidecarContainer( + sidecarContainer(sidecarContainerName2, withContainer(), func(container *corev1.Container) { container.Name = "sidecar-container-2" - container.Image = "alpine" + container.Image = "alpine@sha256:deadbfff" }, withEnvVar("K_CONFIGURATION", "cfg"), withEnvVar("K_SERVICE", "svc"), @@ -932,6 +1024,11 @@ func TestMakePodSpec(t *testing.T) { Image: "ubuntu", }}), withContainerConcurrency(1), + WithContainerStatuses([]v1.ContainerStatuses{{ + ImageDigest: "busybox@sha256:deadbeef", + }, { + ImageDigest: "ubuntu@sha256:deadbffe", + }}), func(revision *v1.Revision) { revision.ObjectMeta.Labels = map[string]string{ serving.ConfigurationLabelKey: "cfg", @@ -960,6 +1057,9 @@ func TestMakePodSpec(t *testing.T) { want: podSpec( []corev1.Container{ servingContainer( + func(container *corev1.Container) { + container.Image = "busybox@sha256:deadbeef" + }, withContainer(), func(container *corev1.Container) { container.Ports[0].ContainerPort = 8888 @@ -968,7 +1068,10 @@ func TestMakePodSpec(t *testing.T) { withEnvVar("K_CONFIGURATION", "cfg"), withEnvVar("K_SERVICE", "svc"), ), - sidecarContainer( + sidecarContainer(sidecarContainerName, + 
func(container *corev1.Container) { + container.Image = "ubuntu@sha256:deadbffe" + }, withEnvVar("K_CONFIGURATION", "cfg"), withEnvVar("K_SERVICE", "svc"), ), @@ -1027,6 +1130,11 @@ func TestMakeDeployment(t *testing.T) { }), withoutLabels, withContainerConcurrency(1), + WithContainerStatuses([]v1.ContainerStatuses{{ + ImageDigest: "busybox@sha256:deadbeef", + }, { + ImageDigest: "ubuntu@sha256:deadbffe", + }}), func(revision *v1.Revision) { container(revision.Spec.GetContainer(), withTCPReadinessProbe()) }), @@ -1040,6 +1148,9 @@ func TestMakeDeployment(t *testing.T) { }}), withoutLabels, withOwnerReference("parent-config"), + WithContainerStatuses([]v1.ContainerStatuses{{ + ImageDigest: "busybox@sha256:deadbeef", + }}), func(revision *v1.Revision) { container(revision.Spec.GetContainer(), withTCPReadinessProbe()) }), @@ -1058,6 +1169,9 @@ func TestMakeDeployment(t *testing.T) { Name: servingContainerName, Image: "ubuntu", }}), + WithContainerStatuses([]v1.ContainerStatuses{{ + ImageDigest: "busybox@sha256:deadbeef", + }}), withoutLabels, func(revision *v1.Revision) { revision.ObjectMeta.Annotations = map[string]string{ sidecarIstioInjectAnnotation: "false", @@ -1124,6 +1238,11 @@ func TestProgressDeadlineOverride(t *testing.T) { }, }), withoutLabels, + WithContainerStatuses([]v1.ContainerStatuses{{ + ImageDigest: "busybox@sha256:deadbeef", + }, { + ImageDigest: "ubuntu@sha256:deadbffe", + }}), func(revision *v1.Revision) { container(revision.Spec.GetContainer(), withReadinessProbe(corev1.Handler{ diff --git a/pkg/reconciler/revision/table_test.go b/pkg/reconciler/revision/table_test.go index 1fcb0ac818cc..76f1a4a74622 100644 --- a/pkg/reconciler/revision/table_test.go +++ b/pkg/reconciler/revision/table_test.go @@ -61,150 +61,6 @@ func TestReconcile(t *testing.T) { Name: "key not found", // Make sure Reconcile handles good keys that don't exist. Key: "foo/not-found", - }, { - Name: "nop deletion reconcile", - // Test that with a DeletionTimestamp we do nothing. 
- Objects: []runtime.Object{ - rev("foo", "delete-pending", WithRevisionDeletionTimestamp), - }, - Key: "foo/delete-pending", - }, { - Name: "first revision reconciliation", - // Test the simplest successful reconciliation flow. - // We feed in a well formed Revision where none of its sub-resources exist, - // and we expect it to create them and initialize the Revision's status. - Objects: []runtime.Object{ - rev("foo", "first-reconcile", withContainerStatuses([]v1.ContainerStatuses{ - {Name: "first-reconcile", - ImageDigest: "busybox@sha256:deadbeef"}, - })), - }, - WantCreates: []runtime.Object{ - // The first reconciliation of a Revision creates the following resources. - pa("foo", "first-reconcile"), - deploy(t, "foo", "first-reconcile", withContainerStatuses([]v1.ContainerStatuses{ - {Name: "first-reconcile", - ImageDigest: "busybox@sha256:deadbeef"}, - })), - image("foo", "first-reconcile"), - }, - WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ - Object: rev("foo", "first-reconcile", - // The first reconciliation Populates the following status properties. - WithLogURL, AllUnknownConditions, MarkDeploying("Deploying"), withContainerStatuses([]v1.ContainerStatuses{ - {Name: "first-reconcile", - ImageDigest: "busybox@sha256:deadbeef"}, - })), - }}, - Key: "foo/first-reconcile", - }, { - Name: "failure updating revision status", - // This starts from the first reconciliation case above and induces a failure - // updating the revision status. - WantErr: true, - WithReactors: []clientgotesting.ReactionFunc{ - InduceFailure("update", "revisions"), - }, - Objects: []runtime.Object{ - rev("foo", "update-status-failure", withContainerStatuses([]v1.ContainerStatuses{ - {Name: "update-status-failure", - ImageDigest: "busybox@sha256:deadbeef"}, - })), - pa("foo", "update-status-failure"), - }, - WantCreates: []runtime.Object{ - // We still see the following creates before the failure is induced. 
- deploy(t, "foo", "update-status-failure", withContainerStatuses([]v1.ContainerStatuses{ - {Name: "update-status-failure", - ImageDigest: "busybox@sha256:deadbeef"}, - })), - image("foo", "update-status-failure"), - }, - WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ - Object: rev("foo", "update-status-failure", - // Despite failure, the following status properties are set. - WithLogURL, AllUnknownConditions, MarkDeploying("Deploying"), withContainerStatuses([]v1.ContainerStatuses{ - {Name: "update-status-failure", - ImageDigest: "busybox@sha256:deadbeef"}, - })), - }}, - WantEvents: []string{ - Eventf(corev1.EventTypeWarning, "UpdateFailed", "Failed to update status for %q: %v", - "update-status-failure", "inducing failure for update revisions"), - }, - Key: "foo/update-status-failure", - }, { - Name: "failure creating pa", - // This starts from the first reconciliation case above and induces a failure - // creating the PA. - WantErr: true, - WithReactors: []clientgotesting.ReactionFunc{ - InduceFailure("create", "podautoscalers"), - }, - Objects: []runtime.Object{ - rev("foo", "create-pa-failure", withContainerStatuses([]v1.ContainerStatuses{ - {Name: "create-pa-failure", - ImageDigest: "busybox@sha256:deadbeef"}, - })), - }, - WantCreates: []runtime.Object{ - // We still see the following creates before the failure is induced. - pa("foo", "create-pa-failure"), - deploy(t, "foo", "create-pa-failure", withContainerStatuses([]v1.ContainerStatuses{ - {Name: "create-pa-failure", - ImageDigest: "busybox@sha256:deadbeef"}, - })), - image("foo", "create-pa-failure"), - }, - WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ - Object: rev("foo", "create-pa-failure", - // Despite failure, the following status properties are set. 
- WithLogURL, WithInitRevConditions, - MarkDeploying("Deploying"), withContainerStatuses([]v1.ContainerStatuses{ - {Name: "create-pa-failure", - ImageDigest: "busybox@sha256:deadbeef"}, - })), - }}, - WantEvents: []string{ - Eventf(corev1.EventTypeWarning, "InternalError", `failed to create PA "create-pa-failure": inducing failure for create podautoscalers`), - }, - Key: "foo/create-pa-failure", - }, { - Name: "failure creating user deployment", - // This starts from the first reconciliation case above and induces a failure - // creating the user's deployment - WantErr: true, - WithReactors: []clientgotesting.ReactionFunc{ - InduceFailure("create", "deployments"), - }, - Objects: []runtime.Object{ - rev("foo", "create-user-deploy-failure", withContainerStatuses([]v1.ContainerStatuses{ - {Name: "create-user-deploy-failure", - ImageDigest: "busybox@sha256:deadbeef"}, - })), - pa("foo", "create-user-deploy-failure"), - }, - WantCreates: []runtime.Object{ - // We still see the following creates before the failure is induced. - deploy(t, "foo", "create-user-deploy-failure", withContainerStatuses([]v1.ContainerStatuses{ - {Name: "create-user-deploy-failure", - ImageDigest: "busybox@sha256:deadbeef"}, - })), - }, - WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ - Object: rev("foo", "create-user-deploy-failure", - // Despite failure, the following status properties are set. - WithLogURL, WithInitRevConditions, - MarkDeploying("Deploying"), withContainerStatuses([]v1.ContainerStatuses{ - {Name: "create-user-deploy-failure", - ImageDigest: "busybox@sha256:deadbeef"}, - })), - }}, - WantEvents: []string{ - Eventf(corev1.EventTypeWarning, "InternalError", - `failed to create deployment "create-user-deploy-failure-deployment": inducing failure for create deployments`), - }, - Key: "foo/create-user-deploy-failure", }, { Name: "stable revision reconciliation", // Test a simple stable reconciliation of an Active Revision. 
@@ -213,13 +69,13 @@ func TestReconcile(t *testing.T) { // are necessary. Objects: []runtime.Object{ rev("foo", "stable-reconcile", WithLogURL, AllUnknownConditions, - withContainerStatuses([]v1.ContainerStatuses{ + WithContainerStatuses([]v1.ContainerStatuses{ {Name: "stable-reconcile", ImageDigest: "busybox@sha256:deadbeef"}, })), pa("foo", "stable-reconcile", WithReachability(asv1a1.ReachabilityUnknown)), - deploy(t, "foo", "stable-reconcile", withContainerStatuses([]v1.ContainerStatuses{ + deploy(t, "foo", "stable-reconcile", WithContainerStatuses([]v1.ContainerStatuses{ {Name: "stable-reconcile", ImageDigest: "busybox@sha256:deadbeef"}, })), @@ -233,16 +89,19 @@ func TestReconcile(t *testing.T) { // with our desired spec. Objects: []runtime.Object{ rev("foo", "fix-containers", - WithLogURL, AllUnknownConditions, withContainerStatuses([]v1.ContainerStatuses{ + WithLogURL, AllUnknownConditions, WithContainerStatuses([]v1.ContainerStatuses{ {Name: "fix-containers", ImageDigest: "busybox@sha256:deadbeef"}, })), pa("foo", "fix-containers", WithReachability(asv1a1.ReachabilityUnknown)), - changeContainers(deploy(t, "foo", "fix-containers")), + changeContainers(deploy(t, "foo", "fix-containers", WithContainerStatuses([]v1.ContainerStatuses{ + {Name: "fix-containers", + ImageDigest: "busybox@sha256:deadbeef"}, + }))), image("foo", "fix-containers"), }, WantUpdates: []clientgotesting.UpdateActionImpl{{ - Object: deploy(t, "foo", "fix-containers", withContainerStatuses([]v1.ContainerStatuses{ + Object: deploy(t, "foo", "fix-containers", WithContainerStatuses([]v1.ContainerStatuses{ {Name: "fix-containers", ImageDigest: "busybox@sha256:deadbeef"}, })), @@ -257,16 +116,19 @@ func TestReconcile(t *testing.T) { }, Objects: []runtime.Object{ rev("foo", "failure-update-deploy", - withK8sServiceName("whateves"), WithLogURL, AllUnknownConditions, withContainerStatuses([]v1.ContainerStatuses{ + withK8sServiceName("whateves"), WithLogURL, AllUnknownConditions, 
WithContainerStatuses([]v1.ContainerStatuses{ {Name: "failure-update-deploy", ImageDigest: "busybox@sha256:deadbeef"}, })), pa("foo", "failure-update-deploy"), - changeContainers(deploy(t, "foo", "failure-update-deploy")), + changeContainers(deploy(t, "foo", "failure-update-deploy", WithContainerStatuses([]v1.ContainerStatuses{ + {Name: "failure-update-deploy", + ImageDigest: "busybox@sha256:deadbeef"}, + }))), image("foo", "failure-update-deploy"), }, WantUpdates: []clientgotesting.UpdateActionImpl{{ - Object: deploy(t, "foo", "failure-update-deploy", withContainerStatuses([]v1.ContainerStatuses{ + Object: deploy(t, "foo", "failure-update-deploy", WithContainerStatuses([]v1.ContainerStatuses{ {Name: "failure-update-deploy", ImageDigest: "busybox@sha256:deadbeef"}, })), @@ -284,13 +146,13 @@ func TestReconcile(t *testing.T) { Objects: []runtime.Object{ rev("foo", "stable-deactivation", WithLogURL, MarkRevisionReady, - MarkInactive("NoTraffic", "This thing is inactive."), withContainerStatuses([]v1.ContainerStatuses{ + MarkInactive("NoTraffic", "This thing is inactive."), WithContainerStatuses([]v1.ContainerStatuses{ {Name: "stable-deactivation", ImageDigest: "busybox@sha256:deadbeef"}, })), pa("foo", "stable-deactivation", WithNoTraffic("NoTraffic", "This thing is inactive.")), - deploy(t, "foo", "stable-deactivation", withContainerStatuses([]v1.ContainerStatuses{ + deploy(t, "foo", "stable-deactivation", WithContainerStatuses([]v1.ContainerStatuses{ {Name: "stable-deactivation", ImageDigest: "busybox@sha256:deadbeef"}, })), @@ -301,12 +163,12 @@ func TestReconcile(t *testing.T) { Name: "pa is ready", Objects: []runtime.Object{ rev("foo", "pa-ready", - withK8sServiceName("old-stuff"), WithLogURL, AllUnknownConditions, withContainerStatuses([]v1.ContainerStatuses{ + withK8sServiceName("old-stuff"), WithLogURL, AllUnknownConditions, WithContainerStatuses([]v1.ContainerStatuses{ {Name: "pa-ready", ImageDigest: "busybox@sha256:deadbeef"}, })), pa("foo", "pa-ready", 
WithTraffic, WithPAStatusService("new-stuff"), WithReachability(asv1a1.ReachabilityUnknown)), - deploy(t, "foo", "pa-ready", withContainerStatuses([]v1.ContainerStatuses{ + deploy(t, "foo", "pa-ready", WithContainerStatuses([]v1.ContainerStatuses{ {Name: "pa-ready", ImageDigest: "busybox@sha256:deadbeef"}, })), @@ -317,7 +179,7 @@ func TestReconcile(t *testing.T) { WithLogURL, // When the endpoint and pa are ready, then we will see the // Revision become ready. - MarkRevisionReady, withContainerStatuses([]v1.ContainerStatuses{ + MarkRevisionReady, WithContainerStatuses([]v1.ContainerStatuses{ {Name: "pa-ready", ImageDigest: "busybox@sha256:deadbeef"}, })), @@ -332,14 +194,14 @@ func TestReconcile(t *testing.T) { Objects: []runtime.Object{ rev("foo", "pa-not-ready", withK8sServiceName("somebody-told-me"), WithLogURL, - MarkRevisionReady, withContainerStatuses([]v1.ContainerStatuses{ + MarkRevisionReady, WithContainerStatuses([]v1.ContainerStatuses{ {Name: "pa-not-ready", ImageDigest: "busybox@sha256:deadbeef"}, })), pa("foo", "pa-not-ready", WithPAStatusService("its-not-confidential"), WithBufferedTraffic("Something", "This is something longer")), - readyDeploy(deploy(t, "foo", "pa-not-ready", withContainerStatuses([]v1.ContainerStatuses{ + readyDeploy(deploy(t, "foo", "pa-not-ready", WithContainerStatuses([]v1.ContainerStatuses{ {Name: "pa-not-ready", ImageDigest: "busybox@sha256:deadbeef"}, }))), @@ -347,7 +209,7 @@ func TestReconcile(t *testing.T) { }, WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ Object: rev("foo", "pa-not-ready", - WithLogURL, MarkRevisionReady, withContainerStatuses([]v1.ContainerStatuses{ + WithLogURL, MarkRevisionReady, WithContainerStatuses([]v1.ContainerStatuses{ {Name: "pa-not-ready", ImageDigest: "busybox@sha256:deadbeef"}, }), @@ -363,13 +225,13 @@ func TestReconcile(t *testing.T) { // Test propagating the inactivity signal from the pa to the Revision. 
Objects: []runtime.Object{ rev("foo", "pa-inactive", - withK8sServiceName("something-in-the-way"), WithLogURL, MarkRevisionReady, withContainerStatuses([]v1.ContainerStatuses{ + withK8sServiceName("something-in-the-way"), WithLogURL, MarkRevisionReady, WithContainerStatuses([]v1.ContainerStatuses{ {Name: "pa-inactive", ImageDigest: "busybox@sha256:deadbeef"}, })), pa("foo", "pa-inactive", WithNoTraffic("NoTraffic", "This thing is inactive.")), - readyDeploy(deploy(t, "foo", "pa-inactive", withContainerStatuses([]v1.ContainerStatuses{ + readyDeploy(deploy(t, "foo", "pa-inactive", WithContainerStatuses([]v1.ContainerStatuses{ {Name: "pa-inactive", ImageDigest: "busybox@sha256:deadbeef"}, }))), @@ -377,7 +239,7 @@ func TestReconcile(t *testing.T) { }, WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ Object: rev("foo", "pa-inactive", - WithLogURL, MarkRevisionReady, withContainerStatuses([]v1.ContainerStatuses{ + WithLogURL, MarkRevisionReady, WithContainerStatuses([]v1.ContainerStatuses{ {Name: "pa-inactive", ImageDigest: "busybox@sha256:deadbeef"}, }), @@ -391,7 +253,7 @@ func TestReconcile(t *testing.T) { // Test propagating the inactivity signal from the pa to the Revision. // But propagate the service name. 
Objects: []runtime.Object{ - rev("foo", "pa-inactive", withContainerStatuses([]v1.ContainerStatuses{ + rev("foo", "pa-inactive", WithContainerStatuses([]v1.ContainerStatuses{ {Name: "pa-inactive", ImageDigest: "busybox@sha256:deadbeef"}, }), @@ -399,7 +261,7 @@ func TestReconcile(t *testing.T) { pa("foo", "pa-inactive", WithNoTraffic("NoTraffic", "This thing is inactive."), WithPAStatusService("pa-inactive-svc")), - readyDeploy(deploy(t, "foo", "pa-inactive", withContainerStatuses([]v1.ContainerStatuses{ + readyDeploy(deploy(t, "foo", "pa-inactive", WithContainerStatuses([]v1.ContainerStatuses{ {Name: "pa-inactive", ImageDigest: "busybox@sha256:deadbeef"}, }))), @@ -411,7 +273,7 @@ func TestReconcile(t *testing.T) { withK8sServiceName("pa-inactive-svc"), // When we reconcile an "all ready" revision when the PA // is inactive, we should see the following change. - MarkInactive("NoTraffic", "This thing is inactive."), withContainerStatuses([]v1.ContainerStatuses{ + MarkInactive("NoTraffic", "This thing is inactive."), WithContainerStatuses([]v1.ContainerStatuses{ {Name: "pa-inactive", ImageDigest: "busybox@sha256:deadbeef"}, })), @@ -424,13 +286,13 @@ func TestReconcile(t *testing.T) { // Protocol type is the only thing that can be changed on PA Objects: []runtime.Object{ rev("foo", "fix-mutated-pa", - withK8sServiceName("ill-follow-the-sun"), WithLogURL, MarkRevisionReady, withContainerStatuses([]v1.ContainerStatuses{ + withK8sServiceName("ill-follow-the-sun"), WithLogURL, MarkRevisionReady, WithContainerStatuses([]v1.ContainerStatuses{ {Name: "fix-mutated-pa", ImageDigest: "busybox@sha256:deadbeef"}, })), pa("foo", "fix-mutated-pa", WithProtocolType(networking.ProtocolH2C), WithTraffic, WithPAStatusService("fix-mutated-pa")), - deploy(t, "foo", "fix-mutated-pa", withContainerStatuses([]v1.ContainerStatuses{ + deploy(t, "foo", "fix-mutated-pa", WithContainerStatuses([]v1.ContainerStatuses{ {Name: "fix-mutated-pa", ImageDigest: "busybox@sha256:deadbeef"}, })), @@ 
-441,7 +303,7 @@ func TestReconcile(t *testing.T) { WithLogURL, AllUnknownConditions, // When our reconciliation has to change the service // we should see the following mutations to status. - withK8sServiceName("fix-mutated-pa"), WithLogURL, MarkRevisionReady, withContainerStatuses([]v1.ContainerStatuses{ + withK8sServiceName("fix-mutated-pa"), WithLogURL, MarkRevisionReady, WithContainerStatuses([]v1.ContainerStatuses{ {Name: "fix-mutated-pa", ImageDigest: "busybox@sha256:deadbeef"}, })), @@ -457,12 +319,12 @@ func TestReconcile(t *testing.T) { Objects: []runtime.Object{ rev("foo", "fix-mutated-pa-fail", withK8sServiceName("some-old-stuff"), - WithLogURL, AllUnknownConditions, withContainerStatuses([]v1.ContainerStatuses{ + WithLogURL, AllUnknownConditions, WithContainerStatuses([]v1.ContainerStatuses{ {Name: "fix-mutated-pa-fail", ImageDigest: "busybox@sha256:deadbeef"}, })), pa("foo", "fix-mutated-pa-fail", WithProtocolType(networking.ProtocolH2C), WithReachability(asv1a1.ReachabilityUnknown)), - deploy(t, "foo", "fix-mutated-pa-fail", withContainerStatuses([]v1.ContainerStatuses{ + deploy(t, "foo", "fix-mutated-pa-fail", WithContainerStatuses([]v1.ContainerStatuses{ {Name: "fix-mutated-pa-fail", ImageDigest: "busybox@sha256:deadbeef"}, })), @@ -488,12 +350,12 @@ func TestReconcile(t *testing.T) { // status of the Revision. Objects: []runtime.Object{ rev("foo", "deploy-timeout", - withK8sServiceName("the-taxman"), WithLogURL, MarkActive, withContainerStatuses([]v1.ContainerStatuses{ + withK8sServiceName("the-taxman"), WithLogURL, MarkActive, WithContainerStatuses([]v1.ContainerStatuses{ {Name: "deploy-timeout", ImageDigest: "busybox@sha256:deadbeef"}, })), pa("foo", "deploy-timeout"), // pa can't be ready since deployment times out. 
- timeoutDeploy(deploy(t, "foo", "deploy-timeout", withContainerStatuses([]v1.ContainerStatuses{ + timeoutDeploy(deploy(t, "foo", "deploy-timeout", WithContainerStatuses([]v1.ContainerStatuses{ {Name: "deploy-timeout", ImageDigest: "busybox@sha256:deadbeef"}, })), "I timed out!"), @@ -504,7 +366,7 @@ func TestReconcile(t *testing.T) { WithLogURL, AllUnknownConditions, // When the revision is reconciled after a Deployment has // timed out, we should see it marked with the PDE state. - MarkProgressDeadlineExceeded("I timed out!"), withContainerStatuses([]v1.ContainerStatuses{ + MarkProgressDeadlineExceeded("I timed out!"), WithContainerStatuses([]v1.ContainerStatuses{ {Name: "deploy-timeout", ImageDigest: "busybox@sha256:deadbeef"}, })), @@ -518,12 +380,12 @@ func TestReconcile(t *testing.T) { // It then verifies that Reconcile propagates this into the status of the Revision. Objects: []runtime.Object{ rev("foo", "deploy-replica-failure", - withK8sServiceName("the-taxman"), WithLogURL, MarkActive, withContainerStatuses([]v1.ContainerStatuses{ + withK8sServiceName("the-taxman"), WithLogURL, MarkActive, WithContainerStatuses([]v1.ContainerStatuses{ {Name: "deploy-replica-failure", ImageDigest: "busybox@sha256:deadbeef"}, })), pa("foo", "deploy-replica-failure"), - replicaFailureDeploy(deploy(t, "foo", "deploy-replica-failure", withContainerStatuses([]v1.ContainerStatuses{ + replicaFailureDeploy(deploy(t, "foo", "deploy-replica-failure", WithContainerStatuses([]v1.ContainerStatuses{ {Name: "deploy-replica-failure", ImageDigest: "busybox@sha256:deadbeef"}, })), "I replica failed!"), @@ -534,7 +396,7 @@ func TestReconcile(t *testing.T) { WithLogURL, AllUnknownConditions, // When the revision is reconciled after a Deployment has // timed out, we should see it marked with the FailedCreate state. 
- MarkResourcesUnavailable("FailedCreate", "I replica failed!"), withContainerStatuses([]v1.ContainerStatuses{ + MarkResourcesUnavailable("FailedCreate", "I replica failed!"), WithContainerStatuses([]v1.ContainerStatuses{ {Name: "deploy-replica-failure", ImageDigest: "busybox@sha256:deadbeef"}, })), @@ -545,13 +407,13 @@ func TestReconcile(t *testing.T) { // Test the propagation of ImagePullBackoff from user container. Objects: []runtime.Object{ rev("foo", "pull-backoff", - withK8sServiceName("the-taxman"), WithLogURL, MarkActivating("Deploying", ""), withContainerStatuses([]v1.ContainerStatuses{ + withK8sServiceName("the-taxman"), WithLogURL, WithContainerStatuses([]v1.ContainerStatuses{ {Name: "pull-backoff", ImageDigest: "busybox@sha256:deadbeef"}, - })), + }), MarkActivating("Deploying", "")), pa("foo", "pull-backoff", WithReachability(asv1a1.ReachabilityUnknown)), // pa can't be ready since deployment times out. pod(t, "foo", "pull-backoff", WithWaitingContainer("pull-backoff", "ImagePullBackoff", "can't pull it")), - timeoutDeploy(deploy(t, "foo", "pull-backoff", withContainerStatuses([]v1.ContainerStatuses{ + timeoutDeploy(deploy(t, "foo", "pull-backoff", WithContainerStatuses([]v1.ContainerStatuses{ {Name: "pull-backoff", ImageDigest: "busybox@sha256:deadbeef"}, })), "Timed out!"), @@ -560,7 +422,7 @@ func TestReconcile(t *testing.T) { WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ Object: rev("foo", "pull-backoff", WithLogURL, AllUnknownConditions, - MarkResourcesUnavailable("ImagePullBackoff", "can't pull it"), withContainerStatuses([]v1.ContainerStatuses{ + MarkResourcesUnavailable("ImagePullBackoff", "can't pull it"), WithContainerStatuses([]v1.ContainerStatuses{ {Name: "pull-backoff", ImageDigest: "busybox@sha256:deadbeef"}, })), @@ -577,13 +439,13 @@ func TestReconcile(t *testing.T) { // that Reconcile propagates this into the status of the Revision. 
Objects: []runtime.Object{ rev("foo", "pod-error", - withK8sServiceName("a-pod-error"), WithLogURL, AllUnknownConditions, MarkActive, withContainerStatuses([]v1.ContainerStatuses{ + withK8sServiceName("a-pod-error"), WithLogURL, AllUnknownConditions, MarkActive, WithContainerStatuses([]v1.ContainerStatuses{ {Name: "pod-error", ImageDigest: "busybox@sha256:deadbeef"}, })), pa("foo", "pod-error"), // PA can't be ready, since no traffic. pod(t, "foo", "pod-error", WithFailingContainer("pod-error", 5, "I failed man!")), - deploy(t, "foo", "pod-error", withContainerStatuses([]v1.ContainerStatuses{ + deploy(t, "foo", "pod-error", WithContainerStatuses([]v1.ContainerStatuses{ {Name: "pod-error", ImageDigest: "busybox@sha256:deadbeef"}, })), @@ -592,7 +454,7 @@ func TestReconcile(t *testing.T) { WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ Object: rev("foo", "pod-error", WithLogURL, AllUnknownConditions, MarkContainerExiting(5, - v1.RevisionContainerExitingMessage("I failed man!")), withContainerStatuses([]v1.ContainerStatuses{ + v1.RevisionContainerExitingMessage("I failed man!")), WithContainerStatuses([]v1.ContainerStatuses{ {Name: "pod-error", ImageDigest: "busybox@sha256:deadbeef"}, })), @@ -605,13 +467,13 @@ func TestReconcile(t *testing.T) { // that Reconcile propagates this into the status of the Revision. Objects: []runtime.Object{ rev("foo", "pod-schedule-error", - withK8sServiceName("a-pod-schedule-error"), WithLogURL, AllUnknownConditions, MarkActive, withContainerStatuses([]v1.ContainerStatuses{ + withK8sServiceName("a-pod-schedule-error"), WithLogURL, AllUnknownConditions, MarkActive, WithContainerStatuses([]v1.ContainerStatuses{ {Name: "pod-schedule-error", ImageDigest: "busybox@sha256:deadbeef"}, })), pa("foo", "pod-schedule-error"), // PA can't be ready, since no traffic. 
pod(t, "foo", "pod-schedule-error", WithUnschedulableContainer("Insufficient energy", "Unschedulable")), - deploy(t, "foo", "pod-schedule-error", withContainerStatuses([]v1.ContainerStatuses{ + deploy(t, "foo", "pod-schedule-error", WithContainerStatuses([]v1.ContainerStatuses{ {Name: "pod-schedule-error", ImageDigest: "busybox@sha256:deadbeef"}, })), @@ -620,7 +482,7 @@ func TestReconcile(t *testing.T) { WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ Object: rev("foo", "pod-schedule-error", WithLogURL, AllUnknownConditions, MarkResourcesUnavailable("Insufficient energy", - "Unschedulable"), withContainerStatuses([]v1.ContainerStatuses{ + "Unschedulable"), WithContainerStatuses([]v1.ContainerStatuses{ {Name: "pod-schedule-error", ImageDigest: "busybox@sha256:deadbeef"}, })), @@ -635,12 +497,12 @@ func TestReconcile(t *testing.T) { // This signal should make our Reconcile mark the Revision as Ready. Objects: []runtime.Object{ rev("foo", "steady-ready", withK8sServiceName("very-steady"), WithLogURL, - withContainerStatuses([]v1.ContainerStatuses{ + WithContainerStatuses([]v1.ContainerStatuses{ {Name: "steady-ready", ImageDigest: "busybox@sha256:deadbeef"}, })), pa("foo", "steady-ready", WithTraffic, WithPAStatusService("steadier-even")), - deploy(t, "foo", "steady-ready", withContainerStatuses([]v1.ContainerStatuses{ + deploy(t, "foo", "steady-ready", WithContainerStatuses([]v1.ContainerStatuses{ {Name: "steady-ready", ImageDigest: "busybox@sha256:deadbeef"}, })), @@ -650,7 +512,7 @@ func TestReconcile(t *testing.T) { Object: rev("foo", "steady-ready", withK8sServiceName("steadier-even"), WithLogURL, // All resources are ready to go, we should see the revision being // marked ready - MarkRevisionReady, withContainerStatuses([]v1.ContainerStatuses{ + MarkRevisionReady, WithContainerStatuses([]v1.ContainerStatuses{ {Name: "steady-ready", ImageDigest: "busybox@sha256:deadbeef"}, })), @@ -664,12 +526,12 @@ func TestReconcile(t *testing.T) { WantErr: true, 
Objects: []runtime.Object{ rev("foo", "missing-owners", withK8sServiceName("lesser-revision"), WithLogURL, - MarkRevisionReady, withContainerStatuses([]v1.ContainerStatuses{ + MarkRevisionReady, WithContainerStatuses([]v1.ContainerStatuses{ {Name: "missing-owners", ImageDigest: "busybox@sha256:deadbeef"}, })), pa("foo", "missing-owners", WithTraffic, WithPodAutoscalerOwnersRemoved), - deploy(t, "foo", "missing-owners", withContainerStatuses([]v1.ContainerStatuses{ + deploy(t, "foo", "missing-owners", WithContainerStatuses([]v1.ContainerStatuses{ {Name: "missing-owners", ImageDigest: "busybox@sha256:deadbeef"}, })), @@ -679,7 +541,7 @@ func TestReconcile(t *testing.T) { Object: rev("foo", "missing-owners", withK8sServiceName("lesser-revision"), WithLogURL, MarkRevisionReady, // When we're missing the OwnerRef for PodAutoscaler we see this update. - MarkResourceNotOwned("PodAutoscaler", "missing-owners"), withContainerStatuses([]v1.ContainerStatuses{ + MarkResourceNotOwned("PodAutoscaler", "missing-owners"), WithContainerStatuses([]v1.ContainerStatuses{ {Name: "missing-owners", ImageDigest: "busybox@sha256:deadbeef"}, })), @@ -693,12 +555,12 @@ func TestReconcile(t *testing.T) { WantErr: true, Objects: []runtime.Object{ rev("foo", "missing-owners", withK8sServiceName("youre-gonna-lose"), WithLogURL, - MarkRevisionReady, withContainerStatuses([]v1.ContainerStatuses{ + MarkRevisionReady, WithContainerStatuses([]v1.ContainerStatuses{ {Name: "missing-owners", ImageDigest: "busybox@sha256:deadbeef"}, })), pa("foo", "missing-owners", WithTraffic), - noOwner(deploy(t, "foo", "missing-owners", withContainerStatuses([]v1.ContainerStatuses{ + noOwner(deploy(t, "foo", "missing-owners", WithContainerStatuses([]v1.ContainerStatuses{ {Name: "missing-owners", ImageDigest: "busybox@sha256:deadbeef"}, }))), @@ -708,7 +570,7 @@ func TestReconcile(t *testing.T) { Object: rev("foo", "missing-owners", withK8sServiceName("youre-gonna-lose"), WithLogURL, MarkRevisionReady, // When we're 
missing the OwnerRef for Deployment we see this update. - MarkResourceNotOwned("Deployment", "missing-owners-deployment"), withContainerStatuses([]v1.ContainerStatuses{ + MarkResourceNotOwned("Deployment", "missing-owners-deployment"), WithContainerStatuses([]v1.ContainerStatuses{ {Name: "missing-owners", ImageDigest: "busybox@sha256:deadbeef"}, })), @@ -722,14 +584,14 @@ func TestReconcile(t *testing.T) { // This test case tests that the image pull secrets from revision propagate to deployment and image Objects: []runtime.Object{ rev("foo", "image-pull-secrets", WithImagePullSecrets("foo-secret"), - withContainerStatuses([]v1.ContainerStatuses{ + WithContainerStatuses([]v1.ContainerStatuses{ {Name: "image-pull-secrets", ImageDigest: "busybox@sha256:deadbeef"}, })), }, WantCreates: []runtime.Object{ pa("foo", "image-pull-secrets"), - deployImagePullSecrets(deploy(t, "foo", "image-pull-secrets", withContainerStatuses([]v1.ContainerStatuses{ + deployImagePullSecrets(deploy(t, "foo", "image-pull-secrets", WithContainerStatuses([]v1.ContainerStatuses{ {Name: "image-pull-secrets", ImageDigest: "busybox@sha256:deadbeef"}, })), "foo-secret"), @@ -738,7 +600,7 @@ func TestReconcile(t *testing.T) { WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ Object: rev("foo", "image-pull-secrets", WithImagePullSecrets("foo-secret"), - WithLogURL, AllUnknownConditions, MarkDeploying("Deploying"), withContainerStatuses([]v1.ContainerStatuses{ + WithLogURL, AllUnknownConditions, MarkDeploying("Deploying"), WithContainerStatuses([]v1.ContainerStatuses{ {Name: "image-pull-secrets", ImageDigest: "busybox@sha256:deadbeef"}, })), @@ -850,13 +712,6 @@ func withK8sServiceName(sn string) RevisionOption { } } -// withContainerStatuses sets the .Status.ContainerStatuses to the Revision. 
-func withContainerStatuses(containerStatus []v1.ContainerStatuses) RevisionOption { - return func(r *v1.Revision) { - r.Status.ContainerStatuses = containerStatus - } -} - // TODO(mattmoor): Come up with a better name for this. func AllUnknownConditions(r *v1.Revision) { WithInitRevConditions(r) diff --git a/pkg/testing/v1/revision.go b/pkg/testing/v1/revision.go index 572e99079587..5e2efc9101a8 100644 --- a/pkg/testing/v1/revision.go +++ b/pkg/testing/v1/revision.go @@ -182,3 +182,10 @@ func WithRevisionLabel(key, value string) RevisionOption { config.Labels[key] = value } } + +// WithContainerStatuses sets the .Status.ContainerStatuses to the Revision. +func WithContainerStatuses(containerStatus []v1.ContainerStatuses) RevisionOption { + return func(r *v1.Revision) { + r.Status.ContainerStatuses = containerStatus + } +} From 5fcc729ba80412de89e4020aaacd201448d145db Mon Sep 17 00:00:00 2001 From: savitaashture Date: Wed, 13 May 2020 13:21:59 +0530 Subject: [PATCH 07/15] addressed review comments --- pkg/reconciler/revision/resources/deploy.go | 5 ++--- pkg/reconciler/revision/resources/deploy_test.go | 2 -- pkg/webhook/podspec_dryrun.go | 3 +-- 3 files changed, 3 insertions(+), 7 deletions(-) diff --git a/pkg/reconciler/revision/resources/deploy.go b/pkg/reconciler/revision/resources/deploy.go index 33e1f1c527d9..5b679f31a217 100644 --- a/pkg/reconciler/revision/resources/deploy.go +++ b/pkg/reconciler/revision/resources/deploy.go @@ -126,8 +126,7 @@ func makePodSpec(rev *v1.Revision, loggingConfig *logging.Config, tracingConfig return nil, fmt.Errorf("failed to create queue-proxy container: %w", err) } - container := append(BuildUserContainers(rev), *queueContainer) - podSpec := BuildPodSpec(rev, container) + podSpec := BuildPodSpec(rev, append(BuildUserContainers(rev), *queueContainer)) if autoscalerConfig.EnableGracefulScaledown { podSpec.Volumes = append(podSpec.Volumes, labelVolume) @@ -142,7 +141,7 @@ func BuildUserContainers(rev *v1.Revision) 
[]corev1.Container { for i := range rev.Spec.PodSpec.Containers { var container corev1.Container if len(rev.Spec.PodSpec.Containers[i].Ports) != 0 || len(rev.Spec.PodSpec.Containers) == 1 { - container = makeServingContainer(*rev.Spec.GetContainer(), rev) + container = makeServingContainer(rev.Spec.PodSpec.Containers[i], rev) } else { container = makeContainer(rev.Spec.PodSpec.Containers[i], rev) } diff --git a/pkg/reconciler/revision/resources/deploy_test.go b/pkg/reconciler/revision/resources/deploy_test.go index 934295e24a0b..eb1e93154b3c 100644 --- a/pkg/reconciler/revision/resources/deploy_test.go +++ b/pkg/reconciler/revision/resources/deploy_test.go @@ -273,8 +273,6 @@ type containerOption func(*corev1.Container) type podSpecOption func(*corev1.PodSpec) type deploymentOption func(*appsv1.Deployment) -//type revisionOption func(*v1.Revision) - func container(container *corev1.Container, opts ...containerOption) corev1.Container { for _, option := range opts { option(container) diff --git a/pkg/webhook/podspec_dryrun.go b/pkg/webhook/podspec_dryrun.go index 0d680863fa2c..e810d095c0b0 100644 --- a/pkg/webhook/podspec_dryrun.go +++ b/pkg/webhook/podspec_dryrun.go @@ -57,8 +57,7 @@ func validatePodSpec(ctx context.Context, ps v1.RevisionSpec, namespace string, Spec: ps, } rev.SetDefaults(ctx) - userContainer := resources.BuildUserContainers(rev) - podSpec := resources.BuildPodSpec(rev, userContainer) + podSpec := resources.BuildPodSpec(rev, resources.BuildUserContainers(rev)) // Make a dummy pod with the template Revisions & PodSpec and dryrun call to API-server pod := &corev1.Pod{ From c38974f93df072255c8461de705123724d6f7acb Mon Sep 17 00:00:00 2001 From: savitaashture Date: Wed, 13 May 2020 16:19:50 +0530 Subject: [PATCH 08/15] Refactored testcase --- .../revision/resources/deploy_test.go | 436 +++++++----------- .../revision/resources/queue_test.go | 43 +- pkg/reconciler/revision/table_test.go | 294 +++++++----- 3 files changed, 399 insertions(+), 374 
deletions(-) diff --git a/pkg/reconciler/revision/resources/deploy_test.go b/pkg/reconciler/revision/resources/deploy_test.go index eb1e93154b3c..57b9d7accef3 100644 --- a/pkg/reconciler/revision/resources/deploy_test.go +++ b/pkg/reconciler/revision/resources/deploy_test.go @@ -81,7 +81,6 @@ var ( }}, } - //defaultSidecarContainer = defaultQueueContainer = &corev1.Container{ Name: QueueContainerName, Resources: createQueueResources(make(map[string]string), &corev1.Container{}), @@ -314,36 +313,32 @@ func withPodLabelsVolumeMount() containerOption { } } -func withReadinessProbe(handler corev1.Handler) containerOption { - return func(container *corev1.Container) { - container.ReadinessProbe = &corev1.Probe{Handler: handler} - } -} - -func withTCPReadinessProbe() containerOption { - return withReadinessProbe(corev1.Handler{ - TCPSocket: &corev1.TCPSocketAction{ - Host: "127.0.0.1", - Port: intstr.FromInt(v1.DefaultUserPort), - }, - }) +func withTCPReadinessProbe(port int) *corev1.Probe { + return &corev1.Probe{ + Handler: corev1.Handler{ + TCPSocket: &corev1.TCPSocketAction{ + Host: "127.0.0.1", + Port: intstr.FromInt(port), + }}} } -func withHTTPReadinessProbe(port int) containerOption { - return withReadinessProbe(corev1.Handler{ - HTTPGet: &corev1.HTTPGetAction{ - Port: intstr.FromInt(port), - Path: "/", - }, - }) +func withHTTPReadinessProbe(port int) *corev1.Probe { + return &corev1.Probe{ + Handler: corev1.Handler{ + HTTPGet: &corev1.HTTPGetAction{ + Port: intstr.FromInt(port), + Path: "/", + }, + }} } -func withExecReadinessProbe(command []string) containerOption { - return withReadinessProbe(corev1.Handler{ - Exec: &corev1.ExecAction{ - Command: command, - }, - }) +func withExecReadinessProbe(command []string) *corev1.Probe { + return &corev1.Probe{ + Handler: corev1.Handler{ + Exec: &corev1.ExecAction{ + Command: command, + }, + }} } func withLivenessProbe(handler corev1.Handler) containerOption { @@ -427,7 +422,32 @@ func withContainers(containers 
[]corev1.Container) RevisionOption { } } -func withContainer() containerOption { +func getContainer() containerOption { + return func(container *corev1.Container) { + container.Command = []string{"/bin/bash"} + container.Args = []string{"-c", "echo Hello world"} + container.Env = append([]corev1.EnvVar{{ + Name: "FOO", + Value: "bar", + }, { + Name: "BAZ", + Value: "blah", + }}, container.Env...) + container.Resources = corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceMemory: resource.MustParse("666Mi"), + corev1.ResourceCPU: resource.MustParse("666m"), + }, + Limits: corev1.ResourceList{ + corev1.ResourceMemory: resource.MustParse("888Mi"), + corev1.ResourceCPU: resource.MustParse("888m"), + }, + } + container.TerminationMessagePolicy = corev1.TerminationMessageReadFile + } +} + +func updateContainer() containerOption { return func(container *corev1.Container) { container.Command = []string{"/bin/bash"} container.Args = []string{"-c", "echo Hello world"} @@ -468,16 +488,12 @@ func TestMakePodSpec(t *testing.T) { Ports: []corev1.ContainerPort{{ ContainerPort: 8888, }}, + ReadinessProbe: withTCPReadinessProbe(v1.DefaultUserPort), }}), withContainerConcurrency(1), WithContainerStatuses([]v1.ContainerStatuses{{ ImageDigest: "busybox@sha256:deadbeef", }}), - func(revision *v1.Revision) { - container(revision.Spec.GetContainer(), - withTCPReadinessProbe(), - ) - }, ), want: podSpec( []corev1.Container{ @@ -492,8 +508,7 @@ func TestMakePodSpec(t *testing.T) { withEnvVar("CONTAINER_CONCURRENCY", "1"), withEnvVar("USER_PORT", "8888"), withEnvVar("SERVING_READINESS_PROBE", `{"tcpSocket":{"port":8888,"host":"127.0.0.1"}}`), - ), - }), + )}), }, { name: "volumes passed through", rev: revision("bar", "foo", @@ -507,15 +522,13 @@ func TestMakePodSpec(t *testing.T) { Name: "asdf", MountPath: "/asdf", }}, + ReadinessProbe: withTCPReadinessProbe(v1.DefaultUserPort), }}), withContainerConcurrency(1), WithContainerStatuses([]v1.ContainerStatuses{{ 
ImageDigest: "busybox@sha256:deadbeef", }}), func(revision *v1.Revision) { - container(revision.Spec.GetContainer(), - withTCPReadinessProbe(), - ) revision.Spec.Volumes = []corev1.Volume{{ Name: "asdf", VolumeSource: corev1.VolumeSource{ @@ -556,18 +569,14 @@ func TestMakePodSpec(t *testing.T) { name: "concurrency=1 no owner", rev: revision("bar", "foo", withContainers([]corev1.Container{{ - Name: servingContainerName, - Image: "busybox", + Name: servingContainerName, + Image: "busybox", + ReadinessProbe: withTCPReadinessProbe(v1.DefaultUserPort), }}), withContainerConcurrency(1), WithContainerStatuses([]v1.ContainerStatuses{{ ImageDigest: "busybox@sha256:deadbeef", }}), - func(revision *v1.Revision) { - container(revision.Spec.GetContainer(), - withTCPReadinessProbe(), - ) - }, ), want: podSpec( []corev1.Container{ @@ -582,18 +591,14 @@ func TestMakePodSpec(t *testing.T) { name: "concurrency=1 no owner digest resolved", rev: revision("bar", "foo", withContainers([]corev1.Container{{ - Name: servingContainerName, - Image: "busybox", + Name: servingContainerName, + Image: "busybox", + ReadinessProbe: withTCPReadinessProbe(v1.DefaultUserPort), }}), withContainerConcurrency(1), WithContainerStatuses([]v1.ContainerStatuses{{ ImageDigest: "busybox@sha256:deadbeef", }}), - func(revision *v1.Revision) { - container(revision.Spec.GetContainer(), - withTCPReadinessProbe(), - ) - }, ), want: podSpec( []corev1.Container{ @@ -608,19 +613,15 @@ func TestMakePodSpec(t *testing.T) { name: "concurrency=1 with owner", rev: revision("bar", "foo", withContainers([]corev1.Container{{ - Name: servingContainerName, - Image: "busybox", + Name: servingContainerName, + Image: "busybox", + ReadinessProbe: withTCPReadinessProbe(v1.DefaultUserPort), }}), withContainerConcurrency(1), withOwnerReference("parent-config"), WithContainerStatuses([]v1.ContainerStatuses{{ ImageDigest: "busybox@sha256:deadbeef", }}), - func(revision *v1.Revision) { - container(revision.Spec.GetContainer(), - 
withTCPReadinessProbe(), - ) - }, ), want: podSpec( []corev1.Container{ @@ -636,17 +637,14 @@ func TestMakePodSpec(t *testing.T) { name: "with http readiness probe", rev: revision("bar", "foo", withContainers([]corev1.Container{{ - Name: servingContainerName, - Image: "busybox", + Name: servingContainerName, + Image: "busybox", + ReadinessProbe: withHTTPReadinessProbe(v1.DefaultUserPort), }}), WithContainerStatuses([]v1.ContainerStatuses{{ ImageDigest: "busybox@sha256:deadbeef", }}), - func(revision *v1.Revision) { - container(revision.Spec.GetContainer(), - withHTTPReadinessProbe(v1.DefaultUserPort), - ) - }), + ), want: podSpec( []corev1.Container{ servingContainer(func(container *corev1.Container) { @@ -664,22 +662,14 @@ func TestMakePodSpec(t *testing.T) { }), rev: revision("bar", "foo", withContainers([]corev1.Container{{ - Name: servingContainerName, - Image: "busybox", + Name: servingContainerName, + Image: "busybox", + ReadinessProbe: withTCPReadinessProbe(12345), }}), WithContainerStatuses([]v1.ContainerStatuses{{ ImageDigest: "busybox@sha256:deadbeef", }}), - func(revision *v1.Revision) { - container(revision.Spec.GetContainer(), - withReadinessProbe(corev1.Handler{ - TCPSocket: &corev1.TCPSocketAction{ - Host: "127.0.0.1", - Port: intstr.FromInt(12345), - }, - }), - ) - }), + ), want: podSpec( []corev1.Container{ servingContainer(func(container *corev1.Container) { @@ -694,24 +684,21 @@ func TestMakePodSpec(t *testing.T) { name: "with shell readiness probe", rev: revision("bar", "foo", withContainers([]corev1.Container{{ - Name: servingContainerName, - Image: "busybox", + Name: servingContainerName, + Image: "busybox", + ReadinessProbe: withExecReadinessProbe([]string{"echo", "hello"}), }}), WithContainerStatuses([]v1.ContainerStatuses{{ ImageDigest: "busybox@sha256:deadbeef", }}), - func(revision *v1.Revision) { - container(revision.Spec.GetContainer(), - withExecReadinessProbe([]string{"echo", "hello"}), - ) - }), + ), want: podSpec( 
[]corev1.Container{ servingContainer( func(container *corev1.Container) { container.Image = "busybox@sha256:deadbeef" - }, - withExecReadinessProbe([]string{"echo", "hello"})), + container.ReadinessProbe = withExecReadinessProbe([]string{"echo", "hello"}) + }), queueContainer( withEnvVar("CONTAINER_CONCURRENCY", "0"), withEnvVar("SERVING_READINESS_PROBE", `{"tcpSocket":{"port":8080,"host":"127.0.0.1"}}`), @@ -721,22 +708,21 @@ func TestMakePodSpec(t *testing.T) { name: "with http liveness probe", rev: revision("bar", "foo", withContainers([]corev1.Container{{ - Name: servingContainerName, - Image: "busybox", + Name: servingContainerName, + Image: "busybox", + ReadinessProbe: withTCPReadinessProbe(v1.DefaultUserPort), + LivenessProbe: &corev1.Probe{ + Handler: corev1.Handler{ + HTTPGet: &corev1.HTTPGetAction{ + Path: "/", + }, + }, + }, }}), WithContainerStatuses([]v1.ContainerStatuses{{ ImageDigest: "busybox@sha256:deadbeef", }}), - func(revision *v1.Revision) { - container(revision.Spec.GetContainer(), - withTCPReadinessProbe(), - withLivenessProbe(corev1.Handler{ - HTTPGet: &corev1.HTTPGetAction{ - Path: "/", - }, - }), - ) - }), + ), want: podSpec( []corev1.Container{ servingContainer( @@ -762,20 +748,18 @@ func TestMakePodSpec(t *testing.T) { name: "with tcp liveness probe", rev: revision("bar", "foo", withContainers([]corev1.Container{{ - Name: servingContainerName, - Image: "busybox", - }}), + Name: servingContainerName, + Image: "busybox", + ReadinessProbe: withTCPReadinessProbe(v1.DefaultUserPort), + LivenessProbe: &corev1.Probe{ + Handler: corev1.Handler{ + TCPSocket: &corev1.TCPSocketAction{}, + }}}}, + ), WithContainerStatuses([]v1.ContainerStatuses{{ ImageDigest: "busybox@sha256:deadbeef", }}), - func(revision *v1.Revision) { - container(revision.Spec.GetContainer(), - withTCPReadinessProbe(), - withLivenessProbe(corev1.Handler{ - TCPSocket: &corev1.TCPSocketAction{}, - }), - ) - }), + ), want: podSpec( []corev1.Container{ servingContainer( @@ -796,17 
+780,14 @@ func TestMakePodSpec(t *testing.T) { name: "with graceful scaledown enabled", rev: revision("bar", "foo", withContainers([]corev1.Container{{ - Name: servingContainerName, - Image: "busybox", + Name: servingContainerName, + Image: "busybox", + ReadinessProbe: withTCPReadinessProbe(v1.DefaultUserPort), }}), WithContainerStatuses([]v1.ContainerStatuses{{ ImageDigest: "busybox@sha256:deadbeef", }}), - func(revision *v1.Revision) { - container(revision.Spec.GetContainer(), - withTCPReadinessProbe(), - ) - }), + ), ac: autoscalerconfig.Config{ EnableGracefulScaledown: true, }, @@ -831,8 +812,9 @@ func TestMakePodSpec(t *testing.T) { Name: servingContainerName, Image: "busybox", Ports: []corev1.ContainerPort{{ - ContainerPort: 8080, + ContainerPort: v1.DefaultUserPort, }}, + ReadinessProbe: withTCPReadinessProbe(v1.DefaultUserPort), }, { Name: sidecarContainerName, Image: "ubuntu", @@ -842,11 +824,7 @@ func TestMakePodSpec(t *testing.T) { }, { ImageDigest: "ubuntu@sha256:deadbffe", }}), - func(revision *v1.Revision) { - container(revision.Spec.GetContainer(), - withTCPReadinessProbe(), - ) - }), + ), ac: autoscalerconfig.Config{ EnableGracefulScaledown: true, }, @@ -871,8 +849,9 @@ func TestMakePodSpec(t *testing.T) { name: "complex pod spec", rev: revision("bar", "foo", withContainers([]corev1.Container{{ - Name: servingContainerName, - Image: "busybox", + Name: servingContainerName, + Image: "busybox", + ReadinessProbe: withTCPReadinessProbe(v1.DefaultUserPort), }}), withContainerConcurrency(1), WithContainerStatuses([]v1.ContainerStatuses{{ @@ -883,24 +862,7 @@ func TestMakePodSpec(t *testing.T) { serving.ConfigurationLabelKey: "cfg", serving.ServiceLabelKey: "svc", } - revision.Spec.GetContainer().Command = []string{"/bin/bash"} - revision.Spec.GetContainer().Args = []string{"-c", "echo Hello world"} - container(revision.Spec.GetContainer(), - withTCPReadinessProbe(), - withEnvVar("FOO", "bar"), - withEnvVar("BAZ", "blah"), - ) - 
revision.Spec.GetContainer().Resources = corev1.ResourceRequirements{ - Requests: corev1.ResourceList{ - corev1.ResourceMemory: resource.MustParse("666Mi"), - corev1.ResourceCPU: resource.MustParse("666m"), - }, - Limits: corev1.ResourceList{ - corev1.ResourceMemory: resource.MustParse("888Mi"), - corev1.ResourceCPU: resource.MustParse("888m"), - }, - } - revision.Spec.GetContainer().TerminationMessagePolicy = corev1.TerminationMessageReadFile + container(revision.Spec.GetContainer(), updateContainer()) }, ), want: podSpec( @@ -909,33 +871,31 @@ func TestMakePodSpec(t *testing.T) { func(container *corev1.Container) { container.Image = "busybox@sha256:deadbeef" }, - withContainer(), + getContainer(), withEnvVar("K_CONFIGURATION", "cfg"), withEnvVar("K_SERVICE", "svc"), ), queueContainer( withEnvVar("CONTAINER_CONCURRENCY", "1"), withEnvVar("SERVING_SERVICE", "svc"), - ), - }), + )}), }, { name: "complex pod spec for multiple containers with container data to all containers", rev: revision("bar", "foo", - withContainers([]corev1.Container{ - { - Name: servingContainerName, - Image: "busybox", - Ports: []corev1.ContainerPort{{ - ContainerPort: 8080, - }}, - }, { - Name: sidecarContainerName, - Image: "ubuntu", - }, { - Name: "sidecar-container-2", - Image: "alpine", - }, - }), + withContainers([]corev1.Container{{ + Name: servingContainerName, + Image: "busybox", + Ports: []corev1.ContainerPort{{ + ContainerPort: v1.DefaultUserPort, + }}, + ReadinessProbe: withTCPReadinessProbe(v1.DefaultUserPort), + }, { + Name: sidecarContainerName, + Image: "ubuntu", + }, { + Name: "sidecar-container-2", + Image: "alpine", + }}), withContainerConcurrency(1), WithContainerStatuses([]v1.ContainerStatuses{{ ImageDigest: "busybox@sha256:deadbeef", @@ -949,29 +909,8 @@ func TestMakePodSpec(t *testing.T) { serving.ConfigurationLabelKey: "cfg", serving.ServiceLabelKey: "svc", } - // When there are multiple containers then only serving container will have readinessProbe. 
here - // container[0] acts as a serving container because of the provided containerPort so using withTCPReadinessProbe. - container(&revision.Spec.Containers[0], - withTCPReadinessProbe(), - ) for i := range revision.Spec.Containers { - revision.Spec.Containers[i].Command = []string{"/bin/bash"} - revision.Spec.Containers[i].Args = []string{"-c", "echo Hello world"} - container(&revision.Spec.Containers[i], - withEnvVar("FOO", "bar"), - withEnvVar("BAZ", "blah"), - ) - revision.Spec.Containers[i].Resources = corev1.ResourceRequirements{ - Requests: corev1.ResourceList{ - corev1.ResourceMemory: resource.MustParse("666Mi"), - corev1.ResourceCPU: resource.MustParse("666m"), - }, - Limits: corev1.ResourceList{ - corev1.ResourceMemory: resource.MustParse("888Mi"), - corev1.ResourceCPU: resource.MustParse("888m"), - }, - } - revision.Spec.Containers[i].TerminationMessagePolicy = corev1.TerminationMessageReadFile + container(&revision.Spec.Containers[i], updateContainer()) } }, ), @@ -981,7 +920,7 @@ func TestMakePodSpec(t *testing.T) { func(container *corev1.Container) { container.Image = "busybox@sha256:deadbeef" }, - withContainer(), + getContainer(), withEnvVar("K_CONFIGURATION", "cfg"), withEnvVar("K_SERVICE", "svc"), ), @@ -989,12 +928,12 @@ func TestMakePodSpec(t *testing.T) { func(container *corev1.Container) { container.Image = "ubuntu@sha256:deadbffe" }, - withContainer(), + getContainer(), withEnvVar("K_CONFIGURATION", "cfg"), withEnvVar("K_SERVICE", "svc"), ), sidecarContainer(sidecarContainerName2, - withContainer(), + getContainer(), func(container *corev1.Container) { container.Name = "sidecar-container-2" container.Image = "alpine@sha256:deadbfff" @@ -1010,17 +949,17 @@ func TestMakePodSpec(t *testing.T) { }, { name: "complex pod spec for multiple containers with container data only to serving containers", rev: revision("bar", "foo", - withContainers([]corev1.Container{ - { - Name: servingContainerName, - Image: "busybox", - Ports: 
[]corev1.ContainerPort{{ - ContainerPort: 8888, - }}, - }, { - Name: sidecarContainerName, - Image: "ubuntu", - }}), + withContainers([]corev1.Container{{ + Name: servingContainerName, + Image: "busybox", + Ports: []corev1.ContainerPort{{ + ContainerPort: 8888, + }}, + ReadinessProbe: withTCPReadinessProbe(v1.DefaultUserPort), + }, { + Name: sidecarContainerName, + Image: "ubuntu", + }}), withContainerConcurrency(1), WithContainerStatuses([]v1.ContainerStatuses{{ ImageDigest: "busybox@sha256:deadbeef", @@ -1032,24 +971,7 @@ func TestMakePodSpec(t *testing.T) { serving.ConfigurationLabelKey: "cfg", serving.ServiceLabelKey: "svc", } - revision.Spec.GetContainer().Command = []string{"/bin/bash"} - revision.Spec.GetContainer().Args = []string{"-c", "echo Hello world"} - container(revision.Spec.GetContainer(), - withTCPReadinessProbe(), - withEnvVar("FOO", "bar"), - withEnvVar("BAZ", "blah"), - ) - revision.Spec.GetContainer().Resources = corev1.ResourceRequirements{ - Requests: corev1.ResourceList{ - corev1.ResourceMemory: resource.MustParse("666Mi"), - corev1.ResourceCPU: resource.MustParse("666m"), - }, - Limits: corev1.ResourceList{ - corev1.ResourceMemory: resource.MustParse("888Mi"), - corev1.ResourceCPU: resource.MustParse("888m"), - }, - } - revision.Spec.GetContainer().TerminationMessagePolicy = corev1.TerminationMessageReadFile + container(revision.Spec.GetContainer(), updateContainer()) }, ), want: podSpec( @@ -1058,7 +980,7 @@ func TestMakePodSpec(t *testing.T) { func(container *corev1.Container) { container.Image = "busybox@sha256:deadbeef" }, - withContainer(), + getContainer(), func(container *corev1.Container) { container.Ports[0].ContainerPort = 8888 }, @@ -1114,18 +1036,17 @@ func TestMakeDeployment(t *testing.T) { }{{ name: "with concurrency=1", rev: revision("bar", "foo", - withContainers([]corev1.Container{ - { - Name: servingContainerName, - Image: "busybox", - Ports: []corev1.ContainerPort{{ - ContainerPort: 8888, - }}, - }, { - Name: 
sidecarContainerName, - Image: "ubuntu", - }, - }), + withContainers([]corev1.Container{{ + Name: servingContainerName, + Image: "busybox", + Ports: []corev1.ContainerPort{{ + ContainerPort: 8888, + }}, + ReadinessProbe: withTCPReadinessProbe(12345), + }, { + Name: sidecarContainerName, + Image: "ubuntu", + }}), withoutLabels, withContainerConcurrency(1), WithContainerStatuses([]v1.ContainerStatuses{{ @@ -1137,12 +1058,15 @@ func TestMakeDeployment(t *testing.T) { container(revision.Spec.GetContainer(), withTCPReadinessProbe()) }), want: appsv1deployment(), + ), + want: makeDeployment(), }, { name: "with owner", rev: revision("bar", "foo", withContainers([]corev1.Container{{ - Name: servingContainerName, - Image: "ubuntu", + Name: servingContainerName, + Image: "ubuntu", + ReadinessProbe: withTCPReadinessProbe(12345), }}), withoutLabels, withOwnerReference("parent-config"), @@ -1153,6 +1077,8 @@ func TestMakeDeployment(t *testing.T) { container(revision.Spec.GetContainer(), withTCPReadinessProbe()) }), want: appsv1deployment(), + ), + want: makeDeployment(), }, { name: "with sidecar annotation override", rev: revision("bar", "foo", withoutLabels, func(revision *v1.Revision) { @@ -1164,8 +1090,9 @@ func TestMakeDeployment(t *testing.T) { want: appsv1deployment(func(deploy *appsv1.Deployment) { rev: revision("bar", "foo", withContainers([]corev1.Container{{ - Name: servingContainerName, - Image: "ubuntu", + Name: servingContainerName, + Image: "ubuntu", + ReadinessProbe: withTCPReadinessProbe(12345), }}), WithContainerStatuses([]v1.ContainerStatuses{{ ImageDigest: "busybox@sha256:deadbeef", @@ -1174,18 +1101,10 @@ func TestMakeDeployment(t *testing.T) { revision.ObjectMeta.Annotations = map[string]string{ sidecarIstioInjectAnnotation: "false", } - container(revision.Spec.GetContainer(), - withReadinessProbe(corev1.Handler{ - TCPSocket: &corev1.TCPSocketAction{ - Host: "127.0.0.1", - Port: intstr.FromInt(12345), - }, - }), - ) }), want: makeDeployment(func(deploy 
*appsv1.Deployment) { - deploy.ObjectMeta.Annotations[sidecarIstioInjectAnnotation] = "false" - deploy.Spec.Template.ObjectMeta.Annotations[sidecarIstioInjectAnnotation] = "false" + deploy.Annotations[sidecarIstioInjectAnnotation] = "false" + deploy.Spec.Template.Annotations[sidecarIstioInjectAnnotation] = "false" }), }, { name: "with ProgressDeadline override", @@ -1223,34 +1142,23 @@ func TestMakeDeployment(t *testing.T) { func TestProgressDeadlineOverride(t *testing.T) { rev := revision("bar", "foo", - withContainers([]corev1.Container{ - { - Name: servingContainerName, - Image: "busybox", - Ports: []corev1.ContainerPort{{ - ContainerPort: 8888, - }}, - }, { - Name: sidecarContainerName, - Image: "ubuntu", - }, - }), + withContainers([]corev1.Container{{ + Name: servingContainerName, + Image: "busybox", + Ports: []corev1.ContainerPort{{ + ContainerPort: 8888, + }}, + ReadinessProbe: withTCPReadinessProbe(12345), + }, { + Name: sidecarContainerName, + Image: "ubuntu", + }}), withoutLabels, WithContainerStatuses([]v1.ContainerStatuses{{ ImageDigest: "busybox@sha256:deadbeef", }, { ImageDigest: "ubuntu@sha256:deadbffe", }}), - func(revision *v1.Revision) { - container(revision.Spec.GetContainer(), - withReadinessProbe(corev1.Handler{ - TCPSocket: &corev1.TCPSocketAction{ - Host: "127.0.0.1", - Port: intstr.FromInt(12345), - }, - }), - ) - }, ) want := makeDeployment(func(d *appsv1.Deployment) { d.Spec.ProgressDeadlineSeconds = ptr.Int32(42) diff --git a/pkg/reconciler/revision/resources/queue_test.go b/pkg/reconciler/revision/resources/queue_test.go index 334afc3297cd..2a5a674492a2 100644 --- a/pkg/reconciler/revision/resources/queue_test.go +++ b/pkg/reconciler/revision/resources/queue_test.go @@ -77,10 +77,14 @@ func TestMakeQueueContainer(t *testing.T) { }{{ name: "no owner no autoscaler single", rev: revision("bar", "foo", + withContainers([]corev1.Container{{ + ReadinessProbe: withTCPReadinessProbe(v1.DefaultUserPort), + }}), withContainerConcurrency(1), 
func(revision *v1.Revision) { container(revision.Spec.GetContainer(), withTCPReadinessProbe()) }), + ), want: queueContainer(func(c *corev1.Container) { c.Env = env(map[string]string{ "CONTAINER_CONCURRENCY": "1", @@ -89,6 +93,14 @@ func TestMakeQueueContainer(t *testing.T) { }, { name: "custom sidecar image, container port, protocol", rev: revision("bar", "foo", + withContainers([]corev1.Container{{ + Name: servingContainerName, + ReadinessProbe: testProbe, + Ports: []corev1.ContainerPort{{ + ContainerPort: 1955, + Name: string(networking.ProtocolH2C), + }}, + }}), withContainerConcurrency(1), func(revision *v1.Revision) { revision.Spec.PodSpec = corev1.PodSpec{ @@ -102,6 +114,7 @@ func TestMakeQueueContainer(t *testing.T) { }}, } }), + ), cc: deployment.Config{ QueueSidecarImage: "alpine", }, @@ -117,12 +130,14 @@ func TestMakeQueueContainer(t *testing.T) { }, { name: "service name in labels", rev: revision("bar", "foo", + withContainers([]corev1.Container{{ + ReadinessProbe: withTCPReadinessProbe(v1.DefaultUserPort), + }}), withContainerConcurrency(1), func(revision *v1.Revision) { revision.ObjectMeta.Labels = map[string]string{ serving.ServiceLabelKey: "svc", } - container(revision.Spec.GetContainer(), withTCPReadinessProbe()) }), want: queueContainer(func(c *corev1.Container) { c.Env = env(map[string]string{ @@ -132,6 +147,9 @@ func TestMakeQueueContainer(t *testing.T) { }, { name: "config owner as env var, zero concurrency", rev: revision("blah", "baz", + withContainers([]corev1.Container{{ + ReadinessProbe: withTCPReadinessProbe(v1.DefaultUserPort), + }}), withContainerConcurrency(0), func(revision *v1.Revision) { revision.ObjectMeta.OwnerReferences = []metav1.OwnerReference{{ @@ -141,7 +159,6 @@ func TestMakeQueueContainer(t *testing.T) { Controller: ptr.Bool(true), BlockOwnerDeletion: ptr.Bool(true), }} - container(revision.Spec.GetContainer(), withTCPReadinessProbe()) }), want: queueContainer(func(c *corev1.Container) { c.Env = env(map[string]string{ @@ 
-158,6 +175,10 @@ func TestMakeQueueContainer(t *testing.T) { func(revision *v1.Revision) { container(revision.Spec.GetContainer(), withTCPReadinessProbe()) }), + withContainers([]corev1.Container{{ + ReadinessProbe: withTCPReadinessProbe(v1.DefaultUserPort), + }}), + withContainerConcurrency(0)), lc: logging.Config{ LoggingConfig: "The logging configuration goes here", LoggingLevel: map[string]zapcore.Level{ @@ -179,6 +200,10 @@ func TestMakeQueueContainer(t *testing.T) { func(revision *v1.Revision) { container(revision.Spec.GetContainer(), withTCPReadinessProbe()) }), + withContainers([]corev1.Container{{ + ReadinessProbe: withTCPReadinessProbe(v1.DefaultUserPort), + }}), + withContainerConcurrency(10)), want: queueContainer(func(c *corev1.Container) { c.Env = env(map[string]string{ "CONTAINER_CONCURRENCY": "10", @@ -191,6 +216,10 @@ func TestMakeQueueContainer(t *testing.T) { func(revision *v1.Revision) { container(revision.Spec.GetContainer(), withTCPReadinessProbe()) }), + withContainers([]corev1.Container{{ + ReadinessProbe: withTCPReadinessProbe(v1.DefaultUserPort), + }}), + withContainerConcurrency(0)), oc: metrics.ObservabilityConfig{ RequestLogTemplate: "test template", EnableProbeRequestLog: true, @@ -208,6 +237,10 @@ func TestMakeQueueContainer(t *testing.T) { func(revision *v1.Revision) { container(revision.Spec.GetContainer(), withTCPReadinessProbe()) }), + withContainers([]corev1.Container{{ + ReadinessProbe: withTCPReadinessProbe(v1.DefaultUserPort), + }}), + withContainerConcurrency(0)), oc: metrics.ObservabilityConfig{ RequestMetricsBackend: "prometheus", }, @@ -223,6 +256,12 @@ func TestMakeQueueContainer(t *testing.T) { func(revision *v1.Revision) { container(revision.Spec.GetContainer(), withTCPReadinessProbe()) }), +======= + withContainers([]corev1.Container{{ + ReadinessProbe: withTCPReadinessProbe(v1.DefaultUserPort), + }}), + withContainerConcurrency(0)), +>>>>>>> Refactored testcase oc: metrics.ObservabilityConfig{EnableProfiling: true}, 
want: queueContainer(func(c *corev1.Container) { c.Env = env(map[string]string{ diff --git a/pkg/reconciler/revision/table_test.go b/pkg/reconciler/revision/table_test.go index 76f1a4a74622..e71e775118ba 100644 --- a/pkg/reconciler/revision/table_test.go +++ b/pkg/reconciler/revision/table_test.go @@ -61,6 +61,138 @@ func TestReconcile(t *testing.T) { Name: "key not found", // Make sure Reconcile handles good keys that don't exist. Key: "foo/not-found", + }, { + Name: "nop deletion reconcile", + // Test that with a DeletionTimestamp we do nothing. + Objects: []runtime.Object{ + rev("foo", "delete-pending", WithRevisionDeletionTimestamp), + }, + Key: "foo/delete-pending", + }, { + Name: "first revision reconciliation", + // Test the simplest successful reconciliation flow. + // We feed in a well formed Revision where none of its sub-resources exist, + // and we expect it to create them and initialize the Revision's status. + Objects: []runtime.Object{ + rev("foo", "first-reconcile", WithContainerStatuses([]v1.ContainerStatuses{ + {Name: "first-reconcile", ImageDigest: "busybox@sha256:deadbeef"}, + })), + }, + WantCreates: []runtime.Object{ + // The first reconciliation of a Revision creates the following resources. + pa("foo", "first-reconcile"), + deploy(t, "foo", "first-reconcile", WithContainerStatuses([]v1.ContainerStatuses{ + {Name: "first-reconcile", ImageDigest: "busybox@sha256:deadbeef"}, + })), + image("foo", "first-reconcile"), + }, + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: rev("foo", "first-reconcile", + // The first reconciliation Populates the following status properties. 
+ WithLogURL, AllUnknownConditions, MarkDeploying("Deploying"), WithContainerStatuses([]v1.ContainerStatuses{ + {Name: "first-reconcile", ImageDigest: "busybox@sha256:deadbeef"}, + })), + }}, + Key: "foo/first-reconcile", + }, { + Name: "failure updating revision status", + // This starts from the first reconciliation case above and induces a failure + // updating the revision status. + WantErr: true, + WithReactors: []clientgotesting.ReactionFunc{ + InduceFailure("update", "revisions"), + }, + Objects: []runtime.Object{ + rev("foo", "update-status-failure", WithContainerStatuses([]v1.ContainerStatuses{ + {Name: "update-status-failure", ImageDigest: "busybox@sha256:deadbeef"}, + })), + pa("foo", "update-status-failure"), + }, + WantCreates: []runtime.Object{ + // We still see the following creates before the failure is induced. + deploy(t, "foo", "update-status-failure", WithContainerStatuses([]v1.ContainerStatuses{ + {Name: "update-status-failure", ImageDigest: "busybox@sha256:deadbeef"}, + })), + image("foo", "update-status-failure"), + }, + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: rev("foo", "update-status-failure", + // Despite failure, the following status properties are set. + WithLogURL, AllUnknownConditions, MarkDeploying("Deploying"), WithContainerStatuses([]v1.ContainerStatuses{ + {Name: "update-status-failure", ImageDigest: "busybox@sha256:deadbeef"}, + })), + }}, + WantEvents: []string{ + Eventf(corev1.EventTypeWarning, "UpdateFailed", "Failed to update status for %q: %v", + "update-status-failure", "inducing failure for update revisions"), + }, + Key: "foo/update-status-failure", + }, { + Name: "failure creating pa", + // This starts from the first reconciliation case above and induces a failure + // creating the PA. 
+ WantErr: true, + WithReactors: []clientgotesting.ReactionFunc{ + InduceFailure("create", "podautoscalers"), + }, + Objects: []runtime.Object{ + rev("foo", "create-pa-failure", WithContainerStatuses([]v1.ContainerStatuses{ + {Name: "create-pa-failure", ImageDigest: "busybox@sha256:deadbeef"}, + })), + }, + WantCreates: []runtime.Object{ + // We still see the following creates before the failure is induced. + pa("foo", "create-pa-failure"), + deploy(t, "foo", "create-pa-failure", WithContainerStatuses([]v1.ContainerStatuses{ + {Name: "create-pa-failure", ImageDigest: "busybox@sha256:deadbeef"}, + })), + image("foo", "create-pa-failure"), + }, + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: rev("foo", "create-pa-failure", + // Despite failure, the following status properties are set. + WithLogURL, WithInitRevConditions, + MarkDeploying("Deploying"), WithContainerStatuses([]v1.ContainerStatuses{ + {Name: "create-pa-failure", ImageDigest: "busybox@sha256:deadbeef"}, + })), + }}, + WantEvents: []string{ + Eventf(corev1.EventTypeWarning, "InternalError", `failed to create PA "create-pa-failure": inducing failure for create podautoscalers`), + }, + Key: "foo/create-pa-failure", + }, { + Name: "failure creating user deployment", + // This starts from the first reconciliation case above and induces a failure + // creating the user's deployment + WantErr: true, + WithReactors: []clientgotesting.ReactionFunc{ + InduceFailure("create", "deployments"), + }, + Objects: []runtime.Object{ + rev("foo", "create-user-deploy-failure", WithContainerStatuses([]v1.ContainerStatuses{ + {Name: "create-user-deploy-failure", ImageDigest: "busybox@sha256:deadbeef"}, + })), + pa("foo", "create-user-deploy-failure"), + }, + WantCreates: []runtime.Object{ + // We still see the following creates before the failure is induced. 
+ deploy(t, "foo", "create-user-deploy-failure", WithContainerStatuses([]v1.ContainerStatuses{ + {Name: "create-user-deploy-failure", ImageDigest: "busybox@sha256:deadbeef"}, + })), + }, + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: rev("foo", "create-user-deploy-failure", + // Despite failure, the following status properties are set. + WithLogURL, WithInitRevConditions, + MarkDeploying("Deploying"), WithContainerStatuses([]v1.ContainerStatuses{ + {Name: "create-user-deploy-failure", ImageDigest: "busybox@sha256:deadbeef"}, + })), + }}, + WantEvents: []string{ + Eventf(corev1.EventTypeWarning, "InternalError", + `failed to create deployment "create-user-deploy-failure-deployment": inducing failure for create deployments`), + }, + Key: "foo/create-user-deploy-failure", }, { Name: "stable revision reconciliation", // Test a simple stable reconciliation of an Active Revision. @@ -70,14 +202,12 @@ func TestReconcile(t *testing.T) { Objects: []runtime.Object{ rev("foo", "stable-reconcile", WithLogURL, AllUnknownConditions, WithContainerStatuses([]v1.ContainerStatuses{ - {Name: "stable-reconcile", - ImageDigest: "busybox@sha256:deadbeef"}, + {Name: "stable-reconcile", ImageDigest: "busybox@sha256:deadbeef"}, })), pa("foo", "stable-reconcile", WithReachability(asv1a1.ReachabilityUnknown)), deploy(t, "foo", "stable-reconcile", WithContainerStatuses([]v1.ContainerStatuses{ - {Name: "stable-reconcile", - ImageDigest: "busybox@sha256:deadbeef"}, + {Name: "stable-reconcile", ImageDigest: "busybox@sha256:deadbeef"}, })), image("foo", "stable-reconcile"), }, @@ -90,20 +220,17 @@ func TestReconcile(t *testing.T) { Objects: []runtime.Object{ rev("foo", "fix-containers", WithLogURL, AllUnknownConditions, WithContainerStatuses([]v1.ContainerStatuses{ - {Name: "fix-containers", - ImageDigest: "busybox@sha256:deadbeef"}, + {Name: "fix-containers", ImageDigest: "busybox@sha256:deadbeef"}, })), pa("foo", "fix-containers", 
WithReachability(asv1a1.ReachabilityUnknown)), changeContainers(deploy(t, "foo", "fix-containers", WithContainerStatuses([]v1.ContainerStatuses{ - {Name: "fix-containers", - ImageDigest: "busybox@sha256:deadbeef"}, + {Name: "fix-containers", ImageDigest: "busybox@sha256:deadbeef"}, }))), image("foo", "fix-containers"), }, WantUpdates: []clientgotesting.UpdateActionImpl{{ Object: deploy(t, "foo", "fix-containers", WithContainerStatuses([]v1.ContainerStatuses{ - {Name: "fix-containers", - ImageDigest: "busybox@sha256:deadbeef"}, + {Name: "fix-containers", ImageDigest: "busybox@sha256:deadbeef"}, })), }}, Key: "foo/fix-containers", @@ -117,20 +244,17 @@ func TestReconcile(t *testing.T) { Objects: []runtime.Object{ rev("foo", "failure-update-deploy", withK8sServiceName("whateves"), WithLogURL, AllUnknownConditions, WithContainerStatuses([]v1.ContainerStatuses{ - {Name: "failure-update-deploy", - ImageDigest: "busybox@sha256:deadbeef"}, + {Name: "failure-update-deploy", ImageDigest: "busybox@sha256:deadbeef"}, })), pa("foo", "failure-update-deploy"), changeContainers(deploy(t, "foo", "failure-update-deploy", WithContainerStatuses([]v1.ContainerStatuses{ - {Name: "failure-update-deploy", - ImageDigest: "busybox@sha256:deadbeef"}, + {Name: "failure-update-deploy", ImageDigest: "busybox@sha256:deadbeef"}, }))), image("foo", "failure-update-deploy"), }, WantUpdates: []clientgotesting.UpdateActionImpl{{ Object: deploy(t, "foo", "failure-update-deploy", WithContainerStatuses([]v1.ContainerStatuses{ - {Name: "failure-update-deploy", - ImageDigest: "busybox@sha256:deadbeef"}, + {Name: "failure-update-deploy", ImageDigest: "busybox@sha256:deadbeef"}, })), }}, WantEvents: []string{ @@ -147,14 +271,12 @@ func TestReconcile(t *testing.T) { rev("foo", "stable-deactivation", WithLogURL, MarkRevisionReady, MarkInactive("NoTraffic", "This thing is inactive."), WithContainerStatuses([]v1.ContainerStatuses{ - {Name: "stable-deactivation", - ImageDigest: "busybox@sha256:deadbeef"}, + 
{Name: "stable-deactivation", ImageDigest: "busybox@sha256:deadbeef"}, })), pa("foo", "stable-deactivation", WithNoTraffic("NoTraffic", "This thing is inactive.")), deploy(t, "foo", "stable-deactivation", WithContainerStatuses([]v1.ContainerStatuses{ - {Name: "stable-deactivation", - ImageDigest: "busybox@sha256:deadbeef"}, + {Name: "stable-deactivation", ImageDigest: "busybox@sha256:deadbeef"}, })), image("foo", "stable-deactivation"), }, @@ -164,13 +286,11 @@ func TestReconcile(t *testing.T) { Objects: []runtime.Object{ rev("foo", "pa-ready", withK8sServiceName("old-stuff"), WithLogURL, AllUnknownConditions, WithContainerStatuses([]v1.ContainerStatuses{ - {Name: "pa-ready", - ImageDigest: "busybox@sha256:deadbeef"}, + {Name: "pa-ready", ImageDigest: "busybox@sha256:deadbeef"}, })), pa("foo", "pa-ready", WithTraffic, WithPAStatusService("new-stuff"), WithReachability(asv1a1.ReachabilityUnknown)), deploy(t, "foo", "pa-ready", WithContainerStatuses([]v1.ContainerStatuses{ - {Name: "pa-ready", - ImageDigest: "busybox@sha256:deadbeef"}, + {Name: "pa-ready", ImageDigest: "busybox@sha256:deadbeef"}, })), image("foo", "pa-ready"), }, @@ -180,8 +300,7 @@ func TestReconcile(t *testing.T) { // When the endpoint and pa are ready, then we will see the // Revision become ready. 
MarkRevisionReady, WithContainerStatuses([]v1.ContainerStatuses{ - {Name: "pa-ready", - ImageDigest: "busybox@sha256:deadbeef"}, + {Name: "pa-ready", ImageDigest: "busybox@sha256:deadbeef"}, })), }}, WantEvents: []string{ @@ -195,23 +314,20 @@ func TestReconcile(t *testing.T) { rev("foo", "pa-not-ready", withK8sServiceName("somebody-told-me"), WithLogURL, MarkRevisionReady, WithContainerStatuses([]v1.ContainerStatuses{ - {Name: "pa-not-ready", - ImageDigest: "busybox@sha256:deadbeef"}, + {Name: "pa-not-ready", ImageDigest: "busybox@sha256:deadbeef"}, })), pa("foo", "pa-not-ready", WithPAStatusService("its-not-confidential"), WithBufferedTraffic("Something", "This is something longer")), readyDeploy(deploy(t, "foo", "pa-not-ready", WithContainerStatuses([]v1.ContainerStatuses{ - {Name: "pa-not-ready", - ImageDigest: "busybox@sha256:deadbeef"}, + {Name: "pa-not-ready", ImageDigest: "busybox@sha256:deadbeef"}, }))), image("foo", "pa-not-ready"), }, WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ Object: rev("foo", "pa-not-ready", WithLogURL, MarkRevisionReady, WithContainerStatuses([]v1.ContainerStatuses{ - {Name: "pa-not-ready", - ImageDigest: "busybox@sha256:deadbeef"}, + {Name: "pa-not-ready", ImageDigest: "busybox@sha256:deadbeef"}, }), withK8sServiceName("its-not-confidential"), // When we reconcile a ready state and our pa is in an activating @@ -226,22 +342,19 @@ func TestReconcile(t *testing.T) { Objects: []runtime.Object{ rev("foo", "pa-inactive", withK8sServiceName("something-in-the-way"), WithLogURL, MarkRevisionReady, WithContainerStatuses([]v1.ContainerStatuses{ - {Name: "pa-inactive", - ImageDigest: "busybox@sha256:deadbeef"}, + {Name: "pa-inactive", ImageDigest: "busybox@sha256:deadbeef"}, })), pa("foo", "pa-inactive", WithNoTraffic("NoTraffic", "This thing is inactive.")), readyDeploy(deploy(t, "foo", "pa-inactive", WithContainerStatuses([]v1.ContainerStatuses{ - {Name: "pa-inactive", - ImageDigest: "busybox@sha256:deadbeef"}, + {Name: 
"pa-inactive", ImageDigest: "busybox@sha256:deadbeef"}, }))), image("foo", "pa-inactive"), }, WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ Object: rev("foo", "pa-inactive", WithLogURL, MarkRevisionReady, WithContainerStatuses([]v1.ContainerStatuses{ - {Name: "pa-inactive", - ImageDigest: "busybox@sha256:deadbeef"}, + {Name: "pa-inactive", ImageDigest: "busybox@sha256:deadbeef"}, }), // When we reconcile an "all ready" revision when the PA // is inactive, we should see the following change. @@ -254,16 +367,14 @@ func TestReconcile(t *testing.T) { // But propagate the service name. Objects: []runtime.Object{ rev("foo", "pa-inactive", WithContainerStatuses([]v1.ContainerStatuses{ - {Name: "pa-inactive", - ImageDigest: "busybox@sha256:deadbeef"}, + {Name: "pa-inactive", ImageDigest: "busybox@sha256:deadbeef"}, }), withK8sServiceName("here-comes-the-sun"), WithLogURL, MarkRevisionReady), pa("foo", "pa-inactive", WithNoTraffic("NoTraffic", "This thing is inactive."), WithPAStatusService("pa-inactive-svc")), readyDeploy(deploy(t, "foo", "pa-inactive", WithContainerStatuses([]v1.ContainerStatuses{ - {Name: "pa-inactive", - ImageDigest: "busybox@sha256:deadbeef"}, + {Name: "pa-inactive", ImageDigest: "busybox@sha256:deadbeef"}, }))), image("foo", "pa-inactive"), }, @@ -274,8 +385,7 @@ func TestReconcile(t *testing.T) { // When we reconcile an "all ready" revision when the PA // is inactive, we should see the following change. 
MarkInactive("NoTraffic", "This thing is inactive."), WithContainerStatuses([]v1.ContainerStatuses{ - {Name: "pa-inactive", - ImageDigest: "busybox@sha256:deadbeef"}, + {Name: "pa-inactive", ImageDigest: "busybox@sha256:deadbeef"}, })), }}, Key: "foo/pa-inactive", @@ -287,14 +397,12 @@ func TestReconcile(t *testing.T) { Objects: []runtime.Object{ rev("foo", "fix-mutated-pa", withK8sServiceName("ill-follow-the-sun"), WithLogURL, MarkRevisionReady, WithContainerStatuses([]v1.ContainerStatuses{ - {Name: "fix-mutated-pa", - ImageDigest: "busybox@sha256:deadbeef"}, + {Name: "fix-mutated-pa", ImageDigest: "busybox@sha256:deadbeef"}, })), pa("foo", "fix-mutated-pa", WithProtocolType(networking.ProtocolH2C), WithTraffic, WithPAStatusService("fix-mutated-pa")), deploy(t, "foo", "fix-mutated-pa", WithContainerStatuses([]v1.ContainerStatuses{ - {Name: "fix-mutated-pa", - ImageDigest: "busybox@sha256:deadbeef"}, + {Name: "fix-mutated-pa", ImageDigest: "busybox@sha256:deadbeef"}, })), image("foo", "fix-mutated-pa"), }, @@ -304,8 +412,7 @@ func TestReconcile(t *testing.T) { // When our reconciliation has to change the service // we should see the following mutations to status. 
withK8sServiceName("fix-mutated-pa"), WithLogURL, MarkRevisionReady, WithContainerStatuses([]v1.ContainerStatuses{ - {Name: "fix-mutated-pa", - ImageDigest: "busybox@sha256:deadbeef"}, + {Name: "fix-mutated-pa", ImageDigest: "busybox@sha256:deadbeef"}, })), }}, WantUpdates: []clientgotesting.UpdateActionImpl{{ @@ -320,13 +427,11 @@ func TestReconcile(t *testing.T) { rev("foo", "fix-mutated-pa-fail", withK8sServiceName("some-old-stuff"), WithLogURL, AllUnknownConditions, WithContainerStatuses([]v1.ContainerStatuses{ - {Name: "fix-mutated-pa-fail", - ImageDigest: "busybox@sha256:deadbeef"}, + {Name: "fix-mutated-pa-fail", ImageDigest: "busybox@sha256:deadbeef"}, })), pa("foo", "fix-mutated-pa-fail", WithProtocolType(networking.ProtocolH2C), WithReachability(asv1a1.ReachabilityUnknown)), deploy(t, "foo", "fix-mutated-pa-fail", WithContainerStatuses([]v1.ContainerStatuses{ - {Name: "fix-mutated-pa-fail", - ImageDigest: "busybox@sha256:deadbeef"}, + {Name: "fix-mutated-pa-fail", ImageDigest: "busybox@sha256:deadbeef"}, })), image("foo", "fix-mutated-pa-fail"), }, @@ -351,13 +456,11 @@ func TestReconcile(t *testing.T) { Objects: []runtime.Object{ rev("foo", "deploy-timeout", withK8sServiceName("the-taxman"), WithLogURL, MarkActive, WithContainerStatuses([]v1.ContainerStatuses{ - {Name: "deploy-timeout", - ImageDigest: "busybox@sha256:deadbeef"}, + {Name: "deploy-timeout", ImageDigest: "busybox@sha256:deadbeef"}, })), pa("foo", "deploy-timeout"), // pa can't be ready since deployment times out. timeoutDeploy(deploy(t, "foo", "deploy-timeout", WithContainerStatuses([]v1.ContainerStatuses{ - {Name: "deploy-timeout", - ImageDigest: "busybox@sha256:deadbeef"}, + {Name: "deploy-timeout", ImageDigest: "busybox@sha256:deadbeef"}, })), "I timed out!"), image("foo", "deploy-timeout"), }, @@ -367,8 +470,7 @@ func TestReconcile(t *testing.T) { // When the revision is reconciled after a Deployment has // timed out, we should see it marked with the PDE state. 
MarkProgressDeadlineExceeded("I timed out!"), WithContainerStatuses([]v1.ContainerStatuses{ - {Name: "deploy-timeout", - ImageDigest: "busybox@sha256:deadbeef"}, + {Name: "deploy-timeout", ImageDigest: "busybox@sha256:deadbeef"}, })), }}, Key: "foo/deploy-timeout", @@ -381,13 +483,11 @@ func TestReconcile(t *testing.T) { Objects: []runtime.Object{ rev("foo", "deploy-replica-failure", withK8sServiceName("the-taxman"), WithLogURL, MarkActive, WithContainerStatuses([]v1.ContainerStatuses{ - {Name: "deploy-replica-failure", - ImageDigest: "busybox@sha256:deadbeef"}, + {Name: "deploy-replica-failure", ImageDigest: "busybox@sha256:deadbeef"}, })), pa("foo", "deploy-replica-failure"), replicaFailureDeploy(deploy(t, "foo", "deploy-replica-failure", WithContainerStatuses([]v1.ContainerStatuses{ - {Name: "deploy-replica-failure", - ImageDigest: "busybox@sha256:deadbeef"}, + {Name: "deploy-replica-failure", ImageDigest: "busybox@sha256:deadbeef"}, })), "I replica failed!"), image("foo", "deploy-replica-failure"), }, @@ -397,8 +497,7 @@ func TestReconcile(t *testing.T) { // When the revision is reconciled after a Deployment has // timed out, we should see it marked with the FailedCreate state. 
MarkResourcesUnavailable("FailedCreate", "I replica failed!"), WithContainerStatuses([]v1.ContainerStatuses{ - {Name: "deploy-replica-failure", - ImageDigest: "busybox@sha256:deadbeef"}, + {Name: "deploy-replica-failure", ImageDigest: "busybox@sha256:deadbeef"}, })), }}, Key: "foo/deploy-replica-failure", @@ -408,14 +507,12 @@ func TestReconcile(t *testing.T) { Objects: []runtime.Object{ rev("foo", "pull-backoff", withK8sServiceName("the-taxman"), WithLogURL, WithContainerStatuses([]v1.ContainerStatuses{ - {Name: "pull-backoff", - ImageDigest: "busybox@sha256:deadbeef"}, + {Name: "pull-backoff", ImageDigest: "busybox@sha256:deadbeef"}, }), MarkActivating("Deploying", "")), pa("foo", "pull-backoff", WithReachability(asv1a1.ReachabilityUnknown)), // pa can't be ready since deployment times out. pod(t, "foo", "pull-backoff", WithWaitingContainer("pull-backoff", "ImagePullBackoff", "can't pull it")), timeoutDeploy(deploy(t, "foo", "pull-backoff", WithContainerStatuses([]v1.ContainerStatuses{ - {Name: "pull-backoff", - ImageDigest: "busybox@sha256:deadbeef"}, + {Name: "pull-backoff", ImageDigest: "busybox@sha256:deadbeef"}, })), "Timed out!"), image("foo", "pull-backoff"), }, @@ -423,8 +520,7 @@ func TestReconcile(t *testing.T) { Object: rev("foo", "pull-backoff", WithLogURL, AllUnknownConditions, MarkResourcesUnavailable("ImagePullBackoff", "can't pull it"), WithContainerStatuses([]v1.ContainerStatuses{ - {Name: "pull-backoff", - ImageDigest: "busybox@sha256:deadbeef"}, + {Name: "pull-backoff", ImageDigest: "busybox@sha256:deadbeef"}, })), }}, WantUpdates: []clientgotesting.UpdateActionImpl{{ @@ -440,14 +536,12 @@ func TestReconcile(t *testing.T) { Objects: []runtime.Object{ rev("foo", "pod-error", withK8sServiceName("a-pod-error"), WithLogURL, AllUnknownConditions, MarkActive, WithContainerStatuses([]v1.ContainerStatuses{ - {Name: "pod-error", - ImageDigest: "busybox@sha256:deadbeef"}, + {Name: "pod-error", ImageDigest: "busybox@sha256:deadbeef"}, })), pa("foo", 
"pod-error"), // PA can't be ready, since no traffic. pod(t, "foo", "pod-error", WithFailingContainer("pod-error", 5, "I failed man!")), deploy(t, "foo", "pod-error", WithContainerStatuses([]v1.ContainerStatuses{ - {Name: "pod-error", - ImageDigest: "busybox@sha256:deadbeef"}, + {Name: "pod-error", ImageDigest: "busybox@sha256:deadbeef"}, })), image("foo", "pod-error"), }, @@ -455,8 +549,7 @@ func TestReconcile(t *testing.T) { Object: rev("foo", "pod-error", WithLogURL, AllUnknownConditions, MarkContainerExiting(5, v1.RevisionContainerExitingMessage("I failed man!")), WithContainerStatuses([]v1.ContainerStatuses{ - {Name: "pod-error", - ImageDigest: "busybox@sha256:deadbeef"}, + {Name: "pod-error", ImageDigest: "busybox@sha256:deadbeef"}, })), }}, Key: "foo/pod-error", @@ -468,14 +561,12 @@ func TestReconcile(t *testing.T) { Objects: []runtime.Object{ rev("foo", "pod-schedule-error", withK8sServiceName("a-pod-schedule-error"), WithLogURL, AllUnknownConditions, MarkActive, WithContainerStatuses([]v1.ContainerStatuses{ - {Name: "pod-schedule-error", - ImageDigest: "busybox@sha256:deadbeef"}, + {Name: "pod-schedule-error", ImageDigest: "busybox@sha256:deadbeef"}, })), pa("foo", "pod-schedule-error"), // PA can't be ready, since no traffic. 
pod(t, "foo", "pod-schedule-error", WithUnschedulableContainer("Insufficient energy", "Unschedulable")), deploy(t, "foo", "pod-schedule-error", WithContainerStatuses([]v1.ContainerStatuses{ - {Name: "pod-schedule-error", - ImageDigest: "busybox@sha256:deadbeef"}, + {Name: "pod-schedule-error", ImageDigest: "busybox@sha256:deadbeef"}, })), image("foo", "pod-schedule-error"), }, @@ -483,8 +574,7 @@ func TestReconcile(t *testing.T) { Object: rev("foo", "pod-schedule-error", WithLogURL, AllUnknownConditions, MarkResourcesUnavailable("Insufficient energy", "Unschedulable"), WithContainerStatuses([]v1.ContainerStatuses{ - {Name: "pod-schedule-error", - ImageDigest: "busybox@sha256:deadbeef"}, + {Name: "pod-schedule-error", ImageDigest: "busybox@sha256:deadbeef"}, })), }}, Key: "foo/pod-schedule-error", @@ -498,13 +588,11 @@ func TestReconcile(t *testing.T) { Objects: []runtime.Object{ rev("foo", "steady-ready", withK8sServiceName("very-steady"), WithLogURL, WithContainerStatuses([]v1.ContainerStatuses{ - {Name: "steady-ready", - ImageDigest: "busybox@sha256:deadbeef"}, + {Name: "steady-ready", ImageDigest: "busybox@sha256:deadbeef"}, })), pa("foo", "steady-ready", WithTraffic, WithPAStatusService("steadier-even")), deploy(t, "foo", "steady-ready", WithContainerStatuses([]v1.ContainerStatuses{ - {Name: "steady-ready", - ImageDigest: "busybox@sha256:deadbeef"}, + {Name: "steady-ready", ImageDigest: "busybox@sha256:deadbeef"}, })), image("foo", "steady-ready"), }, @@ -513,8 +601,7 @@ func TestReconcile(t *testing.T) { // All resources are ready to go, we should see the revision being // marked ready MarkRevisionReady, WithContainerStatuses([]v1.ContainerStatuses{ - {Name: "steady-ready", - ImageDigest: "busybox@sha256:deadbeef"}, + {Name: "steady-ready", ImageDigest: "busybox@sha256:deadbeef"}, })), }}, WantEvents: []string{ @@ -527,13 +614,11 @@ func TestReconcile(t *testing.T) { Objects: []runtime.Object{ rev("foo", "missing-owners", withK8sServiceName("lesser-revision"), 
WithLogURL, MarkRevisionReady, WithContainerStatuses([]v1.ContainerStatuses{ - {Name: "missing-owners", - ImageDigest: "busybox@sha256:deadbeef"}, + {Name: "missing-owners", ImageDigest: "busybox@sha256:deadbeef"}, })), pa("foo", "missing-owners", WithTraffic, WithPodAutoscalerOwnersRemoved), deploy(t, "foo", "missing-owners", WithContainerStatuses([]v1.ContainerStatuses{ - {Name: "missing-owners", - ImageDigest: "busybox@sha256:deadbeef"}, + {Name: "missing-owners", ImageDigest: "busybox@sha256:deadbeef"}, })), image("foo", "missing-owners"), }, @@ -542,8 +627,7 @@ func TestReconcile(t *testing.T) { MarkRevisionReady, // When we're missing the OwnerRef for PodAutoscaler we see this update. MarkResourceNotOwned("PodAutoscaler", "missing-owners"), WithContainerStatuses([]v1.ContainerStatuses{ - {Name: "missing-owners", - ImageDigest: "busybox@sha256:deadbeef"}, + {Name: "missing-owners", ImageDigest: "busybox@sha256:deadbeef"}, })), }}, WantEvents: []string{ @@ -556,13 +640,11 @@ func TestReconcile(t *testing.T) { Objects: []runtime.Object{ rev("foo", "missing-owners", withK8sServiceName("youre-gonna-lose"), WithLogURL, MarkRevisionReady, WithContainerStatuses([]v1.ContainerStatuses{ - {Name: "missing-owners", - ImageDigest: "busybox@sha256:deadbeef"}, + {Name: "missing-owners", ImageDigest: "busybox@sha256:deadbeef"}, })), pa("foo", "missing-owners", WithTraffic), noOwner(deploy(t, "foo", "missing-owners", WithContainerStatuses([]v1.ContainerStatuses{ - {Name: "missing-owners", - ImageDigest: "busybox@sha256:deadbeef"}, + {Name: "missing-owners", ImageDigest: "busybox@sha256:deadbeef"}, }))), image("foo", "missing-owners"), }, @@ -571,8 +653,7 @@ func TestReconcile(t *testing.T) { MarkRevisionReady, // When we're missing the OwnerRef for Deployment we see this update. 
MarkResourceNotOwned("Deployment", "missing-owners-deployment"), WithContainerStatuses([]v1.ContainerStatuses{ - {Name: "missing-owners", - ImageDigest: "busybox@sha256:deadbeef"}, + {Name: "missing-owners", ImageDigest: "busybox@sha256:deadbeef"}, })), }}, WantEvents: []string{ @@ -585,15 +666,13 @@ func TestReconcile(t *testing.T) { Objects: []runtime.Object{ rev("foo", "image-pull-secrets", WithImagePullSecrets("foo-secret"), WithContainerStatuses([]v1.ContainerStatuses{ - {Name: "image-pull-secrets", - ImageDigest: "busybox@sha256:deadbeef"}, + {Name: "image-pull-secrets", ImageDigest: "busybox@sha256:deadbeef"}, })), }, WantCreates: []runtime.Object{ pa("foo", "image-pull-secrets"), deployImagePullSecrets(deploy(t, "foo", "image-pull-secrets", WithContainerStatuses([]v1.ContainerStatuses{ - {Name: "image-pull-secrets", - ImageDigest: "busybox@sha256:deadbeef"}, + {Name: "image-pull-secrets", ImageDigest: "busybox@sha256:deadbeef"}, })), "foo-secret"), imagePullSecrets(image("foo", "image-pull-secrets"), "foo-secret"), }, @@ -601,8 +680,7 @@ func TestReconcile(t *testing.T) { Object: rev("foo", "image-pull-secrets", WithImagePullSecrets("foo-secret"), WithLogURL, AllUnknownConditions, MarkDeploying("Deploying"), WithContainerStatuses([]v1.ContainerStatuses{ - {Name: "image-pull-secrets", - ImageDigest: "busybox@sha256:deadbeef"}, + {Name: "image-pull-secrets", ImageDigest: "busybox@sha256:deadbeef"}, })), }}, Key: "foo/image-pull-secrets", From 29615e3d4a973264f1a3f00eb7b80e226e35063e Mon Sep 17 00:00:00 2001 From: savitaashture Date: Fri, 15 May 2020 00:49:03 +0530 Subject: [PATCH 09/15] Add testcase for missing scenario --- .../serving/v1alpha1/revision_lifecycle_test.go | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/pkg/apis/serving/v1alpha1/revision_lifecycle_test.go b/pkg/apis/serving/v1alpha1/revision_lifecycle_test.go index a35ab74572ac..d42db94a4e5b 100644 --- a/pkg/apis/serving/v1alpha1/revision_lifecycle_test.go +++ 
b/pkg/apis/serving/v1alpha1/revision_lifecycle_test.go @@ -758,6 +758,22 @@ func TestGetContainer(t *testing.T) { Name: "deprecatedContainer", Image: "foo", }, + }, { + name: "get container info", + status: RevisionSpec{ + RevisionSpec: v1.RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Name: "servingContainer", + Image: "firstImage", + }}, + }, + }, + }, + want: &corev1.Container{ + Name: "servingContainer", + Image: "firstImage", + }, }, { name: "get serving container info even if there are multiple containers", status: RevisionSpec{ From 47dafd15e22f6c2c1e0729b10fa8f522c1d1fd6d Mon Sep 17 00:00:00 2001 From: savitaashture Date: Fri, 15 May 2020 10:53:19 +0530 Subject: [PATCH 10/15] reverted back to use DeepCopy for containers --- pkg/reconciler/revision/resources/deploy.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/pkg/reconciler/revision/resources/deploy.go b/pkg/reconciler/revision/resources/deploy.go index 5b679f31a217..5c82984ed88a 100644 --- a/pkg/reconciler/revision/resources/deploy.go +++ b/pkg/reconciler/revision/resources/deploy.go @@ -135,15 +135,15 @@ func makePodSpec(rev *v1.Revision, loggingConfig *logging.Config, tracingConfig return podSpec, nil } -// BuildUserContainers makes a container from the Revision template. +// BuildUserContainers makes an array of containers from the Revision template. 
func BuildUserContainers(rev *v1.Revision) []corev1.Container { containers := make([]corev1.Container, 0, len(rev.Spec.PodSpec.Containers)) for i := range rev.Spec.PodSpec.Containers { var container corev1.Container if len(rev.Spec.PodSpec.Containers[i].Ports) != 0 || len(rev.Spec.PodSpec.Containers) == 1 { - container = makeServingContainer(rev.Spec.PodSpec.Containers[i], rev) + container = makeServingContainer(*rev.Spec.PodSpec.Containers[i].DeepCopy(), rev) } else { - container = makeContainer(rev.Spec.PodSpec.Containers[i], rev) + container = makeContainer(*rev.Spec.PodSpec.Containers[i].DeepCopy(), rev) } if rev.Status.ContainerStatuses != nil { container.Image = rev.Status.ContainerStatuses[i].ImageDigest From 9ab79ea1570d59f8676eb588b7901133d0b2838b Mon Sep 17 00:00:00 2001 From: savitaashture Date: Mon, 18 May 2020 21:34:27 +0530 Subject: [PATCH 11/15] fix image override when digest value is empty --- pkg/reconciler/revision/resources/deploy.go | 5 +- pkg/reconciler/revision/table_test.go | 284 ++++++-------------- 2 files changed, 87 insertions(+), 202 deletions(-) diff --git a/pkg/reconciler/revision/resources/deploy.go b/pkg/reconciler/revision/resources/deploy.go index 5c82984ed88a..9a361141cc77 100644 --- a/pkg/reconciler/revision/resources/deploy.go +++ b/pkg/reconciler/revision/resources/deploy.go @@ -146,7 +146,10 @@ func BuildUserContainers(rev *v1.Revision) []corev1.Container { container = makeContainer(*rev.Spec.PodSpec.Containers[i].DeepCopy(), rev) } if rev.Status.ContainerStatuses != nil { - container.Image = rev.Status.ContainerStatuses[i].ImageDigest + // Override container image with digest value if the ContainerStatuses contains non empty imageDigest value. 
+ if rev.Status.ContainerStatuses[i].ImageDigest != "" { + container.Image = rev.Status.ContainerStatuses[i].ImageDigest + } } containers = append(containers, container) } diff --git a/pkg/reconciler/revision/table_test.go b/pkg/reconciler/revision/table_test.go index e71e775118ba..dc1cde528ce1 100644 --- a/pkg/reconciler/revision/table_test.go +++ b/pkg/reconciler/revision/table_test.go @@ -74,24 +74,19 @@ func TestReconcile(t *testing.T) { // We feed in a well formed Revision where none of its sub-resources exist, // and we expect it to create them and initialize the Revision's status. Objects: []runtime.Object{ - rev("foo", "first-reconcile", WithContainerStatuses([]v1.ContainerStatuses{ - {Name: "first-reconcile", ImageDigest: "busybox@sha256:deadbeef"}, - })), + rev("foo", "first-reconcile"), }, WantCreates: []runtime.Object{ // The first reconciliation of a Revision creates the following resources. pa("foo", "first-reconcile"), - deploy(t, "foo", "first-reconcile", WithContainerStatuses([]v1.ContainerStatuses{ - {Name: "first-reconcile", ImageDigest: "busybox@sha256:deadbeef"}, - })), + deploy(t, "foo", "first-reconcile"), image("foo", "first-reconcile"), }, WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ Object: rev("foo", "first-reconcile", // The first reconciliation Populates the following status properties. 
- WithLogURL, AllUnknownConditions, MarkDeploying("Deploying"), WithContainerStatuses([]v1.ContainerStatuses{ - {Name: "first-reconcile", ImageDigest: "busybox@sha256:deadbeef"}, - })), + WithLogURL, AllUnknownConditions, MarkDeploying("Deploying"), + withDefaultContainerStatuses("first-reconcile")), }}, Key: "foo/first-reconcile", }, { @@ -103,24 +98,19 @@ func TestReconcile(t *testing.T) { InduceFailure("update", "revisions"), }, Objects: []runtime.Object{ - rev("foo", "update-status-failure", WithContainerStatuses([]v1.ContainerStatuses{ - {Name: "update-status-failure", ImageDigest: "busybox@sha256:deadbeef"}, - })), + rev("foo", "update-status-failure"), pa("foo", "update-status-failure"), }, WantCreates: []runtime.Object{ // We still see the following creates before the failure is induced. - deploy(t, "foo", "update-status-failure", WithContainerStatuses([]v1.ContainerStatuses{ - {Name: "update-status-failure", ImageDigest: "busybox@sha256:deadbeef"}, - })), + deploy(t, "foo", "update-status-failure"), image("foo", "update-status-failure"), }, WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ Object: rev("foo", "update-status-failure", // Despite failure, the following status properties are set. 
- WithLogURL, AllUnknownConditions, MarkDeploying("Deploying"), WithContainerStatuses([]v1.ContainerStatuses{ - {Name: "update-status-failure", ImageDigest: "busybox@sha256:deadbeef"}, - })), + WithLogURL, AllUnknownConditions, MarkDeploying("Deploying"), + withDefaultContainerStatuses("update-status-failure")), }}, WantEvents: []string{ Eventf(corev1.EventTypeWarning, "UpdateFailed", "Failed to update status for %q: %v", @@ -136,25 +126,19 @@ func TestReconcile(t *testing.T) { InduceFailure("create", "podautoscalers"), }, Objects: []runtime.Object{ - rev("foo", "create-pa-failure", WithContainerStatuses([]v1.ContainerStatuses{ - {Name: "create-pa-failure", ImageDigest: "busybox@sha256:deadbeef"}, - })), + rev("foo", "create-pa-failure"), }, WantCreates: []runtime.Object{ // We still see the following creates before the failure is induced. pa("foo", "create-pa-failure"), - deploy(t, "foo", "create-pa-failure", WithContainerStatuses([]v1.ContainerStatuses{ - {Name: "create-pa-failure", ImageDigest: "busybox@sha256:deadbeef"}, - })), + deploy(t, "foo", "create-pa-failure"), image("foo", "create-pa-failure"), }, WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ Object: rev("foo", "create-pa-failure", // Despite failure, the following status properties are set. 
WithLogURL, WithInitRevConditions, - MarkDeploying("Deploying"), WithContainerStatuses([]v1.ContainerStatuses{ - {Name: "create-pa-failure", ImageDigest: "busybox@sha256:deadbeef"}, - })), + MarkDeploying("Deploying"), withDefaultContainerStatuses("create-pa-failure")), }}, WantEvents: []string{ Eventf(corev1.EventTypeWarning, "InternalError", `failed to create PA "create-pa-failure": inducing failure for create podautoscalers`), @@ -169,24 +153,18 @@ func TestReconcile(t *testing.T) { InduceFailure("create", "deployments"), }, Objects: []runtime.Object{ - rev("foo", "create-user-deploy-failure", WithContainerStatuses([]v1.ContainerStatuses{ - {Name: "create-user-deploy-failure", ImageDigest: "busybox@sha256:deadbeef"}, - })), + rev("foo", "create-user-deploy-failure"), pa("foo", "create-user-deploy-failure"), }, WantCreates: []runtime.Object{ // We still see the following creates before the failure is induced. - deploy(t, "foo", "create-user-deploy-failure", WithContainerStatuses([]v1.ContainerStatuses{ - {Name: "create-user-deploy-failure", ImageDigest: "busybox@sha256:deadbeef"}, - })), + deploy(t, "foo", "create-user-deploy-failure"), }, WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ Object: rev("foo", "create-user-deploy-failure", // Despite failure, the following status properties are set. WithLogURL, WithInitRevConditions, - MarkDeploying("Deploying"), WithContainerStatuses([]v1.ContainerStatuses{ - {Name: "create-user-deploy-failure", ImageDigest: "busybox@sha256:deadbeef"}, - })), + MarkDeploying("Deploying"), withDefaultContainerStatuses("create-user-deploy-failure")), }}, WantEvents: []string{ Eventf(corev1.EventTypeWarning, "InternalError", @@ -201,14 +179,10 @@ func TestReconcile(t *testing.T) { // are necessary. 
Objects: []runtime.Object{ rev("foo", "stable-reconcile", WithLogURL, AllUnknownConditions, - WithContainerStatuses([]v1.ContainerStatuses{ - {Name: "stable-reconcile", ImageDigest: "busybox@sha256:deadbeef"}, - })), + withDefaultContainerStatuses("stable-reconcile")), pa("foo", "stable-reconcile", WithReachability(asv1a1.ReachabilityUnknown)), - deploy(t, "foo", "stable-reconcile", WithContainerStatuses([]v1.ContainerStatuses{ - {Name: "stable-reconcile", ImageDigest: "busybox@sha256:deadbeef"}, - })), + deploy(t, "foo", "stable-reconcile"), image("foo", "stable-reconcile"), }, // No changes are made to any objects. @@ -219,19 +193,13 @@ func TestReconcile(t *testing.T) { // with our desired spec. Objects: []runtime.Object{ rev("foo", "fix-containers", - WithLogURL, AllUnknownConditions, WithContainerStatuses([]v1.ContainerStatuses{ - {Name: "fix-containers", ImageDigest: "busybox@sha256:deadbeef"}, - })), + WithLogURL, AllUnknownConditions, withDefaultContainerStatuses("fix-containers")), pa("foo", "fix-containers", WithReachability(asv1a1.ReachabilityUnknown)), - changeContainers(deploy(t, "foo", "fix-containers", WithContainerStatuses([]v1.ContainerStatuses{ - {Name: "fix-containers", ImageDigest: "busybox@sha256:deadbeef"}, - }))), + changeContainers(deploy(t, "foo", "fix-containers")), image("foo", "fix-containers"), }, WantUpdates: []clientgotesting.UpdateActionImpl{{ - Object: deploy(t, "foo", "fix-containers", WithContainerStatuses([]v1.ContainerStatuses{ - {Name: "fix-containers", ImageDigest: "busybox@sha256:deadbeef"}, - })), + Object: deploy(t, "foo", "fix-containers"), }}, Key: "foo/fix-containers", }, { @@ -243,19 +211,14 @@ func TestReconcile(t *testing.T) { }, Objects: []runtime.Object{ rev("foo", "failure-update-deploy", - withK8sServiceName("whateves"), WithLogURL, AllUnknownConditions, WithContainerStatuses([]v1.ContainerStatuses{ - {Name: "failure-update-deploy", ImageDigest: "busybox@sha256:deadbeef"}, - })), + withK8sServiceName("whateves"), 
WithLogURL, AllUnknownConditions, + withDefaultContainerStatuses("failure-update-deploy")), pa("foo", "failure-update-deploy"), - changeContainers(deploy(t, "foo", "failure-update-deploy", WithContainerStatuses([]v1.ContainerStatuses{ - {Name: "failure-update-deploy", ImageDigest: "busybox@sha256:deadbeef"}, - }))), + changeContainers(deploy(t, "foo", "failure-update-deploy")), image("foo", "failure-update-deploy"), }, WantUpdates: []clientgotesting.UpdateActionImpl{{ - Object: deploy(t, "foo", "failure-update-deploy", WithContainerStatuses([]v1.ContainerStatuses{ - {Name: "failure-update-deploy", ImageDigest: "busybox@sha256:deadbeef"}, - })), + Object: deploy(t, "foo", "failure-update-deploy"), }}, WantEvents: []string{ Eventf(corev1.EventTypeWarning, "InternalError", @@ -270,14 +233,11 @@ func TestReconcile(t *testing.T) { Objects: []runtime.Object{ rev("foo", "stable-deactivation", WithLogURL, MarkRevisionReady, - MarkInactive("NoTraffic", "This thing is inactive."), WithContainerStatuses([]v1.ContainerStatuses{ - {Name: "stable-deactivation", ImageDigest: "busybox@sha256:deadbeef"}, - })), + MarkInactive("NoTraffic", "This thing is inactive."), + withDefaultContainerStatuses("stable-deactivation")), pa("foo", "stable-deactivation", WithNoTraffic("NoTraffic", "This thing is inactive.")), - deploy(t, "foo", "stable-deactivation", WithContainerStatuses([]v1.ContainerStatuses{ - {Name: "stable-deactivation", ImageDigest: "busybox@sha256:deadbeef"}, - })), + deploy(t, "foo", "stable-deactivation"), image("foo", "stable-deactivation"), }, Key: "foo/stable-deactivation", @@ -285,13 +245,9 @@ func TestReconcile(t *testing.T) { Name: "pa is ready", Objects: []runtime.Object{ rev("foo", "pa-ready", - withK8sServiceName("old-stuff"), WithLogURL, AllUnknownConditions, WithContainerStatuses([]v1.ContainerStatuses{ - {Name: "pa-ready", ImageDigest: "busybox@sha256:deadbeef"}, - })), + withK8sServiceName("old-stuff"), WithLogURL, AllUnknownConditions), pa("foo", "pa-ready", 
WithTraffic, WithPAStatusService("new-stuff"), WithReachability(asv1a1.ReachabilityUnknown)), - deploy(t, "foo", "pa-ready", WithContainerStatuses([]v1.ContainerStatuses{ - {Name: "pa-ready", ImageDigest: "busybox@sha256:deadbeef"}, - })), + deploy(t, "foo", "pa-ready"), image("foo", "pa-ready"), }, WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ @@ -299,9 +255,7 @@ func TestReconcile(t *testing.T) { WithLogURL, // When the endpoint and pa are ready, then we will see the // Revision become ready. - MarkRevisionReady, WithContainerStatuses([]v1.ContainerStatuses{ - {Name: "pa-ready", ImageDigest: "busybox@sha256:deadbeef"}, - })), + MarkRevisionReady, withDefaultContainerStatuses("pa-ready")), }}, WantEvents: []string{ Eventf(corev1.EventTypeNormal, "RevisionReady", "Revision becomes ready upon all resources being ready"), @@ -313,22 +267,16 @@ func TestReconcile(t *testing.T) { Objects: []runtime.Object{ rev("foo", "pa-not-ready", withK8sServiceName("somebody-told-me"), WithLogURL, - MarkRevisionReady, WithContainerStatuses([]v1.ContainerStatuses{ - {Name: "pa-not-ready", ImageDigest: "busybox@sha256:deadbeef"}, - })), + MarkRevisionReady), pa("foo", "pa-not-ready", WithPAStatusService("its-not-confidential"), WithBufferedTraffic("Something", "This is something longer")), - readyDeploy(deploy(t, "foo", "pa-not-ready", WithContainerStatuses([]v1.ContainerStatuses{ - {Name: "pa-not-ready", ImageDigest: "busybox@sha256:deadbeef"}, - }))), + readyDeploy(deploy(t, "foo", "pa-not-ready")), image("foo", "pa-not-ready"), }, WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ Object: rev("foo", "pa-not-ready", - WithLogURL, MarkRevisionReady, WithContainerStatuses([]v1.ContainerStatuses{ - {Name: "pa-not-ready", ImageDigest: "busybox@sha256:deadbeef"}, - }), + WithLogURL, MarkRevisionReady, withDefaultContainerStatuses("pa-not-ready"), withK8sServiceName("its-not-confidential"), // When we reconcile a ready state and our pa is in an activating // state, we 
should see the following mutation. @@ -341,21 +289,15 @@ func TestReconcile(t *testing.T) { // Test propagating the inactivity signal from the pa to the Revision. Objects: []runtime.Object{ rev("foo", "pa-inactive", - withK8sServiceName("something-in-the-way"), WithLogURL, MarkRevisionReady, WithContainerStatuses([]v1.ContainerStatuses{ - {Name: "pa-inactive", ImageDigest: "busybox@sha256:deadbeef"}, - })), + withK8sServiceName("something-in-the-way"), WithLogURL, MarkRevisionReady), pa("foo", "pa-inactive", WithNoTraffic("NoTraffic", "This thing is inactive.")), - readyDeploy(deploy(t, "foo", "pa-inactive", WithContainerStatuses([]v1.ContainerStatuses{ - {Name: "pa-inactive", ImageDigest: "busybox@sha256:deadbeef"}, - }))), + readyDeploy(deploy(t, "foo", "pa-inactive")), image("foo", "pa-inactive"), }, WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ Object: rev("foo", "pa-inactive", - WithLogURL, MarkRevisionReady, WithContainerStatuses([]v1.ContainerStatuses{ - {Name: "pa-inactive", ImageDigest: "busybox@sha256:deadbeef"}, - }), + WithLogURL, MarkRevisionReady, withDefaultContainerStatuses("pa-inactive"), // When we reconcile an "all ready" revision when the PA // is inactive, we should see the following change. MarkInactive("NoTraffic", "This thing is inactive.")), @@ -366,16 +308,12 @@ func TestReconcile(t *testing.T) { // Test propagating the inactivity signal from the pa to the Revision. // But propagate the service name. 
Objects: []runtime.Object{ - rev("foo", "pa-inactive", WithContainerStatuses([]v1.ContainerStatuses{ - {Name: "pa-inactive", ImageDigest: "busybox@sha256:deadbeef"}, - }), + rev("foo", "pa-inactive", withK8sServiceName("here-comes-the-sun"), WithLogURL, MarkRevisionReady), pa("foo", "pa-inactive", WithNoTraffic("NoTraffic", "This thing is inactive."), WithPAStatusService("pa-inactive-svc")), - readyDeploy(deploy(t, "foo", "pa-inactive", WithContainerStatuses([]v1.ContainerStatuses{ - {Name: "pa-inactive", ImageDigest: "busybox@sha256:deadbeef"}, - }))), + readyDeploy(deploy(t, "foo", "pa-inactive")), image("foo", "pa-inactive"), }, WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ @@ -384,9 +322,8 @@ func TestReconcile(t *testing.T) { withK8sServiceName("pa-inactive-svc"), // When we reconcile an "all ready" revision when the PA // is inactive, we should see the following change. - MarkInactive("NoTraffic", "This thing is inactive."), WithContainerStatuses([]v1.ContainerStatuses{ - {Name: "pa-inactive", ImageDigest: "busybox@sha256:deadbeef"}, - })), + MarkInactive("NoTraffic", "This thing is inactive."), + withDefaultContainerStatuses("pa-inactive")), }}, Key: "foo/pa-inactive", }, { @@ -396,14 +333,10 @@ func TestReconcile(t *testing.T) { // Protocol type is the only thing that can be changed on PA Objects: []runtime.Object{ rev("foo", "fix-mutated-pa", - withK8sServiceName("ill-follow-the-sun"), WithLogURL, MarkRevisionReady, WithContainerStatuses([]v1.ContainerStatuses{ - {Name: "fix-mutated-pa", ImageDigest: "busybox@sha256:deadbeef"}, - })), + withK8sServiceName("ill-follow-the-sun"), WithLogURL, MarkRevisionReady), pa("foo", "fix-mutated-pa", WithProtocolType(networking.ProtocolH2C), WithTraffic, WithPAStatusService("fix-mutated-pa")), - deploy(t, "foo", "fix-mutated-pa", WithContainerStatuses([]v1.ContainerStatuses{ - {Name: "fix-mutated-pa", ImageDigest: "busybox@sha256:deadbeef"}, - })), + deploy(t, "foo", "fix-mutated-pa"), image("foo", 
"fix-mutated-pa"), }, WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ @@ -411,9 +344,8 @@ func TestReconcile(t *testing.T) { WithLogURL, AllUnknownConditions, // When our reconciliation has to change the service // we should see the following mutations to status. - withK8sServiceName("fix-mutated-pa"), WithLogURL, MarkRevisionReady, WithContainerStatuses([]v1.ContainerStatuses{ - {Name: "fix-mutated-pa", ImageDigest: "busybox@sha256:deadbeef"}, - })), + withK8sServiceName("fix-mutated-pa"), WithLogURL, MarkRevisionReady, + withDefaultContainerStatuses("fix-mutated-pa")), }}, WantUpdates: []clientgotesting.UpdateActionImpl{{ Object: pa("foo", "fix-mutated-pa", WithTraffic, @@ -426,13 +358,9 @@ func TestReconcile(t *testing.T) { Objects: []runtime.Object{ rev("foo", "fix-mutated-pa-fail", withK8sServiceName("some-old-stuff"), - WithLogURL, AllUnknownConditions, WithContainerStatuses([]v1.ContainerStatuses{ - {Name: "fix-mutated-pa-fail", ImageDigest: "busybox@sha256:deadbeef"}, - })), + WithLogURL, AllUnknownConditions, withDefaultContainerStatuses("fix-mutated-pa-fail")), pa("foo", "fix-mutated-pa-fail", WithProtocolType(networking.ProtocolH2C), WithReachability(asv1a1.ReachabilityUnknown)), - deploy(t, "foo", "fix-mutated-pa-fail", WithContainerStatuses([]v1.ContainerStatuses{ - {Name: "fix-mutated-pa-fail", ImageDigest: "busybox@sha256:deadbeef"}, - })), + deploy(t, "foo", "fix-mutated-pa-fail"), image("foo", "fix-mutated-pa-fail"), }, WantErr: true, @@ -455,13 +383,9 @@ func TestReconcile(t *testing.T) { // status of the Revision. Objects: []runtime.Object{ rev("foo", "deploy-timeout", - withK8sServiceName("the-taxman"), WithLogURL, MarkActive, WithContainerStatuses([]v1.ContainerStatuses{ - {Name: "deploy-timeout", ImageDigest: "busybox@sha256:deadbeef"}, - })), + withK8sServiceName("the-taxman"), WithLogURL, MarkActive), pa("foo", "deploy-timeout"), // pa can't be ready since deployment times out. 
- timeoutDeploy(deploy(t, "foo", "deploy-timeout", WithContainerStatuses([]v1.ContainerStatuses{ - {Name: "deploy-timeout", ImageDigest: "busybox@sha256:deadbeef"}, - })), "I timed out!"), + timeoutDeploy(deploy(t, "foo", "deploy-timeout"), "I timed out!"), image("foo", "deploy-timeout"), }, WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ @@ -469,9 +393,7 @@ func TestReconcile(t *testing.T) { WithLogURL, AllUnknownConditions, // When the revision is reconciled after a Deployment has // timed out, we should see it marked with the PDE state. - MarkProgressDeadlineExceeded("I timed out!"), WithContainerStatuses([]v1.ContainerStatuses{ - {Name: "deploy-timeout", ImageDigest: "busybox@sha256:deadbeef"}, - })), + MarkProgressDeadlineExceeded("I timed out!"), withDefaultContainerStatuses("deploy-timeout")), }}, Key: "foo/deploy-timeout", }, { @@ -482,13 +404,9 @@ func TestReconcile(t *testing.T) { // It then verifies that Reconcile propagates this into the status of the Revision. Objects: []runtime.Object{ rev("foo", "deploy-replica-failure", - withK8sServiceName("the-taxman"), WithLogURL, MarkActive, WithContainerStatuses([]v1.ContainerStatuses{ - {Name: "deploy-replica-failure", ImageDigest: "busybox@sha256:deadbeef"}, - })), + withK8sServiceName("the-taxman"), WithLogURL, MarkActive), pa("foo", "deploy-replica-failure"), - replicaFailureDeploy(deploy(t, "foo", "deploy-replica-failure", WithContainerStatuses([]v1.ContainerStatuses{ - {Name: "deploy-replica-failure", ImageDigest: "busybox@sha256:deadbeef"}, - })), "I replica failed!"), + replicaFailureDeploy(deploy(t, "foo", "deploy-replica-failure"), "I replica failed!"), image("foo", "deploy-replica-failure"), }, WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ @@ -496,9 +414,8 @@ func TestReconcile(t *testing.T) { WithLogURL, AllUnknownConditions, // When the revision is reconciled after a Deployment has // timed out, we should see it marked with the FailedCreate state. 
- MarkResourcesUnavailable("FailedCreate", "I replica failed!"), WithContainerStatuses([]v1.ContainerStatuses{ - {Name: "deploy-replica-failure", ImageDigest: "busybox@sha256:deadbeef"}, - })), + MarkResourcesUnavailable("FailedCreate", "I replica failed!"), + withDefaultContainerStatuses("deploy-replica-failure")), }}, Key: "foo/deploy-replica-failure", }, { @@ -506,22 +423,16 @@ func TestReconcile(t *testing.T) { // Test the propagation of ImagePullBackoff from user container. Objects: []runtime.Object{ rev("foo", "pull-backoff", - withK8sServiceName("the-taxman"), WithLogURL, WithContainerStatuses([]v1.ContainerStatuses{ - {Name: "pull-backoff", ImageDigest: "busybox@sha256:deadbeef"}, - }), MarkActivating("Deploying", "")), + withK8sServiceName("the-taxman"), WithLogURL, MarkActivating("Deploying", "")), pa("foo", "pull-backoff", WithReachability(asv1a1.ReachabilityUnknown)), // pa can't be ready since deployment times out. pod(t, "foo", "pull-backoff", WithWaitingContainer("pull-backoff", "ImagePullBackoff", "can't pull it")), - timeoutDeploy(deploy(t, "foo", "pull-backoff", WithContainerStatuses([]v1.ContainerStatuses{ - {Name: "pull-backoff", ImageDigest: "busybox@sha256:deadbeef"}, - })), "Timed out!"), + timeoutDeploy(deploy(t, "foo", "pull-backoff"), "Timed out!"), image("foo", "pull-backoff"), }, WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ Object: rev("foo", "pull-backoff", WithLogURL, AllUnknownConditions, - MarkResourcesUnavailable("ImagePullBackoff", "can't pull it"), WithContainerStatuses([]v1.ContainerStatuses{ - {Name: "pull-backoff", ImageDigest: "busybox@sha256:deadbeef"}, - })), + MarkResourcesUnavailable("ImagePullBackoff", "can't pull it"), withDefaultContainerStatuses("pull-backoff")), }}, WantUpdates: []clientgotesting.UpdateActionImpl{{ Object: pa("foo", "pull-backoff", WithReachability(asv1a1.ReachabilityUnreachable)), @@ -535,22 +446,16 @@ func TestReconcile(t *testing.T) { // that Reconcile propagates this into the status of 
the Revision. Objects: []runtime.Object{ rev("foo", "pod-error", - withK8sServiceName("a-pod-error"), WithLogURL, AllUnknownConditions, MarkActive, WithContainerStatuses([]v1.ContainerStatuses{ - {Name: "pod-error", ImageDigest: "busybox@sha256:deadbeef"}, - })), + withK8sServiceName("a-pod-error"), WithLogURL, AllUnknownConditions, MarkActive), pa("foo", "pod-error"), // PA can't be ready, since no traffic. pod(t, "foo", "pod-error", WithFailingContainer("pod-error", 5, "I failed man!")), - deploy(t, "foo", "pod-error", WithContainerStatuses([]v1.ContainerStatuses{ - {Name: "pod-error", ImageDigest: "busybox@sha256:deadbeef"}, - })), + deploy(t, "foo", "pod-error"), image("foo", "pod-error"), }, WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ Object: rev("foo", "pod-error", WithLogURL, AllUnknownConditions, MarkContainerExiting(5, - v1.RevisionContainerExitingMessage("I failed man!")), WithContainerStatuses([]v1.ContainerStatuses{ - {Name: "pod-error", ImageDigest: "busybox@sha256:deadbeef"}, - })), + v1.RevisionContainerExitingMessage("I failed man!")), withDefaultContainerStatuses("pod-error")), }}, Key: "foo/pod-error", }, { @@ -560,22 +465,16 @@ func TestReconcile(t *testing.T) { // that Reconcile propagates this into the status of the Revision. Objects: []runtime.Object{ rev("foo", "pod-schedule-error", - withK8sServiceName("a-pod-schedule-error"), WithLogURL, AllUnknownConditions, MarkActive, WithContainerStatuses([]v1.ContainerStatuses{ - {Name: "pod-schedule-error", ImageDigest: "busybox@sha256:deadbeef"}, - })), + withK8sServiceName("a-pod-schedule-error"), WithLogURL, AllUnknownConditions, MarkActive), pa("foo", "pod-schedule-error"), // PA can't be ready, since no traffic. 
pod(t, "foo", "pod-schedule-error", WithUnschedulableContainer("Insufficient energy", "Unschedulable")), - deploy(t, "foo", "pod-schedule-error", WithContainerStatuses([]v1.ContainerStatuses{ - {Name: "pod-schedule-error", ImageDigest: "busybox@sha256:deadbeef"}, - })), + deploy(t, "foo", "pod-schedule-error"), image("foo", "pod-schedule-error"), }, WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ Object: rev("foo", "pod-schedule-error", WithLogURL, AllUnknownConditions, MarkResourcesUnavailable("Insufficient energy", - "Unschedulable"), WithContainerStatuses([]v1.ContainerStatuses{ - {Name: "pod-schedule-error", ImageDigest: "busybox@sha256:deadbeef"}, - })), + "Unschedulable"), withDefaultContainerStatuses("pod-schedule-error")), }}, Key: "foo/pod-schedule-error", }, { @@ -586,23 +485,16 @@ func TestReconcile(t *testing.T) { // Revision. It then creates an Endpoints resource with active subsets. // This signal should make our Reconcile mark the Revision as Ready. Objects: []runtime.Object{ - rev("foo", "steady-ready", withK8sServiceName("very-steady"), WithLogURL, - WithContainerStatuses([]v1.ContainerStatuses{ - {Name: "steady-ready", ImageDigest: "busybox@sha256:deadbeef"}, - })), + rev("foo", "steady-ready", withK8sServiceName("very-steady"), WithLogURL), pa("foo", "steady-ready", WithTraffic, WithPAStatusService("steadier-even")), - deploy(t, "foo", "steady-ready", WithContainerStatuses([]v1.ContainerStatuses{ - {Name: "steady-ready", ImageDigest: "busybox@sha256:deadbeef"}, - })), + deploy(t, "foo", "steady-ready"), image("foo", "steady-ready"), }, WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ Object: rev("foo", "steady-ready", withK8sServiceName("steadier-even"), WithLogURL, // All resources are ready to go, we should see the revision being // marked ready - MarkRevisionReady, WithContainerStatuses([]v1.ContainerStatuses{ - {Name: "steady-ready", ImageDigest: "busybox@sha256:deadbeef"}, - })), + MarkRevisionReady, 
withDefaultContainerStatuses("steady-ready")), }}, WantEvents: []string{ Eventf(corev1.EventTypeNormal, "RevisionReady", "Revision becomes ready upon all resources being ready"), @@ -613,22 +505,16 @@ func TestReconcile(t *testing.T) { WantErr: true, Objects: []runtime.Object{ rev("foo", "missing-owners", withK8sServiceName("lesser-revision"), WithLogURL, - MarkRevisionReady, WithContainerStatuses([]v1.ContainerStatuses{ - {Name: "missing-owners", ImageDigest: "busybox@sha256:deadbeef"}, - })), + MarkRevisionReady), pa("foo", "missing-owners", WithTraffic, WithPodAutoscalerOwnersRemoved), - deploy(t, "foo", "missing-owners", WithContainerStatuses([]v1.ContainerStatuses{ - {Name: "missing-owners", ImageDigest: "busybox@sha256:deadbeef"}, - })), + deploy(t, "foo", "missing-owners"), image("foo", "missing-owners"), }, WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ Object: rev("foo", "missing-owners", withK8sServiceName("lesser-revision"), WithLogURL, MarkRevisionReady, // When we're missing the OwnerRef for PodAutoscaler we see this update. 
- MarkResourceNotOwned("PodAutoscaler", "missing-owners"), WithContainerStatuses([]v1.ContainerStatuses{ - {Name: "missing-owners", ImageDigest: "busybox@sha256:deadbeef"}, - })), + MarkResourceNotOwned("PodAutoscaler", "missing-owners"), withDefaultContainerStatuses("missing-owners")), }}, WantEvents: []string{ Eventf(corev1.EventTypeWarning, "InternalError", `revision: "missing-owners" does not own PodAutoscaler: "missing-owners"`), @@ -639,22 +525,16 @@ func TestReconcile(t *testing.T) { WantErr: true, Objects: []runtime.Object{ rev("foo", "missing-owners", withK8sServiceName("youre-gonna-lose"), WithLogURL, - MarkRevisionReady, WithContainerStatuses([]v1.ContainerStatuses{ - {Name: "missing-owners", ImageDigest: "busybox@sha256:deadbeef"}, - })), + MarkRevisionReady), pa("foo", "missing-owners", WithTraffic), - noOwner(deploy(t, "foo", "missing-owners", WithContainerStatuses([]v1.ContainerStatuses{ - {Name: "missing-owners", ImageDigest: "busybox@sha256:deadbeef"}, - }))), + noOwner(deploy(t, "foo", "missing-owners")), image("foo", "missing-owners"), }, WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ Object: rev("foo", "missing-owners", withK8sServiceName("youre-gonna-lose"), WithLogURL, MarkRevisionReady, // When we're missing the OwnerRef for Deployment we see this update. 
- MarkResourceNotOwned("Deployment", "missing-owners-deployment"), WithContainerStatuses([]v1.ContainerStatuses{ - {Name: "missing-owners", ImageDigest: "busybox@sha256:deadbeef"}, - })), + MarkResourceNotOwned("Deployment", "missing-owners-deployment"), withDefaultContainerStatuses("missing-owners")), }}, WantEvents: []string{ Eventf(corev1.EventTypeWarning, "InternalError", `revision: "missing-owners" does not own Deployment: "missing-owners-deployment"`), @@ -664,24 +544,17 @@ func TestReconcile(t *testing.T) { Name: "image pull secrets", // This test case tests that the image pull secrets from revision propagate to deployment and image Objects: []runtime.Object{ - rev("foo", "image-pull-secrets", WithImagePullSecrets("foo-secret"), - WithContainerStatuses([]v1.ContainerStatuses{ - {Name: "image-pull-secrets", ImageDigest: "busybox@sha256:deadbeef"}, - })), + rev("foo", "image-pull-secrets", WithImagePullSecrets("foo-secret")), }, WantCreates: []runtime.Object{ pa("foo", "image-pull-secrets"), - deployImagePullSecrets(deploy(t, "foo", "image-pull-secrets", WithContainerStatuses([]v1.ContainerStatuses{ - {Name: "image-pull-secrets", ImageDigest: "busybox@sha256:deadbeef"}, - })), "foo-secret"), + deployImagePullSecrets(deploy(t, "foo", "image-pull-secrets"), "foo-secret"), imagePullSecrets(image("foo", "image-pull-secrets"), "foo-secret"), }, WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ Object: rev("foo", "image-pull-secrets", WithImagePullSecrets("foo-secret"), - WithLogURL, AllUnknownConditions, MarkDeploying("Deploying"), WithContainerStatuses([]v1.ContainerStatuses{ - {Name: "image-pull-secrets", ImageDigest: "busybox@sha256:deadbeef"}, - })), + WithLogURL, AllUnknownConditions, MarkDeploying("Deploying"), withDefaultContainerStatuses("image-pull-secrets")), }}, Key: "foo/image-pull-secrets", }} @@ -790,6 +663,15 @@ func withK8sServiceName(sn string) RevisionOption { } } +func withDefaultContainerStatuses(name string) RevisionOption { + return 
func(r *v1.Revision) { + r.Status.ContainerStatuses = []v1.ContainerStatuses{{ + Name: name, + ImageDigest: "", + }} + } +} + // TODO(mattmoor): Come up with a better name for this. func AllUnknownConditions(r *v1.Revision) { WithInitRevConditions(r) @@ -835,7 +717,7 @@ func image(namespace, name string, co ...configOption) *caching.Image { opt(config) } - return resources.MakeImageCache(rev(namespace, name), name, "busybox@sha256:deadbeef") + return resources.MakeImageCache(rev(namespace, name), name, "") } func pa(namespace, name string, ko ...PodAutoscalerOption) *asv1a1.PodAutoscaler { From a3b505924bb9053ab29d1ed0b41873804d647af2 Mon Sep 17 00:00:00 2001 From: savitaashture Date: Wed, 20 May 2020 02:14:19 +0530 Subject: [PATCH 12/15] update comments --- pkg/reconciler/revision/resources/deploy.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/pkg/reconciler/revision/resources/deploy.go b/pkg/reconciler/revision/resources/deploy.go index 9a361141cc77..5ac3f4447293 100644 --- a/pkg/reconciler/revision/resources/deploy.go +++ b/pkg/reconciler/revision/resources/deploy.go @@ -145,8 +145,10 @@ func BuildUserContainers(rev *v1.Revision) []corev1.Container { } else { container = makeContainer(*rev.Spec.PodSpec.Containers[i].DeepCopy(), rev) } + // The below logic usually mislead because of the operation on status during creation + // But as all images will be resolved to digests before calling this function + // so rev.Status.ContainerStatuses can be used to override the container images. if rev.Status.ContainerStatuses != nil { - // Override container image with digest value if the ContainerStatuses contains non empty imageDigest value. 
if rev.Status.ContainerStatuses[i].ImageDigest != "" { container.Image = rev.Status.ContainerStatuses[i].ImageDigest } From 71ae1e14551ed4cccbac0d81d134c359a9806685 Mon Sep 17 00:00:00 2001 From: savitaashture Date: Wed, 20 May 2020 10:23:16 +0530 Subject: [PATCH 13/15] Add len check instead of nil --- pkg/reconciler/revision/resources/deploy.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/pkg/reconciler/revision/resources/deploy.go b/pkg/reconciler/revision/resources/deploy.go index 5ac3f4447293..2ea2c26033e3 100644 --- a/pkg/reconciler/revision/resources/deploy.go +++ b/pkg/reconciler/revision/resources/deploy.go @@ -145,10 +145,10 @@ func BuildUserContainers(rev *v1.Revision) []corev1.Container { } else { container = makeContainer(*rev.Spec.PodSpec.Containers[i].DeepCopy(), rev) } - // The below logic usually mislead because of the operation on status during creation - // But as all images will be resolved to digests before calling this function - // so rev.Status.ContainerStatuses can be used to override the container images. - if rev.Status.ContainerStatuses != nil { + // The below logic is safe because the image digests in Status.ContainerStatuses will have been resolved + // before this method is called. We check for an empty slice here because the method can also be + // called during DryRun, where ContainerStatuses will not yet have been resolved. 
+ if len(rev.Status.ContainerStatuses) != 0 { if rev.Status.ContainerStatuses[i].ImageDigest != "" { container.Image = rev.Status.ContainerStatuses[i].ImageDigest } From e153672705d271b4bce576b082c0b4c8291d3b29 Mon Sep 17 00:00:00 2001 From: savitaashture Date: Sun, 31 May 2020 10:01:25 +0530 Subject: [PATCH 14/15] addressed review comments --- pkg/reconciler/revision/table_test.go | 50 +++++++++++++-------------- 1 file changed, 25 insertions(+), 25 deletions(-) diff --git a/pkg/reconciler/revision/table_test.go b/pkg/reconciler/revision/table_test.go index dc1cde528ce1..f128bf703903 100644 --- a/pkg/reconciler/revision/table_test.go +++ b/pkg/reconciler/revision/table_test.go @@ -86,7 +86,7 @@ func TestReconcile(t *testing.T) { Object: rev("foo", "first-reconcile", // The first reconciliation Populates the following status properties. WithLogURL, AllUnknownConditions, MarkDeploying("Deploying"), - withDefaultContainerStatuses("first-reconcile")), + withDefaultContainerStatuses()), }}, Key: "foo/first-reconcile", }, { @@ -110,7 +110,7 @@ func TestReconcile(t *testing.T) { Object: rev("foo", "update-status-failure", // Despite failure, the following status properties are set. WithLogURL, AllUnknownConditions, MarkDeploying("Deploying"), - withDefaultContainerStatuses("update-status-failure")), + withDefaultContainerStatuses()), }}, WantEvents: []string{ Eventf(corev1.EventTypeWarning, "UpdateFailed", "Failed to update status for %q: %v", @@ -138,7 +138,7 @@ func TestReconcile(t *testing.T) { Object: rev("foo", "create-pa-failure", // Despite failure, the following status properties are set. 
WithLogURL, WithInitRevConditions, - MarkDeploying("Deploying"), withDefaultContainerStatuses("create-pa-failure")), + MarkDeploying("Deploying"), withDefaultContainerStatuses()), }}, WantEvents: []string{ Eventf(corev1.EventTypeWarning, "InternalError", `failed to create PA "create-pa-failure": inducing failure for create podautoscalers`), @@ -164,7 +164,7 @@ func TestReconcile(t *testing.T) { Object: rev("foo", "create-user-deploy-failure", // Despite failure, the following status properties are set. WithLogURL, WithInitRevConditions, - MarkDeploying("Deploying"), withDefaultContainerStatuses("create-user-deploy-failure")), + MarkDeploying("Deploying"), withDefaultContainerStatuses()), }}, WantEvents: []string{ Eventf(corev1.EventTypeWarning, "InternalError", @@ -179,7 +179,7 @@ func TestReconcile(t *testing.T) { // are necessary. Objects: []runtime.Object{ rev("foo", "stable-reconcile", WithLogURL, AllUnknownConditions, - withDefaultContainerStatuses("stable-reconcile")), + withDefaultContainerStatuses()), pa("foo", "stable-reconcile", WithReachability(asv1a1.ReachabilityUnknown)), deploy(t, "foo", "stable-reconcile"), @@ -193,7 +193,7 @@ func TestReconcile(t *testing.T) { // with our desired spec. 
Objects: []runtime.Object{ rev("foo", "fix-containers", - WithLogURL, AllUnknownConditions, withDefaultContainerStatuses("fix-containers")), + WithLogURL, AllUnknownConditions, withDefaultContainerStatuses()), pa("foo", "fix-containers", WithReachability(asv1a1.ReachabilityUnknown)), changeContainers(deploy(t, "foo", "fix-containers")), image("foo", "fix-containers"), @@ -212,7 +212,7 @@ func TestReconcile(t *testing.T) { Objects: []runtime.Object{ rev("foo", "failure-update-deploy", withK8sServiceName("whateves"), WithLogURL, AllUnknownConditions, - withDefaultContainerStatuses("failure-update-deploy")), + withDefaultContainerStatuses()), pa("foo", "failure-update-deploy"), changeContainers(deploy(t, "foo", "failure-update-deploy")), image("foo", "failure-update-deploy"), @@ -234,7 +234,7 @@ func TestReconcile(t *testing.T) { rev("foo", "stable-deactivation", WithLogURL, MarkRevisionReady, MarkInactive("NoTraffic", "This thing is inactive."), - withDefaultContainerStatuses("stable-deactivation")), + withDefaultContainerStatuses()), pa("foo", "stable-deactivation", WithNoTraffic("NoTraffic", "This thing is inactive.")), deploy(t, "foo", "stable-deactivation"), @@ -255,7 +255,7 @@ func TestReconcile(t *testing.T) { WithLogURL, // When the endpoint and pa are ready, then we will see the // Revision become ready. 
- MarkRevisionReady, withDefaultContainerStatuses("pa-ready")), + MarkRevisionReady, withDefaultContainerStatuses()), }}, WantEvents: []string{ Eventf(corev1.EventTypeNormal, "RevisionReady", "Revision becomes ready upon all resources being ready"), @@ -276,7 +276,7 @@ func TestReconcile(t *testing.T) { }, WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ Object: rev("foo", "pa-not-ready", - WithLogURL, MarkRevisionReady, withDefaultContainerStatuses("pa-not-ready"), + WithLogURL, MarkRevisionReady, withDefaultContainerStatuses(), withK8sServiceName("its-not-confidential"), // When we reconcile a ready state and our pa is in an activating // state, we should see the following mutation. @@ -297,7 +297,7 @@ func TestReconcile(t *testing.T) { }, WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ Object: rev("foo", "pa-inactive", - WithLogURL, MarkRevisionReady, withDefaultContainerStatuses("pa-inactive"), + WithLogURL, MarkRevisionReady, withDefaultContainerStatuses(), // When we reconcile an "all ready" revision when the PA // is inactive, we should see the following change. MarkInactive("NoTraffic", "This thing is inactive.")), @@ -323,7 +323,7 @@ func TestReconcile(t *testing.T) { // When we reconcile an "all ready" revision when the PA // is inactive, we should see the following change. MarkInactive("NoTraffic", "This thing is inactive."), - withDefaultContainerStatuses("pa-inactive")), + withDefaultContainerStatuses()), }}, Key: "foo/pa-inactive", }, { @@ -345,7 +345,7 @@ func TestReconcile(t *testing.T) { // When our reconciliation has to change the service // we should see the following mutations to status. 
withK8sServiceName("fix-mutated-pa"), WithLogURL, MarkRevisionReady, - withDefaultContainerStatuses("fix-mutated-pa")), + withDefaultContainerStatuses()), }}, WantUpdates: []clientgotesting.UpdateActionImpl{{ Object: pa("foo", "fix-mutated-pa", WithTraffic, @@ -358,7 +358,7 @@ func TestReconcile(t *testing.T) { Objects: []runtime.Object{ rev("foo", "fix-mutated-pa-fail", withK8sServiceName("some-old-stuff"), - WithLogURL, AllUnknownConditions, withDefaultContainerStatuses("fix-mutated-pa-fail")), + WithLogURL, AllUnknownConditions, withDefaultContainerStatuses()), pa("foo", "fix-mutated-pa-fail", WithProtocolType(networking.ProtocolH2C), WithReachability(asv1a1.ReachabilityUnknown)), deploy(t, "foo", "fix-mutated-pa-fail"), image("foo", "fix-mutated-pa-fail"), @@ -393,7 +393,7 @@ func TestReconcile(t *testing.T) { WithLogURL, AllUnknownConditions, // When the revision is reconciled after a Deployment has // timed out, we should see it marked with the PDE state. - MarkProgressDeadlineExceeded("I timed out!"), withDefaultContainerStatuses("deploy-timeout")), + MarkProgressDeadlineExceeded("I timed out!"), withDefaultContainerStatuses()), }}, Key: "foo/deploy-timeout", }, { @@ -415,7 +415,7 @@ func TestReconcile(t *testing.T) { // When the revision is reconciled after a Deployment has // timed out, we should see it marked with the FailedCreate state. 
MarkResourcesUnavailable("FailedCreate", "I replica failed!"), - withDefaultContainerStatuses("deploy-replica-failure")), + withDefaultContainerStatuses()), }}, Key: "foo/deploy-replica-failure", }, { @@ -432,7 +432,7 @@ func TestReconcile(t *testing.T) { WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ Object: rev("foo", "pull-backoff", WithLogURL, AllUnknownConditions, - MarkResourcesUnavailable("ImagePullBackoff", "can't pull it"), withDefaultContainerStatuses("pull-backoff")), + MarkResourcesUnavailable("ImagePullBackoff", "can't pull it"), withDefaultContainerStatuses()), }}, WantUpdates: []clientgotesting.UpdateActionImpl{{ Object: pa("foo", "pull-backoff", WithReachability(asv1a1.ReachabilityUnreachable)), @@ -455,7 +455,7 @@ func TestReconcile(t *testing.T) { WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ Object: rev("foo", "pod-error", WithLogURL, AllUnknownConditions, MarkContainerExiting(5, - v1.RevisionContainerExitingMessage("I failed man!")), withDefaultContainerStatuses("pod-error")), + v1.RevisionContainerExitingMessage("I failed man!")), withDefaultContainerStatuses()), }}, Key: "foo/pod-error", }, { @@ -474,7 +474,7 @@ func TestReconcile(t *testing.T) { WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ Object: rev("foo", "pod-schedule-error", WithLogURL, AllUnknownConditions, MarkResourcesUnavailable("Insufficient energy", - "Unschedulable"), withDefaultContainerStatuses("pod-schedule-error")), + "Unschedulable"), withDefaultContainerStatuses()), }}, Key: "foo/pod-schedule-error", }, { @@ -494,7 +494,7 @@ func TestReconcile(t *testing.T) { Object: rev("foo", "steady-ready", withK8sServiceName("steadier-even"), WithLogURL, // All resources are ready to go, we should see the revision being // marked ready - MarkRevisionReady, withDefaultContainerStatuses("steady-ready")), + MarkRevisionReady, withDefaultContainerStatuses()), }}, WantEvents: []string{ Eventf(corev1.EventTypeNormal, "RevisionReady", "Revision becomes ready upon 
all resources being ready"), @@ -514,7 +514,7 @@ func TestReconcile(t *testing.T) { Object: rev("foo", "missing-owners", withK8sServiceName("lesser-revision"), WithLogURL, MarkRevisionReady, // When we're missing the OwnerRef for PodAutoscaler we see this update. - MarkResourceNotOwned("PodAutoscaler", "missing-owners"), withDefaultContainerStatuses("missing-owners")), + MarkResourceNotOwned("PodAutoscaler", "missing-owners"), withDefaultContainerStatuses()), }}, WantEvents: []string{ Eventf(corev1.EventTypeWarning, "InternalError", `revision: "missing-owners" does not own PodAutoscaler: "missing-owners"`), @@ -534,7 +534,7 @@ func TestReconcile(t *testing.T) { Object: rev("foo", "missing-owners", withK8sServiceName("youre-gonna-lose"), WithLogURL, MarkRevisionReady, // When we're missing the OwnerRef for Deployment we see this update. - MarkResourceNotOwned("Deployment", "missing-owners-deployment"), withDefaultContainerStatuses("missing-owners")), + MarkResourceNotOwned("Deployment", "missing-owners-deployment"), withDefaultContainerStatuses()), }}, WantEvents: []string{ Eventf(corev1.EventTypeWarning, "InternalError", `revision: "missing-owners" does not own Deployment: "missing-owners-deployment"`), @@ -554,7 +554,7 @@ func TestReconcile(t *testing.T) { WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ Object: rev("foo", "image-pull-secrets", WithImagePullSecrets("foo-secret"), - WithLogURL, AllUnknownConditions, MarkDeploying("Deploying"), withDefaultContainerStatuses("image-pull-secrets")), + WithLogURL, AllUnknownConditions, MarkDeploying("Deploying"), withDefaultContainerStatuses()), }}, Key: "foo/image-pull-secrets", }} @@ -663,10 +663,10 @@ func withK8sServiceName(sn string) RevisionOption { } } -func withDefaultContainerStatuses(name string) RevisionOption { +func withDefaultContainerStatuses() RevisionOption { return func(r *v1.Revision) { r.Status.ContainerStatuses = []v1.ContainerStatuses{{ - Name: name, + Name: r.Name, ImageDigest: "", }} } 
From 85d51c1308ff3af1ea56690eb97ada37b0def194 Mon Sep 17 00:00:00 2001 From: savitaashture Date: Wed, 3 Jun 2020 16:54:14 +0530 Subject: [PATCH 15/15] Fix merge conflict --- .../revision/resources/deploy_test.go | 81 +++------------ .../revision/resources/queue_test.go | 98 +++++-------------- 2 files changed, 35 insertions(+), 144 deletions(-) diff --git a/pkg/reconciler/revision/resources/deploy_test.go b/pkg/reconciler/revision/resources/deploy_test.go index 57b9d7accef3..aa007881f300 100644 --- a/pkg/reconciler/revision/resources/deploy_test.go +++ b/pkg/reconciler/revision/resources/deploy_test.go @@ -657,9 +657,6 @@ func TestMakePodSpec(t *testing.T) { }), }, { name: "with tcp readiness probe", - rev: revision("bar", "foo", func(revision *v1.Revision) { - container(revision.Spec.GetContainer(), withTCPReadinessProbe()) - }), rev: revision("bar", "foo", withContainers([]corev1.Container{{ Name: servingContainerName, @@ -1053,13 +1050,8 @@ func TestMakeDeployment(t *testing.T) { ImageDigest: "busybox@sha256:deadbeef", }, { ImageDigest: "ubuntu@sha256:deadbffe", - }}), - func(revision *v1.Revision) { - container(revision.Spec.GetContainer(), withTCPReadinessProbe()) - }), + }})), want: appsv1deployment(), - ), - want: makeDeployment(), }, { name: "with owner", rev: revision("bar", "foo", @@ -1072,22 +1064,10 @@ func TestMakeDeployment(t *testing.T) { withOwnerReference("parent-config"), WithContainerStatuses([]v1.ContainerStatuses{{ ImageDigest: "busybox@sha256:deadbeef", - }}), - func(revision *v1.Revision) { - container(revision.Spec.GetContainer(), withTCPReadinessProbe()) - }), + }})), want: appsv1deployment(), - ), - want: makeDeployment(), }, { name: "with sidecar annotation override", - rev: revision("bar", "foo", withoutLabels, func(revision *v1.Revision) { - revision.ObjectMeta.Annotations = map[string]string{ - sidecarIstioInjectAnnotation: "false", - } - container(revision.Spec.GetContainer(), withTCPReadinessProbe()) - }), - want: 
appsv1deployment(func(deploy *appsv1.Deployment) { rev: revision("bar", "foo", withContainers([]corev1.Container{{ Name: servingContainerName, @@ -1102,7 +1082,7 @@ func TestMakeDeployment(t *testing.T) { sidecarIstioInjectAnnotation: "false", } }), - want: makeDeployment(func(deploy *appsv1.Deployment) { + want: appsv1deployment(func(deploy *appsv1.Deployment) { deploy.Annotations[sidecarIstioInjectAnnotation] = "false" deploy.Spec.Template.Annotations[sidecarIstioInjectAnnotation] = "false" }), @@ -1111,9 +1091,15 @@ func TestMakeDeployment(t *testing.T) { dc: deployment.Config{ ProgressDeadline: 42 * time.Second, }, - rev: revision("bar", "foo", withoutLabels, func(revision *v1.Revision) { - container(revision.Spec.GetContainer(), withTCPReadinessProbe()) - }), + rev: revision("bar", "foo", + withContainers([]corev1.Container{{ + Name: servingContainerName, + Image: "ubuntu", + ReadinessProbe: withTCPReadinessProbe(12345), + }}), + WithContainerStatuses([]v1.ContainerStatuses{{ + ImageDigest: "busybox@sha256:deadbeef", + }}), withoutLabels), want: appsv1deployment(func(deploy *appsv1.Deployment) { deploy.Spec.ProgressDeadlineSeconds = ptr.Int32(42) }), @@ -1139,46 +1125,3 @@ func TestMakeDeployment(t *testing.T) { }) } } - -func TestProgressDeadlineOverride(t *testing.T) { - rev := revision("bar", "foo", - withContainers([]corev1.Container{{ - Name: servingContainerName, - Image: "busybox", - Ports: []corev1.ContainerPort{{ - ContainerPort: 8888, - }}, - ReadinessProbe: withTCPReadinessProbe(12345), - }, { - Name: sidecarContainerName, - Image: "ubuntu", - }}), - withoutLabels, - WithContainerStatuses([]v1.ContainerStatuses{{ - ImageDigest: "busybox@sha256:deadbeef", - }, { - ImageDigest: "ubuntu@sha256:deadbffe", - }}), - ) - want := makeDeployment(func(d *appsv1.Deployment) { - d.Spec.ProgressDeadlineSeconds = ptr.Int32(42) - }) - - dc := &deployment.Config{ - ProgressDeadline: 42 * time.Second, - } - podSpec, err := makePodSpec(rev, &logConfig, &traceConfig, 
&obsConfig, &asConfig, dc) - if err != nil { - t.Fatal("makePodSpec returned error:", err) - } - want.Spec.Template.Spec = *podSpec - got, err := MakeDeployment(rev, &logConfig, &traceConfig, - &network.Config{}, &obsConfig, &asConfig, dc) - if err != nil { - t.Fatal("MakeDeployment returned error:", err) - } - if !cmp.Equal(want, got, cmp.AllowUnexported(resource.Quantity{})) { - t.Error("MakeDeployment (-want, +got) =", cmp.Diff(want, got, cmp.AllowUnexported(resource.Quantity{}))) - } -} ->>>>>>> Add multi container changes to deploy PodSpec diff --git a/pkg/reconciler/revision/resources/queue_test.go b/pkg/reconciler/revision/resources/queue_test.go index 2a5a674492a2..1a1a8cdd6788 100644 --- a/pkg/reconciler/revision/resources/queue_test.go +++ b/pkg/reconciler/revision/resources/queue_test.go @@ -56,6 +56,12 @@ var ( }, } + containers = []corev1.Container{{ + Name: servingContainerName, + Image: "busybox", + ReadinessProbe: withTCPReadinessProbe(v1.DefaultUserPort), + }} + // The default CM values. 
logConfig logging.Config traceConfig tracingconfig.Config @@ -77,14 +83,8 @@ func TestMakeQueueContainer(t *testing.T) { }{{ name: "no owner no autoscaler single", rev: revision("bar", "foo", - withContainers([]corev1.Container{{ - ReadinessProbe: withTCPReadinessProbe(v1.DefaultUserPort), - }}), - withContainerConcurrency(1), - func(revision *v1.Revision) { - container(revision.Spec.GetContainer(), withTCPReadinessProbe()) - }), - ), + withContainers(containers), + withContainerConcurrency(1)), want: queueContainer(func(c *corev1.Container) { c.Env = env(map[string]string{ "CONTAINER_CONCURRENCY": "1", @@ -101,20 +101,7 @@ func TestMakeQueueContainer(t *testing.T) { Name: string(networking.ProtocolH2C), }}, }}), - withContainerConcurrency(1), - func(revision *v1.Revision) { - revision.Spec.PodSpec = corev1.PodSpec{ - Containers: []corev1.Container{{ - Name: servingContainerName, - ReadinessProbe: testProbe, - Ports: []corev1.ContainerPort{{ - ContainerPort: 1955, - Name: string(networking.ProtocolH2C), - }}, - }}, - } - }), - ), + withContainerConcurrency(1)), cc: deployment.Config{ QueueSidecarImage: "alpine", }, @@ -130,9 +117,7 @@ func TestMakeQueueContainer(t *testing.T) { }, { name: "service name in labels", rev: revision("bar", "foo", - withContainers([]corev1.Container{{ - ReadinessProbe: withTCPReadinessProbe(v1.DefaultUserPort), - }}), + withContainers(containers), withContainerConcurrency(1), func(revision *v1.Revision) { revision.ObjectMeta.Labels = map[string]string{ @@ -147,9 +132,7 @@ func TestMakeQueueContainer(t *testing.T) { }, { name: "config owner as env var, zero concurrency", rev: revision("blah", "baz", - withContainers([]corev1.Container{{ - ReadinessProbe: withTCPReadinessProbe(v1.DefaultUserPort), - }}), + withContainers(containers), withContainerConcurrency(0), func(revision *v1.Revision) { revision.ObjectMeta.OwnerReferences = []metav1.OwnerReference{{ @@ -171,14 +154,8 @@ func TestMakeQueueContainer(t *testing.T) { }, { name: "logging 
configuration as env var", rev: revision("this", "log", - withContainerConcurrency(1), - func(revision *v1.Revision) { - container(revision.Spec.GetContainer(), withTCPReadinessProbe()) - }), - withContainers([]corev1.Container{{ - ReadinessProbe: withTCPReadinessProbe(v1.DefaultUserPort), - }}), - withContainerConcurrency(0)), + withContainers(containers), + withContainerConcurrency(1)), lc: logging.Config{ LoggingConfig: "The logging configuration goes here", LoggingLevel: map[string]zapcore.Level{ @@ -196,13 +173,7 @@ func TestMakeQueueContainer(t *testing.T) { }, { name: "container concurrency 10", rev: revision("bar", "foo", - withContainerConcurrency(10), - func(revision *v1.Revision) { - container(revision.Spec.GetContainer(), withTCPReadinessProbe()) - }), - withContainers([]corev1.Container{{ - ReadinessProbe: withTCPReadinessProbe(v1.DefaultUserPort), - }}), + withContainers(containers), withContainerConcurrency(10)), want: queueContainer(func(c *corev1.Container) { c.Env = env(map[string]string{ @@ -212,14 +183,8 @@ func TestMakeQueueContainer(t *testing.T) { }, { name: "request log configuration as env var", rev: revision("bar", "foo", - withContainerConcurrency(1), - func(revision *v1.Revision) { - container(revision.Spec.GetContainer(), withTCPReadinessProbe()) - }), - withContainers([]corev1.Container{{ - ReadinessProbe: withTCPReadinessProbe(v1.DefaultUserPort), - }}), - withContainerConcurrency(0)), + withContainers(containers), + withContainerConcurrency(1)), oc: metrics.ObservabilityConfig{ RequestLogTemplate: "test template", EnableProbeRequestLog: true, @@ -233,14 +198,8 @@ func TestMakeQueueContainer(t *testing.T) { }, { name: "request metrics backend as env var", rev: revision("bar", "foo", - withContainerConcurrency(1), - func(revision *v1.Revision) { - container(revision.Spec.GetContainer(), withTCPReadinessProbe()) - }), - withContainers([]corev1.Container{{ - ReadinessProbe: withTCPReadinessProbe(v1.DefaultUserPort), - }}), - 
withContainerConcurrency(0)), + withContainers(containers), + withContainerConcurrency(1)), oc: metrics.ObservabilityConfig{ RequestMetricsBackend: "prometheus", }, @@ -252,16 +211,8 @@ func TestMakeQueueContainer(t *testing.T) { }, { name: "enable profiling", rev: revision("bar", "foo", - withContainerConcurrency(1), - func(revision *v1.Revision) { - container(revision.Spec.GetContainer(), withTCPReadinessProbe()) - }), -======= - withContainers([]corev1.Container{{ - ReadinessProbe: withTCPReadinessProbe(v1.DefaultUserPort), - }}), - withContainerConcurrency(0)), ->>>>>>> Refactored testcase + withContainers(containers), + withContainerConcurrency(1)), oc: metrics.ObservabilityConfig{EnableProfiling: true}, want: queueContainer(func(c *corev1.Container) { c.Env = env(map[string]string{ @@ -272,14 +223,11 @@ func TestMakeQueueContainer(t *testing.T) { }, { name: "custom TimeoutSeconds", rev: revision("bar", "foo", - withContainerConcurrency(1), - func(revision *v1.Revision) { - container(revision.Spec.GetContainer(), withTCPReadinessProbe()) - revision.Spec.TimeoutSeconds = ptr.Int64(99) - }), + withContainers(containers), + withContainerConcurrency(1)), want: queueContainer(func(c *corev1.Container) { c.Env = env(map[string]string{ - "REVISION_TIMEOUT_SECONDS": "99", + "REVISION_TIMEOUT_SECONDS": "45", }) }), }}