Skip to content

Commit

Permalink
Ensure kaniko e2e test pushes image to registry
Browse files Browse the repository at this point in the history
Adds `github.com/google/go-containerregistry` as a dependency for
accessing container registries

Adds `KANIKO_SECRET_CONFIG_FILE` for adding a service account to the
kaniko task when running e2e test locally to ensure kaniko is able
to push to a gcr.io registry. This is necessary for running the e2e
tests locally unless the kubernetes nodes provide another way of
authenticating to the registry, such as provisioning them with a
storage admin scope.

Once tektoncd#151 is in place, we will be able to update this code to create
a service account and use that in the BuildSpec.

Fixes tektoncd#150
  • Loading branch information
Tanner Bruce committed Oct 16, 2018
1 parent fd9fb0e commit d987d1e
Show file tree
Hide file tree
Showing 54 changed files with 4,606 additions and 27 deletions.
20 changes: 20 additions & 0 deletions Gopkg.lock

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

30 changes: 30 additions & 0 deletions test/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -91,6 +91,36 @@ pipelineRunsInformer.Informer().GetIndexer().Add(obj)
## Integration tests
### Setup
As well as requiring the environment variable `KO_DOCKER_REPO` variable, you may also
require authentication inside the Build to run the Kaniko e2e test. If so, setting
`KANIKO_SECRET_CONFIG_FILE` to be a path to a GCP service account JSON key which has
permissions to push to the registry specified in `KO_DOCKER_REPO` will enable Kaniko
to use those credentials when pushing.
To quickly create a service account usable for the e2e tests:
```shell
PROJECT=your-gcp-project
ACCOUNT_NAME=service-account-name
gcloud config set project $PROJECT
# create the service account
gcloud iam service-accounts create $ACCOUNT_NAME --display-name $ACCOUNT_NAME
EMAIL=$(gcloud iam service-accounts list | grep $ACCOUNT_NAME | awk '{print $2}')
# add the storage.admin policy to the account so it can push containers
gcloud projects add-iam-policy-binding $PROJECT --member serviceAccount:$EMAIL --role roles/storage.admin
# create the JSON key
gcloud iam service-accounts keys create config.json --iam-account $EMAIL
export KANIKO_SECRET_CONFIG_FILE="$PWD/config.json"
```
### Running
Integration tests live in this directory. To run these tests, you must provide `go` with
`-tags=e2e`. By default the tests run against your current kubeconfig context,
but you can change that and other settings with [the flags](#flags):
Expand Down
171 changes: 144 additions & 27 deletions test/kaniko_task_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -16,15 +16,17 @@ limitations under the License.
package test

import (
"bufio"
"bytes"
"fmt"
"io"
"io/ioutil"
"os"
"regexp"
"strings"
"testing"
"time"

"github.com/google/go-containerregistry/pkg/authn"
"github.com/google/go-containerregistry/pkg/name"
"github.com/google/go-containerregistry/pkg/v1/remote"
buildv1alpha1 "github.com/knative/build/pkg/apis/build/v1alpha1"
duckv1alpha1 "github.com/knative/pkg/apis/duck/v1alpha1"
knativetest "github.com/knative/pkg/test"
Expand All @@ -51,7 +53,7 @@ func getGitResource(namespace string) *v1alpha1.PipelineResource {
Spec: v1alpha1.PipelineResourceSpec{
Type: v1alpha1.PipelineResourceTypeGit,
Params: []v1alpha1.Param{
v1alpha1.Param{
{
Name: "Url",
Value: "https://github.com/pivotal-nader-ziada/gohelloworld",
},
Expand All @@ -60,41 +62,96 @@ func getGitResource(namespace string) *v1alpha1.PipelineResource {
}
}

// getDockerRepo returns the image path ("<KO_DOCKER_REPO>/kanikotasktest")
// that the kaniko e2e test should push to.
// According to the knative/test-infra readme
// (https://github.com/knative/test-infra/blob/13055d769cc5e1756e605fcb3bcc1c25376699f1/scripts/README.md)
// KO_DOCKER_REPO will be set according to the project where the cluster is
// created; it is used here to dynamically get the docker registry to push
// the image to. An error is returned when KO_DOCKER_REPO is unset or empty.
func getDockerRepo() (string, error) {
	dockerRepo := os.Getenv("KO_DOCKER_REPO")
	if dockerRepo == "" {
		return "", fmt.Errorf("KO_DOCKER_REPO env variable is required")
	}
	return fmt.Sprintf("%s/kanikotasktest", dockerRepo), nil
}

func createSecret(c *knativetest.KubeClient, namespace string) (bool, error) {
// when running e2e in cluster, this will not be set so just hop out early
file := os.Getenv("KANIKO_SECRET_CONFIG_FILE")
if file == "" {
return false, nil
}

return &v1alpha1.Task{
sec := &corev1.Secret{}
sec.Name = "kaniko-secret"
sec.Namespace = namespace

bs, err := ioutil.ReadFile(file)
if err != nil {
return false, fmt.Errorf("couldn't read kaniko secret json: %v", err)
}

sec.Data = map[string][]byte{
"config.json": bs,
}
_, err = c.Kube.CoreV1().Secrets(namespace).Create(sec)
return true, err
}

func getTask(repo, namespace string, withSecretConfig bool) *v1alpha1.Task {
task := &v1alpha1.Task{
ObjectMeta: metav1.ObjectMeta{
Namespace: namespace,
Name: kanikoTaskName,
},
Spec: v1alpha1.TaskSpec{
Inputs: &v1alpha1.Inputs{
Resources: []v1alpha1.TaskResource{
v1alpha1.TaskResource{
{
Name: kanikoResourceName,
Type: v1alpha1.PipelineResourceTypeGit,
},
},
},
BuildSpec: &buildv1alpha1.BuildSpec{
Timeout: &metav1.Duration{Duration: 2 * time.Minute},
Steps: []corev1.Container{{
Name: "kaniko",
Image: "gcr.io/kaniko-project/executor",
Args: []string{"--dockerfile=/workspace/Dockerfile",
fmt.Sprintf("--destination=%s/kanikotasktest", dockerRepo),
},
}},
},
},
}

step := corev1.Container{
Name: "kaniko",
Image: "gcr.io/kaniko-project/executor",
Args: []string{"--dockerfile=/workspace/Dockerfile",
fmt.Sprintf("--destination=%s", repo),
},
}
if withSecretConfig {
step.VolumeMounts = []corev1.VolumeMount{
{
Name: "kaniko-secret",
MountPath: "/secrets",
},
}
step.Env = []corev1.EnvVar{
{
Name: "GOOGLE_APPLICATION_CREDENTIALS",
Value: "/secrets/config.json",
},
}
task.Spec.BuildSpec.Volumes = []corev1.Volume{
{
Name: "kaniko-secret",
VolumeSource: corev1.VolumeSource{
Secret: &corev1.SecretVolumeSource{
SecretName: "kaniko-secret",
},
},
},
}
}

task.Spec.BuildSpec.Steps = []corev1.Container{step}

return task
}

func getTaskRun(namespace string) *v1alpha1.TaskRun {
Expand All @@ -114,7 +171,7 @@ func getTaskRun(namespace string) *v1alpha1.TaskRun {
},
Inputs: v1alpha1.TaskRunInputs{
Resources: []v1alpha1.PipelineResourceVersion{
v1alpha1.PipelineResourceVersion{
{
ResourceRef: v1alpha1.PipelineResourceRef{
Name: kanikoResourceName,
},
Expand All @@ -131,16 +188,26 @@ func TestKanikoTaskRun(t *testing.T) {
logger := logging.GetContextLogger(t.Name())
c, namespace := setup(t, logger)

repo, err := getDockerRepo()
if err != nil {
t.Errorf("Expected to get docker repo")
}

knativetest.CleanupOnInterrupt(func() { tearDown(logger, c.KubeClient, namespace) }, logger)
defer tearDown(logger, c.KubeClient, namespace)

hasSecretConfig, err := createSecret(c.KubeClient, namespace)
if err != nil {
t.Fatalf("Expected to create kaniko creds: %v", err)
}

logger.Infof("Creating Git PipelineResource %s", kanikoResourceName)
if _, err := c.PipelineResourceClient.Create(getGitResource(namespace)); err != nil {
t.Fatalf("Failed to create Pipeline Resource `%s`: %s", kanikoResourceName, err)
}

logger.Infof("Creating Task %s", kanikoTaskName)
if _, err := c.TaskClient.Create(getTask(namespace, t)); err != nil {
if _, err := c.TaskClient.Create(getTask(repo, namespace, hasSecretConfig)); err != nil {
t.Fatalf("Failed to create Task `%s`: %s", kanikoTaskName, err)
}

Expand Down Expand Up @@ -175,19 +242,69 @@ func TestKanikoTaskRun(t *testing.T) {
}
podName := cluster.PodName
pods := c.KubeClient.Kube.CoreV1().Pods(namespace)
t.Logf("Retrieved pods for podname %s: %s\n", podName, pods)
logger.Infof("Retrieved pods for podname %s: %s\n", podName, pods)

req := pods.GetLogs(podName, &corev1.PodLogOptions{})
// get the logs for the kaniko step
req := pods.GetLogs(podName, &corev1.PodLogOptions{Container: "build-step-kaniko"})
readCloser, err := req.Stream()
if err != nil {
t.Fatalf("Failed to open stream to read: %v", err)
}
defer readCloser.Close()
var buf bytes.Buffer
out := bufio.NewWriter(&buf)
_, err = io.Copy(out, readCloser)
if !strings.Contains(buf.String(), kanikoBuildOutput) {
t.Fatalf("Expected output %s from pod %s but got %s", kanikoBuildOutput, podName, buf.String())
bs, err := ioutil.ReadAll(readCloser)
readCloser.Close()
if err != nil {
t.Fatalf("Failed to read build-step-kaniko log stream: %v", err)
}
initLogs := string(bs)

// get the pods' logs
req = pods.GetLogs(podName, &corev1.PodLogOptions{})
readCloser, err = req.Stream()
if err != nil {
t.Fatalf("Failed to open stream to read: %v", err)
}
bs, err = ioutil.ReadAll(readCloser)
readCloser.Close()
if err != nil {
t.Fatalf("Failed to read nop container log stream: %v", err)
}
podLogs := string(bs)

// check the logs match what we expect
if !strings.Contains(podLogs, kanikoBuildOutput) {
t.Fatalf("Expected output %s from pod %s but got %s", kanikoBuildOutput, podName, string(bs))
}
// make sure the pushed digest matches the one we pushed
re := regexp.MustCompile("digest: (sha256:\\w+)")
match := re.FindStringSubmatch(initLogs)
// make sure we found a match and it has the capture group
if len(match) != 2 {
t.Fatalf("Expected to find an image digest in the build output")
}
// match the local digest, which is first capture group against
// the remote image
digest := match[1]
remoteDigest, err := getRemoteDigest(repo)
if err != nil {
t.Fatalf("Expected to get digest for remote image %s", repo)
}
if digest != remoteDigest {
t.Fatalf("Expected local digest %s to match remote digest %s", digest, remoteDigest)
}
}

// getRemoteDigest fetches the manifest digest (e.g. "sha256:...") of the
// given image reference from the remote registry, authenticating with the
// default keychain (local docker/gcloud credentials).
func getRemoteDigest(image string) (string, error) {
	ref, err := name.ParseReference(image, name.WeakValidation)
	if err != nil {
		// error strings are lowercase per Go convention
		return "", fmt.Errorf("could not parse image reference %q: %v", image, err)
	}
	img, err := remote.Image(ref, remote.WithAuthFromKeychain(authn.DefaultKeychain))
	if err != nil {
		return "", fmt.Errorf("could not pull remote ref %s: %v", ref, err)
	}
	digest, err := img.Digest()
	if err != nil {
		// use the image string here: the remote image value has no
		// useful %s formatting
		return "", fmt.Errorf("could not get digest for image %s: %v", image, err)
	}
	return digest.String(), nil
}
Loading

0 comments on commit d987d1e

Please sign in to comment.