E2E #55

Workflow file for this run

name: E2E
on:
  workflow_dispatch:
env:
  COMMIT_NAME: Monkeynator
  COMMIT_EMAIL: [email protected]
jobs:
  build:
    name: Build test image
    runs-on: ubuntu-22.04
    env:
      VERSION: ${{ github.run_id }}
      GHCR_REGISTRY: "ghcr.io"
      QUAY_IMAGE: "quay.io/enix/kube-image-keeper"
    steps:
      - name: Checkout Repository
        uses: actions/checkout@v4
        with:
          path: repository
      - name: check-for-cc
        id: check-for-cc
        uses: webiny/[email protected]
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3
      - name: Log in to the Container registry
        uses: docker/login-action@v3
        with:
          registry: ${{ env.GHCR_REGISTRY }}
          username: ${{ env.COMMIT_NAME }}
          password: ${{ secrets.RELEASE_GITHUB_TOKEN }}
      - name: Generate image metadata
        id: meta
        uses: docker/metadata-action@v5
        with:
          images: |
            ${{ env.GHCR_REGISTRY }}/${{ github.repository }}
            ${{ github.repository }}
            ${{ env.QUAY_IMAGE }}
      - name: Build container images
        uses: docker/build-push-action@v5
        with:
          context: repository
          platforms: linux/amd64,linux/arm64
          build-args: |
            "VERSION=${{ env.VERSION }}"
            "REVISION=${{ github.sha }}"
          push: true
          labels: ${{ steps.meta.outputs.labels }}
          tags: |
            ${{ env.GHCR_REGISTRY }}/${{ github.repository }}:${{ env.VERSION }}
      - name: Build alpine container images
        uses: docker/build-push-action@v5
        with:
          context: repository
          platforms: linux/amd64,linux/arm64
          build-args: |
            "VERSION=${{ env.VERSION }}"
            "REVISION=${{ github.sha }}"
          target: alpine
          push: true
          labels: ${{ steps.meta.outputs.labels }}
          tags: |
            ${{ env.GHCR_REGISTRY }}/${{ github.repository }}:${{ env.VERSION }}-alpine
  e2e:
    name: Tests End-to-End on K8s
    needs:
      - build
    runs-on: ubuntu-22.04
    env:
      VERSION: ${{ github.run_id }}
      GHCR_IMAGE_NAME: "ghcr.io/enix/kube-image-keeper"
      GHCR_REGISTRY: "ghcr.io"
    strategy:
      max-parallel: 6
      matrix:
        k8sversion: ["v1.21.14", "v1.22.17", "v1.23.17", "v1.24.15", "v1.25.11", "v1.26.6", "v1.27.3", "v1.28.0"]
    steps:
      - name: Checkout Repository
        uses: actions/checkout@v4
      - name: Setup KinD
        uses: helm/[email protected]
        with:
          node_image: kindest/node:${{ matrix.k8sversion }}
      - name: Run cert-manager installation
        run: |
          kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.11.0/cert-manager.yaml
          kubectl wait pods -n cert-manager -l app.kubernetes.io/instance=cert-manager --for condition=Ready --timeout=30s
      - name: Set up chart-testing
        uses: helm/[email protected]
      - name: Set up helm
        uses: azure/setup-helm@v3
        with:
          version: '3.9.0'
      - name: Run chart-testing (lint)
        run: |
          set -euo pipefail
          ct lint \
            --charts helm/kube-image-keeper \
            --chart-repos bitnami=https://charts.bitnami.com/bitnami \
            --validate-maintainers=false --check-version-increment=false
      # Needs to wait for the next chart-testing release with the --skip-clean-up flag
      # - name: Run chart-testing (install)
      #   run: |
      #     set -euo pipefail
      #     ct install \
      #       --charts helm/cache-registry \
      #       --helm-extra-set-args "--set controllers.image.tag=latest --set proxy.image.tag=latest"
      - name: Run helm (install)
        run: |
          set -euo pipefail
          kubectl create namespace kuik-system
          kubectl create secret docker-registry ghcr-secret -n kuik-system --docker-server=https://ghcr.io \
            --docker-username=monkeynator --docker-password=${{ secrets.RELEASE_GITHUB_TOKEN }} \
            --docker-email=${{ env.COMMIT_EMAIL }}
          helm upgrade --install kube-image-keeper -n kuik-system --create-namespace ./helm/kube-image-keeper \
            --set controllers.image.tag=${{ env.VERSION }} --set proxy.image.tag=${{ env.VERSION }} \
            --set controllers.image.repository=${{ env.GHCR_IMAGE_NAME }} --set proxy.image.repository=${{ env.GHCR_IMAGE_NAME }} \
            --set controllers.imagePullSecrets[0].name=ghcr-secret --set proxy.image.imagePullSecrets[0].name=ghcr-secret --debug
          kubectl wait pods -n kuik-system -l app.kubernetes.io/instance=kube-image-keeper --for condition=Ready --timeout=30s
      - name: Run end-to-end tests
        run: |
          set -euo pipefail
          kubectl create deploy nginx --image=nginx:stable-alpine --replicas=2
          kubectl wait deployment nginx --for condition=Available=True --timeout=30s
          echo "kubectl get cachedimages"
          kubectl get cachedimages
          # The nginx image must be reported as cached and as used by both replicas
          if [ "$(kubectl get cachedimages -o json | jq -r '.items[0].status.isCached')" = "true" ];
          then
            if [ "$(kubectl get cachedimages -o json | jq '.items[0].status.usedBy.count')" -eq 2 ];
            then
              echo "Found cached image used by 2 pods"
            else
              echo "Error: pod count should equal 2"
              exit 1
            fi
          else
            echo "Error: image cached status is false"
            exit 1
          fi
          # Each proxy and controllers pod must answer on its /metrics endpoint
          for component in proxy controllers
          do
            echo "Testing $component metrics endpoint"
            for ip in $(kubectl get po -l "app.kubernetes.io/component=$component" -n kuik-system -o jsonpath='{range .items[*]}{.status.podIP}{"\n"}{end}')
            do
              attempts=0
              success=false
              while [[ $attempts -lt 3 && $success == false ]]
              do
                response=$(kubectl run curl-pod --image=curlimages/curl --rm -ti --quiet --restart=Never -- curl -s -o /dev/null -w "%{http_code}\n" http://$ip:8080/metrics)
                if [[ $response -ge 200 && $response -lt 300 ]]; then
                  echo "HTTP status code $response is valid for $ip"
                  success=true
                else
                  echo "HTTP status code $response is not valid for $ip"
                  # increment without ((attempts++)), whose zero result would trip set -e
                  attempts=$((attempts + 1))
                  sleep 5
                fi
              done
              if [[ $success == false ]]; then
                echo "Failed after 3 attempts for $ip"
                exit 1
              fi
            done
          done
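
# A minimal sketch of how a run of this dispatch-only workflow can be started from the
# command line, assuming the GitHub CLI is authenticated against this repository
# (the test images are tagged with the resulting run ID via ${{ github.run_id }}):
#
#   gh workflow run E2E
#   gh run list --workflow=E2E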