From c172e3b562230dac00ae9b38041694a05224c1d7 Mon Sep 17 00:00:00 2001 From: Guido Iaquinti <4038041+guidoiaquinti@users.noreply.github.com> Date: Mon, 22 Nov 2021 17:35:13 +0100 Subject: [PATCH] e2e - DigitalOcean (install) (#196) --- .../test-digital-ocean-1-click-install.yaml | 112 ++++++++++++++ .../workflows/test-digital-ocean-install.yaml | 141 ++++++++++++++++++ .github/workflows/test-helm-chart.yaml | 2 +- .../workflows/test-install-digital-ocean.yaml | 83 ----------- ci/setup_ingestion_test.sh | 1 + ci/values/digital_ocean.yaml | 7 + 6 files changed, 262 insertions(+), 84 deletions(-) create mode 100644 .github/workflows/test-digital-ocean-1-click-install.yaml create mode 100644 .github/workflows/test-digital-ocean-install.yaml delete mode 100644 .github/workflows/test-install-digital-ocean.yaml create mode 100644 ci/values/digital_ocean.yaml diff --git a/.github/workflows/test-digital-ocean-1-click-install.yaml b/.github/workflows/test-digital-ocean-1-click-install.yaml new file mode 100644 index 000000000..637da41b6 --- /dev/null +++ b/.github/workflows/test-digital-ocean-1-click-install.yaml @@ -0,0 +1,112 @@ +# # +# # This is an e2e test to deploy PostHog on DigitalOcean using DigitalOcean's 1-click app install. +# # +# # TODO: +# # - run k8s spec test +# # - run action only when necessary +# # - test the "Securing your 1-click install" https://posthog.com/docs/self-host/deploy/digital-ocean#securing-your-1-click-install +# # +# name: e2e - DigitalOcean "1-click" (install) + +# # +# # Unfortunately we can test this scenario only after merging to 'main' as we don't have +# # control over the branch used by the DigitalOcean's marketplace 1-click install. +# # +# # ref: https://github.com/digitalocean/marketplace-kubernetes/blob/master/stacks/posthog/deploy.sh +# # +# on: +# push: +# branches: +# - main + +# jobs: +# do-install: +# runs-on: ubuntu-20.04 +# if: github.repository == 'PostHog/charts-clickhouse' +# steps: + +# - name: Checkout +# uses: actions/checkout@v2 + +# - name: Install doctl +# uses: digitalocean/action-doctl@v2 +# with: +# token: ${{ secrets.DIGITALOCEAN_ACCESS_TOKEN }} + +# - name: Declare variables that we can share across steps +# id: vars +# run: | +# TEST_NAME="helm-test-e2e-do-1-click-$(git rev-parse --short HEAD)" +# echo "::set-output name=k8s_cluster_name::${TEST_NAME}" + +# - name: Deploy a new k8s cluster +# id: k8s_cluster_creation +# run: | +# doctl k8s clusters create \ +# ${{ steps.vars.outputs.k8s_cluster_name }} \ +# --version 1.21.5-do.0 \ +# --tag="provisioned_by:github_action" \ +# --size s-2vcpu-4gb \ +# --count 2 \ +# --wait \ +# --1-clicks \ +# posthog + +# # +# # Wait for all k8s resources to be ready. +# # +# # Despite the --wait flag used in the command above +# # there is no guarantee that all the resources will be deployed +# # when the command returns. +# # +# # Why can't we directly use the 'action-k8s-await-workloads' step below? +# # Because it's not working for this use case +# # +# # ref: https://github.com/jupyterhub/action-k8s-await-workloads/issues/38 +# # +# - name: Workaround - wait for all the k8s resources to be ready +# timeout-minutes: 15 +# run: | +# echo "Waiting for pods to be ready..." +# while ! kubectl wait --for=condition=Ready pods --timeout=60s --all -n posthog > /dev/null 2>&1 +# do +# echo " pods are not yet ready" +# done +# echo "All pods are now ready!" + +# echo "Waiting for the DigitalOcean Load Balancer to be ready..." 
+# load_balancer_external_ip="" +# while [ -z "$load_balancer_external_ip" ]; +# do +# load_balancer_external_ip=$(kubectl get ingress -n posthog posthog -o jsonpath="{.status.loadBalancer.ingress[0].ip}") +# [ -z "$load_balancer_external_ip" ] && echo " sleeping 10 seconds" && sleep 10 +# done +# echo "The DigitalOcean Load Balancer is now ready!" + +# - name: Wait until all the resources are fully deployed in k8s +# uses: jupyterhub/action-k8s-await-workloads@main +# with: +# namespace: "posthog" +# timeout: 300 +# max-restarts: 10 + +# - name: Setup PostHog for the ingestion test +# run: ./ci/setup_ingestion_test.sh + +# - name: Set PostHog endpoints to use for the ingestion test +# run: | +# # Get the Load Balancer IP address +# load_balancer_external_ip=$(kubectl get ingress -n posthog posthog -o jsonpath="{.status.loadBalancer.ingress[0].ip}") + +# echo "POSTHOG_API_ENDPOINT=http://${load_balancer_external_ip}" | tee -a "$GITHUB_ENV" +# echo "POSTHOG_EVENT_ENDPOINT=http://${load_balancer_external_ip}" | tee -a "$GITHUB_ENV" + +# - name: Run ingestion test using k6 +# uses: k6io/action@v0.2.0 +# with: +# filename: ci/k6-ingestion-test.js + +# - name: Delete the k8s cluster and all associated resources (LB, volumes, ...) +# if: ${{ always() && steps.k8s_cluster_creation.outcome == 'success' }} +# run: | +# doctl k8s cluster delete --dangerous --force ${{ steps.vars.outputs.k8s_cluster_name }} diff --git a/.github/workflows/test-digital-ocean-install.yaml b/.github/workflows/test-digital-ocean-install.yaml new file mode 100644 index 000000000..1cdc1e02f --- /dev/null +++ b/.github/workflows/test-digital-ocean-install.yaml @@ -0,0 +1,141 @@ +# +# This is an e2e test to deploy PostHog on DigitalOcean using Helm. +# +# TODO: +# - run k8s spec test +# - run action only when necessary +# +name: e2e - DigitalOcean (install) + +on: push + +jobs: + do-install: + runs-on: ubuntu-20.04 + if: github.repository == 'PostHog/charts-clickhouse' + steps: + + - name: Checkout + uses: actions/checkout@v2 + + - name: Install doctl + uses: digitalocean/action-doctl@v2 + with: + token: ${{ secrets.DIGITALOCEAN_ACCESS_TOKEN }} + + - name: Declare variables that we can share across steps + id: vars + run: | + TEST_NAME="helm-test-e2e-do-install-$(git rev-parse --short HEAD)" + echo "::set-output name=k8s_cluster_name::${TEST_NAME}" + echo "::set-output name=dns_record::${TEST_NAME}" + echo "::set-output name=fqdn_record::${TEST_NAME}.posthog.cc" + + - name: Deploy a new k8s cluster + id: k8s_cluster_creation + run: | + doctl k8s clusters create \ + ${{ steps.vars.outputs.k8s_cluster_name }} \ + --version 1.21.5-do.0 \ + --tag="provisioned_by:github_action" \ + --size s-2vcpu-4gb \ + --count 2 \ + --wait + + - name: Install PostHog using the Helm chart + run: | + helm upgrade --install \ + -f ci/values/digital_ocean.yaml \ + --set "ingress.hostname=${{ steps.vars.outputs.fqdn_record }}" \ + --timeout 20m \ + --create-namespace \ + --namespace posthog \ + posthog ./charts/posthog \ + --wait-for-jobs \ + --wait + + # + # Wait for all k8s resources to be ready. + # + # Despite the --wait flag used in the command above + # there is no guarantee that all the resources will be deployed + # when the command returns. + # + # + # Why can't we directly use the 'action-k8s-await-workloads' step below? 
+ # Because it's not working for this use case + # + # ref: https://github.com/jupyterhub/action-k8s-await-workloads/issues/38 + # + - name: Workaround - wait for all the k8s resources to be ready + timeout-minutes: 15 + run: | + echo "Waiting for pods to be ready..." + while ! kubectl wait --for=condition=Ready pods --timeout=60s --all -n posthog > /dev/null 2>&1 + do + echo " pods are not yet ready" + done + echo "All pods are now ready!" + + echo "Waiting for the DigitalOcean Load Balancer to be ready..." + load_balancer_external_ip="" + while [ -z "$load_balancer_external_ip" ]; + do + load_balancer_external_ip=$(kubectl get ingress -n posthog posthog -o jsonpath="{.status.loadBalancer.ingress[0].ip}") + [ -z "$load_balancer_external_ip" ] && echo " sleeping 10 seconds" && sleep 10 + done + echo "The DigitalOcean Load Balancer is now ready!" + + - name: Wait until all the resources are fully deployed in k8s + uses: jupyterhub/action-k8s-await-workloads@main + with: + namespace: "posthog" + timeout: 300 + max-restarts: 10 + + - name: Create the DNS record + id: dns_creation + run: | + # Get the Load Balancer IP address + load_balancer_external_ip=$(kubectl get ingress -n posthog posthog -o jsonpath="{.status.loadBalancer.ingress[0].ip}") + + # Create the DNS record + doctl compute domain records create \ + posthog.cc \ + --record-type A \ + --record-ttl 60 \ + --record-name "${{ steps.vars.outputs.dns_record }}" \ + --record-data "$load_balancer_external_ip" + + - name: Wait for the Let's Encrypt certificate to be issued and deployed + run: | + echo "Wait for the Let's Encrypt certificate to be issued and deployed..." + while ! kubectl wait --for=condition=Ready --timeout=60s certificaterequest --all -n posthog > /dev/null 2>&1 + do + echo " certificate hasn't been yet issued and deployed" + done + echo "The certificate has been issued and it has been deployed!" + + - name: Setup PostHog for the ingestion test + run: ./ci/setup_ingestion_test.sh + + - name: Set PostHog endpoints to use for the ingestion test + run: | + echo "POSTHOG_API_ENDPOINT=https://${{ steps.vars.outputs.fqdn_record }}" | tee -a "$GITHUB_ENV" + echo "POSTHOG_EVENT_ENDPOINT=https://${{ steps.vars.outputs.fqdn_record }}" | tee -a "$GITHUB_ENV" + + - name: Run ingestion test using k6 + uses: k6io/action@v0.2.0 + with: + filename: ci/k6-ingestion-test.js + + - name: Delete the k8s cluster and all associated resources (LB, volumes, ...) 
+ if: ${{ always() && steps.k8s_cluster_creation.outcome == 'success' }} + run: | + doctl k8s cluster delete --dangerous --force ${{ steps.vars.outputs.k8s_cluster_name }} + + - name: Delete the DNS record + if: ${{ always() && steps.dns_creation.outcome == 'success' }} + run: | + DNS_RECORD_ID=$(doctl compute domain records list posthog.cc --no-header --format ID,Name | grep ${{ steps.vars.outputs.dns_record }} | awk '{print $1}') + doctl compute domain records delete --force posthog.cc "$DNS_RECORD_ID" diff --git a/.github/workflows/test-helm-chart.yaml b/.github/workflows/test-helm-chart.yaml index bcfbbb711..b52855da3 100644 --- a/.github/workflows/test-helm-chart.yaml +++ b/.github/workflows/test-helm-chart.yaml @@ -89,7 +89,7 @@ jobs: - name: Setup PostHog for the ingestion test run: ./ci/setup_ingestion_test.sh - - name: Fetch PostHog endpoints to use for the ingestion test + - name: Set PostHog endpoints to use for the ingestion test run: | POSTHOG_API_ADDRESS=$(kubectl get svc -n posthog posthog-web -o jsonpath="{.spec.clusterIP}") POSTHOG_EVENTS_ADDRESS=$(kubectl get svc -n posthog posthog-events -o jsonpath="{.spec.clusterIP}") diff --git a/.github/workflows/test-install-digital-ocean.yaml b/.github/workflows/test-install-digital-ocean.yaml deleted file mode 100644 index 2a4b07978..000000000 --- a/.github/workflows/test-install-digital-ocean.yaml +++ /dev/null @@ -1,83 +0,0 @@ -name: e2e - DigitalOcean 1-click (install) - -on: push - -jobs: - do-install: - runs-on: ubuntu-latest - if: github.repository == 'PostHog/charts-clickhouse' - steps: - - - name: Checkout - uses: actions/checkout@v2 - - - name: Install doctl - uses: digitalocean/action-doctl@v2 - with: - token: ${{ secrets.DIGITALOCEAN_ACCESS_TOKEN }} - - - name: Declare variables that we can share across steps - id: vars - run: | - echo "::set-output name=k8s_cluster_name::helm-e2e-testing-do-1-click-$(git rev-parse --short HEAD)" - - - name: Deploy a new k8s cluster - run: | - doctl k8s clusters create \ - ${{ steps.vars.outputs.k8s_cluster_name }} \ - --region fra1 \ - --version 1.21.5-do.0 \ - --tag="provisioned_by:github_action" \ - --size s-2vcpu-2gb \ - --count 2 \ - --wait \ - --1-clicks \ - posthog - - # Wait for the DigitalOcean marketplace to complete the installation - # - # Despite the --wait flag used in the command above - # there is no guarantee the PostHog app will be deployed - # when the command returns. With this workaround we wait - # until the Kubernetes keyspace is created. - # - # ref: https://github.com/digitalocean/doctl/issues/1063 - # - # Why can't we directly use the 'action-k8s-await-workloads' step below? - # Because it's not working for this use case - # - # ref: https://github.com/jupyterhub/action-k8s-await-workloads/issues/38 - # - - name: Workaround - wait until the 'posthog' pods are ready - timeout-minutes: 15 - run: | - echo "Waiting for pods to be ready..." - while ! kubectl wait --for=condition=Ready pods --timeout=60s --all -n posthog > /dev/null 2>&1 - do - echo " sleeping 10 seconds" - sleep 10 - done - echo "All pods are now ready!" 
- - - name: Wait until all the resources are fully deployed in k8s - uses: jupyterhub/action-k8s-await-workloads@main - with: - namespace: "posthog" - timeout: 300 - max-restarts: 10 - - # TODO - # - setup ingestion test - # - run ingestion test - # - run k8s spec test - - - name: Emit k8s namespace report - uses: jupyterhub/action-k8s-namespace-report@v1 - if: always() - with: - namespace: "posthog" - - - name: Delete the k8s cluster and all associated resources (LB, volumes, ...) - if: always() - run: | - doctl k8s cluster delete --dangerous --force ${{ steps.vars.outputs.k8s_cluster_name }} diff --git a/ci/setup_ingestion_test.sh b/ci/setup_ingestion_test.sh index deddd135c..06f2aa1c3 100755 --- a/ci/setup_ingestion_test.sh +++ b/ci/setup_ingestion_test.sh @@ -6,4 +6,5 @@ sleep 10 # TODO: remove this. It was added as the command below often errors with 'unable to upgrade connection: container not found ("posthog-web")' WEB_POD=$(kubectl get pods -n posthog -l role=web -o jsonpath="{.items[].metadata.name}") + kubectl exec "$WEB_POD" -n posthog -- python manage.py setup_dev --no-data diff --git a/ci/values/digital_ocean.yaml b/ci/values/digital_ocean.yaml new file mode 100644 index 000000000..e5e2558f7 --- /dev/null +++ b/ci/values/digital_ocean.yaml @@ -0,0 +1,7 @@ +cloud: "do" +ingress: + hostname: + nginx: + enabled: true +cert-manager: + enabled: true
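
The new install workflow has to sequence three external dependencies: the ingress only gets a public address once DigitalOcean has finished provisioning the Load Balancer, the A record under posthog.cc can only be created once that address is known, and the Let's Encrypt certificate can only be issued and deployed once cert-manager can complete its request for that hostname. Outside CI, the same sequence can be reproduced with a minimal sketch like the one below, assuming doctl is already authenticated and kubectl points at the test cluster; RECORD_NAME is a hypothetical stand-in for the SHA-derived name the workflow generates.

#!/usr/bin/env bash
set -eu

RECORD_NAME="helm-test-e2e-do-install-abc1234"   # hypothetical; CI derives this from the git SHA
FQDN="${RECORD_NAME}.posthog.cc"

# 1. Wait for the DigitalOcean Load Balancer behind the posthog ingress to expose an external IP.
load_balancer_external_ip=""
while [ -z "$load_balancer_external_ip" ]; do
  load_balancer_external_ip=$(kubectl get ingress -n posthog posthog \
    -o jsonpath="{.status.loadBalancer.ingress[0].ip}")
  if [ -z "$load_balancer_external_ip" ]; then
    echo "  Load Balancer not ready yet, sleeping 10 seconds"
    sleep 10
  fi
done

# 2. Point an A record in the posthog.cc zone at the Load Balancer.
doctl compute domain records create \
  posthog.cc \
  --record-type A \
  --record-ttl 60 \
  --record-name "$RECORD_NAME" \
  --record-data "$load_balancer_external_ip"

# 3. Wait for cert-manager to report the certificate request as Ready.
while ! kubectl wait --for=condition=Ready --timeout=60s certificaterequest --all -n posthog > /dev/null 2>&1; do
  echo "  certificate hasn't been issued and deployed yet"
done

echo "PostHog should now answer on https://${FQDN}"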
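
Teardown mirrors this: the cleanup steps run under always() but are gated on the outcome of the corresponding creation step, so a failed provisioning run never tries to delete resources that were never created. Deleting the cluster also removes the resources DigitalOcean attached to it (Load Balancer, volumes, ...), while the A record lives in the posthog.cc zone and has to be looked up by name and removed by ID separately. A sketch of the same teardown with doctl directly, reusing the hypothetical name from above:

#!/usr/bin/env bash
set -eu

RECORD_NAME="helm-test-e2e-do-install-abc1234"   # hypothetical; must match the name used at creation time
K8S_CLUSTER_NAME="$RECORD_NAME"                   # the workflow reuses the same SHA-derived name for both

# Delete the cluster and everything DigitalOcean provisioned for it (LB, volumes, ...).
doctl k8s cluster delete --dangerous --force "$K8S_CLUSTER_NAME"

# The DNS record is not tied to the cluster: look up its ID by name and delete it explicitly.
DNS_RECORD_ID=$(doctl compute domain records list posthog.cc --no-header --format ID,Name \
  | grep "$RECORD_NAME" | awk '{print $1}')
doctl compute domain records delete --force posthog.cc "$DNS_RECORD_ID"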