diff --git a/.ci-orchestrator/runtime-component-operator-build.yml b/.ci-orchestrator/runtime-component-operator-build.yml new file mode 100644 index 000000000..50b8e53f3 --- /dev/null +++ b/.ci-orchestrator/runtime-component-operator-build.yml @@ -0,0 +1,30 @@ +type: pipeline_definition +product: Liberty +name: Runtime Component Operator Docker Build +description: A build to run the runtime-component operator docker container build +triggers: +- type: manual + triggerName: "rcodocker" + propertyDefinitions: + - name: RELEASE_TARGET + defaultValue: "main" + - name: command + defaultValue: "make build-operator-pipeline REGISTRY=cp.stg.icr.io" + +steps: +- stepName: Z Build + workType: Jenkins + projectName: ebcDockerBuilderRCO + timeoutInMinutes: 1440 + # Need properties for Makefile or build script for WLO + properties: + ebcPlan: svl-dockerJenkins-ubuntu20_s390x.yml + + +- stepName: P Build + workType: Jenkins + projectName: ebcDockerBuilderRCO + timeoutInMinutes: 1440 + # Need properties for Makefile or build script for WLO + properties: + ebcPlan: svl-dockerJenkins-ubuntu20_ppcle.yml \ No newline at end of file diff --git a/.one-pipeline-cd.yaml b/.one-pipeline-cd.yaml new file mode 100644 index 000000000..ad3df1a9e --- /dev/null +++ b/.one-pipeline-cd.yaml @@ -0,0 +1,132 @@ +# Documentation on available configuration +# https://pages.github.ibm.com/one-pipeline/docs/custom-scripts.html + +version: "1" + +setup: + dind: true + image: icr.io/continuous-delivery/pipeline/pipeline-base-ubi:3.12 + script: | + #!/usr/bin/env bash + echo "setup stage" + skopeo --version || exit 1 + INVENTORY_PATH="$(get_env inventory-path)" + INVENTORY_ENTRIES_PATH="$WORKSPACE/$(get_env INVENTORY_ENTRIES_PATH)" + INVENTORY_ENTRIES=$(cat "${INVENTORY_ENTRIES_PATH}") + echo "$(get_env ibmcloud-api-key-staging)" | docker login "$(get_env staging-registry)" -u "$(get_env ibmcloud-api-user)" --password-stdin + for INVENTORY_ENTRY in $(echo "${INVENTORY_ENTRIES}" | jq -r '.[] '); do + 
APP=$(cat "${INVENTORY_PATH}/${INVENTORY_ENTRY}") + ARTIFACT=$(echo "${APP}" | jq -r '.artifact') + DIGEST=$(echo "${APP}" | jq -r '.sha256' ) + + echo "${ARTIFACT}" + echo "${DIGEST}" + echo "${APP}" | jq '.' + + SAVED_DIGEST="$(skopeo inspect docker://$ARTIFACT | grep Digest | grep -o 'sha[^\"]*')" + if [[ ${DIGEST} == ${SAVED_DIGEST} ]]; then + echo "Image, $ARTIFACT, passes validation" + else + echo "Image, $ARTIFACT, does not exist or digests do not match" + exit 1 + fi + done + +deploy: + dind: true + image: icr.io/continuous-delivery/pipeline/pipeline-base-ubi:3.12 + script: | + #!/usr/bin/env bash + if [[ "$PIPELINE_DEBUG" == 1 ]]; then + trap env EXIT + env + set -x + fi + echo "deploy stage" + skopeo --version || exit 1 + TARGET_ENVIRONMENT="$(get_env environment)" + INVENTORY_PATH="$(get_env inventory-path)" + INVENTORY_ENTRIES_PATH="$WORKSPACE/$(get_env INVENTORY_ENTRIES_PATH)" + INVENTORY_ENTRIES=$(cat "${INVENTORY_ENTRIES_PATH}") + + echo "Target environment: ${TARGET_ENVIRONMENT}" + echo "Inventory entries" + echo "" + + echo "$INVENTORY_ENTRIES" | jq '.' 
+ + echo "" + echo "Inventory content" + echo "" + + ls -la ${INVENTORY_PATH} + + for INVENTORY_ENTRY in $(echo "${INVENTORY_ENTRIES}" | jq -r '.[] '); do + APP=$(cat "${INVENTORY_PATH}/${INVENTORY_ENTRY}") + ARTIFACT=$(echo "${APP}" | jq -r '.artifact') + NAME=$(echo "${APP}" | jq -r '.name') + DIGEST=$(echo "${APP}" | jq -r '.sha256' ) + TYPE=$(echo "${APP}" | jq -r '.type' ) + REPO=$(echo "${APP}" | jq -r '.repository_url' ).git + COMMIT=$(echo "${APP}" | jq -r '.commit_sha' ) + echo "${ARTIFACT}" + #echo "${ARTIFACT##*/}" + IMAGE_NAME="${ARTIFACT##*/}" + echo "Image name: $IMAGE_NAME" + PRODUCTION_IMAGE=$(get_env production-registry)/$(get_env production-namespace)/$IMAGE_NAME + echo "Production image: $PRODUCTION_IMAGE" + echo "skopeo copy --all --src-creds $(get_env source-user):$(get_env source-key) --dest-creds $(get_env dest-user):$(get_env dest-key) docker://${ARTIFACT} docker://${PRODUCTION_IMAGE}" + skopeo copy --all --src-creds $(get_env source-user):$(get_env source-key) --dest-creds $(get_env dest-user):$(get_env dest-key) docker://${ARTIFACT} docker://${PRODUCTION_IMAGE} + save_artifact $NAME type=$TYPE name="${PRODUCTION_IMAGE}" digest="$DIGEST" source="${REPO}#${COMMIT}" + done + +sign-artifact: + image: docker-eu-public.artifactory.swg-devops.com/wcp-compliance-automation-team-docker-local/csso-image-sign:6.0.0@sha256:3499f75eb669416536f0d680104e7e9e37147c168459152d716a1fbf9b1af5a2 + script: | + #!/usr/bin/env bash + echo "sign-artifact stage" + # image-signing + set_env IMAGE_SIGNING_TASK_NAME "build-sign-artifact" + set_env IMAGE_SIGNING_STEP_NAME "run-stage" + "${COMMONS_PATH}"/ciso/sign_icr.sh + fingerprint=$(/opt/Garantir/bin/gpg --homedir $HOME/.gnupggrs/ --fingerprint --with-colons | grep fpr | tr -d 'fpr:') + echo "GNUPGHOME="$GNUPGHOME + gpg2 --homedir $HOME/.gnupggrs --output rco.pub --armor --export $fingerprint + save_file pub_file rco.pub + cat rco.pub + +acceptance-test: + image: 
docker-eu-public.artifactory.swg-devops.com/wcp-compliance-automation-team-docker-local/csso-image-sign:6.0.0@sha256:3499f75eb669416536f0d680104e7e9e37147c168459152d716a1fbf9b1af5a2 + script: | + #!/usr/bin/env bash + echo "acceptance-test stage" + load_file pub_file > rco.pub + gpg2 --import rco.pub + export fingerprint=$(gpg --fingerprint --with-colons | grep fpr | tr -d 'fpr:') + echo "fingerprint=$fingerprint" + mkdir -p images + if which list_artifacts >/dev/null; then + list_artifacts | while IFS= read -r artifact; do + image_name="$(load_artifact "$artifact" "name")" + type="$(load_artifact "$artifact" "type")" + echo "type="$type + if [[ "$type" == "image" ]]; then + echo "Verifying image ${image_name}" + skopeo copy --src-creds $(get_env dest-user):$(get_env dest-key) docker://${image_name} dir:./images + skopeo standalone-verify ./images/manifest.json ${image_name} ${fingerprint} ./images/signature-1 + if [[ $? != 0 ]]; then + exit 1 + fi + rm images/* + else + echo "Skipping image ${image_name}" + fi + done + fi + +finish: + image: icr.io/continuous-delivery/toolchains/devsecops/baseimage@sha256:2132bf3187b63496d119f61d375bbb656d0b3e4a664970478c44b527c4c058c5 + script: | + #!/usr/bin/env bash + echo "finish stage" + ./scripts/pipeline/cd_finish diff --git a/.one-pipeline.yaml b/.one-pipeline.yaml index 32501d2b3..ea37be7fe 100644 --- a/.one-pipeline.yaml +++ b/.one-pipeline.yaml @@ -54,6 +54,23 @@ setup: git push --prune https://$GHE_TOKEN@$WHITESOURCE_GHE_REPO $BRANCH_REFSPEC +refs/tags/*:refs/tags/* fi + SKIP_ACCEPTANCE_TEST=$(get_env SKIP_ACCEPTANCE_TEST) + SKIP_ACCEPTANCE_TEST="$(echo "$SKIP_ACCEPTANCE_TEST" | tr '[:upper:]' '[:lower:]')" + if [[ ! 
-z "$SKIP_ACCEPTANCE_TEST" && "$SKIP_ACCEPTANCE_TEST" != "false" && "$SKIP_ACCEPTANCE_TEST" != "no" ]]; then + echo "Skipping acceptance-test, SKIP_ACCEPTANCE_TEST=$SKIP_ACCEPTANCE_TEST" + exit 0 + else + export arch=$(get_env architecture) + git clone https://$(get_env git-token)@github.ibm.com/elastic-build-cloud/ebc-gateway-http.git + if [[ "$arch" == "X" ]]; then + ./scripts/pipeline/getCluster.sh "X" + else + ./scripts/pipeline/getCluster.sh "Z" + ./scripts/pipeline/getCluster.sh "X" + ./scripts/pipeline/getCluster.sh "P" + fi + fi + test: dind: true abort_on_failure: true @@ -128,111 +145,204 @@ containerize: abort_on_failure: true image: icr.io/continuous-delivery/pipeline/pipeline-base-ubi:3.12 script: | - #!/usr/bin/env bash - - if [[ "$PIPELINE_DEBUG" == 1 ]]; then - trap env EXIT - env - set -x - fi - - ## Setup required tooling - make setup-go GO_RELEASE_VERSION=$(get_env go-version) - export PATH=$PATH:/usr/local/go/bin - yum -y -q update - - # PERIODIC_SCAN=$(get_env periodic-rescan) - # PERIODIC_SCAN="$(echo "$PERIODIC_SCAN" | tr '[:upper:]' '[:lower:]')" - # Build images - export RELEASE_TARGET=$(get_env branch) - export PIPELINE_USERNAME=$(get_env ibmcloud-api-user) - export PIPELINE_PASSWORD=$(get_env ibmcloud-api-key-staging) - export PIPELINE_REGISTRY=$(get_env pipeline-registry) - export PIPELINE_OPERATOR_IMAGE=$(get_env pipeline-operator-image) - export PIPELINE_PRODUCTION_IMAGE=$PIPELINE_REGISTRY/$PIPELINE_OPERATOR_IMAGE - export REDHAT_BASE_IMAGE=$(get_env redhat-base-image) - export OPM_VERSION=$(get_env opm-version) - export DISABLE_ARTIFACTORY=$(get_env disable-artifactory) - # export REDHAT_USERNAME=$(get_env redhat-user-id) - # export REDHAT_PASSWORD=$(get_env redhat-password) - # export REDHAT_REGISTRY=$(get_env redhat-registry) - # export W3_USERNAME=$(get_env w3_username) - # export W3_PASSWORD=$(get_env w3_password) - - # flags for P and/or Z - export arch=$(get_env architecture) - - git clone https://$(get_env 
git-token)@github.ibm.com/websphere/operators.git - cp -rf operators/scripts/build ./scripts/ - # Temporary catalog build scripts (registry mirror to be done) - cp ./scripts/build-catalog.sh ./scripts/build/build-catalog.sh - - echo "skopeo version" - skopeo --version || exit 1 + #!/usr/bin/env bash + + # instruct bash to exit if any command fails + set -e + + echo $STAGE + + echo "*** OS release ***" + cat /etc/os-release + + PERIODIC_SCAN=$(get_env periodic-rescan) + PERIODIC_SCAN="$(echo "$PERIODIC_SCAN" | tr '[:upper:]' '[:lower:]')" + # Build images + export PIPELINE_USERNAME=$(get_env ibmcloud-api-user) + export PIPELINE_PASSWORD=$(get_env ibmcloud-api-key-staging) + export PIPELINE_REGISTRY=$(get_env pipeline-registry) + export PIPELINE_PRODUCTION_IMAGE=$(get_env pipeline-production-image) + export PIPELINE_OPERATOR_IMAGE=$(get_env pipeline-operator-image) + export REDHAT_USERNAME=$(get_env redhat-user-id) + export REDHAT_PASSWORD=$(get_env redhat-password) + export REDHAT_BASE_IMAGE=$(get_env redhat-base-image) + export REDHAT_REGISTRY=$(get_env redhat-registry) + export OPM_VERSION=$(get_env opm-version) + export DISABLE_ARTIFACTORY=$(get_env disable-artifactory) + export W3_USERNAME=$(get_env w3_username) + export W3_PASSWORD=$(get_env w3_password) + + # flags for P and/or Z + export arch=$(get_env architecture) + + git clone https://$(get_env git-token)@github.ibm.com/websphere/operators.git + cp -rf operators/scripts/build ./scripts/ - # Docker login and setup build configurations - scripts/build/build-initialize.sh - - # Build amd64 image - make build-operator-pipeline REGISTRY=${PIPELINE_REGISTRY} - - # Build manifest - make build-manifest-pipeline REGISTRY=${PIPELINE_REGISTRY} IMAGE=${PIPELINE_OPERATOR_IMAGE} - - # Build bundle image - make build-bundle-pipeline REGISTRY=${PIPELINE_REGISTRY} - - # Build catalog image - make build-catalog-pipeline REGISTRY=${PIPELINE_REGISTRY} - - # Build catalog manifest - make build-manifest-pipeline 
REGISTRY=${PIPELINE_REGISTRY} IMAGE=${PIPELINE_OPERATOR_IMAGE}-catalog - - echo "**** Saving Artifacts ****" - if [[ "$arch" == "ZXP" ]]; then - declare -a tags=("${RELEASE_TARGET}" "${RELEASE_TARGET}-amd64" "${RELEASE_TARGET}-ppc64le" "${RELEASE_TARGET}-s390x") - else - declare -a tags=("${RELEASE_TARGET}" "${RELEASE_TARGET}-amd64") - fi - for i in "${tags[@]}" - do - IMAGE=$PIPELINE_REGISTRY/$PIPELINE_OPERATOR_IMAGE:$i - DIGEST="$(skopeo inspect docker://$IMAGE | grep Digest | grep -o 'sha[^\"]*')" - { ARCH="$(echo $i | grep -o '\(amd64\|s390x\|ppc64le\)$')" && TYPE="image"; } || { TYPE="manifest"; } + echo "skopeo version" + skopeo --version || exit 1 - if [[ "$TYPE" == "manifest" ]]; then - echo "Saving artifact operator-$i type=$TYPE name=$IMAGE digest=$DIGEST" - save_artifact operator-$i type=$TYPE name="$IMAGE" "digest=$DIGEST" + if [[ ! -z "$PERIODIC_SCAN" && "$PERIODIC_SCAN" != "false" && "$PERIODIC_SCAN" != "no" ]]; then + echo "Skipping containerize, but generating list of images. This is a periodic run that is only meant to produce CVE information." + RELEASE_TARGET=$(curl --silent "https://api.github.com/repos/WASdev/websphere-liberty-operator/releases/latest" | jq -r .tag_name) + #RELEASE_TARGET=$(get_env branch) else - echo "Saving artifact operator-$i type=$TYPE name=$IMAGE digest=$DIGEST arch=$ARCH" - save_artifact operator-$i type=$TYPE name="$IMAGE" "digest=$DIGEST" "arch=$ARCH" + if [[ "$PIPELINE_DEBUG" == 1 ]]; then + trap env EXIT + env + set -x + fi + + ## Setup required tooling + make setup-go GO_RELEASE_VERSION=$(get_env go-version) + export PATH=$PATH:/usr/local/go/bin + yum -y -q update + + # Build images + export RELEASE_TARGET=$(get_env branch) + + if [[ -z $DISABLE_ARTIFACTORY ]]; then + export DISABLE_ARTIFACTORY="false" + fi + + # Staging registry build + echo "Running builds for staging registry." 
+ + # Docker login and setup build configurations + scripts/build/build-initialize.sh + # update Jenkins job to run shallow clone and scripts/build/build-initialize.sh + if [[ "$arch" == "ZXP" ]]; then + echo " Sending request to build P and Z" + DISABLE_ARTIFACTORY=true ./scripts/pipeline/request-ciorchestrator.sh --command "make build-operator-pipeline REGISTRY=${PIPELINE_REGISTRY}" --user "$W3_USERNAME" --password "$W3_PASSWORD" --branch "$RELEASE_TARGET" --repository "runtime-component-operator" --org "application-stacks" --trigger "rcodocker" --configFile ".ci-orchestrator/runtime-component-operator-build.yml" + pipelineid=$(cat ciorchestrator-submit.id) + fi + + # Build operator image + make build-operator-pipeline REGISTRY=${PIPELINE_REGISTRY} + if [[ "$arch" == "ZXP" ]]; then + # wait for build ppc64le and s390x images + echo " waiting on request to build P and Z" + ./scripts/pipeline/await-ciorchestrator.sh --user "$W3_USERNAME" --password "$W3_PASSWORD" --pipelineId "$pipelineid" + fi + # Build operator manifest (after 3 arch operator builds) + make build-manifest-pipeline REGISTRY=${PIPELINE_REGISTRY} IMAGE=${PIPELINE_OPERATOR_IMAGE} + + # Build bundle image + make build-bundle-pipeline REGISTRY=${PIPELINE_REGISTRY} + + # Build catalog image for amd64 first - then p and z + make build-catalog-pipeline REGISTRY=${PIPELINE_REGISTRY} + # Build catalog image + if [[ "$arch" == "ZXP" ]]; then + echo " Sending request to build P and Z catalogs" + DISABLE_ARTIFACTORY=true ./scripts/pipeline/request-ciorchestrator.sh --command "make build-catalog-pipeline REGISTRY=${PIPELINE_REGISTRY}" --user "$W3_USERNAME" --password "$W3_PASSWORD" --branch "$RELEASE_TARGET" --repository "runtime-component-operator" --org "application-stacks" --trigger "rcodocker" --configFile ".ci-orchestrator/runtime-component-operator-build.yml" + pipelineid=$(cat ciorchestrator-submit.id) + echo " waiting on request to build P and Z catalogs" + ./scripts/pipeline/await-ciorchestrator.sh 
--user "$W3_USERNAME" --password "$W3_PASSWORD" --pipelineId "$pipelineid" + fi + + # Build catalog manifest + make build-manifest-pipeline REGISTRY=${PIPELINE_REGISTRY} IMAGE=${PIPELINE_OPERATOR_IMAGE}-catalog + + + if [[ "$DISABLE_ARTIFACTORY" == "false" ]]; then + echo "Running builds for artifactory repository." + + read -r ARTIFACTORY_REPO_URL_VALUE <<< "$(get_env artifactorybackup | jq -r '.parameters.repository_url' | sed 's:/*$::')" + read -r ARTIFACTORY_USERNAME_VALUE <<< "$(get_env artifactorybackup | jq -r '.parameters.user_id')" + read -r ARTIFACTORY_TOKEN_VALUE <<< "$(get_env artifactorybackup | jq -r '.parameters.token')" + + export ARTIFACTORY_REPO_URL="${ARTIFACTORY_REPO_URL_VALUE#*://}" # Cuts the http(s):// off of the front of the url + export ARTIFACTORY_USERNAME="$ARTIFACTORY_USERNAME_VALUE" + export ARTIFACTORY_TOKEN="$ARTIFACTORY_TOKEN_VALUE" + + # Docker login and setup build configurations + scripts/build/build-initialize.sh + # update Jenkins job to run shallow clone and scripts/build/build-initialize.sh + if [[ "$arch" == "ZXP" ]]; then + echo " Sending request to build P and Z ( artifactory )" + ./scripts/pipeline/request-ciorchestrator.sh --command "make build-operator-pipeline REGISTRY=${ARTIFACTORY_REPO_URL}" --user "$W3_USERNAME" --password "$W3_PASSWORD" --branch "$RELEASE_TARGET" --repository "runtime-component-operator" --org "application-stacks" --trigger "rcodocker" --configFile ".ci-orchestrator/runtime-component-operator-build.yml" + pipelineid=$(cat ciorchestrator-submit.id) + fi + + # Build operator image + make build-operator-pipeline REGISTRY=${ARTIFACTORY_REPO_URL} + if [[ "$arch" == "ZXP" ]]; then + # wait for build ppc64le and s390x images + echo " waiting on request to build P and Z ( artifactory )" + ./scripts/pipeline/await-ciorchestrator.sh --user "$W3_USERNAME" --password "$W3_PASSWORD" --pipelineId "$pipelineid" + fi + # Build operator manifest (after 3 arch operator builds) + make build-manifest-pipeline 
REGISTRY=${ARTIFACTORY_REPO_URL} IMAGE=${PIPELINE_OPERATOR_IMAGE} + + # Build bundle image + make build-bundle-pipeline REGISTRY=${ARTIFACTORY_REPO_URL} + + # Build catalog image for amd64 first - then p and z + make build-catalog-pipeline REGISTRY=${ARTIFACTORY_REPO_URL} + # Build catalog image + if [[ "$arch" == "ZXP" ]]; then + echo " Sending request to build P and Z catalogs ( artifactory )" + ./scripts/pipeline/request-ciorchestrator.sh --command "make build-catalog-pipeline REGISTRY=${ARTIFACTORY_REPO_URL}" --user "$W3_USERNAME" --password "$W3_PASSWORD" --branch "$RELEASE_TARGET" --repository "runtime-component-operator" --org "application-stacks" --trigger "rcodocker" --configFile ".ci-orchestrator/runtime-component-operator-build.yml" + pipelineid=$(cat ciorchestrator-submit.id) + echo " waiting on request to build P and Z catalogs ( artifactory )" + ./scripts/pipeline/await-ciorchestrator.sh --user "$W3_USERNAME" --password "$W3_PASSWORD" --pipelineId "$pipelineid" + fi + + # Build catalog manifest + make build-manifest-pipeline REGISTRY=${ARTIFACTORY_REPO_URL} IMAGE=${PIPELINE_OPERATOR_IMAGE}-catalog + + echo "Completed pushing to artifactory." 
+ + fi fi - done - - IMAGE=$PIPELINE_REGISTRY/$PIPELINE_OPERATOR_IMAGE-bundle:${RELEASE_TARGET} - DIGEST="$(skopeo inspect docker://$IMAGE | grep Digest | grep -o 'sha[^\"]*')" - echo "Saving artifact bundle-${RELEASE_TARGET} name=$IMAGE digest=$DIGEST" - save_artifact bundle-${RELEASE_TARGET} type=image name="$IMAGE" "digest=$DIGEST" - for i in "${tags[@]}" - do - IMAGE=$PIPELINE_REGISTRY/$PIPELINE_OPERATOR_IMAGE-catalog:$i - DIGEST="$(skopeo inspect docker://$IMAGE | grep Digest | grep -o 'sha[^\"]*')" - { ARCH="$(echo $i | grep -o '\(amd64\|s390x\|ppc64le\)$')" && TYPE="image"; } || { ARCH="amd64" && TYPE="manifest"; } - - if [[ "$TYPE" == "manifest" ]]; then - echo "Saving artifact catalog-$i type=$TYPE name=$IMAGE digest=$DIGEST" - save_artifact catalog-$i type=$TYPE name="$IMAGE" "digest=$DIGEST" + echo "**** Saving Artifacts ****" + if [[ "$arch" == "ZXP" ]]; then + declare -a tags=("${RELEASE_TARGET}" "${RELEASE_TARGET}-amd64" "${RELEASE_TARGET}-ppc64le" "${RELEASE_TARGET}-s390x") else - echo "Saving artifact catalog-$i type=$TYPE name=$IMAGE digest=$DIGEST arch=$ARCH" - save_artifact catalog-$i type=$TYPE name="$IMAGE" "digest=$DIGEST" "arch=$ARCH" + declare -a tags=("${RELEASE_TARGET}" "${RELEASE_TARGET}-amd64") fi - done - - # echo "whitesource scan" - # #source "${COMMONS_PATH}/whitesource/whitesource_unified_agent_scan.sh" - # source ./scripts/pipeline/whitesource_unified_agent_scan.sh + for i in "${tags[@]}" + do + IMAGE=$PIPELINE_REGISTRY/$PIPELINE_OPERATOR_IMAGE:$i + DIGEST="$(skopeo inspect docker://$IMAGE | grep Digest | grep -o 'sha[^\"]*')" + { ARCH="$(echo $i | grep -o '\(amd64\|s390x\|ppc64le\)$')" && TYPE="image"; } || { TYPE="manifest"; } + if [[ "$TYPE" == "manifest" ]]; then + echo "Saving artifact operator-$i type=$TYPE name=$IMAGE digest=$DIGEST" + save_artifact operator-$i type=$TYPE name="$IMAGE" "digest=$DIGEST" + else + echo "Saving artifact operator-$i type=$TYPE name=$IMAGE digest=$DIGEST arch=$ARCH" + save_artifact operator-$i 
type=$TYPE name="$IMAGE" "digest=$DIGEST" "arch=$ARCH" + fi + done + IMAGE=$PIPELINE_REGISTRY/$PIPELINE_OPERATOR_IMAGE-bundle:${RELEASE_TARGET} + DIGEST="$(skopeo inspect docker://$IMAGE | grep Digest | grep -o 'sha[^\"]*')" + echo "Saving artifact bundle-${RELEASE_TARGET} name=$IMAGE digest=$DIGEST" + save_artifact bundle-${RELEASE_TARGET} type=image name="$IMAGE" "digest=$DIGEST" + for i in "${tags[@]}" + do + IMAGE=$PIPELINE_REGISTRY/$PIPELINE_OPERATOR_IMAGE-catalog:$i + DIGEST="$(skopeo inspect docker://$IMAGE | grep Digest | grep -o 'sha[^\"]*')" + { ARCH="$(echo $i | grep -o '\(amd64\|s390x\|ppc64le\)$')" && TYPE="image"; } || { ARCH="amd64" && TYPE="manifest"; } + if [[ "$TYPE" == "manifest" ]]; then + echo "Saving artifact catalog-$i type=$TYPE name=$IMAGE digest=$DIGEST" + save_artifact catalog-$i type=$TYPE name="$IMAGE" "digest=$DIGEST" + else + echo "Saving artifact catalog-$i type=$TYPE name=$IMAGE digest=$DIGEST arch=$ARCH" + save_artifact catalog-$i type=$TYPE name="$IMAGE" "digest=$DIGEST" "arch=$ARCH" + fi + done + + echo "MEND unified agent scan" + chmod +x "${COMMONS_PATH}/whitesource/whitesource_unified_agent_scan.sh" + source "${COMMONS_PATH}/whitesource/whitesource_unified_agent_scan.sh" + + ## Perform lint + IMAGE="${PIPELINE_REGISTRY}/${PIPELINE_OPERATOR_IMAGE}-bundle:${RELEASE_TARGET}" + DIGEST="$(skopeo inspect docker://$IMAGE | grep Digest | grep -o 'sha[^\"]*')" + BUNDLE_IMAGE_WITH_DIGEST="${IMAGE}@${DIGEST}" + ./scripts/pipeline/static-linter-scan.sh --git-token $(get_env git-token) --bundle-image $BUNDLE_IMAGE_WITH_DIGEST --static-linter-version $(get_env static-linter-version) sign-artifact: abort_on_failure: false @@ -296,45 +406,41 @@ acceptance-test: image: icr.io/continuous-delivery/pipeline/pipeline-base-ubi:3.12 script: | #!/usr/bin/env bash - echo "acceptance-test" + + echo $STAGE + PERIODIC_SCAN=$(get_env periodic-rescan) PERIODIC_SCAN="$(echo "$PERIODIC_SCAN" | tr '[:upper:]' '[:lower:]')" + if [[ ! 
-z "$PERIODIC_SCAN" && "$PERIODIC_SCAN" != "false" && "$PERIODIC_SCAN" != "no" ]]; then echo "Skipping acceptance-test. This is a periodic run that is only meant to produce CVE information." exit 0 fi - SKIP_KIND_E2E_TEST=$(get_env SKIP_KIND_E2E_TEST) - SKIP_KIND_E2E_TEST="$(echo "$SKIP_KIND_E2E_TEST" | tr '[:upper:]' '[:lower:]')" - if [[ ! -z "$SKIP_KIND_E2E_TEST" && "$SKIP_KIND_E2E_TEST" != "false" && "$SKIP_KIND_E2E_TEST" != "no" ]]; then - echo "Skipping acceptance-test, SKIP_KIND_E2E_TEST=$SKIP_KIND_E2E_TEST" + + SKIP_ACCEPTANCE_TEST=$(get_env SKIP_ACCEPTANCE_TEST) + SKIP_ACCEPTANCE_TEST="$(echo "$SKIP_ACCEPTANCE_TEST" | tr '[:upper:]' '[:lower:]')" + if [[ ! -z "$SKIP_ACCEPTANCE_TEST" && "$SKIP_ACCEPTANCE_TEST" != "false" && "$SKIP_ACCEPTANCE_TEST" != "no" ]]; then + echo "Skipping acceptance-test, SKIP_ACCEPTANCE_TEST=$SKIP_ACCEPTANCE_TEST" exit 0 fi + + export intranetId_USR=$(get_env ebc_id) + export intranetId_PSW=$(get_env ebc_pw) + export ebcEnvironment=prod + + cd scripts/pipeline + rm -rf .git + git clone https://$(get_env git-token)@github.ibm.com/elastic-build-cloud/ebc-gateway-http.git + + export arch=$(get_env architecture) - # Download and configure golang - GO_VERSION=$(get_env go-version) - if [[ -z "${GO_VERSION}" ]]; then - GO_VERSION="$(grep '^go [0-9]\+.[0-9]\+' go.mod | cut -d ' ' -f 2)" + if [[ "$arch" == "ZXP" ]]; then + source runTest.sh Z + source runTest.sh X + source runTest.sh P + else + source runTest.sh X fi - export GO_VERSION - # OCP test - export PIPELINE_USERNAME=$(get_env ibmcloud-api-user) - export PIPELINE_PASSWORD=$(get_env ibmcloud-api-key-staging) - export PIPELINE_REGISTRY=$(get_env pipeline-registry) - export PIPELINE_OPERATOR_IMAGE=$(get_env pipeline-operator-image) - export DOCKER_USERNAME=$(get_env docker-username) - export DOCKER_PASSWORD=$(get_env docker-password) - export CLUSTER_URL=$(get_env test-cluster-url) - export CLUSTER_USER=$(get_env test-cluster-user kubeadmin) - export CLUSTER_TOKEN=$(get_env 
test-cluster-token) - export RELEASE_TARGET=$(get_env branch) - export DEBUG_FAILURE=$(get_env debug-failure) - # Kind test - export FYRE_USER=$(get_env fyre-user) - export FYRE_KEY=$(get_env fyre-key) - export FYRE_PASS=$(get_env fyre-pass) - export FYRE_PRODUCT_GROUP_ID=$(get_env fyre-product-group-id) - scripts/acceptance-test.sh - scan-artifact: @@ -342,38 +448,7 @@ scan-artifact: image: icr.io/continuous-delivery/pipeline/pipeline-base-ubi:3.12 script: | #!/usr/bin/env bash - # echo "twistlock-scan" - # ./scripts/pipeline/twistlock-scan.sh - # echo "VA scan" - # . scripts/pipeline/va_scan - # if which list_artifacts >/dev/null; then - # list_artifacts | while IFS= read -r artifact; do - # image="$(load_artifact "$artifact" "name")" - # type="$(load_artifact "$artifact" "type")" - # digest="$(load_artifact "$artifact" "digest")" - # name="$(echo "$artifact" | awk '{print $1}')" - # if [[ "$type" == "image" ]]; then - # if [[ "$image" == *"icr.io"* ]]; then - # echo "Starting VA scan for $image" - # start_va_scan "$name" "$image" "$digest" - # else - # echo "Skipping VA scan for $image" - # fi - # fi - # done - # fi - - # echo "aqua scan" - # # install docker - # curl -fsSL https://get.docker.com -o get-docker.sh - # sudo sh get-docker.sh - # # get aqua scan executables - # git clone https://$(get_env git-token)@github.ibm.com/CICD-CPP/cpp-pipelines.git - # chmod -R +x cpp-pipelines - # # setup and execute aqua scan - # cd cpp-pipelines - # export CUSTOM_SCRIPTS_PATH=/workspace/app/one-pipeline-config-repo/cpp-pipelines - # ./commons/aqua/aqua-local-scan + # ========== Security Scanner ========== ./scripts/pipeline/ci_to_secure_pipeline_scan.sh diff --git a/.travis.yml b/.travis.yml deleted file mode 100644 index a86877a7d..000000000 --- a/.travis.yml +++ /dev/null @@ -1,106 +0,0 @@ -dist: focal -language: go -go: - - 1.19.x - -go_import_path: github.com/application-stacks/runtime-component-operator - -services: - - docker - -before_install: - - sudo apt-get 
update - -before_script: - - go mod vendor - -stages: - - name: unit-test - # Builds are split into 'e2e-test' and 'build' to allow e2e tests to run first. If e2e fails, don't bother - # building and pushing the images for the other architectures. - - name: e2e-test - if: (branch = main OR tag =~ ^v) AND fork = false AND type != cron - - name: minikube-e2e-test - if: (branch = main OR tag =~ ^v) AND fork = false AND type != cron - - name: build - if: (branch = main OR tag =~ ^v) AND fork = false AND type != pull_request AND type != cron - - name: build-manifest - if: (branch = main OR tag =~ ^v) AND fork = false AND type != pull_request AND type != cron - # Releases are rebuilt weekly via a Travis cron job - - name: rebuild-e2e-test - if: branch = main AND fork = false AND type = cron - - name: rebuild-releases - if: branch = main AND fork = false AND type = cron - - name: rebuild-bundles - if: branch = main AND fork = false AND type = cron - -jobs: - include: - - name: Unit testing - stage: unit-test - script: make unit-test - - name: Build image on amd64 and test - stage: e2e-test - os: linux - arch: amd64 - script: travis_wait 45 make setup test-e2e || travis_terminate 1 - - name: Build image on amd64 and test on Minikube - stage: minikube-e2e-test - os: linux - arch: amd64 - script: travis_wait 45 make setup minikube-test-e2e || travis_terminate 1 - - name: Build image on ppc64le - stage: build - os: linux - arch: ppc64le - script: make build-releases - - name: Build image on s390x - stage: build - os: linux - arch: s390x - script: make build-releases - - name: Build image and bundle on amd64 - stage: build - os: linux - arch: amd64 - before_install: - - sudo apt-get install -qq -y software-properties-common uidmap - - make install-podman - - make install-opm - script: make build-releases && make build-manifest && make bundle-releases - ## in case there were concurrency issues with building manifest lists - ## in previous steps, create FAT manifests one last 
time - - name: Verify manifest lists - stage: build-manifest - script: make build-manifest - # Build all non-ignored releases - - name: Build image on amd64 and test - stage: rebuild-e2e-test - os: linux - arch: amd64 - script: travis_wait 45 make setup test-e2e RELEASE_TARGET="releases" || travis_terminate 1 - - name: Build releases on ppc64le - stage: rebuild-releases - os: linux - arch: ppc64le - script: make build-releases RELEASE_TARGET="releases" - - name: Build releases on s390x - stage: rebuild-releases - os: linux - arch: s390x - script: make build-releases RELEASE_TARGET="releases" - - name: Build releases on amd64 - stage: rebuild-releases - os: linux - arch: amd64 - script: make build-releases RELEASE_TARGET="releases" - # Bundle all non-ignored releases - - name: Bundle releases - stage: rebuild-bundles - os: linux - arch: amd64 - before_install: - - sudo apt-get install -qq -y software-properties-common uidmap - - make install-podman - - make install-opm - script: make build-manifest RELEASE_TARGET="releases" && make bundle-releases RELEASE_TARGET="releases" diff --git a/Dockerfile b/Dockerfile index e10c904f3..1db7aad6d 100644 --- a/Dockerfile +++ b/Dockerfile @@ -5,11 +5,9 @@ WORKDIR /workspace # Copy the Go Modules manifests COPY go.mod go.mod COPY go.sum go.sum -COPY vendor/ vendor/ - # cache deps before building and copying source so that we don't need to re-download as much # and so that source changes don't invalidate our downloaded layer -#RUN go mod download +RUN go mod download # Copy the go source COPY main.go main.go @@ -19,23 +17,40 @@ COPY common/ common/ COPY utils/ utils/ # Build -RUN CGO_ENABLED=0 GOOS=linux GO111MODULE=on go build -ldflags="-s -w" -mod vendor -a -o manager main.go - +RUN CGO_ENABLED=0 GOOS=linux GO111MODULE=on go build -ldflags="-s -w" -a -o manager main.go -#Build final image +# Use distroless as minimal base image to package the manager binary +# Refer to https://github.com/GoogleContainerTools/distroless for more 
details FROM registry.access.redhat.com/ubi8/ubi-minimal:latest -LABEL vendor="Runtime Component Community" \ - name="Runtime Component Operator" \ - version="0.8.1" \ - summary="Image for Runtime Component Operator" \ - description="This image contains the controller for Runtime Component Operator. See https://github.com/application-stacks/runtime-component-operator" - -COPY LICENSE /licenses/ +ARG USER_ID=65532 +ARG GROUP_ID=65532 + +ARG VERSION_LABEL=1.0.0 +ARG RELEASE_LABEL=XX +ARG VCS_REF=0123456789012345678901234567890123456789 +ARG VCS_URL="https://github.com/application-stacks/runtime-component-operator" +ARG NAME="runtime-component-operator" +ARG SUMMARY="Runtime Component Operator" +ARG DESCRIPTION="This image contains the controller for Runtime Component Operator." + +LABEL name=$NAME \ + vendor="Runtime Component Community" \ + version=$VERSION_LABEL \ + release=$RELEASE_LABEL \ + description=$DESCRIPTION \ + summary=$SUMMARY \ + io.k8s.display-name=$SUMMARY \ + io.k8s.description=$DESCRIPTION \ + vcs-type=git \ + vcs-ref=$VCS_REF \ + vcs-url=$VCS_URL \ + url=$VCS_URL +COPY LICENSE /licenses/ WORKDIR / COPY --from=builder /workspace/manager . -USER 65532:65532 +USER ${USER_ID}:${GROUP_ID} ENTRYPOINT ["/manager"] diff --git a/Makefile b/Makefile index c1cfffa3b..5014dfa5f 100644 --- a/Makefile +++ b/Makefile @@ -1,25 +1,49 @@ -# Current Operator version. +# VERSION defines the project version for the bundle. +# Update this value when you upgrade the version of your project. +# To re-generate a bundle for another specific version without changing the standard setup, you can: +# - use the VERSION as arg of the bundle target (e.g make bundle VERSION=0.0.2) +# - use environment variables to overwrite this value (e.g export VERSION=0.0.2) VERSION ?= 1.0.0 OPERATOR_SDK_RELEASE_VERSION ?= v1.24.0 # CHANNELS define the bundle channels used in the bundle. +# Add a new line here if you would like to change its default config. 
(E.g CHANNELS = "preview,fast,stable") +# To re-generate a bundle for other specific channels without changing the standard setup, you can: +# - use the CHANNELS as arg of the bundle target (e.g make bundle CHANNELS=preview,fast,stable) +# - use environment variables to overwrite this value (e.g export CHANNELS="preview,fast,stable") CHANNELS ?= beta2 ifneq ($(origin CHANNELS), undefined) BUNDLE_CHANNELS := --channels=$(CHANNELS) endif # DEFAULT_CHANNEL defines the default channel used in the bundle. +# Add a new line here if you would like to change its default config. (E.g DEFAULT_CHANNEL = "stable") +# To re-generate a bundle for any other default channel without changing the default setup, you can: +# - use the DEFAULT_CHANNEL as arg of the bundle target (e.g make bundle DEFAULT_CHANNEL=stable) +# - use environment variables to overwrite this value (e.g export DEFAULT_CHANNEL="stable") DEFAULT_CHANNEL ?= beta2 ifneq ($(origin DEFAULT_CHANNEL), undefined) BUNDLE_DEFAULT_CHANNEL := --default-channel=$(DEFAULT_CHANNEL) endif BUNDLE_METADATA_OPTS ?= $(BUNDLE_CHANNELS) $(BUNDLE_DEFAULT_CHANNEL) +# IMAGE_TAG_BASE defines the docker.io namespace and part of the image name for remote images. +# This variable is used to construct full image tags for bundle and catalog images. +# +# For example, running 'make bundle-build bundle-push catalog-build catalog-push' will build and push both +# openliberty.io/op-test-bundle:$VERSION and openliberty.io/op-test-catalog:$VERSION. +IMAGE_TAG_BASE ?= icr.io/appcafe/runtime-component-operator + # OPERATOR_IMAGE defines the docker.io namespace and part of the image name for remote images. -OPERATOR_IMAGE ?= applicationstacks/operator +OPERATOR_IMAGE ?= icr.io/appcafe/runtime-component-operator # BUNDLE_IMG defines the image:tag used for the bundle. -BUNDLE_IMG ?= applicationstacks/operator:bundle-daily +# You can use it as an arg. 
(E.g make bundle-build BUNDLE_IMG=/:) +BUNDLE_IMG ?= $(IMAGE_TAG_BASE)-bundle:daily + +# A comma-separated list of bundle images (e.g. make catalog-build BUNDLE_IMGS=example.com/operator-bundle:v0.1.0,example.com/operator-bundle:v0.2.0). +# These images MUST exist in a registry and be pull-able. +BUNDLE_IMGS ?= $(BUNDLE_IMG) # BUNDLE_GEN_FLAGS are the flags passed to the operator-sdk generate bundle command BUNDLE_GEN_FLAGS ?= -q --overwrite --version $(VERSION) $(BUNDLE_METADATA_OPTS) @@ -32,27 +56,47 @@ ifeq ($(USE_IMAGE_DIGESTS), true) BUNDLE_GEN_FLAGS += --use-image-digests endif -# Image URL to use all building/pushing image targets. -IMG ?= applicationstacks/operator:daily +# Image URL to use all building/pushing image targets +IMG ?= icr.io/appcafe/runtime-component-operator:daily -# The image tag given to the resulting catalog image. -CATALOG_IMG ?= applicationstacks/operator:catalog-daily +# The image tag given to the resulting catalog image (e.g. make catalog-build CATALOG_IMG=example.com/operator-catalog:v0.2.0). +CATALOG_IMG ?= $(IMAGE_TAG_BASE)-catalog:v$(VERSION) + +# Set CATALOG_BASE_IMG to an existing catalog image tag to add $BUNDLE_IMGS to that image. +ifneq ($(origin CATALOG_BASE_IMG), undefined) +FROM_INDEX_OPT := --from-index $(CATALOG_BASE_IMG) +endif PUBLISH_REGISTRY=docker.io -PIPELINE_REGISTRY ?= cp.stg.icr.io -PIPELINE_REGISTRY_NAMESPACE ?= cp -PIPELINE_OPERATOR_IMAGE ?= ${PIPELINE_REGISTRY_NAMESPACE}/rco-operator # Type of release. Can be "daily", "releases", or a release tag. RELEASE_TARGET := $(or ${RELEASE_TARGET}, ${TRAVIS_TAG}, daily) -# Get the currently used golang install path (in GOPATH/bin, unless GOBIN is set). +# Get the currently used golang install path (in GOPATH/bin, unless GOBIN is set) ifeq (,$(shell go env GOBIN)) GOBIN=$(shell go env GOPATH)/bin else GOBIN=$(shell go env GOBIN) endif +# Setting SHELL to bash allows bash commands to be executed by recipes. 
+# This is a requirement for 'setup-envtest.sh' in the test target. +# Options are set to exit when a recipe line exits non-zero or a piped command fails. +SHELL = /usr/bin/env bash -o pipefail +.SHELLFLAGS = -ec + +CREATEDAT ?= AUTO +ifeq ($(CREATEDAT), AUTO) +CREATEDAT := $(shell date +%Y-%m-%dT%TZ) +endif + +# Produce CRDs that work back to Kubernetes 1.11 (no version conversion) +CRD_OPTIONS ?= "crd:crdVersions=v1,generateEmbeddedObjectMeta=true" + +# Produce files under deploy/kustomize/daily with default namespace +KUSTOMIZE_NAMESPACE = default +KUSTOMIZE_IMG = cp.stg.icr.io/cp/runtime-component-operator:main + # Use docker if available. Otherwise default to podman. # Override choice by setting CONTAINER_COMMAND CHECK_DOCKER_RC=$(shell docker -v > /dev/null 2>&1; echo $$?) @@ -70,88 +114,58 @@ else CONTAINER_COMMAND ?= docker endif -# Setting SHELL to bash allows bash commands to be executed by recipes. -# This is a requirement for 'setup-envtest.sh' in the test target. -# Options are set to exit when a recipe line exits non-zero or a piped command fails. -SHELL = /usr/bin/env bash -o pipefail -.SHELLFLAGS = -ec - -CREATEDAT ?= AUTO -ifeq ($(CREATEDAT), AUTO) -CREATEDAT := $(shell date +%Y-%m-%dT%TZ) -endif - -# Produce CRDs that work back to Kubernetes 1.11 (no version conversion). -CRD_OPTIONS ?= "crd:crdVersions=v1,generateEmbeddedObjectMeta=true" - -# Produce files under deploy/kustomize/daily with default namespace. -KUSTOMIZE_NAMESPACE = default - .PHONY: all -all: manager +all: build ##@ General +# The help target prints out all targets with their descriptions organized +# beneath their categories. The categories are represented by '##@' and the +# target descriptions by '##'. The awk commands is responsible for reading the +# entire set of makefiles included in this invocation, looking for lines of the +# file as xyz: ## something, and then pretty-format the target and help. 
Then, +# if there's a line with ##@ something, that gets pretty-printed as a category. +# More info on the usage of ANSI control characters for terminal formatting: +# https://en.wikipedia.org/wiki/ANSI_escape_code#SGR_parameters +# More info on the awk command: +# http://linuxcommand.org/lc3_adv_awk.php + .PHONY: help help: ## Display this help. @awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n make \033[36m\033[0m\n"} /^[a-zA-Z_0-9-]+:.*?##/ { printf " \033[36m%-15s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST) ##@ Setup -## Location to install dependencies to. +## Location to install dependencies to LOCALBIN ?= $(shell pwd)/bin $(LOCALBIN): mkdir -p $(LOCALBIN) +CONTROLLER_GEN ?= $(LOCALBIN)/controller-gen # find or download controller-gen # download controller-gen if necessary -CONTROLLER_GEN ?= $(LOCALBIN)/controller-gen .PHONY: controller-gen controller-gen: $(CONTROLLER_GEN) ## Download controller-gen locally if necessary. $(CONTROLLER_GEN): $(LOCALBIN) test -s $(LOCALBIN)/controller-gen || GOBIN=$(LOCALBIN) go install sigs.k8s.io/controller-tools/cmd/controller-gen@v0.9.2 KUSTOMIZE ?= $(LOCALBIN)/kustomize -KUSTOMIZE_VERSION ?= 3.8.7 -KUSTOMIZE_INSTALL_SCRIPT ?= "https://raw.githubusercontent.com/kubernetes-sigs/kustomize/release-kustomize-v3.8/hack/install_kustomize.sh" +# TODO iain - This was 3.8.7 in previous version +KUSTOMIZE_VERSION ?= 4.5.5 +KUSTOMIZE_INSTALL_SCRIPT ?= "https://raw.githubusercontent.com/kubernetes-sigs/kustomize/kustomize/v${KUSTOMIZE_VERSION}/hack/install_kustomize.sh" .PHONY: kustomize kustomize: $(KUSTOMIZE) ## Download kustomize locally if necessary. $(KUSTOMIZE): $(LOCALBIN) test -s $(LOCALBIN)/kustomize || curl -s $(KUSTOMIZE_INSTALL_SCRIPT) | bash -s $(KUSTOMIZE_VERSION) $(LOCALBIN) -.PHONY: opm -OPM = ./bin/opm -opm: ## Download opm locally if necessary. 
-ifeq (,$(wildcard $(OPM))) -ifeq (,$(shell which opm 2>/dev/null)) - @{ \ - set -e ;\ - mkdir -p $(dir $(OPM)) ;\ - OS=$(shell go env GOOS) && ARCH=$(shell go env GOARCH) && \ - curl -sSLo $(OPM) https://github.com/operator-framework/operator-registry/releases/download/v1.15.1/$${OS}-$${ARCH}-opm ;\ - chmod +x $(OPM) ;\ - } -else -OPM = $(shell which opm) -endif -endif - -# A comma-separated list of bundle images (e.g. make catalog-build BUNDLE_IMGS=example.com/operator-bundle:v0.1.0,example.com/operator-bundle:v0.2.0). -# These images MUST exist in a registry and be pull-able. -BUNDLE_IMGS ?= $(BUNDLE_IMG) - -# The image tag given to the resulting catalog image (e.g. make catalog-build CATALOG_IMG=example.com/operator-catalog:v0.2.0). -CATALOG_IMG ?= $(IMAGE_TAG_BASE)-catalog:v$(VERSION) - -# Set CATALOG_BASE_IMG to an existing catalog image tag to add $BUNDLE_IMGS to that image. -ifneq ($(origin CATALOG_BASE_IMG), undefined) -FROM_INDEX_OPT := --from-index $(CATALOG_BASE_IMG) -endif - .PHONY: setup -setup: ## Install Operator SDK if necessary. +setup: ## Ensure Operator SDK is installed. ./scripts/installers/install-operator-sdk.sh ${OPERATOR_SDK_RELEASE_VERSION} +.PHONY: setup-go +setup-go: ## Ensure Go is installed. + ./scripts/installers/install-go.sh ${GO_RELEASE_VERSION} + .PHONY: setup-manifest setup-manifest: ## Install manifest tool. ./scripts/installers/install-manifest-tool.sh @@ -168,12 +182,10 @@ install-opm: .PHONY: generate generate: controller-gen ## Generate code containing DeepCopy, DeepCopyInto, and DeepCopyObject method implementations. - $(CONTROLLER_GEN) object:headerFile="hack/boilerplate.go.txt" paths="./..." - + $(CONTROLLER_GEN) object:headerFile="hack/boilerplate.go.txt" paths="./..." .PHONY: manifests manifests: controller-gen ## Generate WebhookConfiguration, ClusterRole and CustomResourceDefinition objects. $(CONTROLLER_GEN) $(CRD_OPTIONS) rbac:roleName=manager-role webhook paths="./..." 
output:crd:artifacts:config=config/crd/bases - rm -f config/manifests/patches/csvAnnotations.yaml.bak .PHONY: bundle bundle: manifests setup kustomize ## Generate bundle manifests and metadata, then validate generated files. @@ -182,18 +194,29 @@ bundle: manifests setup kustomize ## Generate bundle manifests and metadata, the operator-sdk generate kustomize manifests -q cd config/manager && $(KUSTOMIZE) edit set image controller=$(IMG) $(KUSTOMIZE) build config/manifests | operator-sdk generate bundle $(BUNDLE_GEN_FLAGS) - + ./scripts/csv_description_update.sh update_csv + +# $(KUSTOMIZE) build config/kustomize/crd -o internal/deploy/kustomize/daily/base/websphere-liberty-crd.yaml $(KUSTOMIZE) build config/kustomize/crd -o deploy/kustomize/daily/base/runtime-component-crd.yaml cd config/kustomize/operator && $(KUSTOMIZE) edit set namespace $(KUSTOMIZE_NAMESPACE) +# $(KUSTOMIZE) build config/kustomize/operator -o internal/deploy/kustomize/daily/base/websphere-liberty-deployment.yaml $(KUSTOMIZE) build config/kustomize/operator -o deploy/kustomize/daily/base/runtime-component-operator.yaml - +# This does two replacements +# ${IMG} becomes ${KUSTOMIZE_IMG} +# serviceAccountName: controller-manager becomes serviceAccountName: websphere-liberty-controller-manager +# The second one looks unneeded, as the equivalent is already in the deployment file +# TODO iain +# Not sure about the first +# Probably +# sed -i.bak "s,${IMG},${KUSTOMIZE_IMG},g;s,serviceAccountName: controller-manager,serviceAccountName: websphere-liberty-controller-manager,g" internal/deploy/kustomize/daily/base/websphere-liberty-deployment.yaml + sed -i.bak "s,${IMG},${KUSTOMIZE_IMG},g" deploy/kustomize/daily/base/runtime-component-operator.yaml +# TODO iain - I think these are included in the deployment file for RCO +# $(KUSTOMIZE) build config/kustomize/roles -o internal/deploy/kustomize/daily/base/websphere-liberty-roles.yaml + mv config/manifests/patches/csvAnnotations.yaml.bak 
config/manifests/patches/csvAnnotations.yaml +# rm internal/deploy/kustomize/daily/base/websphere-liberty-deployment.yaml.bak operator-sdk bundle validate ./bundle -.PHONY: kustomize-build -kustomize-build: manifests kustomize ## Generate build controller, and roles & role bindings under deploy/kustomize directory. - cd deploy/kustomize/daily/base && $(KUSTOMIZE) edit set namespace ${KUSTOMIZE_NAMESPACE} - .PHONY: fmt fmt: ## Run go fmt against code. go fmt ./... @@ -202,19 +225,20 @@ fmt: ## Run go fmt against code. vet: ## Run go vet against code. go vet ./... -ENVTEST_ASSETS_DIR = $(shell pwd)/testbin +ENVTEST_ASSETS_DIR=$(shell pwd)/testbin .PHONY: test -test: generate fmt vet manifests ## Run tests. - mkdir -p $(ENVTEST_ASSETS_DIR) - test -f $(ENVTEST_ASSETS_DIR)/setup-envtest.sh || curl -sSLo $(ENVTEST_ASSETS_DIR)/setup-envtest.sh https://raw.githubusercontent.com/kubernetes-sigs/controller-runtime/v0.6.3/hack/setup-envtest.sh - source $(ENVTEST_ASSETS_DIR)/setup-envtest.sh; fetch_envtest_tools $(ENVTEST_ASSETS_DIR); setup_envtest_env $(ENVTEST_ASSETS_DIR); go test ./... -coverprofile cover.out +test: manifests generate fmt vet ## Run tests. + mkdir -p ${ENVTEST_ASSETS_DIR} + test -f ${ENVTEST_ASSETS_DIR}/setup-envtest.sh || curl -sSLo ${ENVTEST_ASSETS_DIR}/setup-envtest.sh https://raw.githubusercontent.com/kubernetes-sigs/controller-runtime/v0.7.2/hack/setup-envtest.sh + source ${ENVTEST_ASSETS_DIR}/setup-envtest.sh; fetch_envtest_tools $(ENVTEST_ASSETS_DIR); setup_envtest_env $(ENVTEST_ASSETS_DIR); go test ./... -coverprofile cover.out .PHONY: unit-test -unit-test: ## Run unit tests. +unit-test: ## Run unit tests +# go test -v -mod=vendor -tags=unit github.com/WASdev/websphere-liberty-operator/... go test -v -mod=vendor -tags=unit github.com/application-stacks/runtime-component-operator/... .PHONY: run -run: generate fmt vet manifests ## Run a controller against the configured Kubernetes cluster in ~/.kube/config from your host. 
+run: manifests generate fmt vet ## Run a controller against the configured Kubernetes cluster in ~/.kube/config from your host. go run ./main.go ##@ Deployment @@ -237,20 +261,18 @@ deploy: manifests kustomize ## Deploy controller to the K8s cluster specified in $(KUSTOMIZE) build config/default | kubectl apply -f - .PHONY: undeploy -undeploy: manifests kustomize ## Undeploy controller from the K8s cluster specified in ~/.kube/config. Call with ignore-not-found=true to ignore resource not found errors during deletion. - cd config/manager && $(KUSTOMIZE) edit set image controller=${IMG} +undeploy: ## Undeploy controller from the K8s cluster specified in ~/.kube/config. Call with ignore-not-found=true to ignore resource not found errors during deletion. $(KUSTOMIZE) build config/default | kubectl delete --ignore-not-found=$(ignore-not-found) -f - - ##@ Build -.PHONY: manager -manager: generate fmt vet ## Build manager binary. +.PHONY: build +build: generate fmt vet ## Build manager binary. go build -o bin/manager main.go .PHONY: docker-login -docker-login: ## Log in to a Docker registry. - docker login -u "${DOCKER_USERNAME}" -p "${DOCKER_PASSWORD}" +docker-login: + docker login -u "${DOCKER_USERNAME}" -p "${DOCKER_PASSWORD}" .PHONY: docker-build docker-build: test ## Build docker image with the manager. @@ -258,7 +280,7 @@ docker-build: test ## Build docker image with the manager. .PHONY: docker-push docker-push: ## Push docker image with the manager. - $(CONTAINER_COMMAND) push $(PODMAN_SKIP_TLS_VERIFY) ${IMG} + $(CONTAINER_COMMAND) push $(PODMAN_SKIP_TLS_VERIFY) ${IMG} .PHONY: bundle-build bundle-build: ## Build the bundle image. @@ -266,50 +288,79 @@ bundle-build: ## Build the bundle image. .PHONY: bundle-push bundle-push: ## Push the bundle image. 
- $(CONTAINER_COMMAND) push $(PODMAN_SKIP_TLS_VERIFY) "${BUNDLE_IMG}" + $(CONTAINER_COMMAND) push $(PODMAN_SKIP_TLS_VERIFY) $(BUNDLE_IMG) -build-manifest: setup-manifest - ./scripts/build-manifest.sh --image "${PUBLISH_REGISTRY}/${OPERATOR_IMAGE}" --target "${RELEASE_TARGET}" +.PHONY: catalog-push +catalog-push: ## Push a catalog image. + $(MAKE) docker-push IMG=$(CATALOG_IMG) + +# go-get-tool will 'go get' any package $2 and install it to $1. +PROJECT_DIR := $(shell dirname $(abspath $(lastword $(MAKEFILE_LIST)))) +define go-get-tool +@[ -f $(1) ] || { \ +set -e ;\ +TMP_DIR=$$(mktemp -d) ;\ +cd $$TMP_DIR ;\ +go mod init tmp ;\ +echo "Downloading $(2)" ;\ +GOBIN=$(PROJECT_DIR)/bin go get $(2) ;\ +rm -rf $$TMP_DIR ;\ +} +endef + +.PHONY: opm +OPM = ./bin/opm +opm: ## Download opm locally if necessary. +ifeq (,$(wildcard $(OPM))) +ifeq (,$(shell which opm 2>/dev/null)) + @{ \ + set -e ;\ + mkdir -p $(dir $(OPM)) ;\ + OS=$(shell go env GOOS) && ARCH=$(shell go env GOARCH) && \ + curl -sSLo $(OPM) https://github.com/operator-framework/operator-registry/releases/download/v1.15.1/$${OS}-$${ARCH}-opm ;\ + chmod +x $(OPM) ;\ + } +else +OPM = $(shell which opm) +endif +endif + +# Build a catalog image by adding bundle images to an empty catalog using the operator package manager tool, 'opm'. +# This recipe invokes 'opm' in 'semver' bundle add mode. For more information on add modes, see: +# https://github.com/operator-framework/community-operators/blob/7f1438c/docs/packaging-operator.md#updating-your-existing-operator +.PHONY: catalog-build +catalog-build: opm ## Build a catalog image. 
+ $(OPM) index add $(SKIP_TLS_VERIFY) --container-tool $(CONTAINER_COMMAND) --mode semver --tag $(CATALOG_IMG) --bundles $(BUNDLE_IMGS) $(FROM_INDEX_OPT) --permissive kind-e2e-test: ./scripts/e2e-kind.sh --test-tag "${TRAVIS_BUILD_NUMBER}" -build-pipeline-manifest: setup-manifest - ./scripts/build-manifest.sh -u "${PIPELINE_USERNAME}" -p "${PIPELINE_PASSWORD}" --registry "${PIPELINE_REGISTRY}" --image "${PIPELINE_REGISTRY}/${PIPELINE_OPERATOR_IMAGE}" --target "${RELEASE_TARGET}" -bundle-pipeline: - ./scripts/bundle-release.sh -u "${DOCKER_USERNAME}" -p "${DOCKER_PASSWORD}" --registry "${PIPELINE_REGISTRY}" --prod-image "${PIPELINE_PRODUCTION_IMAGE}" --image "${PIPELINE_REGISTRY}/${PIPELINE_OPERATOR_IMAGE}" --release "${RELEASE_TARGET}" +build-manifest: setup-manifest + ./scripts/build/build-manifest.sh --registry "${PUBLISH_REGISTRY}" --image "${OPERATOR_IMAGE}" --tag "${RELEASE_TARGET}" -catalog-pipeline-build: opm ## Build a catalog image. - ./scripts/catalog-build.sh -n "v${OPM_VERSION}" -b "${REDHAT_BASE_IMAGE}" -o "${OPM}" --container-tool "docker" -i "${PIPELINE_REGISTRY}/${PIPELINE_OPERATOR_IMAGE}-bundle:${RELEASE_TARGET}" -p "${PIPELINE_PRODUCTION_IMAGE}-bundle" -a "${PIPELINE_REGISTRY}/${PIPELINE_OPERATOR_IMAGE}-catalog:${RELEASE_TARGET}" -t "${PWD}/operator-build" -v "${VERSION}" +build-operator-pipeline: + ./scripts/build/build-operator.sh --registry "${REGISTRY}" --image "${PIPELINE_OPERATOR_IMAGE}" --tag "${RELEASE_TARGET}" -catalog-pipeline-push: ## Push a catalog image. 
- $(MAKE) docker-push IMG="${PIPELINE_REGISTRY}/${PIPELINE_OPERATOR_IMAGE}-catalog:${RELEASE_TARGET}" +build-manifest-pipeline: + ./scripts/build/build-manifest.sh --registry "${REGISTRY}" --image "${IMAGE}" --tag "${RELEASE_TARGET}" -minikube-test-e2e: - ./scripts/e2e-minikube.sh --test-tag "${TRAVIS_BUILD_NUMBER}" +build-bundle-pipeline: + ./scripts/build/build-bundle.sh --prod-image "${PIPELINE_PRODUCTION_IMAGE}" --registry "${REGISTRY}" --image "${PIPELINE_OPERATOR_IMAGE}" --tag "${RELEASE_TARGET}" + +build-catalog-pipeline: opm ## Build a catalog image. + ./scripts/build/build-catalog.sh -n "v${OPM_VERSION}" -b "${REDHAT_BASE_IMAGE}" -o "${OPM}" --container-tool "docker" -r "${REGISTRY}" -i "${PIPELINE_OPERATOR_IMAGE}-bundle:${RELEASE_TARGET}" -p "${PIPELINE_PRODUCTION_IMAGE}-bundle" -a "${PIPELINE_OPERATOR_IMAGE}-catalog:${RELEASE_TARGET}" -t "${PWD}/operator-build" -v "${VERSION}" test-e2e: ./scripts/e2e-release.sh --registry-name default-route --registry-namespace openshift-image-registry \ --test-tag "${TRAVIS_BUILD_NUMBER}" --target "${RELEASE_TARGET}" test-pipeline-e2e: - ./scripts/pipeline/fyre-e2e.sh -u "${DOCKER_USERNAME}" -p "${DOCKER_PASSWORD}" \ + ./scripts/pipeline/ocp-cluster-e2e.sh -u "${DOCKER_USERNAME}" -p "${DOCKER_PASSWORD}" \ --cluster-url "${CLUSTER_URL}" --cluster-user "${CLUSTER_USER}" --cluster-token "${CLUSTER_TOKEN}" \ --registry-name "${PIPELINE_REGISTRY}" --registry-image "${PIPELINE_OPERATOR_IMAGE}" \ --registry-user "${PIPELINE_USERNAME}" --registry-password "${PIPELINE_PASSWORD}" \ - --test-tag "${TRAVIS_BUILD_NUMBER}" --release "${RELEASE_TARGET}" --channel "${DEFAULT_CHANNEL}" - -build-releases: - ./scripts/build-releases.sh --image "${PUBLISH_REGISTRY}/${OPERATOR_IMAGE}" --target "${RELEASE_TARGET}" - -build-pipeline-releases: - ./scripts/build-releases.sh -u "${PIPELINE_USERNAME}" -p "${PIPELINE_PASSWORD}" --registry "${PIPELINE_REGISTRY}" --image "${PIPELINE_REGISTRY}/${PIPELINE_OPERATOR_IMAGE}" --target "${RELEASE_TARGET}" 
- -bundle-releases: - ./scripts/bundle-releases.sh --image "${PUBLISH_REGISTRY}/${OPERATOR_IMAGE}" --target "${RELEASE_TARGET}" - -bundle-pipeline-releases: - ./scripts/bundle-releases.sh -u "${PIPELINE_USERNAME}" -p "${PIPELINE_PASSWORD}" --registry "${PIPELINE_REGISTRY}" --image "${PIPELINE_REGISTRY}/${PIPELINE_OPERATOR_IMAGE}" --target "${RELEASE_TARGET}" + --test-tag "${TRAVIS_BUILD_NUMBER}" --release "${RELEASE_TARGET}" --channel "${DEFAULT_CHANNEL}" \ + --install-mode "${INSTALL_MODE}" --architecture "${ARCHITECTURE}" bundle-build-podman: podman build -f bundle.Dockerfile -t "${BUNDLE_IMG}" @@ -318,42 +369,10 @@ bundle-push-podman: podman push --format=docker "${BUNDLE_IMG}" build-catalog: - opm index add --bundles "${BUNDLE_IMG}" --tag "${CATALOG_IMG}" -c docker - -push-catalog: - docker push "${CATALOG_IMG}" - -push-pipeline-catalog: - docker push "${CATALOG_IMG}" - -# Build a catalog image by adding bundle images to an empty catalog using the operator package manager tool, 'opm'. -# This recipe invokes 'opm' in 'semver' bundle add mode. For more information on add modes, see: -# https://github.com/operator-framework/community-operators/blob/7f1438c/docs/packaging-operator.md#updating-your-existing-operator -.PHONY: catalog-build -catalog-build: opm ## Build a catalog image. - $(OPM) index add $(SKIP_TLS_VERIFY) --container-tool $(CONTAINER_COMMAND) --mode semver --tag $(CATALOG_IMG) --bundles $(BUNDLE_IMGS) $(FROM_INDEX_OPT) --permissive + opm index add --bundles "${BUNDLE_IMG}" --tag "${CATALOG_IMG}" -# Push the catalog image. -.PHONY: catalog-push -catalog-push: ## Push a catalog image. - $(MAKE) docker-push IMG=$(CATALOG_IMG) +push-catalog: docker-login + podman push --format=docker "${CATALOG_IMG}" dev: - ./scripts/dev.sh all - -## Multi-Arch changes -.PHONY: setup-go -setup-go: ## Ensure Go is installed. 
- ./scripts/installers/install-go.sh ${GO_RELEASE_VERSION} - -build-operator-pipeline: - ./scripts/build/build-operator.sh --registry "${REGISTRY}" --image "${PIPELINE_OPERATOR_IMAGE}" --tag "${RELEASE_TARGET}" - -build-manifest-pipeline: - ./scripts/build/build-manifest.sh --registry "${REGISTRY}" --image "${IMAGE}" --tag "${RELEASE_TARGET}" - -build-bundle-pipeline: - ./scripts/build/build-bundle.sh --prod-image "${PIPELINE_PRODUCTION_IMAGE}" --registry "${REGISTRY}" --image "${PIPELINE_OPERATOR_IMAGE}" --tag "${RELEASE_TARGET}" - -build-catalog-pipeline: opm ## Build a catalog image. - ./scripts/build/build-catalog.sh -n "v${OPM_VERSION}" -b "${REDHAT_BASE_IMAGE}" -o "${OPM}" --container-tool "docker" -r "${REGISTRY}" -i "${PIPELINE_OPERATOR_IMAGE}-bundle:${RELEASE_TARGET}" -p "${PIPELINE_PRODUCTION_IMAGE}-bundle" -a "${PIPELINE_OPERATOR_IMAGE}-catalog:${RELEASE_TARGET}" -t "${PWD}/operator-build" -v "${VERSION}" + ./scripts/dev.sh all -channel ${DEFAULT_CHANNEL} diff --git a/bundle/manifests/runtime-component.clusterserviceversion.yaml b/bundle/manifests/runtime-component.clusterserviceversion.yaml index 4f39d326a..3046d5f6c 100644 --- a/bundle/manifests/runtime-component.clusterserviceversion.yaml +++ b/bundle/manifests/runtime-component.clusterserviceversion.yaml @@ -836,49 +836,37 @@ spec: x-descriptors: - urn:alm:descriptor:com.tectonic.ui:text version: v1beta2 + displayName: Runtime Component description: | This advanced Operator is capable of deploying any runtime component image with consistent, production-grade QoS. It enables enterprise architects to govern the way their applications get deployed & managed in the cluster, while dramatically reducing the learning curve for developers to deploy into Kubernetes - allowing them to focus on writing the code! Here are some key features: - #### Application Lifecyle You can deploy your runtime component container by either pointing to a container image, or an OpenShift ImageStream. 
When using an ImageStream the Operator will watch for any updates and will re-deploy the modified image. - #### Custom RBAC This Operator is capable of using a custom ServiceAccount from the caller, allowing it to follow RBAC restrictions. By default it creates a ServiceAccount if one is not specified, which can also be bound with specific roles. - #### Environment Configuration You can configure a variety of artifacts with your deployment, such as: labels, annotations, and environment variables from a ConfigMap, a Secret or a value. - #### Routing Expose your application to external users via a single toggle to create a Route on OpenShift or an Ingress on other Kubernetes environments. Advanced configuration, such as TLS settings, are also easily enabled. Expiring Route certificates are re-issued. - #### High Availability via Horizontal Pod Autoscaling Run multiple instances of your application for high availability. Either specify a static number of replicas or easily configure horizontal auto scaling to create (and delete) instances based on resource consumption. - #### Persistence and advanced storage Enable persistence for your application by specifying simple requirements: just tell us the size of the storage and where you would like it to be mounted and We will create and manage that storage for you. This toggles a StatefulSet resource instead of a Deployment resource, so your container can recover transactions and state upon a pod restart. We offer an advanced mode where the user specifies a built-in PersistentVolumeClaim, allowing them to configure many details of the persistent volume, such as its storage class and access mode. - #### Service Binding Your runtime components can expose services by a simple toggle. We take care of the heavy lifting such as creating kubernetes Secrets with information other services can use to bind. 
We also keep the bindable information synchronized, so your applications can dynamically reconnect to its required services without any intervention or interruption. - #### Exposing metrics to Prometheus The Runtime Component Operator exposes the runtime container's metrics via the [Prometheus Operator](https://operatorhub.io/operator/prometheus). Users can pick between a basic mode, where they simply specify the label that Prometheus is watching to scrape the metrics from the container, or they can specify the full `ServiceMonitor` spec embedded into the RuntimeComponent's `spec.monitoring` key controlling things like the poll interval and security credentials. - #### Easily mount logs and transaction directories If you need to mount the logs and transaction data from your runtime component to an external volume such as NFS (or any storage supported in your cluster), simply add the following (customizing the folder location and size) to your RuntimeComponent CR: ``` storage: size: 2Gi mountPath: "/logs" ``` - #### Integration with OpenShift Serverless Deploy your serverless runtime component using a single toggle. The Operator will convert all of its generated resources into [Knative](https://knative.dev) resources, allowing your pod to automatically scale to 0 when it is idle. - #### Integration with OpenShift's Topology UI We set the corresponding labels to support OpenShift's Developer Topology UI, which allows you to visualize your entire set of deployments and how they are connected. - See our [**documentation**](https://github.com/application-stacks/runtime-component-operator/tree/main/doc/) for more information. 
- displayName: Runtime Component icon: - base64data: iVBORw0KGgoAAAANSUhEUgAAAY8AAAGwCAYAAABRtumfAAAABGdBTUEAALGPC/xhBQAAACBjSFJNAAB6JgAAgIQAAPoAAACA6AAAdTAAAOpgAAA6mAAAF3CculE8AAAAaGVYSWZNTQAqAAAACAAEAQYAAwAAAAEAAgAAARIAAwAAAAEAAQAAASgAAwAAAAEAAgAAh2kABAAAAAEAAAA+AAAAAAADoAEAAwAAAAEAAQAAoAIABAAAAAEAAAGPoAMABAAAAAEAAAGwAAAAAIncb1sAAALkaVRYdFhNTDpjb20uYWRvYmUueG1wAAAAAAA8eDp4bXBtZXRhIHhtbG5zOng9ImFkb2JlOm5zOm1ldGEvIiB4OnhtcHRrPSJYTVAgQ29yZSA1LjQuMCI+CiAgIDxyZGY6UkRGIHhtbG5zOnJkZj0iaHR0cDovL3d3dy53My5vcmcvMTk5OS8wMi8yMi1yZGYtc3ludGF4LW5zIyI+CiAgICAgIDxyZGY6RGVzY3JpcHRpb24gcmRmOmFib3V0PSIiCiAgICAgICAgICAgIHhtbG5zOnRpZmY9Imh0dHA6Ly9ucy5hZG9iZS5jb20vdGlmZi8xLjAvIgogICAgICAgICAgICB4bWxuczpleGlmPSJodHRwOi8vbnMuYWRvYmUuY29tL2V4aWYvMS4wLyI+CiAgICAgICAgIDx0aWZmOlBob3RvbWV0cmljSW50ZXJwcmV0YXRpb24+MjwvdGlmZjpQaG90b21ldHJpY0ludGVycHJldGF0aW9uPgogICAgICAgICA8dGlmZjpPcmllbnRhdGlvbj4xPC90aWZmOk9yaWVudGF0aW9uPgogICAgICAgICA8dGlmZjpDb21wcmVzc2lvbj4xPC90aWZmOkNvbXByZXNzaW9uPgogICAgICAgICA8dGlmZjpSZXNvbHV0aW9uVW5pdD4yPC90aWZmOlJlc29sdXRpb25Vbml0PgogICAgICAgICA8ZXhpZjpQaXhlbFlEaW1lbnNpb24+NDMyPC9leGlmOlBpeGVsWURpbWVuc2lvbj4KICAgICAgICAgPGV4aWY6Q29sb3JTcGFjZT4xPC9leGlmOkNvbG9yU3BhY2U+CiAgICAgICAgIDxleGlmOlBpeGVsWERpbWVuc2lvbj4zOTk8L2V4aWY6UGl4ZWxYRGltZW5zaW9uPgogICAgICA8L3JkZjpEZXNjcmlwdGlvbj4KICAgPC9yZGY6UkRGPgo8L3g6eG1wbWV0YT4KTuacxQAAIWJJREFUeAHt3XusLVdZAPCWXkof0AcgBAsFRUUTRAWBBiivAkqBFijQEiPgI4I8asD4hwYRjQgxon+YCARR4z/QIi0PLQ+hlIJGTBRUoAgJSIGWtgql9AGlcP0+7hnYPZxz7p2916yZWfNbyXfPvefsWY/f2nd/55uZfc5hh2kECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAgfkLHD7/Jay1glz3y9Y60kEECBBoQyBfB1++7lL2rXvgzI97Rcz/t2e+BtMnQIDApgK3RAd/uE4nt1nnoAaO2d/AGiyBAAECowksNXmMBm5gAgQItCAgebSwi9ZAgACBygKSR2VwwxEgQKAFAcmjhV20BgIECFQWkDwqgxuOAAECLQhIHi3sojUQIECgsoDkURnccAQIEGhBQPJoYRetgQABApUFJI/K4IYjQIBACwKSRwu7aA0ECBCoLCB5VAY3HAECBFoQkDxa2EVrIECAQGUByaMyuOEIECDQgoDk0cIuWgMBAgQqCyz193m8MZyfHfHNyt6GI0CAwKYCR0YH+Yuc7rZpR5scv9Tk8bFAu/smcI4lQIDAiAJ3ibH/MeInI0b5jbBOW424+4YmQIDAmgJXx3EPibhizeM3Pkzy2JhQBwQIEBhF4IYYdbRT75LHKHtuUAIECBQ
RuKZIL2t0InmsgeYQAgQILF1A8lj6M8D6CRAgsIbAlO+2ytvR3hpxvzXW5RACBAi0IJB3Ur0v4llTW8yUk8cdAutREUdNDc18CBAgUFHg6THW5JKH01YVnwGGIkCAQCsCkkcrO2kdBAgQqCggeVTENhQBAgRaEZA8WtlJ6yBAgEBFAcmjIrahCBAg0IqA5NHKTloHAQIEKgpIHhWxDUWAAIFWBCSPVnbSOggQIFBRQPKoiG0oAgQItCIgebSyk9ZBgACBigKSR0VsQxEgQKAVAcmjlZ20DgIECFQUkDwqYhuKAAECrQhIHq3spHUQIECgooDkURHbUAQIEGhFYOrJY+rza+V5YB0ECBDoJTDlXwb1lVjJH0c8oNeKPJgAAQLjC1wVUzgtIn+p3QnjT8cMCBAgQGBOAr8Sk70iYv8GcdMeC/7XDfrNOb10j773/JLTQnvy+CIBAgQ2EnhDHP36iHyhbqpJHk1tp8UQIDBBgY/GnK6d4Lw2mpLksRGfgwkQIHBQgRvjEYcf9FEze4DkMbMNM10CBAhMQUDymMIumAMBAgRmJlDiVt2XxJpfPbN1my4BAgRKClwfnZ0akdc3FtFKVB4JphEgQGDJAvla+tNLAiiRPJbkZa0ECBAgEAKSh6cBAQIECPQWkDx6kzmAAAECBCQPzwECBAgQ6C0gefQmcwABAgQISB6eAwQIECDQW0Dy6E3mAAIECBCQPDwHCBAgQKC3gOTRm8wBBAgQICB5eA4QIECAQG8ByaM3mQMIECBAQPLwHCBAgACB3gKSR28yBxAgQICA5OE5QIBAywL5u8O/3fICx1pbid/nMdbcjUuAAIGDCeSvf23uV8AebNE1vq7yqKFsDAIECDQmIHk0tqGWQ4AAgRoCkkcNZWMQIECgMQHJo7ENtRwCBAjUEJA8aigbgwABAo0JSB6NbajlECBAoIaA5FFD2RgECBBoTEDyaGxDLYcAAQI1BCSPGsrGIECAQGMCkkdjG2o5BAgQqCEgedRQNgYBAgQaE5A8GttQyyEwY4H8IYbaTAQkj5lslGkSWICAH2A4o02WPGa0WaZKoHEBlceMNljymNFmmSqBxgVUHjPaYMljRptlqgQIEJiKgOQxlZ0wDwIECMxIQPKY0WaZKgECBKYiIHlMZSfMgwABAjMSkDxmtFmmSoAAgakISB5T2QnzIECAwIwEJI8ZbZapEiBAYCoCksdUdsI8CBAgMCMByWNGm2WqBAgQmIqA5DGVnTAPAgQIzEhA8pjRZpkqAQIEpiIgeUxlJ8yDAAECMxKQPGa0WaZKgACBqQhIHlPZCfMgQIDAjAQkjxltlqkSIEBgKgKSx1R2wjwIECAwI4ESyeO2M1qvqRIgQGAIgX1DdDrlPkss+FmxwHOnvEhzI0CAwMAC10b/fzPwGJPqvkTy+HKs6OWTWpXJECBAgMCgAiVOWw06QZ0TIECAwPQEJI/p7YkZESDQlsDNsZyvt7Wkww6TPFrbUeshQGBqAkfGhI6a2qQ2nY/ksamg4wkQILBAgRIXzDOr/sQC7SyZAAECnUCemrqs+8cSPpZIHq8IqF+PuGUJYNZIgACBHQT2x+fOirh4h681+akSyeNHQubYJnUsigABAocmcGM87ORDe2gbj3LNo419tAoCBAhUFZA8qnIbjAABAm0ISB5t7KNVECBAoKqA5FGV22AECBBoQ0DyaGMfrYIAAQJVBSSPqtwGI0CAQBsCkkcb+2gVBAgQqCogeVTlNhgBAgTaEJA82thHqyBAgEBVAcmjKrfBCBAg0IaA5NHGPloFAQIEqgpIHlW5DUaAAIE2BCSPNvbRKggQIFBVQPKoym0wAgQItCEgebSxj1ZBgACBqgKSR1VugxEgQKANAcmjjX20CgIECFQVkDyqchuMAAECbQhIHm3so1UQIECgqoDkUZXbYAQIEGhDQPJoYx+tggABAlUFJI+q3AYjQIBAGwKSRxv7aBUECBCoKiB5VOU2GAECBNoQkDz
a2EerIECAQFUByaMqt8EIECDQhoDk0cY+WgUBAgSqCkgeVbkNRoAAgTYEJI829tEqCBAgUFVA8qjKbTACBAi0ISB5tLGPVkGAAIGqApJHVW6DESBAoA0ByaONfbQKAgQIVBWQPKpyG4wAAQJtCEgebeyjVRAgQKCqgORRldtgBAgQaENA8mhjH62CAAECVQUkj6rcBiNAgEAbApJHG/toFQQIEKgqIHlU5TYYAQIE2hCQPNrYR6sgQIBAVQHJoyq3wQgQINCGgOTRxj5aBQECBKoKSB5VuQ1GgACBNgQkjzb20SoIECBQVUDyqMptMAIECLQhIHm0sY9WQYAAgaoCkkdVboMRIECgDQHJo419tAoCBAhUFZA8qnIbjAABAm0ISB5t7KNVECBAoKqA5FGV22AECBBoQ0DyaGMfrYIAAQJVBSSPqtwGI0CAQBsCkkcb+2gVBAgQqCogeVTlNhgBAgTaEJA82thHqyBAgEBVAcmjKrfBCBAg0IaA5NHGPloFAQIEqgpIHlW5DUaAAIE2BCSPNvbRKggQIFBVQPKoym0wAgQItCEgebSxj1ZBgACBqgKSR1VugxEgQKANAcmjjX20CgIECFQVkDyqchuMAAECbQhIHm3so1UQIECgqoDkUZXbYAQIEGhDQPJoYx+tggABAlUFJI+q3AYjQIBAGwKSRxv7aBUECBCoKiB5VOU2GAECBNoQkDza2EerIECAQFUByaMqt8EIECDQhoDk0cY+WgUBAgSqCkgeVbkNRoAAgTYE9rWxDKtYsMA3Y+3XRRweceLWx/igESAwpIDkMaSuvocS+Fp0fHTEJyL+NuKqiG9EPCziyRF3jshkko/RCBAYQEDyGABVl8UFsrq4KWJ/xLsizo94Z0R+brW9Of7xGxEnRZwe8cyIUyNuiDguIhOKRoBAAQHJowCiLgYRWK0uMlm8I+I/D3GkL8bjXr8VecgjI54SoSoJBI1ACQHJo4SiPkoIdNXFt6Ozd0fsVl2sM9YlcVDGalVyTvw7q5IbI+4Q4eaRQNAIHKqA5HGoUh43hMAm1cW68zlYVZL9HrNu544jsBQByWMpOz2Nda5WF3ntIq9R7HTtouZsL4nBMlarkrPj3w+PUJUEgkZgJwHJYycVnyspkLfR5nfyeWdUnop6e8R/RZRseZfV1RGf2rDT7VXJI6K/vE6S10t+YKtvVckWhA/LFpA8lr3/Q6y+RnVxp5j46RFPj3hMRCaovC336xF5Yf2CiKxo8u6sTdoH4uCMF0ecFJFjqkoCQSMgeXgOlBDoqouPR2fnRfx9ROnq4pTo80kRT4s4OSKTVF7ozta9nyNvx/3liLMijo/4cETO56IIVUkgaARKCUgepSSX1c/Nsdz8Lj/vjBrq2sX26iLfBHhsxG0jsh114MP3/Znv5Thh67OZcH4q4vcjcr5DVSU/GH13VUme6nKtJBC0tgUkj7b3t+TqplJd9F1TViUZQ1YlV0T/f7kV8eEw10pSQWtaQPJoens3WtxO1UWe/snv4Eu1rrp4RnR4WsShVhfrjq8qWVfOcQS2CUge20AW/s+5VhfrbtsYVUneApx3cD01wh1c6+6c40YXkDxG34JRJ7BaXeTdSd37LuZcXawLWqsquTQmmPGSiO3XSm6Iz+XpNe92DwRt2gKSx7T3Z4jZddXFx6Lz8yPGujNqiLWV7HOnqiQvxP9LxHkRJe7g2n6tpKtK8n0ld4nIW43zJgGNwOQEJI/JbUnxCXXVxbei56wu/m7r4xKri3Vxd6pK/iA6uykik+9bItJ20/eVrFYld4v+uju4Hhl/V5UEgjYdAcljOntRcibbq4u8RTUrjZLtlOjsYO+7KDnelPrKqiRbvs/klyLOisj3lZSsSq6M/t6wFfHhOz8u5cz4mNdKVCUpoo0qIHmMyl9s8LGqi9vHCrrn0G7vuyi2yIl2lFVJJo5smVDzfSVDViW/Gf13VUnepfbIiBsjMpEdEaERqCLQ/cevMphBigq
sVhd5Dj5Pn6guihKv1dluVcmHo7c3RVwU8am1ev7eQdurkvzR8t0dXKqS7zn524ACkseAuIW7Vl0UBq3Q3WpV8uAYb6iq5IPRd4aqpMKmGuKAgOQx7WdCJoyrIr4UkT/sT3URCDNueWovY/u1kqGrkrxW8sCIe0XcPeI2ERqBjQQkj434Bj047655f0Se1867ekq1O0VHeRdP9tu9q9u1i1K6h95P7aokZ5a3AL8q4t4RR0RoBNYWkDzWphv8wPypsedGlEgceSH3jIinRdwj4paITBjZunP0B/7lz7EEalQlF8bi3h7x2Yh8HmgE1haQPNamG/zAz8QI+Z98k5bnwX82ovuZUfZ7E816x+5WlXwrpvDGiOdvMJXs49oIyWMDRIc69znl58CmiSPXlr8Z7ysReYoiKxltngJ57et2EddE5LvSN22ZPDQCGwn4TnQjvskffM7WDO8VH7t3K+evbM3bfP0MpUCYaMtEf2NEXth+T0T3M8dy3zQCkxCQPCaxDYNP4n9ihL/YihzsMRF58TTvwjk+IisT1z4CYcT2tRg7q4tPR2SyeEfEv0doBCYpIHlMclsGn9R7Y4SMF0TcM+IJEWdHqEoCoVJbrS7eHWN21UUmEY3A5AUkj8lv0fdNMC+m/l5EXs+4aOtjfFi7fS6OVJWszdfrwO3VRd759JFePRz8wfnNwBMj8rTkKw/+cI8gsJ6A5LGe25hHHRmDvzQib+H9s4j/jbgw4m0Rl0Rs2rZXJavXSvLFz7WSQxfuqotM+KvXLkpXF3kaMn88SUaehszx8tZfySMQtGEEJI9hXGv02r1P49gY7EURz47Idy5/IOK8iFJVyWuir4xsp0V010pOjL/nBV3XSgJhpWViyBfu/47IU1HviChdXdwr+uze6Hlq/H2nGyBuic9rBAYTkDwGo63acV7wzhfzbPkCn28KHKIqeV/0m/HCiJMjVq+VLLUqWa0uVq9dXB8+Jdtjo7OsLM6MyOoiE/cxEdlOOPDBnwTqCUge9axrjpTVSLauKnlO/D0rlaxKzo/IquQLEZu0y+PgpVYlNauLs8PZjQybPFMdO4iA5DEI66Q6zaqk+860q0r+ND73fxEXRJS6VrK9KumuleRplfwuPE+p5XfLc2xZXdy0NfGhq4s8LXhGhOpiC9yHaQpIHtPclyFntVqVnBsDPSdiiKrktdFvRrZHR+SLYp52ydNrmUSmfq1ktbrIai2vXXw0omT7oeisS7IPjb/vdO2i5Hj6IlBMQPIoRjnLjvJFfLeq5ML4WlYl7y+wsoujj4y8sJ/XSroXzClVJV11sT/mt1pd5E83LtkeF51lEs1rF8dF5B4cE5Gt24sD//IngQkLSB4T3pwRprZaleQLfd7B1XJV0lUXn4x1vjmiVnWRp6TydlqNwGwFJI/Zbt3gEx+jKrlHrCqrknMi8iJxftdf8lqJ6iJANQIlBCSPEorL6GO3quTSWP75EXkH1+c3pMjjX7cV2dWjIrprJXeMv2dC63utJKuLPOayiJxnVhf/EVGy/XB0lkkv74zKaxdfjVBdBILWroDk0e7eDrmy1ark0THQgyNeHfHliLxW8taIEtdKso+MvLB/qFXJanXxrjguT0e9M+LGiJItr108NeKMiDy1l/+XusTm2kVgaG0LSB5t72+t1a1WJS+MQZ8VkS+oH4w4L2LoquSYGCO/01ddBIJGoIaA5FFDeVljrFYledpptSrJiiQj77zatK1WJXnL61URpauLn4s+87RZVhd57eWICNVFIGgEJA/PgaEFsirIltXJCyJ+MSJfiC+NKFWVfDb6KtHuHZ101y4eEn937aKEqj6aFJA8mtzWyS6qVlXSB6CrLs6MgzLB5f8J1UUfQY9dpIDkschtn8yid6tKVq+VXF54tqqLwqC6W6aA5LHMfZ/iqlerkkfGBB8U8ScReQfXptdKfj76yDujnhShuggEjcCmApLHpoKOH0rgYFXJX8XAN+8yeB77qxFnR+S
1i2sjvO8iEDQCpQQkj1KS+hlSYHtVckoMlj8X6xd2GfSC+PwjIvKXMmU74cAHf24J+NEongobC+R/So3A3AQyKeQPWNyt5de6xLHbY3yeAIENBCSPDfAcSoAAgaUKSB5L3XnrXrKA01ZL3v1Ca5c8CkHqhgABAksSkDyWtNvWSoAAgUICkkchSN0QIEBgSQKSx5J221oJECBQSEDyKASpGwIECCxJQPJY0m5bKwECBAoJSB6FIHVDgACBJQlIHkvabWslQIBAIQHJoxCkbqoLHFd9RAMSIPBdAcnjuxT+MjOBk2Y2X9Ml0JSA5NHUdloMAQIE6ghIHnWcjUKAAIGmBCSPprbTYggQIFBHQPKo42yUugJH1h3OaASWJyB5LG/Pl7Diuy5hkdZIYEwByWNMfWMPJbB/qI71S4DAAQHJwzOBAAECBHoLSB69yRxAgAABApKH5wABAgQI9BaQPHqTOYAAAQIEJA/PAQIECBDoLSB59CZzAAECBAhIHp4DBAgQINBbQPLoTeYAAgQIEJA8PAcILE/g8OUt2YpLC0gepUX1R4AAgQUISB4L2ORGl7jXd897fa1RDssiUFdA8qjrbbRyAh/fo6tP7PE1XyJAoICA5FEAURejCFyzx6hX7vE1XyJAoICA5FEAURcECBBYmoDksbQdt14CBAgUEJA8CiDqggABAksTkDyWtuPWS4AAgQICkkcBRF0QIEBgaQKSx9J2vJ313nePpfzMHl/zJQIECghIHgUQdTGKwJ32GPXEPb7mSwQIFBCQPAog6oIAAQJLE5A8lrbj1kuAAIECApJHAURdECBAYGkCksfSdtx6CRAgUEBA8iiAqAsCBAgsTUDyWNqOWy8BAgQKCEgeBRB1QYAAgaUJSB5L23HrJUCAQAEByaMAoi4IECCwNAHJY2k7br0ECBAoICB5FEDUBQECBJYmIHksbceXsd4blrFMqyQwnoDkMZ69kYcT+MpwXeuZAIEUkDw8DwgQIECgt4Dk0ZvMAQRmL3D47FdgAaMLSB6jb4EJECBAYH4Cksf89syMCRAgMLqA5DH6FpjAmgLXrXmcwwgQKCAgeRRA1MUoAl8cZVSDEiDwHQHJwxOBAAECBHoLSB69yRxAgAABApKH5wABAgQI9BaQPHqTOYAAAQIEJA/PAQIECBDoLSB59CZzAAECBAhIHp4DBAgQINBbQPLoTeYAAgQIEJA8PAcIECBAoLeA5NGbzAEECBAgIHl4DhBoU8CPXW9zXyezKsljMltxyBO5OR755xHfjvhqxP4IjUAK5HMhnxPfjHhlhEZgMAHJYzDawTrOF4gXRxwR8bSI10bkDwnM39v99QhtWQI3xXJvjLg8Ir+peHLEkRG/G6ERGExg32A963hTgawwDtbeGw/IeH7EPSNOj3hmxEMjvhZxXITTF4HQUMuKM/f2mIgPRrwp4qKIPj9l+BvxeI3ARgKSx0Z8gx78wJ69fy4e/5qtyEMfE/HUiDMiTojIKvPoCG1+AllZZLsm4sKIt0Z8IGLddr91D3QcgU5A8ugkpvfxzjGlH4349JpT26kqOSf6yqrk+ghVyZqwFQ5brS4ujfG66uKKAmM/JPq4pUA/uli4gOQx3SdAVgn/FvHYiA9vOM3tVclp0V9WJWdGqEo2xC10eOnqYqdpPS4+mae4NAIbC0geGxMO1sHtoue88PmWiLxu8baIPF3xnohN2/uig4wXRHTXSlQlm6r2O361ushTUOdF5At7ieqim0meqnx8xFMi8vRl3oWVN1poBDYWkDw2Jhy0g0waJ22N8Nz4mBfDj4/4UET3YvPZ+PsmbbeqJF9sToxwrWQT3Vsf21UXV8en8xuBvH6Rp6VKtvtEZ3njxNkRD4r4akQ+Z/K5pBEoJiB5FKMcvKN8Ec9TTNlOjXhAxKsirosYqio5OfpevYPLtZIA6dF2qi7+IY6/skcfB3vo9uoiK9bbRhy9dWD3nDlYP75OoJeA5NGLa1IPzls1s90+Yqi
q5PLoO99HkpEtr5XkKZC8VqIqCYQd2mp1kZVFVhiqix2gfGreApLHvPevm33tquSFMXBXleS1kodFLLUq6aqL/E5/9dqF6iJAtHYFJI8293a1KnleLLG7VvJP8fe8VpKnTja9VrK9Knl09JlVyZMjWq9KuuriqlhrV13kG/ZKtvtEZ0+IeEaEaxclZfVVREDyKMI46U7yQml33jsrhPtHvDKi9LWSi6PPjBdFdFVJXrTN6zNzr0pqVhd5C/UTI46KyLvt8mO2bg8P/MufBEYWkDxG3oARhp9CVZIJrZvHCASHNGT+rLCcp+rikLg8aGkCksfSdvzW6x2jKrlHTOH0iLxWklVJ9zO48rrNmG21urgkJpKn9y6K+FJEqZZrfHyE6qKUqH5GE5A8RqOf5MBdNZB3cO10rSRfTD+z4cw/H8e/biuyq+5aSd7BdceImlXJanWRb8Z8W8SHIko21y5KauprMgKSx2S2YnIT2akq+aOYZVYKb4/IC8Ul3u1+cfSTkddKuqqku1aSL+53iChVlXwr+srrL0dHXBKhuggEjcA6ApLHOmrLPKarSvLF/LkRedrp+IjuDq4hqpJHRf/dHVzrViWZgDL5XBlxQYTqIhA0ApsKSB6bCi7z+O1VSb7bfYiq5P3Rb8a5EatVycPj35+M2K19Kr5w74g8tqsurtrtwWt83rWLNdAc0paA5NHWfo61mjwNlK1mVZJj7daeE1+4drcvrvn5H4vj8hbaPKX2wIjrIo6LyESqEVicgOSxuC0ffME7VSX5vpK8VpKnjEpdK8n+dmslEsf26iITZP7MqO59F3nKTiOwWAHJY7FbX23hXVWSd3ANda2k1GJ2qi4kiVK6+mlKQPJoajsnv5i9qpK8gysvaJe4g+tQIVariyfFQVlVqC4OVc/jFi0geSx6+0df/GpV8msxm7yDK68j/HPEmyJK3MEV3dyqqS5uxeEfBNYTkDzWc3NUeYGsSrpTRPl71u8f0V0r2aQq6aqLs6K/7mdGqS4CQyOwiYDksYmeY4cU2KQqUV0MuTP6JhACkoenwRwE9qpK/joWcFnE7SJ+POLZEXnt4siI/Fy2rqI58C9/EiCwsYDksTGhDkYQWK1KfifGz9t28/TUsSPMxZAEFikgeSxy25tb9F5vGGxusRZEYAoC+d2aRoAAAQIEeglIHr24PJgAAQIEUkDy8DwgQIAAgd4CkkdvMgcQIECAgOThOUCAAAECvQUkj95kDiBAgAABycNzgAABAgR6C0gevckcQIAAAQKSh+cAAQIECPQWkDx6kzmAAAECBCQPzwECBAgQ6C0gefQmcwABAgQISB6eAwQIECDQW0Dy6E3mAAIECBCQPDwHCBAgQKC3gOTRm8wBBAgQICB5eA4QIECAQG8ByaM3mQMIECBAQPLwHCBAgACB3gKSR28yBxAgMCOB/THXDK2wwL7C/emOAAECUxI4fEqTaWkuKo+WdtNaCBAgUElA8qgEbRgCBAi0JCB5tLSb1kKAAIFKApJHJWjDECBAoCUByaOl3bQWAgQIVBKQPCpBG4YAAQItCUgeLe2mtRAgQKCSgORRCdowBAgQaElA8mhpN62FAAEClQQkj0rQhiFAgEBLApJHS7tpLQQIEKgkIHlUgjYMAQIEWhKQPFraTWshQIBAJQHJoxK0YQgQINCSgOTR0m5aCwECBCoJSB6VoA1DgACBlgQkj5Z201oIECBQSUDyqARtGAIECLQkIHm0tJvWQoAAgUoCkkclaMMQIECgJQHJo6XdtBYCBAhUEthXaRzDECBAgEB5gY9Fl/eN2L9G11k8fGSN475ziOSxrpzjCBAgML7A82IKL1tzGplwvrjmsYdJHuvKOY4AAQLjC9wcU/jCGNNwzWMMdWMSIEBg5gKSx8w30PQJECAwhoDkMYa6MQkQIDBzAclj5hto+gQIEBhDQPIYQ92YBAgQmLmA5DHzDTR9AgQIjCEgeYyhbkwCBAjMXEDymPkGmj4BAgT
GEJA8xlA3JgECBGYuIHnMfANNnwABAmMISB5jqBuTAAECMxeQPGa+gaZPgACBMQQkjzHUjUmAAIGZC0geM99A0ydAgMAYApLHGOrGJECAwMwFJI+Zb6DpEyBAYAwByWMMdWMSIEBg5gKSx8w30PQJECAwhoDkMYa6MQkQIDBzAclj5hto+gQIEBhDQPIYQ92YBAgsTaC519p9S9tB6yVAgEBlgY/EeJdF3HXNcd+75nGDHiZ5DMqrcwIECBx2dRic0ppDc6VUaxtkPQQIEJiigOQxxV0xJwIECExcQPKY+AaZHgECBKYoIHlMcVfMiQABAhMXkDwmvkGmR4AAgSkKSB5T3BVzIkCAwMQFJI+Jb5DpESBAYIoCkscUd8WcCBAgMHEByWPiG2R6BAgQmKKA5DHFXTEnAgQITFxA8pj4BpkeAQIEpiggeUxxV8yJAAECExeQPCa+QaZHgACBKQpIHlPcFXMiQIDAxAUkj4lvkOkRIEBgigKSxxR3xZwIECAwcQHJY+IbZHoECBCYooDkMcVdMScCBAhMXEDymPgGmR4BAgSmKCB5THFXzIkAAQITF5A8Jr5BpkeAAIEpCkgeU9wVcyJAgMDEBUokj30TX6PpESBAYGiBI4YeYGr9l3jh/61Y1Dcibpna4syHAAEClQSuj3HOrzSWYQgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIEBgNgL/D8pzsAXky5EoAAAAAElFTkSuQmCC mediatype: image/png diff --git a/config/manager/manager.yaml b/config/manager/manager.yaml index f9d5447f4..4767fb956 100644 --- a/config/manager/manager.yaml +++ b/config/manager/manager.yaml @@ -71,3 +71,15 @@ spec: memory: 64Mi terminationGracePeriodSeconds: 10 serviceAccountName: controller-manager + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/arch + operator: In + values: + - amd64 + - ppc64le + - s390x + diff --git a/config/manifests/bases/runtime-component.clusterserviceversion.yaml b/config/manifests/bases/runtime-component.clusterserviceversion.yaml index 70a2159e4..2cd09453f 100644 --- a/config/manifests/bases/runtime-component.clusterserviceversion.yaml +++ b/config/manifests/bases/runtime-component.clusterserviceversion.yaml @@ -11,6 +11,11 @@ metadata: description: Deploys any runtime component with dynamic and auto-tuning configuration repository: https://github.com/application-stacks/runtime-component-operator support: Community + labels: + operatorframework.io/arch.amd64: supported + operatorframework.io/arch.ppc64le: supported + operatorframework.io/arch.s390x: supported + operatorframework.io/os.linux: 
supported name: runtime-component.v0.0.0 namespace: placeholder spec: @@ -698,48 +703,6 @@ spec: x-descriptors: - urn:alm:descriptor:com.tectonic.ui:text version: v1beta2 - description: | - This advanced Operator is capable of deploying any runtime component image with consistent, production-grade QoS. It enables enterprise architects to govern the way their applications get deployed & managed in the cluster, while dramatically reducing the learning curve for developers to deploy into Kubernetes - allowing them to focus on writing the code! - Here are some key features: - - #### Application Lifecyle - You can deploy your runtime component container by either pointing to a container image, or an OpenShift ImageStream. When using an ImageStream the Operator will watch for any updates and will re-deploy the modified image. - - #### Custom RBAC - This Operator is capable of using a custom ServiceAccount from the caller, allowing it to follow RBAC restrictions. By default it creates a ServiceAccount if one is not specified, which can also be bound with specific roles. - - #### Environment Configuration - You can configure a variety of artifacts with your deployment, such as: labels, annotations, and environment variables from a ConfigMap, a Secret or a value. - - #### Routing - Expose your application to external users via a single toggle to create a Route on OpenShift or an Ingress on other Kubernetes environments. Advanced configuration, such as TLS settings, are also easily enabled. Expiring Route certificates are re-issued. - - #### High Availability via Horizontal Pod Autoscaling - Run multiple instances of your application for high availability. Either specify a static number of replicas or easily configure horizontal auto scaling to create (and delete) instances based on resource consumption. 
- - #### Persistence and advanced storage - Enable persistence for your application by specifying simple requirements: just tell us the size of the storage and where you would like it to be mounted and We will create and manage that storage for you. - This toggles a StatefulSet resource instead of a Deployment resource, so your container can recover transactions and state upon a pod restart. - We offer an advanced mode where the user specifies a built-in PersistentVolumeClaim, allowing them to configure many details of the persistent volume, such as its storage class and access mode. - - #### Service Binding - Your runtime components can expose services by a simple toggle. We take care of the heavy lifting such as creating kubernetes Secrets with information other services can use to bind. We also keep the bindable information synchronized, so your applications can dynamically reconnect to its required services without any intervention or interruption. - - #### Exposing metrics to Prometheus - The Runtime Component Operator exposes the runtime container's metrics via the [Prometheus Operator](https://operatorhub.io/operator/prometheus). - Users can pick between a basic mode, where they simply specify the label that Prometheus is watching to scrape the metrics from the container, or they can specify the full `ServiceMonitor` spec embedded into the RuntimeComponent's `spec.monitoring` key controlling things like the poll internal and security credentials. - - #### Easily mount logs and transaction directories - If you need to mount the logs and transaction data from your runtime component to an external volume such as NFS (or any storage supported in your cluster), simply add the following (customizing the folder location and size) to your RuntimeComponent CR: - ``` storage: size: 2Gi mountPath: "/logs" ``` - - #### Integration with OpenShift Serverless - Deploy your serverless runtime component using a single toggle. 
The Operator will convert all of its generated resources into [Knative](https://knative.dev) resources, allowing your pod to automatically scale to 0 when it is idle. - - #### Integration with OpenShift's Topology UI - We set the corresponding labels to support OpenShift's Developer Topology UI, which allows you to visualize your entire set of deployments and how they are connected. - - See our [**documentation**](https://github.com/application-stacks/runtime-component-operator/tree/main/doc/) for more information. displayName: Runtime Component icon: - base64data: iVBORw0KGgoAAAANSUhEUgAAAY8AAAGwCAYAAABRtumfAAAABGdBTUEAALGPC/xhBQAAACBjSFJNAAB6JgAAgIQAAPoAAACA6AAAdTAAAOpgAAA6mAAAF3CculE8AAAAaGVYSWZNTQAqAAAACAAEAQYAAwAAAAEAAgAAARIAAwAAAAEAAQAAASgAAwAAAAEAAgAAh2kABAAAAAEAAAA+AAAAAAADoAEAAwAAAAEAAQAAoAIABAAAAAEAAAGPoAMABAAAAAEAAAGwAAAAAIncb1sAAALkaVRYdFhNTDpjb20uYWRvYmUueG1wAAAAAAA8eDp4bXBtZXRhIHhtbG5zOng9ImFkb2JlOm5zOm1ldGEvIiB4OnhtcHRrPSJYTVAgQ29yZSA1LjQuMCI+CiAgIDxyZGY6UkRGIHhtbG5zOnJkZj0iaHR0cDovL3d3dy53My5vcmcvMTk5OS8wMi8yMi1yZGYtc3ludGF4LW5zIyI+CiAgICAgIDxyZGY6RGVzY3JpcHRpb24gcmRmOmFib3V0PSIiCiAgICAgICAgICAgIHhtbG5zOnRpZmY9Imh0dHA6Ly9ucy5hZG9iZS5jb20vdGlmZi8xLjAvIgogICAgICAgICAgICB4bWxuczpleGlmPSJodHRwOi8vbnMuYWRvYmUuY29tL2V4aWYvMS4wLyI+CiAgICAgICAgIDx0aWZmOlBob3RvbWV0cmljSW50ZXJwcmV0YXRpb24+MjwvdGlmZjpQaG90b21ldHJpY0ludGVycHJldGF0aW9uPgogICAgICAgICA8dGlmZjpPcmllbnRhdGlvbj4xPC90aWZmOk9yaWVudGF0aW9uPgogICAgICAgICA8dGlmZjpDb21wcmVzc2lvbj4xPC90aWZmOkNvbXByZXNzaW9uPgogICAgICAgICA8dGlmZjpSZXNvbHV0aW9uVW5pdD4yPC90aWZmOlJlc29sdXRpb25Vbml0PgogICAgICAgICA8ZXhpZjpQaXhlbFlEaW1lbnNpb24+NDMyPC9leGlmOlBpeGVsWURpbWVuc2lvbj4KICAgICAgICAgPGV4aWY6Q29sb3JTcGFjZT4xPC9leGlmOkNvbG9yU3BhY2U+CiAgICAgICAgIDxleGlmOlBpeGVsWERpbWVuc2lvbj4zOTk8L2V4aWY6UGl4ZWxYRGltZW5zaW9uPgogICAgICA8L3JkZjpEZXNjcmlwdGlvbj4KICAgPC9yZGY6UkRGPgo8L3g6eG1wbWV0YT4KTuacxQAAIWJJREFUeAHt3XusLVdZAPCWXkof0AcgBAsFRUUTRAWBBiivAkqBFijQEiPgI4I8asD4hwYRjQgxon+YCARR4z/QIi0PLQ+hlIJGTBRUoAgJSIGWtgql9AGlcP0+7hnYPZxz7p2916yZWfNb
yXfPvefsWY/f2nd/55uZfc5hh2kECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAgfkLHD7/Jay1glz3y9Y60kEECBBoQyBfB1++7lL2rXvgzI97Rcz/t2e+BtMnQIDApgK3RAd/uE4nt1nnoAaO2d/AGiyBAAECowksNXmMBm5gAgQItCAgebSwi9ZAgACBygKSR2VwwxEgQKAFAcmjhV20BgIECFQWkDwqgxuOAAECLQhIHi3sojUQIECgsoDkURnccAQIEGhBQPJoYRetgQABApUFJI/K4IYjQIBACwKSRwu7aA0ECBCoLCB5VAY3HAECBFoQkDxa2EVrIECAQGUByaMyuOEIECDQgoDk0cIuWgMBAgQqCyz193m8MZyfHfHNyt6GI0CAwKYCR0YH+Yuc7rZpR5scv9Tk8bFAu/smcI4lQIDAiAJ3ibH/MeInI0b5jbBOW424+4YmQIDAmgJXx3EPibhizeM3Pkzy2JhQBwQIEBhF4IYYdbRT75LHKHtuUAIECBQRuKZIL2t0InmsgeYQAgQILF1A8lj6M8D6CRAgsIbAlO+2ytvR3hpxvzXW5RACBAi0IJB3Ur0v4llTW8yUk8cdAutREUdNDc18CBAgUFHg6THW5JKH01YVnwGGIkCAQCsCkkcrO2kdBAgQqCggeVTENhQBAgRaEZA8WtlJ6yBAgEBFAcmjIrahCBAg0IqA5NHKTloHAQIEKgpIHhWxDUWAAIFWBCSPVnbSOggQIFBRQPKoiG0oAgQItCIgebSyk9ZBgACBigKSR0VsQxEgQKAVAcmjlZ20DgIECFQUkDwqYhuKAAECrQhIHq3spHUQIECgooDkURHbUAQIEGhFYOrJY+rza+V5YB0ECBDoJTDlXwb1lVjJH0c8oNeKPJgAAQLjC1wVUzgtIn+p3QnjT8cMCBAgQGBOAr8Sk70iYv8GcdMeC/7XDfrNOb10j773/JLTQnvy+CIBAgQ2EnhDHP36iHyhbqpJHk1tp8UQIDBBgY/GnK6d4Lw2mpLksRGfgwkQIHBQgRvjEYcf9FEze4DkMbMNM10CBAhMQUDymMIumAMBAgRmJlDiVt2XxJpfPbN1my4BAgRKClwfnZ0akdc3FtFKVB4JphEgQGDJAvla+tNLAiiRPJbkZa0ECBAgEAKSh6cBAQIECPQWkDx6kzmAAAECBCQPzwECBAgQ6C0gefQmcwABAgQISB6eAwQIECDQW0Dy6E3mAAIECBCQPDwHCBAgQKC3gOTRm8wBBAgQICB5eA4QIECAQG8ByaM3mQMIECBAQPLwHCBAgACB3gKSR28yBxAgQICA5OE5QIBAywL5u8O/3fICx1pbid/nMdbcjUuAAIGDCeSvf23uV8AebNE1vq7yqKFsDAIECDQmIHk0tqGWQ4AAgRoCkkcNZWMQIECgMQHJo7ENtRwCBAjUEJA8aigbgwABAo0JSB6NbajlECBAoIaA5FFD2RgECBBoTEDyaGxDLYcAAQI1BCSPGsrGIECAQGMCkkdjG2o5BAgQqCEgedRQNgYBAgQaE5A8GttQyyEwY4H8IYbaTAQkj5lslGkSWICAH2A4o02WPGa0WaZKoHEBlceMNljymNFmmSqBxgVUHjPaYMljRptlqgQIEJiKgOQxlZ0wDwIECMxIQPKY0WaZKgECBKYiIHlMZSfMgwABAjMSkDxmtFmmSoAAgakISB5T2QnzIECAwIwEJI8ZbZapEiBAYCoCksdUdsI8CBAgMCMByWNGm2WqBAgQmIqA5DGVnTAPAgQIzEhA8pjRZpkqAQIEpiIgeUxlJ8yDAAECMxKQPGa0WaZKgACBqQhIHlPZCfMgQIDAjAQkjxltlqkSIEBgKgKSx1R2wjwIECAwI4ESyeO2M1qvqRIgQGAIgX1DdDrlPkss+FmxwHOnvEhzI0CAwMAC10b/fzPwGJPqvkTy+HKs6OWTWpXJECBAgMCgAiVOWw06QZ0TIECAwPQEJI/p7YkZESDQlsDNsZyv
t7Wkww6TPFrbUeshQGBqAkfGhI6a2qQ2nY/ksamg4wkQILBAgRIXzDOr/sQC7SyZAAECnUCemrqs+8cSPpZIHq8IqF+PuGUJYNZIgACBHQT2x+fOirh4h681+akSyeNHQubYJnUsigABAocmcGM87ORDe2gbj3LNo419tAoCBAhUFZA8qnIbjAABAm0ISB5t7KNVECBAoKqA5FGV22AECBBoQ0DyaGMfrYIAAQJVBSSPqtwGI0CAQBsCkkcb+2gVBAgQqCogeVTlNhgBAgTaEJA82thHqyBAgEBVAcmjKrfBCBAg0IaA5NHGPloFAQIEqgpIHlW5DUaAAIE2BCSPNvbRKggQIFBVQPKoym0wAgQItCEgebSxj1ZBgACBqgKSR1VugxEgQKANAcmjjX20CgIECFQVkDyqchuMAAECbQhIHm3so1UQIECgqoDkUZXbYAQIEGhDQPJoYx+tggABAlUFJI+q3AYjQIBAGwKSRxv7aBUECBCoKiB5VOU2GAECBNoQkDza2EerIECAQFUByaMqt8EIECDQhoDk0cY+WgUBAgSqCkgeVbkNRoAAgTYEJI829tEqCBAgUFVA8qjKbTACBAi0ISB5tLGPVkGAAIGqApJHVW6DESBAoA0ByaONfbQKAgQIVBWQPKpyG4wAAQJtCEgebeyjVRAgQKCqgORRldtgBAgQaENA8mhjH62CAAECVQUkj6rcBiNAgEAbApJHG/toFQQIEKgqIHlU5TYYAQIE2hCQPNrYR6sgQIBAVQHJoyq3wQgQINCGgOTRxj5aBQECBKoKSB5VuQ1GgACBNgQkjzb20SoIECBQVUDyqMptMAIECLQhIHm0sY9WQYAAgaoCkkdVboMRIECgDQHJo419tAoCBAhUFZA8qnIbjAABAm0ISB5t7KNVECBAoKqA5FGV22AECBBoQ0DyaGMfrYIAAQJVBSSPqtwGI0CAQBsCkkcb+2gVBAgQqCogeVTlNhgBAgTaEJA82thHqyBAgEBVAcmjKrfBCBAg0IaA5NHGPloFAQIEqgpIHlW5DUaAAIE2BCSPNvbRKggQIFBVQPKoym0wAgQItCEgebSxj1ZBgACBqgKSR1VugxEgQKANAcmjjX20CgIECFQVkDyqchuMAAECbQhIHm3so1UQIECgqoDkUZXbYAQIEGhDQPJoYx+tggABAlUFJI+q3AYjQIBAGwKSRxv7aBUECBCoKiB5VOU2GAECBNoQkDza2EerIECAQFUByaMqt8EIECDQhoDk0cY+WgUBAgSqCkgeVbkNRoAAgTYE9rWxDKtYsMA3Y+3XRRweceLWx/igESAwpIDkMaSuvocS+Fp0fHTEJyL+NuKqiG9EPCziyRF3jshkko/RCBAYQEDyGABVl8UFsrq4KWJ/xLsizo94Z0R+brW9Of7xGxEnRZwe8cyIUyNuiDguIhOKRoBAAQHJowCiLgYRWK0uMlm8I+I/D3GkL8bjXr8VecgjI54SoSoJBI1ACQHJo4SiPkoIdNXFt6Ozd0fsVl2sM9YlcVDGalVyTvw7q5IbI+4Q4eaRQNAIHKqA5HGoUh43hMAm1cW68zlYVZL9HrNu544jsBQByWMpOz2Nda5WF3ntIq9R7HTtouZsL4nBMlarkrPj3w+PUJUEgkZgJwHJYycVnyspkLfR5nfyeWdUnop6e8R/RZRseZfV1RGf2rDT7VXJI6K/vE6S10t+YKtvVckWhA/LFpA8lr3/Q6y+RnVxp5j46RFPj3hMRCaovC336xF5Yf2CiKxo8u6sTdoH4uCMF0ecFJFjqkoCQSMgeXgOlBDoqouPR2fnRfx9ROnq4pTo80kRT4s4OSKTVF7ozta9nyNvx/3liLMijo/4cETO56IIVUkgaARKCUgepSSX1c/Nsdz8Lj/vjBrq2sX26iLfBHhsxG0jsh114MP3/Znv5Thh67OZcH4q4vcjcr5DVSU/GH13VUme6nKtJBC0tgUkj7b3t+TqplJd9F1TViUZQ1Yl
V0T/f7kV8eEw10pSQWtaQPJoens3WtxO1UWe/snv4Eu1rrp4RnR4WsShVhfrjq8qWVfOcQS2CUge20AW/s+5VhfrbtsYVUneApx3cD01wh1c6+6c40YXkDxG34JRJ7BaXeTdSd37LuZcXawLWqsquTQmmPGSiO3XSm6Iz+XpNe92DwRt2gKSx7T3Z4jZddXFx6Lz8yPGujNqiLWV7HOnqiQvxP9LxHkRJe7g2n6tpKtK8n0ld4nIW43zJgGNwOQEJI/JbUnxCXXVxbei56wu/m7r4xKri3Vxd6pK/iA6uykik+9bItJ20/eVrFYld4v+uju4Hhl/V5UEgjYdAcljOntRcibbq4u8RTUrjZLtlOjsYO+7KDnelPrKqiRbvs/klyLOisj3lZSsSq6M/t6wFfHhOz8u5cz4mNdKVCUpoo0qIHmMyl9s8LGqi9vHCrrn0G7vuyi2yIl2lFVJJo5smVDzfSVDViW/Gf13VUnepfbIiBsjMpEdEaERqCLQ/cevMphBigqsVhd5Dj5Pn6guihKv1dluVcmHo7c3RVwU8am1ev7eQdurkvzR8t0dXKqS7zn524ACkseAuIW7Vl0UBq3Q3WpV8uAYb6iq5IPRd4aqpMKmGuKAgOQx7WdCJoyrIr4UkT/sT3URCDNueWovY/u1kqGrkrxW8sCIe0XcPeI2ERqBjQQkj434Bj047655f0Se1867ekq1O0VHeRdP9tu9q9u1i1K6h95P7aokZ5a3AL8q4t4RR0RoBNYWkDzWphv8wPypsedGlEgceSH3jIinRdwj4paITBjZunP0B/7lz7EEalQlF8bi3h7x2Yh8HmgE1haQPNamG/zAz8QI+Z98k5bnwX82ovuZUfZ7E816x+5WlXwrpvDGiOdvMJXs49oIyWMDRIc69znl58CmiSPXlr8Z7ysReYoiKxltngJ57et2EddE5LvSN22ZPDQCGwn4TnQjvskffM7WDO8VH7t3K+evbM3bfP0MpUCYaMtEf2NEXth+T0T3M8dy3zQCkxCQPCaxDYNP4n9ihL/YihzsMRF58TTvwjk+IisT1z4CYcT2tRg7q4tPR2SyeEfEv0doBCYpIHlMclsGn9R7Y4SMF0TcM+IJEWdHqEoCoVJbrS7eHWN21UUmEY3A5AUkj8lv0fdNMC+m/l5EXs+4aOtjfFi7fS6OVJWszdfrwO3VRd759JFePRz8wfnNwBMj8rTkKw/+cI8gsJ6A5LGe25hHHRmDvzQib+H9s4j/jbgw4m0Rl0Rs2rZXJavXSvLFz7WSQxfuqotM+KvXLkpXF3kaMn88SUaehszx8tZfySMQtGEEJI9hXGv02r1P49gY7EURz47Idy5/IOK8iFJVyWuir4xsp0V010pOjL/nBV3XSgJhpWViyBfu/47IU1HviChdXdwr+uze6Hlq/H2nGyBuic9rBAYTkDwGo63acV7wzhfzbPkCn28KHKIqeV/0m/HCiJMjVq+VLLUqWa0uVq9dXB8+Jdtjo7OsLM6MyOoiE/cxEdlOOPDBnwTqCUge9axrjpTVSLauKnlO/D0rlaxKzo/IquQLEZu0y+PgpVYlNauLs8PZjQybPFMdO4iA5DEI66Q6zaqk+860q0r+ND73fxEXRJS6VrK9KumuleRplfwuPE+p5XfLc2xZXdy0NfGhq4s8LXhGhOpiC9yHaQpIHtPclyFntVqVnBsDPSdiiKrktdFvRrZHR+SLYp52ydNrmUSmfq1ktbrIai2vXXw0omT7oeisS7IPjb/vdO2i5Hj6IlBMQPIoRjnLjvJFfLeq5ML4WlYl7y+wsoujj4y8sJ/XSroXzClVJV11sT/mt1pd5E83LtkeF51lEs1rF8dF5B4cE5Gt24sD//IngQkLSB4T3pwRprZaleQLfd7B1XJV0lUXn4x1vjmiVnWRp6TydlqNwGwFJI/Zbt3gEx+jKrlHrCqrknMi8iJxftdf8lqJ6iJANQIlBCSPEorL6GO3quTSWP75EXkH
1+c3pMjjX7cV2dWjIrprJXeMv2dC63utJKuLPOayiJxnVhf/EVGy/XB0lkkv74zKaxdfjVBdBILWroDk0e7eDrmy1ark0THQgyNeHfHliLxW8taIEtdKso+MvLB/qFXJanXxrjguT0e9M+LGiJItr108NeKMiDy1l/+XusTm2kVgaG0LSB5t72+t1a1WJS+MQZ8VkS+oH4w4L2LoquSYGCO/01ddBIJGoIaA5FFDeVljrFYledpptSrJiiQj77zatK1WJXnL61URpauLn4s+87RZVhd57eWICNVFIGgEJA/PgaEFsirIltXJCyJ+MSJfiC+NKFWVfDb6KtHuHZ101y4eEn937aKEqj6aFJA8mtzWyS6qVlXSB6CrLs6MgzLB5f8J1UUfQY9dpIDkschtn8yid6tKVq+VXF54tqqLwqC6W6aA5LHMfZ/iqlerkkfGBB8U8ScReQfXptdKfj76yDujnhShuggEjcCmApLHpoKOH0rgYFXJX8XAN+8yeB77qxFnR+S1i2sjvO8iEDQCpQQkj1KS+hlSYHtVckoMlj8X6xd2GfSC+PwjIvKXMmU74cAHf24J+NEongobC+R/So3A3AQyKeQPWNyt5de6xLHbY3yeAIENBCSPDfAcSoAAgaUKSB5L3XnrXrKA01ZL3v1Ca5c8CkHqhgABAksSkDyWtNvWSoAAgUICkkchSN0QIEBgSQKSx5J221oJECBQSEDyKASpGwIECCxJQPJY0m5bKwECBAoJSB6FIHVDgACBJQlIHkvabWslQIBAIQHJoxCkbqoLHFd9RAMSIPBdAcnjuxT+MjOBk2Y2X9Ml0JSA5NHUdloMAQIE6ghIHnWcjUKAAIGmBCSPprbTYggQIFBHQPKo42yUugJH1h3OaASWJyB5LG/Pl7Diuy5hkdZIYEwByWNMfWMPJbB/qI71S4DAAQHJwzOBAAECBHoLSB69yRxAgAABApKH5wABAgQI9BaQPHqTOYAAAQIEJA/PAQIECBDoLSB59CZzAAECBAhIHp4DBAgQINBbQPLoTeYAAgQIEJA8PAcILE/g8OUt2YpLC0gepUX1R4AAgQUISB4L2ORGl7jXd897fa1RDssiUFdA8qjrbbRyAh/fo6tP7PE1XyJAoICA5FEAURejCFyzx6hX7vE1XyJAoICA5FEAURcECBBYmoDksbQdt14CBAgUEJA8CiDqggABAksTkDyWtuPWS4AAgQICkkcBRF0QIEBgaQKSx9J2vJ313nePpfzMHl/zJQIECghIHgUQdTGKwJ32GPXEPb7mSwQIFBCQPAog6oIAAQJLE5A8lrbj1kuAAIECApJHAURdECBAYGkCksfSdtx6CRAgUEBA8iiAqAsCBAgsTUDyWNqOWy8BAgQKCEgeBRB1QYAAgaUJSB5L23HrJUCAQAEByaMAoi4IECCwNAHJY2k7br0ECBAoICB5FEDUBQECBJYmIHksbceXsd4blrFMqyQwnoDkMZ69kYcT+MpwXeuZAIEUkDw8DwgQIECgt4Dk0ZvMAQRmL3D47FdgAaMLSB6jb4EJECBAYH4Cksf89syMCRAgMLqA5DH6FpjAmgLXrXmcwwgQKCAgeRRA1MUoAl8cZVSDEiDwHQHJwxOBAAECBHoLSB69yRxAgAABApKH5wABAgQI9BaQPHqTOYAAAQIEJA/PAQIECBDoLSB59CZzAAECBAhIHp4DBAgQINBbQPLoTeYAAgQIEJA8PAcIECBAoLeA5NGbzAEECBAgIHl4DhBoU8CPXW9zXyezKsljMltxyBO5OR755xHfjvhqxP4IjUAK5HMhnxPfjHhlhEZgMAHJYzDawTrOF4gXRxwR8bSI10bkDwnM39v99QhtWQI3xXJvjLg8Ir+peHLEkRG/G6ERGExg32A963hTgawwDtbeGw/IeH7EPSNOj3hmxEMjvhZxXITTF4HQUMuKM/f2mIgPRrwp4qKIPj9l+BvxeI3ARgKSx0Z8gx78wJ69fy4e/5qt
yEMfE/HUiDMiTojIKvPoCG1+AllZZLsm4sKIt0Z8IGLddr91D3QcgU5A8ugkpvfxzjGlH4349JpT26kqOSf6yqrk+ghVyZqwFQ5brS4ujfG66uKKAmM/JPq4pUA/uli4gOQx3SdAVgn/FvHYiA9vOM3tVclp0V9WJWdGqEo2xC10eOnqYqdpPS4+mae4NAIbC0geGxMO1sHtoue88PmWiLxu8baIPF3xnohN2/uig4wXRHTXSlQlm6r2O361ushTUOdF5At7ieqim0meqnx8xFMi8vRl3oWVN1poBDYWkDw2Jhy0g0waJ22N8Nz4mBfDj4/4UET3YvPZ+PsmbbeqJF9sToxwrWQT3Vsf21UXV8en8xuBvH6Rp6VKtvtEZ3njxNkRD4r4akQ+Z/K5pBEoJiB5FKMcvKN8Ec9TTNlOjXhAxKsirosYqio5OfpevYPLtZIA6dF2qi7+IY6/skcfB3vo9uoiK9bbRhy9dWD3nDlYP75OoJeA5NGLa1IPzls1s90+Yqiq5PLoO99HkpEtr5XkKZC8VqIqCYQd2mp1kZVFVhiqix2gfGreApLHvPevm33tquSFMXBXleS1kodFLLUq6aqL/E5/9dqF6iJAtHYFJI8293a1KnleLLG7VvJP8fe8VpKnTja9VrK9Knl09JlVyZMjWq9KuuriqlhrV13kG/ZKtvtEZ0+IeEaEaxclZfVVREDyKMI46U7yQml33jsrhPtHvDKi9LWSi6PPjBdFdFVJXrTN6zNzr0pqVhd5C/UTI46KyLvt8mO2bg8P/MufBEYWkDxG3oARhp9CVZIJrZvHCASHNGT+rLCcp+rikLg8aGkCksfSdvzW6x2jKrlHTOH0iLxWklVJ9zO48rrNmG21urgkJpKn9y6K+FJEqZZrfHyE6qKUqH5GE5A8RqOf5MBdNZB3cO10rSRfTD+z4cw/H8e/biuyq+5aSd7BdceImlXJanWRb8Z8W8SHIko21y5KauprMgKSx2S2YnIT2akq+aOYZVYKb4/IC8Ul3u1+cfSTkddKuqqku1aSL+53iChVlXwr+srrL0dHXBKhuggEjcA6ApLHOmrLPKarSvLF/LkRedrp+IjuDq4hqpJHRf/dHVzrViWZgDL5XBlxQYTqIhA0ApsKSB6bCi7z+O1VSb7bfYiq5P3Rb8a5EatVycPj35+M2K19Kr5w74g8tqsurtrtwWt83rWLNdAc0paA5NHWfo61mjwNlK1mVZJj7daeE1+4drcvrvn5H4vj8hbaPKX2wIjrIo6LyESqEVicgOSxuC0ffME7VSX5vpK8VpKnjEpdK8n+dmslEsf26iITZP7MqO59F3nKTiOwWAHJY7FbX23hXVWSd3ANda2k1GJ2qi4kiVK6+mlKQPJoajsnv5i9qpK8gysvaJe4g+tQIVariyfFQVlVqC4OVc/jFi0geSx6+0df/GpV8msxm7yDK68j/HPEmyJK3MEV3dyqqS5uxeEfBNYTkDzWc3NUeYGsSrpTRPl71u8f0V0r2aQq6aqLs6K/7mdGqS4CQyOwiYDksYmeY4cU2KQqUV0MuTP6JhACkoenwRwE9qpK/joWcFnE7SJ+POLZEXnt4siI/Fy2rqI58C9/EiCwsYDksTGhDkYQWK1KfifGz9t28/TUsSPMxZAEFikgeSxy25tb9F5vGGxusRZEYAoC+d2aRoAAAQIEeglIHr24PJgAAQIEUkDy8DwgQIAAgd4CkkdvMgcQIECAgOThOUCAAAECvQUkj95kDiBAgAABycNzgAABAgR6C0gevckcQIAAAQKSh+cAAQIECPQWkDx6kzmAAAECBCQPzwECBAgQ6C0gefQmcwABAgQISB6eAwQIECDQW0Dy6E3mAAIECBCQPDwHCBAgQKC3gOTRm8wBBAgQICB5eA4QIECAQG8ByaM3mQMIECBAQPLwHCBAgACB3gKSR28yBxAgMCOB/THXDK2wwL7C/emOAAECUxI4fEqTaWkuKo+WdtNaCBAg
UElA8qgEbRgCBAi0JCB5tLSb1kKAAIFKApJHJWjDECBAoCUByaOl3bQWAgQIVBKQPCpBG4YAAQItCUgeLe2mtRAgQKCSgORRCdowBAgQaElA8mhpN62FAAEClQQkj0rQhiFAgEBLApJHS7tpLQQIEKgkIHlUgjYMAQIEWhKQPFraTWshQIBAJQHJoxK0YQgQINCSgOTR0m5aCwECBCoJSB6VoA1DgACBlgQkj5Z201oIECBQSUDyqARtGAIECLQkIHm0tJvWQoAAgUoCkkclaMMQIECgJQHJo6XdtBYCBAhUEthXaRzDECBAgEB5gY9Fl/eN2L9G11k8fGSN475ziOSxrpzjCBAgML7A82IKL1tzGplwvrjmsYdJHuvKOY4AAQLjC9wcU/jCGNNwzWMMdWMSIEBg5gKSx8w30PQJECAwhoDkMYa6MQkQIDBzAclj5hto+gQIEBhDQPIYQ92YBAgQmLmA5DHzDTR9AgQIjCEgeYyhbkwCBAjMXEDymPkGmj4BAgTGEJA8xlA3JgECBGYuIHnMfANNnwABAmMISB5jqBuTAAECMxeQPGa+gaZPgACBMQQkjzHUjUmAAIGZC0geM99A0ydAgMAYApLHGOrGJECAwMwFJI+Zb6DpEyBAYAwByWMMdWMSIEBg5gKSx8w30PQJECAwhoDkMYa6MQkQIDBzAclj5hto+gQIEBhDQPIYQ92YBAgsTaC519p9S9tB6yVAgEBlgY/EeJdF3HXNcd+75nGDHiZ5DMqrcwIECBx2dRic0ppDc6VUaxtkPQQIEJiigOQxxV0xJwIECExcQPKY+AaZHgECBKYoIHlMcVfMiQABAhMXkDwmvkGmR4AAgSkKSB5T3BVzIkCAwMQFJI+Jb5DpESBAYIoCkscUd8WcCBAgMHEByWPiG2R6BAgQmKKA5DHFXTEnAgQITFxA8pj4BpkeAQIEpiggeUxxV8yJAAECExeQPCa+QaZHgACBKQpIHlPcFXMiQIDAxAUkj4lvkOkRIEBgigKSxxR3xZwIECAwcQHJY+IbZHoECBCYooDkMcVdMScCBAhMXEDymPgGmR4BAgSmKCB5THFXzIkAAQITF5A8Jr5BpkeAAIEpCkgeU9wVcyJAgMDEBUokj30TX6PpESBAYGiBI4YeYGr9l3jh/61Y1Dcibpna4syHAAEClQSuj3HOrzSWYQgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIEBgNgL/D8pzsAXky5EoAAAAAElFTkSuQmCC diff --git a/config/manifests/description.md b/config/manifests/description.md new file mode 100644 index 000000000..3ea91913a --- /dev/null +++ b/config/manifests/description.md @@ -0,0 +1,29 @@ +This advanced Operator is capable of deploying any runtime component image with consistent, production-grade QoS. It enables enterprise architects to govern the way their applications get deployed & managed in the cluster, while dramatically reducing the learning curve for developers to deploy into Kubernetes - allowing them to focus on writing the code! 
+Here are some key features: +#### Application Lifecyle +You can deploy your runtime component container by either pointing to a container image, or an OpenShift ImageStream. When using an ImageStream the Operator will watch for any updates and will re-deploy the modified image. +#### Custom RBAC +This Operator is capable of using a custom ServiceAccount from the caller, allowing it to follow RBAC restrictions. By default it creates a ServiceAccount if one is not specified, which can also be bound with specific roles. +#### Environment Configuration +You can configure a variety of artifacts with your deployment, such as: labels, annotations, and environment variables from a ConfigMap, a Secret or a value. +#### Routing +Expose your application to external users via a single toggle to create a Route on OpenShift or an Ingress on other Kubernetes environments. Advanced configuration, such as TLS settings, are also easily enabled. Expiring Route certificates are re-issued. +#### High Availability via Horizontal Pod Autoscaling +Run multiple instances of your application for high availability. Either specify a static number of replicas or easily configure horizontal auto scaling to create (and delete) instances based on resource consumption. +#### Persistence and advanced storage +Enable persistence for your application by specifying simple requirements: just tell us the size of the storage and where you would like it to be mounted and We will create and manage that storage for you. +This toggles a StatefulSet resource instead of a Deployment resource, so your container can recover transactions and state upon a pod restart. +We offer an advanced mode where the user specifies a built-in PersistentVolumeClaim, allowing them to configure many details of the persistent volume, such as its storage class and access mode. +#### Service Binding +Your runtime components can expose services by a simple toggle. 
We take care of the heavy lifting such as creating kubernetes Secrets with information other services can use to bind. We also keep the bindable information synchronized, so your applications can dynamically reconnect to its required services without any intervention or interruption. +#### Exposing metrics to Prometheus +The Runtime Component Operator exposes the runtime container's metrics via the [Prometheus Operator](https://operatorhub.io/operator/prometheus). +Users can pick between a basic mode, where they simply specify the label that Prometheus is watching to scrape the metrics from the container, or they can specify the full `ServiceMonitor` spec embedded into the RuntimeComponent's `spec.monitoring` key controlling things like the poll internal and security credentials. +#### Easily mount logs and transaction directories +If you need to mount the logs and transaction data from your runtime component to an external volume such as NFS (or any storage supported in your cluster), simply add the following (customizing the folder location and size) to your RuntimeComponent CR: +``` storage: size: 2Gi mountPath: "/logs" ``` +#### Integration with OpenShift Serverless +Deploy your serverless runtime component using a single toggle. The Operator will convert all of its generated resources into [Knative](https://knative.dev) resources, allowing your pod to automatically scale to 0 when it is idle. +#### Integration with OpenShift's Topology UI +We set the corresponding labels to support OpenShift's Developer Topology UI, which allows you to visualize your entire set of deployments and how they are connected. +See our [**documentation**](https://github.com/application-stacks/runtime-component-operator/tree/main/doc/) for more information. 
diff --git a/ebcDockerBuilderRCO.jenkinsfile b/ebcDockerBuilderRCO.jenkinsfile new file mode 100644 index 000000000..fcb72d961 --- /dev/null +++ b/ebcDockerBuilderRCO.jenkinsfile @@ -0,0 +1,143 @@ +properties([ + parameters([ + // EBC relevant properties + string(name: 'executionId', defaultValue: UUID.randomUUID().toString(), description: 'Unique execution id'), + string(name: 'ebcPriority', defaultValue: '200', description: 'EBC Priority'), + string(name: 'ebcPlan', defaultValue: 'svl-dockerJenkins-ubuntu20_ppcle.yml', description: 'EBC plan to use when provisioning a Jenkins node'), + string(name: 'ebcBranch', defaultValue: "${env.ecosystem_branch}", description: 'Git branch used for ebc code'), + // Container build relevant properties + //name: 'scriptBranch', defaultValue: "main", description: 'Git branch containing docker build scripts'), + string(name: 'scriptOrg', defaultValue: "application-stacks", description: 'Git org containing docker build scripts'), + string(name: 'command', defaultValue: "make build-operator-pipeline REGISTRY=cp.stg.icr.io", description: 'Build command to execute on target arch machine, e.g. 
make build-pipeline-releases'), + string(name: 'PIPELINE_OPERATOR_IMAGE', defaultValue: "cp/runtime-component-operator", description: 'namespace to push image to in registry'), + string(name: 'RELEASE_TARGET', defaultValue: "main", description: 'release branch to use'), + string(name: 'OPM_VERSION', defaultValue: "4.10", description: 'Redhat CLI OPM version'), + string(name: 'PIPELINE_PRODUCTION_IMAGE', defaultValue: "icr.io/cpopen/runtime-component-operator", description: 'namespace in prod registry'), + string(name: 'REDHAT_BASE_IMAGE', defaultValue: "registry.redhat.io/openshift4/ose-operator-registry", description: 'base image for operator'), + string(name: 'REDHAT_REGISTRY', defaultValue: "registry.redhat.io", description: 'RH registry used for docker login'), + string(name: 'PIPELINE_REGISTRY', defaultValue: "cp.stg.icr.io", description: 'staging registry to push images to'), + string(name: 'ARTIFACTORY_REPO_URL', defaultValue: "hyc-taas-onepipeline-team-docker-local.artifactory.swg-devops.com", description: 'artifactory repo url [only used if disable artifactory is false]]'), + string(name: 'DISABLE_ARTIFACTORY', defaultValue: "false", description: 'whether to back up container images to artifactorys') + ]) +]) +timestamps { + // Identify if the job was kicked off by the seed job. + def causes = currentBuild.getBuildCauses() + for(cause in causes) { + if ("seed".equalsIgnoreCase(cause.upstreamProject)) { + // As the seed job kicked off this build, bail early returning success. + // This allows the jenkinsfile's properties to be populated. + currentBuild.result = 'SUCCESS' + println "Returning success as upstream job is the seed job; this is therefore a dummy run to populate job parameters." 
+ return + } + } + + def ebcPriority = "${params.ebcPriority}" + def executionId = "${params.executionId}" + def ebcPlan = "${params.ebcPlan}" + + try { + node (label: 'built-in') { + ws("workspace/${env.JOB_NAME}-${env.BUILD_NUMBER}") { + stage ("EBC Demand"){ + //This is executing on Jenkins Server + ebcDemand() + gitCloneAndStash(); + } + } + } + + node(label: "ebc_${executionId}"){ + stage("Running Job"){ + withCredentials([usernamePassword(credentialsId: 'operator_icrId', usernameVariable: 'PIPELINE_USERNAME', passwordVariable: 'PIPELINE_PASSWORD'), + usernamePassword(credentialsId: 'operatorRH_REG_ID', usernameVariable: 'REDHAT_USERNAME', passwordVariable: 'REDHAT_PASSWORD'), + usernamePassword(credentialsId: 'operator_artifactory_ID', usernameVariable: 'ARTIFACTORY_USERNAME', passwordVariable: 'ARTIFACTORY_TOKEN'), + usernamePassword(credentialsId: 'dockerId', usernameVariable: 'DOCKER_USERNAME', passwordVariable: 'DOCKER_PASSWORD')]) { + //This is executing on ebc dynamic machine + doWork(); + } // withCredentials() end + } + } + } finally { + node (label: 'built-in') { + ws("workspace/${env.JOB_NAME}-${env.BUILD_NUMBER}") { + stage ("EBC Cleanup"){ + //This is executing on Jenkins Server + ebcCleanup(); + // Clean up the workspace + cleanWs(cleanWhenAborted: true, + cleanWhenFailure: true, + cleanWhenNotBuilt: false, + cleanWhenSuccess: true, + cleanWhenUnstable: true, + deleteDirs: true, + disableDeferredWipeout: false, + notFailBuild: true) + } + } + } + } +} +// Functions Only Below here + +// Clone the git repo and stash it, so that the jenkins agent machine can grab it later +def gitCloneAndStash() { + dir('runtime-component-operator') { + git branch: RELEASE_TARGET, url: "git@github.com:${scriptOrg}/runtime-component-operator.git" + } + dir('operators') { + git branch: "main", url: "git@github.ibm.com:websphere/operators.git" + } + sh "cp -rf operators/scripts/build runtime-component-operator/scripts/" + dir('runtime-component-operator') { + 
stash(name: 'runtime-component-operator') + } + sh "ls -l" +} + +// Job Specific Functions +def void doWork(){ + // Setup global variables + + // Unstash the git repo + unstash(name: 'runtime-component-operator') + sh "./scripts/build/build-initialize.sh" + sh "${command}" +} + +// EBC Functions +def void ebcDemand(){ + buildName executionId + //cleanWs() + git branch: ebcBranch, url:'git@github.ibm.com:elastic-build-cloud/ebc-gateway-http' + withCredentials([usernamePassword(credentialsId: 'intranetId', usernameVariable: 'intranetId_USR', passwordVariable: 'intranetId_PSW')]) { + withEnv([ + "demandId=${executionId}", + "ebcEnvironment=${ebcBranch}", + "ebc_plan=${ebcPlan}", + "ebc_priority=${ebcPriority}", + "ebc_autoCompleteAfterXHours=24", + "ebc_reasonForEnvironment=${env.BUILD_URL}", + "ebc_jenkins_agent_label=ebc_${executionId}", + "ebc_jenkins_server_instance_name=${env.jenkins_server_instance_name}", + "ebc_jenkins_service_name=${env.jenkins_service_name}" + ]){ + sh "./ebc_demand.sh" + } + } + stash(name: 'ebc-gateway-http') +} + +def void ebcCleanup(){ + //cleanWs() + unstash(name: 'ebc-gateway-http') + withCredentials([usernamePassword(credentialsId: 'intranetId', usernameVariable: 'intranetId_USR', passwordVariable: 'intranetId_PSW')]) { + withEnv([ + "demandId=${executionId}", + "ebcEnvironment=${ebcBranch}" + ]){ + sh "./ebc_complete.sh" + } + } +} \ No newline at end of file diff --git a/index.Dockerfile b/index.Dockerfile new file mode 100644 index 000000000..b8fa4ef44 --- /dev/null +++ b/index.Dockerfile @@ -0,0 +1,43 @@ +FROM registry.redhat.io/openshift4/ose-operator-registry:v4.10 AS builder + +FROM registry.redhat.io/ubi8/ubi-minimal + +ARG VERSION_LABEL=1.0.0 +ARG RELEASE_LABEL=XX +ARG VCS_REF=0123456789012345678901234567890123456789 +ARG VCS_URL="https://github.com/application-stacks/runtime-component-operator" +ARG NAME="runtime-component-operator-catalog" +ARG SUMMARY="Runtime Component Operator Catalog" +ARG DESCRIPTION="This image 
contains the catalog for Runtime Component Operator." + +ARG USER_ID=1001 + +LABEL name=$NAME \ + vendor=IBM \ + version=$VERSION_LABEL \ + release=$RELEASE_LABEL \ + description=$DESCRIPTION \ + summary=$SUMMARY \ + io.k8s.display-name=$SUMMARY \ + io.k8s.description=$DESCRIPTION \ + vcs-type=git \ + vcs-ref=$VCS_REF \ + vcs-url=$VCS_URL \ + url=$VCS_URL + +# Copy Apache license +COPY LICENSE /licenses + +COPY --chown=1001:0 bundles.db /database/index.db +LABEL operators.operatorframework.io.index.database.v1=/database/index.db + +COPY --from=builder --chown=1001:0 /bin/registry-server /registry-server +COPY --from=builder --chown=1001:0 /bin/grpc_health_probe /bin/grpc_health_probe + +EXPOSE 50051 + +USER ${USER_ID} + +WORKDIR /tmp +ENTRYPOINT ["/registry-server"] +CMD ["--database", "/database/index.db"] diff --git a/scripts/acceptance-test.sh b/scripts/acceptance-test.sh index d270d3463..c33e053a0 100755 --- a/scripts/acceptance-test.sh +++ b/scripts/acceptance-test.sh @@ -11,7 +11,7 @@ docker build -t e2e-runner:latest -f Dockerfile.e2e --build-arg GO_VERSION="${GO } declare -A E2E_TESTS=( - [ocp-e2e-run]=$(cat <<-EOF + [ocp-e2e-run-${ARCHITECTURE}]=$(cat <<-EOF --volume /var/run/docker.sock:/var/run/docker.sock \ --env PIPELINE_USERNAME=${PIPELINE_USERNAME} \ --env PIPELINE_PASSWORD=${PIPELINE_PASSWORD} \ @@ -24,15 +24,17 @@ declare -A E2E_TESTS=( --env CLUSTER_TOKEN=${CLUSTER_TOKEN} \ --env TRAVIS_BUILD_NUMBER=${BUILD_NUMBER} \ --env RELEASE_TARGET=${RELEASE_TARGET} \ - --env CATALOG_IMAGE=${PIPELINE_REGISTRY}/${PIPELINE_OPERATOR_IMAGE}:catalog-${RELEASE_TARGET} \ + --env CATALOG_IMAGE=${PIPELINE_REGISTRY}/${PIPELINE_OPERATOR_IMAGE}-catalog:${RELEASE_TARGET} \ --env DEBUG_FAILURE=${DEBUG_FAILURE} \ + --env INSTALL_MODE=${INSTALL_MODE} \ + --env ARCHITECTURE=${ARCHITECTURE} \ e2e-runner:latest \ make test-pipeline-e2e EOF ) ) -if [[ "${SKIP_KIND_E2E_TEST}" != true ]]; then +if [[ "${SKIP_KIND_E2E_TEST}" != true && "${ARCHITECTURE}" == "X" ]]; then 
E2E_TESTS[kind-e2e-run]=$(cat <<- EOF --volume /var/run/docker.sock:/var/run/docker.sock \ --env FYRE_USER=${FYRE_USER} \ @@ -47,7 +49,7 @@ if [[ "${SKIP_KIND_E2E_TEST}" != true ]]; then EOF ) else - echo "SKIP_KIND_E2E was set. Skipping kind e2e..." + echo "SKIP_KIND_E2E was set or architecture is not X. Skipping kind e2e..." fi echo "****** Starting e2e tests" diff --git a/scripts/build-manifest.sh b/scripts/build-manifest.sh deleted file mode 100755 index a1660092e..000000000 --- a/scripts/build-manifest.sh +++ /dev/null @@ -1,127 +0,0 @@ -#!/bin/bash - -######################################################################################### -# -# -# Build manifest list for all releases of operator repository/image -# Note: Assumed to run under /scripts -# -# -######################################################################################### - -set -Eeo pipefail - -readonly usage="Usage: $0 -u -p --image [registry/]repository/image" -readonly script_dir="$(dirname "$0")" -readonly release_blocklist="${script_dir}/release-blocklist.txt" - -main() { - parse_args "$@" - - if [[ -z "${TARGET}" ]]; then - echo "****** Missing target release for operator manifest lists, see usage" - echo "${usage}" - exit 1 - fi - - if [[ -z "${DOCKER_USERNAME}" || -z "${DOCKER_PASSWORD}" ]]; then - echo "****** Missing docker authentication information, see usage" - echo "${usage}" - exit 1 - fi - - if [[ -z "${IMAGE}" ]]; then - echo "****** Missing target image for operator manifest lists, see usage" - echo "${usage}" - exit 1 - fi - - if [[ -z "${REGISTRY}" ]]; then - echo "${DOCKER_PASSWORD}" | docker login -u "${DOCKER_USERNAME}" --password-stdin - else - echo "${DOCKER_PASSWORD}" | docker login "${REGISTRY}" -u "${DOCKER_USERNAME}" --password-stdin - fi - - # Build manifest for target release(s) - if [[ "${TARGET}" != "releases" ]]; then - # Remove 'v' prefix from any releases matching version regex `\d+\.\d+\.\d+.*` - if [[ "${TARGET}" =~ ^v[0-9]+\.[0-9]+\.[0-9]+ ]]; 
then - readonly release_tag="${TARGET#*v}" - else - readonly release_tag="${TARGET}" - fi - build_manifest "${release_tag}" - else - build_manifests - fi -} - -build_manifest() { - local tag="$1" - echo "****** Building manifest for: ${tag}" - - ## try to build manifest but allow failure - ## this allows new release builds - local target="${IMAGE}:${tag}" - - manifest-tool push from-args \ - --platforms "linux/amd64" \ - --template "${target}-ARCH" \ - --target "${target}" \ - || echo "*** WARN: Target architectures not available" -} - -# Build manifest for previous releases -build_manifests() { - git fetch --tags - tags="$(git tag -l)" - while read -r tag; do - if [[ -z "${tag}" ]]; then - break - fi - - # Skip any releases listed in the release blocklist - if grep -q "^${tag}$" "${release_blocklist}"; then - echo "Release ${tag} found in blocklist. Skipping..." - continue - fi - - local release_tag="${tag#*v}" - build_manifest "${release_tag}" - done <<< "${tags}" -} - -parse_args() { - while [ $# -gt 0 ]; do - case "$1" in - -u) - shift - readonly DOCKER_USERNAME="${1}" - ;; - -p) - shift - readonly DOCKER_PASSWORD="${1}" - ;; - --registry) - shift - readonly REGISTRY="${1}" - ;; - --image) - shift - readonly IMAGE="${1}" - ;; - --target) - shift - readonly TARGET="${1}" - ;; - *) - echo "Error: Invalid argument - $1" - echo "$usage" - exit 1 - ;; - esac - shift - done -} - -main "$@" diff --git a/scripts/build-release.sh b/scripts/build-release.sh deleted file mode 100755 index 7b68f7da0..000000000 --- a/scripts/build-release.sh +++ /dev/null @@ -1,131 +0,0 @@ -#!/bin/bash - -######################################################################################### -# -# -# Script to build the multi arch images for operator -# To skip pushing the image to the container registry, provide the `--skip-push` flag. 
-# Note: Assumed to run under /scripts -# -# -######################################################################################### - -set -Eeo pipefail - -readonly usage="Usage: build-release.sh -u -p --image repository/image --release [--skip-push]" - -main() { - parse_args "$@" - - if [[ -z "${IMAGE}" ]]; then - echo "****** Missing target image for operator build, see usage" - echo "${usage}" - exit 1 - fi - - if [[ -z "${RELEASE}" ]]; then - echo "****** Missing release for operator build, see usage" - echo "${usage}" - exit 1 - fi - - if [[ -z "${DOCKER_USERNAME}" || -z "${DOCKER_PASSWORD}" ]]; then - echo "****** Missing docker authentication information, see usage" - echo "${usage}" - exit 1 - fi - - ## Define current arch variable - case "$(uname -p)" in - "ppc64le") - readonly arch="ppc64le" - ;; - "s390x") - readonly arch="s390x" - ;; - *) - readonly arch="amd64" - ;; - esac - - # Remove 'v' prefix from any releases matching version regex `\d+\.\d+\.\d+.*` - if [[ "${RELEASE}" =~ ^v[0-9]+\.[0-9]+\.[0-9]+ ]]; then - readonly release_tag="${RELEASE#*v}" - else - readonly release_tag="${RELEASE}" - fi - - readonly full_image="${IMAGE}:${release_tag}-${arch}" - - ## login to docker - if [[ -z "${REGISTRY}" ]]; then - echo "${DOCKER_PASSWORD}" | docker login -u "${DOCKER_USERNAME}" --password-stdin - else - echo "${DOCKER_PASSWORD}" | docker login "${REGISTRY}" -u "${DOCKER_USERNAME}" --password-stdin - fi - - ## build or push latest main branch - echo "****** Building release: ${RELEASE}" - build_release "${RELEASE}" - - if [[ "${SKIP_PUSH}" != true ]]; then - echo "****** Pushing release: ${RELEASE}" - push_release - else - echo "****** Skipping push for release ${RELEASE}" - fi -} - -build_release() { - echo "*** Building ${full_image} for ${arch}" - - if [[ "${RELEASE}" != "daily" ]]; then - git checkout -q "${RELEASE}" - fi - - docker build -t "${full_image}" --build-arg GO_ARCH=${arch} . - return $? 
-} - -push_release() { - echo "****** Pushing image: ${full_image}" - docker push "${full_image}" -} - -parse_args() { - while [ $# -gt 0 ]; do - case "$1" in - -u) - shift - readonly DOCKER_USERNAME="${1}" - ;; - -p) - shift - readonly DOCKER_PASSWORD="${1}" - ;; - --registry) - shift - readonly REGISTRY="${1}" - ;; - --image) - shift - readonly IMAGE="${1}" - ;; - --skip-push) - readonly SKIP_PUSH=true - ;; - --release) - shift - readonly RELEASE="${1}" - ;; - *) - echo "Error: Invalid argument - $1" - echo "$usage" - exit 1 - ;; - esac - shift - done -} - -main "$@" diff --git a/scripts/build-releases.sh b/scripts/build-releases.sh deleted file mode 100755 index 5c7ebcd2a..000000000 --- a/scripts/build-releases.sh +++ /dev/null @@ -1,108 +0,0 @@ -#!/bin/bash - -######################################################################################### -# -# -# Script to build images for all releases and daily. -# Note: Assumed to run under /scripts -# -# -######################################################################################### - -set -Eeo pipefail - -readonly usage="Usage: $0 -u -p --image [registry/]/ --target " -readonly script_dir="$(dirname "$0")" -readonly release_blocklist="${script_dir}/release-blocklist.txt" - -main() { - parse_args "$@" - - if [[ -z "${TARGET}" ]]; then - echo "****** Missing target release for operator build, see usage" - echo "${usage}" - exit 1 - fi - - if [[ -z "${IMAGE}" ]]; then - echo "****** Missing target image for operator build, see usage" - echo "${usage}" - exit 1 - fi - - if [[ -z "${DOCKER_USERNAME}" || -z "${DOCKER_PASSWORD}" ]]; then - echo "****** Missing docker authentication information, see usage" - echo "${usage}" - exit 1 - fi - - if [[ -z "${REGISTRY}" ]]; then - echo "${DOCKER_PASSWORD}" | docker login -u "${DOCKER_USERNAME}" --password-stdin - else - echo "${DOCKER_PASSWORD}" | docker login "${REGISTRY}" -u "${DOCKER_USERNAME}" --password-stdin - fi - - # Build target release(s) - if [[ 
"${TARGET}" != "releases" ]]; then - if [[ -z "${REGISTRY}" ]]; then - "${script_dir}/build-release.sh" -u "${DOCKER_USERNAME}" -p "${DOCKER_PASSWORD}" --release "${TARGET}" --image "${IMAGE}" - else - "${script_dir}/build-release.sh" -u "${DOCKER_USERNAME}" -p "${DOCKER_PASSWORD}" --release "${TARGET}" --image "${IMAGE}" --registry "${REGISTRY}" - fi - else - build_releases - fi -} - -build_releases() { - git fetch --tags - tags="$(git tag -l)" - while read -r tag; do - if [[ -z "${tag}" ]]; then - break - fi - - # Skip any releases listed in the release blocklist - if grep -q "^${tag}$" "${release_blocklist}"; then - echo "Release ${tag} found in blocklist. Skipping..." - continue - fi - - "${script_dir}/build-release.sh" -u "${DOCKER_USERNAME}" -p "${DOCKER_PASSWORD}" --release "${tag}" --image "${IMAGE}" - done <<< "${tags}" -} - -parse_args() { - while [ $# -gt 0 ]; do - case "$1" in - -u) - shift - readonly DOCKER_USERNAME="${1}" - ;; - -p) - shift - readonly DOCKER_PASSWORD="${1}" - ;; - --registry) - shift - readonly REGISTRY="${1}" - ;; - --image) - shift - readonly IMAGE="${1}" - ;; - --target) - shift - readonly TARGET="${1}" - ;; - *) - echo "Error: Invalid argument - $1" - echo "$usage" - exit 1 - ;; - esac - shift - done -} - -main "$@" diff --git a/scripts/bundle-release.sh b/scripts/bundle-release.sh deleted file mode 100755 index 40432a5cc..000000000 --- a/scripts/bundle-release.sh +++ /dev/null @@ -1,139 +0,0 @@ -#!/bin/bash - -######################################################################################### -# -# -# Script to bundle the multi arch images for operator -# To skip pushing the image to the container registry, provide the `--skip-push` flag. 
-# Note: Assumed to run under /scripts -# -# -######################################################################################### - -set -Eeo pipefail - -readonly usage="Usage: bundle-release.sh -u -p --image repository/image --prod-image prod-repository/image --release [--skip-push]" - -main() { - parse_args "$@" - - if [[ -z "${IMAGE}" ]]; then - echo "****** Missing target image for operator build, see usage" - echo "${usage}" - exit 1 - fi - - if [[ -z "${PROD_IMAGE}" ]]; then - echo "****** Missing production image reference for bundle, see usage" - echo "${usage}" - exit 1 - fi - - if [[ -z "${RELEASE}" ]]; then - echo "****** Missing release for operator build, see usage" - echo "${usage}" - exit 1 - fi - - if [[ -z "${DOCKER_USERNAME}" || -z "${DOCKER_PASSWORD}" ]]; then - echo "****** Missing docker authentication information, see usage" - echo "${usage}" - exit 1 - fi - - ## Define current arch variable - case "$(uname -p)" in - "ppc64le") - readonly arch="ppc64le" - ;; - "s390x") - readonly arch="s390x" - ;; - *) - readonly arch="amd64" - ;; - esac - - # Remove 'v' prefix from any releases matching version regex `\d+\.\d+\.\d+.*` - if [[ "${RELEASE}" =~ ^v[0-9]+\.[0-9]+\.[0-9]+ ]]; then - readonly release_tag="${RELEASE#*v}" - else - readonly release_tag="${RELEASE}" - fi - - readonly digest="$(skopeo inspect docker://$IMAGE:${release_tag}-${arch} | grep Digest | grep -o 'sha[^\"]*')" - readonly full_image="${PROD_IMAGE}@${digest}" - readonly bundle_image="${IMAGE}-bundle:${release_tag}" - - ## login to docker - if [[ -z "${REGISTRY}" ]]; then - echo "${DOCKER_PASSWORD}" | docker login -u "${DOCKER_USERNAME}" --password-stdin - else - echo "${DOCKER_PASSWORD}" | docker login "${REGISTRY}" -u "${DOCKER_USERNAME}" --password-stdin - fi - - echo "****** Bundling release: ${RELEASE}" - bundle_release "${RELEASE}" - - if [[ "${SKIP_PUSH}" != true ]]; then - echo "****** Pushing bundle: ${RELEASE}" - push_bundle - else - echo "****** Skipping push for 
bundle ${RELEASE}" - fi -} - -bundle_release() { - echo "*** Bundling ${full_image} for ${arch}. Bundle location will be ${bundle_image}." - - make bundle bundle-build IMG="${full_image}" BUNDLE_IMG="${bundle_image}" - - return $? -} - -push_bundle() { - echo "****** Pushing bundle: ${bundle_image}" - make bundle-push IMG="${full_image}" BUNDLE_IMG="${bundle_image}" -} - -parse_args() { - while [ $# -gt 0 ]; do - case "$1" in - -u) - shift - readonly DOCKER_USERNAME="${1}" - ;; - -p) - shift - readonly DOCKER_PASSWORD="${1}" - ;; - --registry) - shift - readonly REGISTRY="${1}" - ;; - --prod-image) - shift - readonly PROD_IMAGE="${1}" - ;; - --image) - shift - readonly IMAGE="${1}" - ;; - --skip-push) - readonly SKIP_PUSH=true - ;; - --release) - shift - readonly RELEASE="${1}" - ;; - *) - echo "Error: Invalid argument - $1" - echo "$usage" - exit 1 - ;; - esac - shift - done -} - -main "$@" \ No newline at end of file diff --git a/scripts/bundle-releases.sh b/scripts/bundle-releases.sh deleted file mode 100755 index e9c3cef00..000000000 --- a/scripts/bundle-releases.sh +++ /dev/null @@ -1,131 +0,0 @@ -#!/bin/bash - -######################################################################################### -# -# -# Script to build images for all releases and daily. 
-# Note: Assumed to run under /scripts -# -# -######################################################################################### - -set -Eeo pipefail - -readonly usage="Usage: $0 -u -p --image [registry/]/ --target " -readonly script_dir="$(dirname "$0")" -readonly release_blocklist="${script_dir}/release-blocklist.txt" - -main() { - parse_args "$@" - - if [[ -z "${TARGET}" ]]; then - echo "****** Missing target release for bundle build, see usage" - echo "${usage}" - exit 1 - fi - - if [[ -z "${IMAGE}" ]]; then - echo "****** Missing target image for bundle build, see usage" - echo "${usage}" - exit 1 - fi - - if [[ -z "${DOCKER_USERNAME}" || -z "${DOCKER_PASSWORD}" ]]; then - echo "****** Missing docker authentication information, see usage" - echo "${usage}" - exit 1 - fi - - if [[ -z "${REGISTRY}" ]]; then - echo "${DOCKER_PASSWORD}" | docker login -u "${DOCKER_USERNAME}" --password-stdin - else - echo "${DOCKER_PASSWORD}" | docker login "${REGISTRY}" -u "${DOCKER_USERNAME}" --password-stdin - fi - - # Bundle target release(s) - if [[ "${TARGET}" != "releases" ]]; then - bundle_release "${TARGET}" - else - bundle_releases - fi -} - -bundle_release() { - local tag="${1}" - # Remove 'v' prefix from any releases matching version regex `\d+\.\d+\.\d+.*` - if [[ "${tag}" =~ ^v[0-9]+\.[0-9]+\.[0-9]+ ]]; then - local release_tag="${tag#*v}" - else - local release_tag="${tag}" - fi - local operator_ref="${IMAGE}:${release_tag}" - - # Switch to release tag - if [[ "${tag}" != "daily" ]]; then - git checkout -q "${tag}" - fi - - # Build the bundle - local bundle_ref="${IMAGE}:bundle-${release_tag}" - make kustomize bundle bundle-build bundle-push IMG="${operator_ref}" BUNDLE_IMG="${bundle_ref}" - - # Build the catalog - local catalog_ref="${IMAGE}:catalog-${release_tag}" - if [[ -z "${REGISTRY}" ]]; then - make build-catalog push-catalog IMG="${operator_ref}" BUNDLE_IMG="${bundle_ref}" CATALOG_IMG="${catalog_ref}" - else - make build-catalog push-pipeline-catalog 
IMG="${operator_ref}" BUNDLE_IMG="${bundle_ref}" CATALOG_IMG="${catalog_ref}" - fi -} - -bundle_releases() { - tags="$(git tag -l)" - while read -r tag; do - if [[ -z "${tag}" ]]; then - break - fi - - # Skip any releases listed in the release blocklist - if grep -q "^${tag}$" "${release_blocklist}"; then - echo "Release ${tag} found in blocklist. Skipping..." - continue - fi - - bundle_release "${tag}" - done <<< "${tags}" -} - -parse_args() { - while [ $# -gt 0 ]; do - case "$1" in - -u) - shift - readonly DOCKER_USERNAME="${1}" - ;; - -p) - shift - readonly DOCKER_PASSWORD="${1}" - ;; - --registry) - shift - readonly REGISTRY="${1}" - ;; - --image) - shift - readonly IMAGE="${1}" - ;; - --target) - shift - readonly TARGET="${1}" - ;; - *) - echo "Error: Invalid argument - $1" - echo "$usage" - exit 1 - ;; - esac - shift - done -} - -main "$@" diff --git a/scripts/catalog-build.sh b/scripts/catalog-build.sh deleted file mode 100755 index b5366e823..000000000 --- a/scripts/catalog-build.sh +++ /dev/null @@ -1,206 +0,0 @@ -#!/bin/bash - -OPM_TOOL="opm" -CONTAINER_TOOL="docker" - -## Variables used for determing which older versions to include in catalog -declare -a arrVersions -declare -a arrExcludeVersions -excludeVersionsFile=catalogVersionExclude.json - -function main() { - parse_arguments "$@" - determineAndPullOlderBundles - build_catalog -} - -usage() { - script_name=`basename ${0}` - echo "Usage: ${script_name} [OPTIONS]" - echo " -n, --opm-version [REQUIRED] Version of opm (e.g. v4.5)" - echo " -b, --base-image [REQUIRED] The base image that the index will be built upon (e.g. 
registry.redhat.io/openshift4/ose-operator-registry)" - echo " -t, --output [REQUIRED] The location where the database should be output" - echo " -i, --image-name [REQUIRED] The bundle image name" - echo " -p, --prod-image [REQUIRED] The name of the production image the bundle should point to" - echo " -a, --catalog-image-name [REQUIRED] the catalog image name" - echo " -c, --container-tool Tool to build image [docker, podman] (default 'docker')" - echo " -o, --opm-tool Name of the opm tool (default 'opm')" - echo " -h, --help Display this help and exit" - echo " -v, --current-version Identifies the current version of this operator" - exit 0 -} - -function parse_arguments() { - if [[ "$#" == 0 ]]; then - usage - exit 1 - fi - - # process options - while [[ "$1" != "" ]]; do - case "$1" in - -c | --container-tool) - shift - CONTAINER_TOOL=$1 - ;; - -o | --opm-tool) - shift - OPM_TOOL=$1 - ;; - -n | --opm-version) - shift - OPM_VERSION=$1 - ;; - -b | --base-image) - shift - BASE_INDEX_IMG=$1 - ;; - -d | --directory) - shift - BASE_MANIFESTS_DIR=$1 - ;; - -i | --image-name) - shift - BUNDLE_IMAGE=$1 - ;; - -p | --prod-image) - shift - PROD_IMAGE=$1 - ;; - -a | --catalog-image-name) - shift - CATALOG_IMAGE=$1 - ;; - -h | --help) - usage - exit 1 - ;; - -t | --output) - shift - TMP_DIR=$1 - ;; - -v | --current-version) - shift - CURRENT_VERSION=$1 - ;; - esac - shift - done -} - -function create_empty_db() { - mkdir -p "${TMP_DIR}/manifests" - echo "------------ creating an empty bundles.db ---------------" - ${CONTAINER_TOOL} run --rm -v "${TMP_DIR}":/tmp --entrypoint "/bin/initializer" "${BASE_INDEX_IMG}:${OPM_VERSION}" -m /tmp/manifests -o /tmp/bundles.db -} - -function add_historical_versions_to_db(){ - for imageTag in "${arrVersions[@]}" - do - local digest="$(docker pull $PROD_IMAGE:$imageTag | grep Digest | grep -o 'sha[^\"]*')" - local img_digest="${PROD_IMAGE}@${digest}" - echo "------------ adding bundle image ${img_digest} to ${TMP_DIR}/bundles.db 
------------" - "${OPM_TOOL}" registry add -b "${img_digest}" -d "${TMP_DIR}/bundles.db" -c "${CONTAINER_TOOL}" --permissive - done -} - -function add_current_version_to_db(){ - local stg_img=$1 - local prod_img=$2 - local digest="$(docker pull $stg_img | grep Digest | grep -o 'sha[^\"]*')" - local img_digest="${prod_img}@${digest}" - echo "------------ adding bundle image ${img_digest} to ${TMP_DIR}/bundles.db ------------" - "${OPM_TOOL}" registry add -b "${img_digest}" -d "${TMP_DIR}/bundles.db" -c "${CONTAINER_TOOL}" --permissive -} - -function createExcludeVersionsArray() { - excludeCount=$(jq '.ExcludeTags | length' $1) - for (( excludeIdx=0; excludeIdx $DESCRIPTION_FILE + cat "$BASE_DIR/../config/manifests/description.md" | sed 's/^/ /' >> $DESCRIPTION_FILE + sed -i.bak '/^ displayName: Runtime Component/r /tmp/description.md' $FILE + rm -f "${FILE}.bak" + rm -f $DESCRIPTION_FILE + } + +if [ "$1" == "update_csv" ]; then + update_csv +else + echo "Usage: $0 update_csv" + exit 1 +fi diff --git a/scripts/e2e-kind.sh b/scripts/e2e-kind.sh index 369176bb3..f602adf77 100755 --- a/scripts/e2e-kind.sh +++ b/scripts/e2e-kind.sh @@ -1,6 +1,6 @@ #!/bin/bash -readonly usage="Usage: e2e-minikube.sh --test-tag " +readonly usage="Usage: scripts/e2e-kind.sh --test-tag -u -k -p -pgid " readonly KUBE_CLUSTER_NAME="kind-e2e-cluster" readonly BUILD_IMAGE="runtime-component-operator:latest" @@ -35,7 +35,7 @@ main() { exit 0 fi - echo "****** Starting minikube scorecard tests..." + echo "****** Starting kind scorecard tests..." operator-sdk scorecard --verbose --kubeconfig ${HOME}/.kube/config --selector=suite=kuttlsuite --namespace "${TEST_NAMESPACE}" --service-account scorecard-kuttl --wait-time 45m ./bundle || { echo "****** Scorecard tests failed..." exit 1 @@ -57,8 +57,8 @@ setup_env() { sudo apt-get install -y docker-ce docker-ce-cli containerd.io sshpass jq if ! command -v kubectl &> /dev/null; then - echo "****** Installing kubectl v1.19.4..." 
- curl -Lo /usr/local/bin/kubectl https://storage.googleapis.com/kubernetes-release/release/v1.19.4/bin/linux/amd64/kubectl && chmod +x /usr/local/bin/kubectl + echo "****** Installing kubectl v1.24.2..." + curl -Lo /usr/local/bin/kubectl https://storage.googleapis.com/kubernetes-release/release/v1.24.2/bin/linux/amd64/kubectl && chmod +x /usr/local/bin/kubectl fi # Create a remote Kind cluster @@ -97,13 +97,12 @@ build_push() { } } -# install_rco: Kustomize and install Runtime Component Operator +# install_rco: Kustomize and install RuntimeComponent-Operator install_rco() { echo "****** Installing RCO in namespace: ${TEST_NAMESPACE}" kubectl apply -f bundle/manifests/rc.app.stacks_runtimecomponents.yaml kubectl apply -f bundle/manifests/rc.app.stacks_runtimeoperations.yaml - sed -i "s|image: .*|image: ${LOCAL_REGISTRY}/${BUILD_IMAGE}| s|RUNTIME_COMPONENT_WATCH_NAMESPACE|${TEST_NAMESPACE}|" internal/deploy/kubectl/runtime-component-operator.yaml @@ -115,14 +114,14 @@ install_rco() { install_tools() { echo "****** Installing Prometheus" - kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/main/bundle.yaml + kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/main/bundle.yaml echo "****** Installing Knative" - kubectl apply -f https://github.com/knative/serving/releases/download/knative-v1.3.0/serving-crds.yaml - kubectl apply -f https://github.com/knative/eventing/releases/download/knative-v1.3.0/eventing-crds.yaml + kubectl apply -f https://github.com/knative/serving/releases/download/knative-v1.7.4/serving-crds.yaml + kubectl apply -f https://github.com/knative/eventing/releases/download/knative-v1.7.4/eventing-crds.yaml echo "****** Installing Cert Manager" - kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.8.0/cert-manager.yaml + kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.9.2/cert-manager.yaml echo 
"****** Enabling Ingress" kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/main/deploy/static/provider/kind/deploy.yaml @@ -139,16 +138,15 @@ setup_test() { export PATH="$HOME/.krew/bin:$PATH" kubectl krew install kuttl - ## Add tests for minikube + ## Add tests for kind cluster mv bundle/tests/scorecard/kind-kuttl/ingress bundle/tests/scorecard/kuttl/ mv bundle/tests/scorecard/kind-kuttl/ingress-certificate bundle/tests/scorecard/kuttl/ - ## Remove tests that do not apply for minikube + ## Remove tests that do not apply for kind cluster mv bundle/tests/scorecard/kuttl/network-policy bundle/tests/scorecard/kind-kuttl/ mv bundle/tests/scorecard/kuttl/network-policy-multiple-apps bundle/tests/scorecard/kind-kuttl/ mv bundle/tests/scorecard/kuttl/routes bundle/tests/scorecard/kind-kuttl/ mv bundle/tests/scorecard/kuttl/route-certificate bundle/tests/scorecard/kind-kuttl/ - mv bundle/tests/scorecard/kuttl/stream bundle/tests/scorecard/kind-kuttl/ mv bundle/tests/scorecard/kuttl/manage-tls bundle/tests/scorecard/kind-kuttl/ for image in "${IMAGES[@]}"; do @@ -215,18 +213,9 @@ cleanup() { echo "****** Cleaning up test environment..." 
$(dirname $0)/delete-fyre-stack.sh --cluster-name ${REMOTE_CLUSTER_NAME} --user "${FYRE_USER}" --key "${FYRE_KEY}" - ## Restore tests - mv bundle/tests/scorecard/kuttl/ingress bundle/tests/scorecard/kind-kuttl/ - mv bundle/tests/scorecard/kuttl/ingress-certificate bundle/tests/scorecard/kind-kuttl/ - - mv bundle/tests/scorecard/kind-kuttl/network-policy bundle/tests/scorecard/kuttl/ - mv bundle/tests/scorecard/kind-kuttl/network-policy-multiple-apps bundle/tests/scorecard/kuttl/ - mv bundle/tests/scorecard/kind-kuttl/routes bundle/tests/scorecard/kuttl/ - mv bundle/tests/scorecard/kind-kuttl/route-certificate bundle/tests/scorecard/kuttl/ - #mv bundle/tests/scorecard/kind-kuttl/image-stream bundle/tests/scorecard/kuttl/ - #mv bundle/tests/scorecard/kind-kuttl/stream bundle/tests/scorecard/kuttl/ - - git checkout bundle/tests/scorecard internal/deploy + ## Restore tests and configs + git clean -fd bundle/tests/scorecard + git restore bundle/tests/scorecard internal/deploy } trap_cleanup() { diff --git a/scripts/e2e-minikube.sh b/scripts/e2e-minikube.sh deleted file mode 100755 index 3cb9984ce..000000000 --- a/scripts/e2e-minikube.sh +++ /dev/null @@ -1,178 +0,0 @@ -#!/bin/bash - -readonly usage="Usage: e2e-minikube.sh --test-tag " -readonly OP_DIR=$(pwd) - -readonly LOCAL_REGISTRY="localhost:5000" -readonly BUILD_IMAGE="runtime-operator:latest" -readonly DAILY_IMAGE="applicationstacks\/operator:daily" - -readonly RUNASUSER="\n securityContext:\n runAsUser: 1001" -readonly APPIMAGE='applicationImage:\s' -readonly IMAGES=('k8s.gcr.io\/pause:2.0' 'navidsh\/demo-day') - - -# setup_env: Download kubectl cli and Minikube, start Minikube, and create a test project -setup_env() { - # Install Minikube and Start a cluster - echo "****** Installing and starting Minikube" - scripts/installers/install-minikube.sh - - readonly TEST_NAMESPACE="rco-test-${TEST_TAG}" - - echo "****** Creating test namespace: ${TEST_NAMESPACE}" - kubectl create namespace "${TEST_NAMESPACE}" - kubectl 
config set-context $(kubectl config current-context) --namespace="${TEST_NAMESPACE}" - - ## Create service account for Kuttl tests - kubectl apply -f config/rbac/minikube-kuttl-rbac.yaml - - ## Add label to node for affinity test - kubectl label node "minikube" kuttlTest=test1 -} - -build_push() { - eval "$(minikube docker-env --profile=minikube)" && export DOCKER_CLI='docker' - ## Build Docker image and push to local registry - docker build -t "${LOCAL_REGISTRY}/${BUILD_IMAGE}" . - docker push "${LOCAL_REGISTRY}/${BUILD_IMAGE}" -} - -# install_rco: Kustomize and install Runtime-Component-Operator -install_rco() { - echo "****** Installing RCO in namespace: ${TEST_NAMESPACE}" - - make kustomize-build KUSTOMIZE_NAMESPACE=${TEST_NAMESPACE} - sed -i "s/image: ${DAILY_IMAGE}/image: ${LOCAL_REGISTRY}\/${BUILD_IMAGE}/" deploy/kustomize/daily/base/runtime-component-operator.yaml - kubectl create -k deploy/kustomize/daily/base -} - -install_tools() { - echo "****** Installing Prometheus" - kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/main/bundle.yaml - - echo "****** Installing Knative" - kubectl apply -f https://github.com/knative/serving/releases/download/knative-v1.3.0/serving-crds.yaml - kubectl apply -f https://github.com/knative/eventing/releases/download/knative-v1.3.0/eventing-crds.yaml - - echo "****** Installing Cert Manager" - kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.8.0/cert-manager.yaml - - echo "****** Enabling Ingress" - minikube addons enable ingress -} - -## cleanup_env : Delete generated resources that are not bound to a test TEST_NAMESPACE. 
-cleanup_env() { - kubectl delete namespace "${TEST_NAMESPACE}" - minikube stop && minikube delete -} - -setup_test() { - echo "****** Installing kuttl" - mkdir krew && cd krew - curl -OL https://github.com/kubernetes-sigs/krew/releases/latest/download/krew-linux_amd64.tar.gz \ - && tar -xvzf krew-linux_amd64.tar.gz \ - && ./krew-linux_amd64 install krew - cd .. && rm -rf krew - export PATH="$HOME/.krew/bin:$PATH" - kubectl krew install kuttl - - ## Add tests for minikube - mv bundle/tests/scorecard/minikube-kuttl/ingress bundle/tests/scorecard/kuttl/ - mv bundle/tests/scorecard/minikube-kuttl/ingress-certificate bundle/tests/scorecard/kuttl/ - mv bundle/tests/scorecard/minikube-kuttl/ingress-manage-tls bundle/tests/scorecard/kuttl/ - - ## Remove tests that do not apply for minikube - mv bundle/tests/scorecard/kuttl/network-policy bundle/tests/scorecard/minikube-kuttl/ - mv bundle/tests/scorecard/kuttl/network-policy-multiple-apps bundle/tests/scorecard/minikube-kuttl/ - mv bundle/tests/scorecard/kuttl/routes bundle/tests/scorecard/minikube-kuttl/ - mv bundle/tests/scorecard/kuttl/route-certificate bundle/tests/scorecard/minikube-kuttl/ - mv bundle/tests/scorecard/kuttl/stream bundle/tests/scorecard/minikube-kuttl/ - mv bundle/tests/scorecard/kuttl/manage-tls bundle/tests/scorecard/minikube-kuttl/ - - for image in "${IMAGES[@]}" - do - files=($(grep -rwl 'bundle/tests/scorecard/kuttl/' -e $APPIMAGE$image)) - for file in "${files[@]}" - do - sed -i "s/$image/$image$RUNASUSER/" $file - done - done -} - -cleanup_test() { - ## Restore tests - mv bundle/tests/scorecard/kuttl/ingress bundle/tests/scorecard/minikube-kuttl/ - mv bundle/tests/scorecard/kuttl/ingress-certificate bundle/tests/scorecard/minikube-kuttl/ - mv bundle/tests/scorecard/kuttl/ingress-manage-tls bundle/tests/scorecard/minikube-kuttl/ - - mv bundle/tests/scorecard/minikube-kuttl/network-policy bundle/tests/scorecard/kuttl/ - mv bundle/tests/scorecard/minikube-kuttl/network-policy-multiple-apps 
bundle/tests/scorecard/kuttl/ - mv bundle/tests/scorecard/minikube-kuttl/routes bundle/tests/scorecard/kuttl/ - mv bundle/tests/scorecard/minikube-kuttl/route-certificate bundle/tests/scorecard/kuttl/ - mv bundle/tests/scorecard/minikube-kuttl/stream bundle/tests/scorecard/kuttl/ - mv bundle/tests/scorecard/minikube-kuttl/manage-tls bundle/tests/scorecard/kuttl/ - - git restore bundle/tests/scorecard deploy/kustomize -} - -main() { - parse_args "$@" - - if [[ -z "${TEST_TAG}" ]]; then - echo "****** Missing test id, see usage" - echo "${usage}" - exit 1 - fi - - echo "****** Setting up test environment..." - setup_env - build_push - install_rco - install_tools - - # Wait for operator deployment to be ready - while [[ $(kubectl get deploy rco-controller-manager -o jsonpath='{ .status.readyReplicas }') -ne "1" ]]; do - echo "****** Waiting for rco-controller-manager to be ready..." - sleep 10 - done - echo "****** rco-controller-manager deployment is ready..." - - setup_test - - echo "****** Starting minikube scorecard tests..." - operator-sdk scorecard --verbose --selector=suite=kuttlsuite --namespace "${TEST_NAMESPACE}" --service-account scorecard-kuttl --wait-time 30m ./bundle || { - echo "****** Scorecard tests failed..." - echo "****** Cleaning up test environment..." - cleanup_test - cleanup_env - exit 1 - } - result=$? - - echo "****** Cleaning up test environment..." 
- cleanup_test - cleanup_env - - return $result -} - -parse_args() { - while [ $# -gt 0 ]; do - case "$1" in - --test-tag) - shift - readonly TEST_TAG="${1}" - ;; - *) - echo "Error: Invalid argument - $1" - echo "$usage" - exit 1 - ;; - esac - shift - done -} - -main "$@" diff --git a/scripts/e2e.sh b/scripts/e2e.sh index 212997e7b..cc041d6cc 100755 --- a/scripts/e2e.sh +++ b/scripts/e2e.sh @@ -18,9 +18,9 @@ setup_env() { # Set variables for rest of script to use readonly DEFAULT_REGISTRY=$(oc get route "${REGISTRY_NAME}" -o jsonpath="{ .spec.host }" -n "${REGISTRY_NAMESPACE}") - readonly TEST_NAMESPACE="runtime-operator-test-${TEST_TAG}" - readonly BUILD_IMAGE=${DEFAULT_REGISTRY}/${TEST_NAMESPACE}/runtime-operator - readonly BUNDLE_IMAGE="${DEFAULT_REGISTRY}/${TEST_NAMESPACE}/rco-bundle:latest" + readonly TEST_NAMESPACE="rco-test-${TRAVIS_BUILD_NUMBER}" + readonly BUILD_IMAGE=${DEFAULT_REGISTRY}/${TEST_NAMESPACE}/operator + readonly BUNDLE_IMAGE="${DEFAULT_REGISTRY}/${TEST_NAMESPACE}/operator-bundle:latest" echo "****** Creating test namespace ${TEST_NAMESPACE} for release ${RELEASE}" oc new-project "${TEST_NAMESPACE}" || oc project "${TEST_NAMESPACE}" @@ -107,7 +107,7 @@ main() { push_images echo "****** Installing bundle..." - operator-sdk run bundle --install-mode OwnNamespace --pull-secret-name regcred "${BUNDLE_IMAGE}" || { + operator-sdk run bundle --install-mode OwnNamespace --pull-secret-name regcred "${BUNDLE_IMAGE}" --timeout 5m || { echo "****** Installing bundle failed..." 
exit 1 } diff --git a/scripts/installers/install-minikube.sh b/scripts/installers/install-minikube.sh deleted file mode 100755 index c01939c55..000000000 --- a/scripts/installers/install-minikube.sh +++ /dev/null @@ -1,72 +0,0 @@ -#!/bin/bash - -set -e - -## Some minikube job specific ENV variables -export MINIKUBE_WANTUPDATENOTIFICATION=false -export MINIKUBE_HOME=$HOME -export KUBECONFIG=$HOME/.kube/config - -function main () { - install_minikube - - echo "****** Verifying installation..." - kubectl cluster-info - wait_for_kube - - echo "****** Minikube enabled job is running..." - -} - -function install_minikube() { - sudo apt-get update -y - sudo apt-get -qq -y install conntrack - - ## get kubectl - echo "****** Installing kubectl v1.19.4..." - curl -Lo kubectl https://storage.googleapis.com/kubernetes-release/release/v1.19.4/bin/linux/amd64/kubectl \ - && chmod +x kubectl \ - && sudo mv kubectl /usr/local/bin/ - - ## Download minikube - echo "****** Installing Minikube v1.21.0..." - curl -Lo minikube https://storage.googleapis.com/minikube/releases/v1.21.0/minikube-linux-amd64 \ - && chmod +x minikube \ - && sudo mv minikube /usr/local/bin/ - - ## Download docker - echo "****** Installing Docker..." 
- if test -f "/usr/share/keyrings/docker-archive-keyring.gpg"; then - rm /usr/share/keyrings/docker-archive-keyring.gpg - fi - curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --dearmor -o /usr/share/keyrings/docker-archive-keyring.gpg - echo \ - "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/ubuntu \ - $(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null - sudo apt-get update -y - sudo apt-get install -y docker-ce docker-ce-cli containerd.io - - mkdir -p $HOME/.kube $HOME/.minikube - touch $KUBECONFIG - minikube start --profile=minikube --kubernetes-version=v1.19.4 --driver=docker --force - minikube update-context --profile=minikube - - eval "$(minikube docker-env --profile=minikube)" && export DOCKER_CLI='docker' - - ## Run Local Registry - docker run -d -p 5000:5000 --restart=always --name local-registry registry -} - - -function wait_for_kube() { - local json_path='{range .items[*]}{@.metadata.name}:{range @.status.conditions[*]}{@.type}={@.status};{end}{end}' - echo "****** Waiting for kube-controller-manager to be available..." 
- - until kubectl -n kube-system get pods -l component=kube-controller-manager -o jsonpath="$json_path" 2>&1 | grep -q "Ready=True"; do - sleep 5; - done - - kubectl get pods --all-namespaces -} - -main "$@" diff --git a/scripts/installers/install-operator-sdk.sh b/scripts/installers/install-operator-sdk.sh index 9f98691e4..884eb3a8f 100755 --- a/scripts/installers/install-operator-sdk.sh +++ b/scripts/installers/install-operator-sdk.sh @@ -4,7 +4,6 @@ set -o errexit set -o nounset main() { - DEFAULT_RELEASE_VERSION=v1.24.0 RELEASE_VERSION=${1:-$DEFAULT_RELEASE_VERSION} @@ -34,12 +33,11 @@ main() { fi echo "****** Installing operator-sdk version $RELEASE_VERSION on $(uname)" - curl -L -o operator-sdk $binary_url + curl -L -o operator-sdk "${binary_url}" chmod +x operator-sdk sudo mv operator-sdk /usr/local/bin/operator-sdk operator-sdk version } -main $@ - +main "$@" diff --git a/scripts/pipeline/await-ciorchestrator.sh b/scripts/pipeline/await-ciorchestrator.sh new file mode 100755 index 000000000..0957e3a15 --- /dev/null +++ b/scripts/pipeline/await-ciorchestrator.sh @@ -0,0 +1,107 @@ +#!/bin/bash + +function main() { + parse_arguments "$@" + await_ciorchestrator +} + +function print_usage() { + script_name=`basename ${0}` + echo "Usage: ${script_name} [OPTIONS]" + echo "" + echo "Await Completion of CI Orchestrator job" + echo "" + echo "Options:" + echo " -u, --user string IntranetId to use to authenticate to CI Orchestrator" + echo " --password string Intranet Password to use to authenticate to CI Orchestrator" + echo " --pipelineId string pipelineId of the request that should be awaited" + echo " -h, --help Print usage information" + echo "" +} + + +function parse_arguments() { + if [[ "$#" == 0 ]]; then + print_usage + exit 1 + fi + + # process options + while [[ "$1" != "" ]]; do + case "$1" in + -u | --user) + shift + USER=$1 + ;; + --password) + shift + PASSWORD=$1 + ;; + --pipelineId) + shift + pipelineId=$1 + ;; + -h | --help) + print_usage + exit 1 + 
;; + esac + shift + done +} + +function await_ciorchestrator() { + echo "Checking Pipeline Request in CI Orchestrator as ${USER}, pipelineId: ${pipelineId}" + + cat >ciorchestrator-query.json </dev/null + rc=$? + if [ $rc -eq 0 ]; then + echo "CIOrchestrator Pipeline finished" + cat ciorchestrator-query-output.csv | grep -E "OK" >/dev/null + ok=$? + echo "Exiting $ok" + exit $ok + else + sleep 1m + fi + done +} + +function check_request(){ + curl -s -X POST \ + --insecure \ + -H "Content-Type: application/json" \ + -d @ciorchestrator-query.json \ + -u "${USER}:${PASSWORD}" \ + -o ciorchestrator-query-output.csv \ + https://libh-proxy1.fyre.ibm.com/ci-pipeline-work-views-stateStore/query + + cat ciorchestrator-query-output.csv + +} + + +# --- Run --- + +main $* \ No newline at end of file diff --git a/scripts/pipeline/cd_finish b/scripts/pipeline/cd_finish index 60326332c..f3c48d49a 100755 --- a/scripts/pipeline/cd_finish +++ b/scripts/pipeline/cd_finish @@ -33,7 +33,7 @@ curl -H "Accept: application/vnd.github.v3+json" -H "Authorization: token $(ge set_env ibmcloud-api "cloud.ibm.com" . "${ONE_PIPELINE_PATH}"/internal/security-compliance/scan -. "${ONE_PIPELINE_PATH}"/internal/doi/publish_acceptance_test +. 
"${ONE_PIPELINE_PATH}"/internal/doi/publish_acceptance_tests #publish_acceptance_test "$(get_env ACCEPTANCE_TESTS_TASK_NAME)" "$(get_env ACCEPTANCE_TESTS_STEP_NAME)" "com.ibm.acceptance_tests" diff --git a/scripts/pipeline/ci_to_secure_pipeline_scan.sh b/scripts/pipeline/ci_to_secure_pipeline_scan.sh index 4918b13d9..5cf7272ed 100755 --- a/scripts/pipeline/ci_to_secure_pipeline_scan.sh +++ b/scripts/pipeline/ci_to_secure_pipeline_scan.sh @@ -46,12 +46,18 @@ if [[ -z "${TRIGGER_NAME}" ]]; then TRIGGER_NAME="Security Scan Manual Trigger Multiscan" fi +#AGGREGATE_IMAGE_SCAN_ISSUES=(get_env aggregate-image-scan-issues) +#if [[ -z "${AGGREGATE_IMAGE_SCAN_ISSUES}" ]]; then +AGGREGATE_IMAGE_SCAN_ISSUES="squad" +#fi + EVIDENCE_REPO=$(get_env evidence-repo) INCIDENT_REPO=$(get_env incident-repo) if [[ -z $EVIDENCE_REPO || -z $INCIDENT_REPO ]]; then TRIGGER_PROPERTIES_JSON="{\"images-to-scan\": \"$(echo ${IMAGES_TO_SCAN})\"}" else TRIGGER_PROPERTIES_JSON="{ + \"aggregate-image-scan-issues\": \"$(echo ${AGGREGATE_IMAGE_SCAN_ISSUES})\", \"images-to-scan\": \"$(echo ${IMAGES_TO_SCAN})\", \"evidence-repo\": \"${EVIDENCE_REPO}\", \"incident-repo\": \"${INCIDENT_REPO}\" diff --git a/scripts/pipeline/clusterWait.sh b/scripts/pipeline/clusterWait.sh new file mode 100755 index 000000000..e63fec0af --- /dev/null +++ b/scripts/pipeline/clusterWait.sh @@ -0,0 +1,45 @@ +#!/usr/bin/env bash +arch=$1 + + +rco_demand_id=$(get_env RCO_DEMAND_ID_$arch) +export demandId=$rco_demand_id +echo "calling ebc_waitForDemand.sh for $arch" +cd ebc-gateway-http + +export ebcEnvironment=prod + +json=$(./ebc_waitForDemand.sh) +rc=$? +echo "return from ebc_waitForDemand.sh for $arch" + +cd .. 
+ +if [[ "$rc" == 0 ]]; then + echo "EBC create of id: $rco_demand_id cluster successful" +else + echo "EBC create of id: $rco_demand_id cluster failed, ask #was-ebc slack channel for help mentioning your demand id: $rco_demand_id" + exit 1 +fi + +status=$(jq -c '.status' <<< $json) +ip=$(jq -c '.machineAddresses.ocpinf' <<< $json) +ip=$(echo "$ip" | tr -d '"') + +PRIVATE_KEY="$(get_env private_key "")" +echo -n "${PRIVATE_KEY}" | base64 -d > id_rsa + +chmod 600 id_rsa +pwd +ls -l id_rsa + +echo "oc version:" +oc version + +token=$(ssh -o StrictHostKeyChecking=no -i id_rsa root@$ip "cat ~/auth/kubeadmin-password") + +echo "json=$json" +echo "status=$status" +echo "token=$token" +echo $ip + diff --git a/scripts/pipeline/fyre-e2e.sh b/scripts/pipeline/fyre-e2e.sh deleted file mode 100755 index fb6e74d9d..000000000 --- a/scripts/pipeline/fyre-e2e.sh +++ /dev/null @@ -1,287 +0,0 @@ -#!/bin/bash - -readonly usage="Usage: fyre-e2e.sh -u -p --cluster-url --cluster-token --registry-name --registry-namespace --registry-user --registry-password --release --test-tag " -readonly OC_CLIENT_VERSION="4.6.0" -readonly CONTROLLER_MANAGER_NAME="rco-controller-manager" - -# setup_env: Download oc cli, log into our persistent cluster, and create a test project -setup_env() { - echo "****** Installing OC CLI..." - # Install kubectl and oc - curl -L https://mirror.openshift.com/pub/openshift-v4/clients/ocp/${OC_CLIENT_VERSION}/openshift-client-linux.tar.gz | tar xvz - sudo mv oc kubectl /usr/local/bin/ - - # Start a cluster and login - echo "****** Logging into remote cluster..." 
- oc login "${CLUSTER_URL}" -u "${CLUSTER_USER:-kubeadmin}" -p "${CLUSTER_TOKEN}" --insecure-skip-tls-verify=true - - # Set variables for rest of script to use - readonly TEST_NAMESPACE="runtime-operator-test-${TEST_TAG}" - - echo "****** Creating test namespace: ${TEST_NAMESPACE} for release ${RELEASE}" - oc new-project "${TEST_NAMESPACE}" || oc project "${TEST_NAMESPACE}" - - ## Create service account for Kuttl tests - oc apply -f config/rbac/kuttl-rbac.yaml -} - -## cleanup_env : Delete generated resources that are not bound to a test TEST_NAMESPACE. -cleanup_env() { - oc delete project "${TEST_NAMESPACE}" -} - -## trap_cleanup : Call cleanup_env and exit. For use by a trap to detect if the script is exited at any point. -trap_cleanup() { - last_status=$? - if [[ $last_status != 0 ]]; then - cleanup_env - fi - exit $last_status -} - -#push_images() { -# echo "****** Logging into private registry..." -# oc sa get-token "${SERVICE_ACCOUNT}" -n default | docker login -u unused --password-stdin "${DEFAULT_REGISTRY}" || { -# echo "Failed to log into docker registry as ${SERVICE_ACCOUNT}, exiting..." -# exit 1 -# } - -# echo "****** Creating pull secret using Docker config..." -# oc create secret generic regcred --from-file=.dockerconfigjson="${HOME}/.docker/config.json" --type=kubernetes.io/dockerconfigjson - -# docker push "${BUILD_IMAGE}" || { -# echo "Failed to push ref: ${BUILD_IMAGE} to docker registry, exiting..." -# exit 1 -# } - -# docker push "${BUNDLE_IMAGE}" || { -# echo "Failed to push ref: ${BUNDLE_IMAGE} to docker registry, exiting..." 
-# exit 1 -# } -#} - -main() { - parse_args "$@" - - if [[ -z "${RELEASE}" ]]; then - echo "****** Missing release, see usage" - fi - - if [[ -z "${DOCKER_USERNAME}" || -z "${DOCKER_PASSWORD}" ]]; then - echo "****** Missing docker authentication information, see usage" - echo "${usage}" - exit 1 - fi - - if [[ -z "${CLUSTER_URL}" ]] || [[ -z "${CLUSTER_TOKEN}" ]]; then - echo "****** Missing OCP URL or token, see usage" - echo "${usage}" - exit 1 - fi - - if [[ -z "${REGISTRY_NAME}" ]]; then - echo "****** Missing OCP registry name, see usage" - echo "${usage}" - exit 1 - fi - - if [[ -z "${REGISTRY_IMAGE}" ]]; then - echo "****** Missing REGISTRY_IMAGE definition, see usage" - echo "${usage}" - exit 1 - fi - - if [[ -z "${REGISTRY_USER}" ]] || [[ -z "${REGISTRY_PASSWORD}" ]]; then - echo "****** Missing registry authentication information, see usage" - echo "${usage}" - exit 1 - fi - - if [[ -z "${TEST_TAG}" ]]; then - echo "****** Missing test tag, see usage" - echo "${usage}" - exit 1 - fi - - if [[ -z "${CATALOG_IMAGE}" ]]; then - echo "****** Missing catalog image, see usage" - echo "${usage}" - exit 1 - fi - - if [[ -z "${CHANNEL}" ]]; then - echo "****** Missing channel, see usage" - echo "${usage}" - exit 1 - fi - - echo "****** Setting up test environment..." - setup_env - - if [[ -z "${DEBUG_FAILURE}" ]]; then - trap trap_cleanup EXIT - else - echo "#####################################################################################" - echo "WARNING: --debug-failure is set. If e2e tests fail, any created resources will remain" - echo "on the cluster for debugging/troubleshooting. YOU MUST DELETE THESE RESOURCES when" - echo "you're done, or else they will cause future tests to fail. 
To cleanup manually, just" - echo "delete the namespace \"${TEST_NAMESPACE}\": oc delete project \"${TEST_NAMESPACE}\" " - echo "#####################################################################################" - fi - - # login to docker to avoid rate limiting during build - echo "${DOCKER_PASSWORD}" | docker login -u "${DOCKER_USERNAME}" --password-stdin - - trap "rm -f /tmp/pull-secret-*.yaml" EXIT - - echo "****** Logging into private registry..." - echo "${REGISTRY_PASSWORD}" | docker login ${REGISTRY_NAME} -u "${REGISTRY_USER}" --password-stdin - - echo "****** Creating pull secret..." - oc create secret docker-registry regcred --docker-server=${REGISTRY_NAME} "--docker-username=${REGISTRY_USER}" "--docker-password=${REGISTRY_PASSWORD}" --docker-email=unused - - oc get secret/regcred -o jsonpath='{.data.\.dockerconfigjson}' | base64 --decode > /tmp/pull-secret-new.yaml - oc get secret/pull-secret -n openshift-config -o jsonpath='{.data.\.dockerconfigjson}' | base64 --decode > /tmp/pull-secret-global.yaml - - jq -s '.[1] * .[0]' /tmp/pull-secret-new.yaml /tmp/pull-secret-global.yaml > /tmp/pull-secret-merged.yaml - - echo "Updating global pull secret" - oc set data secret/pull-secret -n openshift-config --from-file=.dockerconfigjson=/tmp/pull-secret-merged.yaml - - echo "****** Installing operator from catalog: ${CATALOG_IMAGE}" - install_operator - - # Wait for operator deployment to be ready - while [[ $(oc get deploy "${CONTROLLER_MANAGER_NAME}" -o jsonpath='{ .status.readyReplicas }') -ne "1" ]]; do - echo "****** Waiting for ${CONTROLLER_MANAGER_NAME} to be ready..." - sleep 10 - done - - echo "****** ${CONTROLLER_MANAGER_NAME} deployment is ready..." - - echo "****** Starting scorecard tests..." - operator-sdk scorecard --verbose --kubeconfig ${HOME}/.kube/config --selector=suite=kuttlsuite --namespace="${TEST_NAMESPACE}" --service-account="scorecard-kuttl" --wait-time 30m ./bundle || { - echo "****** Scorecard tests failed..." 
- exit 1 - } - result=$? - - echo "****** Cleaning up test environment..." - cleanup_env - - return $result -} - -install_operator() { - # Apply the catalog - echo "****** Applying the catalog source..." - cat <>" + rhcos_level=$(get_env pre-release-rhcos-url) + ocp_level=$(get_env pre-release-ocp-url) + echo "this is a pre-release OCP cluster build" + echo "ocp level: $ocp_level" + echo "core os level: $rhcos_level" + export ebc_fyre_install_url=${ocp_level}/openshift-install-linux.tar.gz + export ebc_fyre_client_url=${ocp_level}/openshift-client-linux.tar.gz + if [[ "$arch" == "X" ]]; then + # X values + echo "<<1a>>" + export ebc_plan=svl-onepipeline-ocpplus_x_custom.yml + export ebc_fyre_kernel_url=${rhcos_level}/rhcos-live-kernel-x86_64 + export ebc_fyre_initramfs_url=${rhcos_level}/rhcos-live-initramfs.x86_64.img + export ebc_fyre_metal_url=${rhcos_level}/rhcos-metal.x86_64.raw.gz + export ebc_fyre_rootfs_url=${rhcos_level}/rhcos-live-rootfs.x86_64.img + fi + if [[ "$arch" == "Z" ]]; then + # Z values + export ebc_plan=svl-onepipeline-ocpplus_z_custom.yml + export ebc_fyre_kernel_url=${rhcos_level_z}/rhcos-live-kernel-s390x + export ebc_fyre_initramfs_url=${rhcos_level_z}/rhcos-live-initramfs.s390x.img + export ebc_fyre_metal_url=${rhcos_level_z}/rhcos-metal.s390x.raw.gz + export ebc_fyre_rootfs_url=${rhcos_level_z}/rhcos-live-rootfs.s390x.img + fi + if [[ "$arch" == "P" ]]; then + # P + export ebc_plan=svl-onepipeline-ocpplus_p_custom.yml + export ebc_fyre_kernel_url=${rhcos_level_p}/rhcos-live-kernel-ppc64le + export ebc_fyre_initramfs_url=${rhcos_level_p}/rhcos-live-initramfs.ppc64le.img + export ebc_fyre_metal_url=${rhcos_level_p}/rhcos-metal.ppc64le.raw.gz + export ebc_fyre_rootfs_url=${rhcos_level_p}/rhcos-live-rootfs.ppc64le.img + fi +else + if [[ "$arch" == "X" ]]; then + export ebc_plan=svl-onepipeline-ocpplus_x.yml + echo "setting ebc plan for X: $ebc_plan" + fi + if [[ "$arch" == "Z" ]]; then + export ebc_plan=svl-onepipeline-ocpplus_z.yml + fi + 
if [[ "$arch" == "P" ]]; then + export ebc_plan=svl-onepipeline-ocpplus_p.yml + fi + export ebc_ocp_version=$(get_env ocp_version) +fi +# prod or dev, start out with dev +export ebcEnvironment=prod +# priority is 30 to start, prod priority may be 100 +export ebc_priority=30 +export ebc_autoCompleteAfterXHours=$(get_env ebc_autocomplete_hours "6") +# gather pipeline URL and place in following env var +reason="https://cloud.ibm.com/devops/pipelines/tekton/${PIPELINE_ID}/runs/${PIPELINE_RUN_ID}" +export ebc_reasonForEnvironment=$reason + +./ebc_demand.sh +rc=$? +if [[ "$rc" == 0 ]]; then + echo "cluster requested" +else + echo "Outage impacting demand of cluster, try again later" + exit 1 +fi diff --git a/scripts/pipeline/launch-catalog-build.sh b/scripts/pipeline/launch-catalog-build.sh deleted file mode 100755 index 1ac698f12..000000000 --- a/scripts/pipeline/launch-catalog-build.sh +++ /dev/null @@ -1,190 +0,0 @@ -#!/bin/bash - -TRAVIS_TOKEN= -LAUNCH_TRAVIS= -MONITOR_TRAVIS=yes -GH_REPO= -GH_COMMIT_ID= - -function main() { - parse_arguments "$@" - launch_travis -} - -function print_usage() { - script_name=`basename ${0}` - echo "Usage: ${script_name} [OPTIONS]" - echo "" - echo "Kick off or check status of Travis job" - echo "" - echo "Options:" - echo " -t, --token string Travis API token" - echo " -b, --branch string Github Repository branch" - echo " -l, --launch Launch Travis job" - echo " -m, --monitor Monitor Travis job" - echo " -r, --repository string GitHub Repository to use" - echo " -c, --commit string GH head commit ID" - echo " -h, --help Print usage information" - echo "" -} - - -function parse_arguments() { - if [[ "$#" == 0 ]]; then - print_usage - exit 1 - fi - - # process options - while [[ "$1" != "" ]]; do - case "$1" in - -t | --token) - shift - TRAVIS_TOKEN=$1 - ;; - -b | --branch) - shift - BRANCH=$1 - ;; - -l | --launch) - LAUNCH_TRAVIS=yes - ;; - -m | --monitor) - MONITOR_TRAVIS=yes - ;; - -r | --repository) - shift - GH_REPO=$1 - ;; - -c | 
--commit) - shift - GH_COMMIT_ID=$1 - ;; - -h | --help) - print_usage - exit 1 - ;; - esac - shift - done -} - -function launch_travis() { - - - echo "Going to work with GH repository: ${GH_REPO} ..." - - # for Travis API call, the repository shall be provided without the full URL - GH_REPO=$( echo $GH_REPO | sed -e 's/.*github.com\///g' ) - - # for Travis API call, the GH repo needs to be encoded using URL encoding - # FIXME proper URL encoding, not only handle backslashes - GH_REPO=$( echo $GH_REPO | sed -e 's/\//%2F/g' ) - - - if [[ ! -z ${LAUNCH_TRAVIS} ]]; then - - body="{ - \"request\": { - \"branch\":\"$BRANCH\", - \"merge_mode\":\"replace\", - \"config\": { - \"dist\": \"focal\", - \"language\": \"go\", - \"go\": [ - \"1.19.x\" - ], - \"go_import_path\": \"github.com/application-stacks/runtime-component-operator\", - \"services\": [ - \"docker\" - ], - \"before_install\": [ - \"sudo apt-get update\" - ], - \"stages\": [ - { - \"name\": \"build\" - } - ], - \"jobs\": { - \"include\": [ - { - \"stage\": \"build\", - \"name\": \"Build bundle on amd64\", - \"os\": \"linux\", - \"arch\": \"amd64\", - \"before_install\": [ - \"sudo apt-get install -qq -y software-properties-common uidmap\", - \"make install-podman\", - \"make install-opm\" - ], - \"script\": [ - \"make bundle-pipeline-releases RELEASE_TARGET=$BRANCH\" - ] - } - ] - } - }, - \"sha\": \"$GH_COMMIT_ID\", - \"message\": \"Run bundle builds\" - }}" - - echo $body - - echo "Requesting Travis build for GH repository: ${GH_REPO}..." - - curl -s -X POST \ - -H "Content-Type: application/json" \ - -H "Accept: application/json" \ - -H "Travis-API-Version: 3" \ - -H "Authorization: token ${TRAVIS_TOKEN}" \ - -d "$body" \ - "https://api.travis-ci.com/repo/${GH_REPO}/requests" > travis-request.json - - fi - - REQUEST_NUMBER=$(jq -r '.request.id' travis-request.json) - echo "Travis build request number: $REQUEST_NUMBER" - - echo "Checking Travis build (${REQUEST_NUMBER}) status...." 
- - # TODO read these in as env properties? - retries=300 - sleep_time=30 - total_time_mins=$(( sleep_time * retries / 60)) - - while true; do - - if [[ ${retries} -eq 0 ]]; then - echo "Timeout after ${total_time_mins} minutes waiting for Travis request ${REQUEST_NUMBER} to complete." - fi - - curl -s -X GET \ - -H "Accept: application/json" \ - -H "Travis-API-Version: 3" \ - -H "Authorization: token ${TRAVIS_TOKEN}" \ - "https://api.travis-ci.com/repo/${GH_REPO}/request/${REQUEST_NUMBER}" > travis-status-1.json - - REQUEST_STATUS=$(jq -r '.builds[].state' travis-status-1.json) - echo "Travis request ${REQUEST_NUMBER} status: '${REQUEST_STATUS}' ..." - - if [[ "${REQUEST_STATUS}" != "failed" && "${REQUEST_STATUS}" != "passed" ]]; then # FIXME - retries=$(( retries - 1 )) - echo "Retrying waiting for Travis request ${REQUEST_NUMBER}... (${retries} left)" - sleep ${sleep_time} - elif [[ "${REQUEST_STATUS}" == "failed" ]]; then - echo "Travis request ${REQUEST_NUMBER} failed, exiting." - exit 1 - else - echo "Travis request ${REQUEST_NUMBER} completed with status ${REQUEST_STATUS}." 
- break - fi - - done - -} - - -# --- Run --- - -main $* diff --git a/scripts/pipeline/launch-travis.sh b/scripts/pipeline/launch-travis.sh deleted file mode 100755 index 07fe95828..000000000 --- a/scripts/pipeline/launch-travis.sh +++ /dev/null @@ -1,193 +0,0 @@ -#!/bin/bash - -TRAVIS_TOKEN= -LAUNCH_TRAVIS= -MONITOR_TRAVIS=yes -GH_REPO= -GH_COMMIT_ID= - -function main() { - parse_arguments "$@" - launch_travis -} - -function print_usage() { - script_name=`basename ${0}` - echo "Usage: ${script_name} [OPTIONS]" - echo "" - echo "Kick off or check status of Travis job" - echo "" - echo "Options:" - echo " -t, --token string Travis API token" - echo " -b, --branch string Github Repository branch" - echo " -l, --launch Launch Travis job" - echo " -m, --monitor Monitor Travis job" - echo " -r, --repository string GitHub Repository to use" - echo " -c, --commit string GH head commit ID" - echo " -h, --help Print usage information" - echo "" -} - - -function parse_arguments() { - if [[ "$#" == 0 ]]; then - print_usage - exit 1 - fi - - # process options - while [[ "$1" != "" ]]; do - case "$1" in - -t | --token) - shift - TRAVIS_TOKEN=$1 - ;; - -b | --branch) - shift - BRANCH=$1 - ;; - -l | --launch) - LAUNCH_TRAVIS=yes - ;; - -m | --monitor) - MONITOR_TRAVIS=yes - ;; - -r | --repository) - shift - GH_REPO=$1 - ;; - -c | --commit) - shift - GH_COMMIT_ID=$1 - ;; - -h | --help) - print_usage - exit 1 - ;; - esac - shift - done -} - -function launch_travis() { - - - echo "Going to work with GH repository: ${GH_REPO} ..." - - # for Travis API call, the repository shall be provided without the full URL - GH_REPO=$( echo $GH_REPO | sed -e 's/.*github.com\///g' ) - - # for Travis API call, the GH repo needs to be encoded using URL encoding - # FIXME proper URL encoding, not only handle backslashes - GH_REPO=$( echo $GH_REPO | sed -e 's/\//%2F/g' ) - - - if [[ ! 
-z ${LAUNCH_TRAVIS} ]]; then - - body="{ - \"request\": { - \"branch\":\"$BRANCH\", - \"merge_mode\":\"replace\", - \"config\": { - \"dist\": \"focal\", - \"language\": \"go\", - \"go\": [ - \"1.19.x\" - ], - \"go_import_path\": \"github.com/application-stacks/runtime-component-operator\", - \"services\": [ - \"docker\" - ], - \"before_install\": [ - \"sudo apt-get update\" - ], - \"stages\": [ - { - \"name\": \"build\" - } - ], - \"jobs\": { - \"include\": [ - { - \"stage\": \"build\", - \"name\": \"Build image on ppc64le\", - \"os\": \"linux\", - \"arch\": \"ppc64le\", - \"script\": [ - \"make build-pipeline-releases RELEASE_TARGET=$BRANCH\" - ] - }, - { - \"name\": \"Build image on s390x\", - \"os\": \"linux\", - \"arch\": \"s390x\", - \"script\": [ - \"make build-pipeline-releases RELEASE_TARGET=$BRANCH\" - ] - } - ] - } - }, - \"sha\": \"$GH_COMMIT_ID\", - \"message\": \"Run architecture builds\" - }}" - - echo $body - - echo "Requesting Travis build for GH repository: ${GH_REPO}..." - - curl -s -X POST \ - -H "Content-Type: application/json" \ - -H "Accept: application/json" \ - -H "Travis-API-Version: 3" \ - -H "Authorization: token ${TRAVIS_TOKEN}" \ - -d "$body" \ - "https://api.travis-ci.com/repo/${GH_REPO}/requests" > travis-request.json - - fi - - REQUEST_NUMBER=$(jq -r '.request.id' travis-request.json) - echo "Travis build request number: $REQUEST_NUMBER" - - echo "Checking Travis build (${REQUEST_NUMBER}) status...." - - # TODO read these in as env properties? - retries=300 - sleep_time=30 - total_time_mins=$(( sleep_time * retries / 60)) - - while true; do - - if [[ ${retries} -eq 0 ]]; then - echo "Timeout after ${total_time_mins} minutes waiting for Travis request ${REQUEST_NUMBER} to complete." 
- fi - - curl -s -X GET \ - -H "Accept: application/json" \ - -H "Travis-API-Version: 3" \ - -H "Authorization: token ${TRAVIS_TOKEN}" \ - "https://api.travis-ci.com/repo/${GH_REPO}/request/${REQUEST_NUMBER}" > travis-status-1.json - - REQUEST_STATUS=$(jq -r '.builds[].state' travis-status-1.json) - echo "Travis request ${REQUEST_NUMBER} status: '${REQUEST_STATUS}' ..." - - if [[ "${REQUEST_STATUS}" != "failed" && "${REQUEST_STATUS}" != "passed" ]]; then # FIXME - retries=$(( retries - 1 )) - echo "Retrying waiting for Travis request ${REQUEST_NUMBER}... (${retries} left)" - sleep ${sleep_time} - elif [[ "${REQUEST_STATUS}" == "failed" ]]; then - echo "Travis request ${REQUEST_NUMBER} failed, exiting." - exit 1 - else - echo "Travis request ${REQUEST_NUMBER} completed with status ${REQUEST_STATUS}." - break - fi - - done - -} - - -# --- Run --- - -main $* diff --git a/scripts/pipeline/ocp-cluster-e2e.sh b/scripts/pipeline/ocp-cluster-e2e.sh new file mode 100755 index 000000000..ff5ae4b74 --- /dev/null +++ b/scripts/pipeline/ocp-cluster-e2e.sh @@ -0,0 +1,411 @@ +#!/bin/bash + +readonly usage="Usage: ocp-cluster-e2e.sh -u -p --cluster-url --cluster-token --registry-name --registry-image --registry-user --registry-password --release --test-tag --catalog-image --channel " +readonly OC_CLIENT_VERSION="4.6.0" +readonly CONTROLLER_MANAGER_NAME="rco-controller-manager" + +# setup_env: Download oc cli, log into our persistent cluster, and create a test project +setup_env() { + echo "****** Installing OC CLI..." + # Install kubectl and oc + curl -L https://mirror.openshift.com/pub/openshift-v4/clients/ocp/${OC_CLIENT_VERSION}/openshift-client-linux.tar.gz | tar xvz + sudo mv oc kubectl /usr/local/bin/ + + if [[ "$ARCHITECTURE" == "Z" ]]; then + { + echo "****** Installing kubectl-kuttl..." 
+ curl -L -o kubectl-kuttl https://github.com/kudobuilder/kuttl/releases/download/v0.15.0/kubectl-kuttl_0.15.0_linux_x86_64 + chmod +x kubectl-kuttl + sudo mv kubectl-kuttl /usr/local/bin + } + fi + + # Start a cluster and login + echo "****** Logging into remote cluster..." + oc login "${CLUSTER_URL}" -u "${CLUSTER_USER:-kubeadmin}" -p "${CLUSTER_TOKEN}" --insecure-skip-tls-verify=true + + # Set variables for rest of script to use + readonly TEST_NAMESPACE="rco-test-${TEST_TAG}" + if [[ $INSTALL_MODE = "SingleNamespace" ]]; then + readonly INSTALL_NAMESPACE="rco-test-single-namespace-${TEST_TAG}" + elif [[ $INSTALL_MODE = "AllNamespaces" ]]; then + readonly INSTALL_NAMESPACE="openshift-operators" + else + readonly INSTALL_NAMESPACE="rco-test-${TEST_TAG}" + fi + + if [ $INSTALL_MODE != "AllNamespaces" ]; then + echo "****** Creating install namespace: ${INSTALL_NAMESPACE} for release ${RELEASE}" + oc new-project "${INSTALL_NAMESPACE}" || oc project "${INSTALL_NAMESPACE}" + fi + + echo "****** Creating test namespace: ${TEST_NAMESPACE} for release ${RELEASE}" + oc new-project "${TEST_NAMESPACE}" || oc project "${TEST_NAMESPACE}" + + ## Create service account for Kuttl tests + oc -n $TEST_NAMESPACE apply -f config/rbac/kuttl-rbac.yaml +} + +## cleanup_env : Delete generated resources that are not bound to a test INSTALL_NAMESPACE. 
+cleanup_env() { + ## Delete CRDs + RCO_CRD_NAMES=$(oc get crd -o name | grep rc.app.stacks | cut -d/ -f2) + echo "*** Deleting CRDs ***" + echo "*** ${RCO_CRD_NAMES}" + oc delete crd $RCO_CRD_NAMES + + ## Delete Subscription + RCO_SUBSCRIPTION_NAME=$(oc -n $INSTALL_NAMESPACE get subscription -o name | grep runtime-component | cut -d/ -f2) + echo "*** Deleting Subscription ***" + echo "*** ${RCO_SUBSCRIPTION_NAME}" + oc -n $INSTALL_NAMESPACE delete subscription $RCO_SUBSCRIPTION_NAME + + ## Delete CSVs + RCO_CSV_NAME=$(oc -n $INSTALL_NAMESPACE get csv -o name | grep runtime-component | cut -d/ -f2) + echo "*** Deleting CSVs ***" + echo "*** ${RCO_CSV_NAME}" + oc -n $INSTALL_NAMESPACE delete csv $RCO_CSV_NAME + + if [ $INSTALL_MODE != "OwnNamespace" ]; then + echo "*** Deleting project ${TEST_NAMESPACE}" + oc delete project "${TEST_NAMESPACE}" + fi + + if [ $INSTALL_MODE != "AllNamespaces" ]; then + echo "*** Deleting project ${INSTALL_NAMESPACE}" + oc delete project "${INSTALL_NAMESPACE}" + fi +} + +## trap_cleanup : Call cleanup_env and exit. For use by a trap to detect if the script is exited at any point. +trap_cleanup() { + last_status=$? + if [[ $last_status != 0 ]]; then + cleanup_env + fi + exit $last_status +} + +#push_images() { +# echo "****** Logging into private registry..." +# oc sa get-token "${SERVICE_ACCOUNT}" -n default | docker login -u unused --password-stdin "${DEFAULT_REGISTRY}" || { +# echo "Failed to log into docker registry as ${SERVICE_ACCOUNT}, exiting..." +# exit 1 +# } + +# echo "****** Creating pull secret using Docker config..." +# oc create secret generic regcred --from-file=.dockerconfigjson="${HOME}/.docker/config.json" --type=kubernetes.io/dockerconfigjson + +# docker push "${BUILD_IMAGE}" || { +# echo "Failed to push ref: ${BUILD_IMAGE} to docker registry, exiting..." +# exit 1 +# } + +# docker push "${BUNDLE_IMAGE}" || { +# echo "Failed to push ref: ${BUNDLE_IMAGE} to docker registry, exiting..." 
+# exit 1 +# } +#} + +main() { + parse_args "$@" + + if [[ -z "${RELEASE}" ]]; then + echo "****** Missing release, see usage" + fi + + if [[ -z "${DOCKER_USERNAME}" || -z "${DOCKER_PASSWORD}" ]]; then + echo "****** Missing docker authentication information, see usage" + echo "${usage}" + exit 1 + fi + + if [[ -z "${CLUSTER_URL}" ]] || [[ -z "${CLUSTER_TOKEN}" ]]; then + echo "****** Missing OCP URL or token, see usage" + echo "${usage}" + exit 1 + fi + + if [[ -z "${REGISTRY_NAME}" ]]; then + echo "****** Missing OCP registry name, see usage" + echo "${usage}" + exit 1 + fi + + if [[ -z "${REGISTRY_IMAGE}" ]]; then + echo "****** Missing REGISTRY_IMAGE definition, see usage" + echo "${usage}" + exit 1 + fi + + if [[ -z "${REGISTRY_USER}" ]] || [[ -z "${REGISTRY_PASSWORD}" ]]; then + echo "****** Missing registry authentication information, see usage" + echo "${usage}" + exit 1 + fi + + if [[ -z "${TEST_TAG}" ]]; then + echo "****** Missing test tag, see usage" + echo "${usage}" + exit 1 + fi + + if [[ -z "${CATALOG_IMAGE}" ]]; then + echo "****** Missing catalog image, see usage" + echo "${usage}" + exit 1 + fi + + if [[ -z "${CHANNEL}" ]]; then + echo "****** Missing channel, see usage" + echo "${usage}" + exit 1 + fi + + if [[ -z "${INSTALL_MODE}" ]]; then + echo "****** Missing install-mode, see usage" + echo "${usage}" + exit 1 + fi + + if [[ -z "${ARCHITECTURE}" ]]; then + echo "****** Missing architecture, see usage" + echo "${usage}" + exit 1 + fi + + echo "****** Setting up test environment..." + setup_env + + if [[ "${ARCHITECTURE}" != "X" ]]; then + echo "****** Setting up tests for ${ARCHITECTURE} architecture" + setup_tests + fi + + if [[ -z "${DEBUG_FAILURE}" ]]; then + trap trap_cleanup EXIT + else + echo "#####################################################################################" + echo "WARNING: --debug-failure is set. If e2e tests fail, any created resources will remain" + echo "on the cluster for debugging/troubleshooting. 
YOU MUST DELETE THESE RESOURCES when" + echo "you're done, or else they will cause future tests to fail. To cleanup manually, just" + echo "delete the namespace \"${INSTALL_NAMESPACE}\": oc delete project \"${INSTALL_NAMESPACE}\" " + echo "#####################################################################################" + fi + + # login to docker to avoid rate limiting during build + echo "${DOCKER_PASSWORD}" | docker login -u "${DOCKER_USERNAME}" --password-stdin + + trap "rm -f /tmp/pull-secret-*.yaml" EXIT + + echo "****** Logging into private registry..." + echo "${REGISTRY_PASSWORD}" | docker login ${REGISTRY_NAME} -u "${REGISTRY_USER}" --password-stdin + + echo "sleep for 3 minutes to wait for rook-cepth, knative and cert-manager to start installing, then start monitoring for completion" + sleep 3m + echo "monitoring knative" + ./wait.sh deployment knative-serving + rc_kn=$? + echo "rc_kn=$rc_kn" + if [[ "$rc_kn" == 0 ]]; then + echo "knative up" + fi + if [[ "${ARCHITECTURE}" == "X" ]]; then + echo "monitoring rook-ceph if architecture is ${ARCHITECTURE}" + ./wait.sh deployment rook-ceph + rc_rk=$? + echo "rc_rk=$rc_rk" + if [[ "$rc_rk" == 0 ]]; then + echo "rook-ceph up" + fi + fi + echo "****** Installing operator from catalog: ${CATALOG_IMAGE} using install mode of ${INSTALL_MODE}" + echo "****** Install namespace is ${INSTALL_NAMESPACE}. Test namespace is ${TEST_NAMESPACE}" + install_operator + + # Wait for operator deployment to be ready + while [[ $(oc -n $INSTALL_NAMESPACE get deploy "${CONTROLLER_MANAGER_NAME}" -o jsonpath='{ .status.readyReplicas }') -ne "1" ]]; do + echo "****** Waiting for ${CONTROLLER_MANAGER_NAME} to be ready..." + sleep 10 + done + + echo "****** ${CONTROLLER_MANAGER_NAME} deployment is ready..." + + if [[ "$ARCHITECTURE" != "Z" ]]; then + echo "****** Testing on ${ARCHITECTURE} so starting scorecard tests..." 
+ operator-sdk scorecard --verbose --kubeconfig ${HOME}/.kube/config --selector=suite=kuttlsuite --namespace="${TEST_NAMESPACE}" --service-account="scorecard-kuttl" --wait-time 45m ./bundle || { + echo "****** Scorecard tests failed..." + exit 1 + } + else + echo "****** Testing on ${ARCHITECTURE} so running kubectl-kuttl tests..." + kubectl-kuttl test ./bundle/tests/scorecard/kuttl --namespace "${TEST_NAMESPACE}" --timeout 200 --suppress-log=events --parallel 1 || { + echo "****** kubectl kuttl tests failed..." + exit 1 + } + fi + result=$? + + echo "****** Cleaning up test environment..." + if [[ "${ARCHITECTURE}" != "X" ]]; then + revert_tests + fi + cleanup_env + + return $result +} + +install_operator() { + # Apply the catalog + echo "****** Applying the catalog source..." + cat <ciorchestrator-submit.json <ciorchestrator-submit.id + # add retry logic for Fyre networking issues + echo "Sending Pipeline Request to CI Orchestrator pipelineId: ${pipelineId} as ${USER}" + echo "command to run: $COMMAND" + count=0 + tryAgain=true + while $tryAgain; do + curl --fail --insecure -v -X POST \ + -H "Content-Type: application/json" \ + -d @ciorchestrator-submit.json \ + -u "${USER}:${PASSWORD}" \ + https://libh-proxy1.fyre.ibm.com/eventPublish/rawCIData/${pipelineId} + rc=$? + if [[ $rc -eq 0 ]]; then + echo "Successfully sent CI orchestrator Request" + tryAgain=false + elif [[ $count -gt 600 ]]; then + #Bail after 10 mins + echo "Problem sending CI orchestrator Request after 10 mins of trying, giving up. 
Curl returned $rc" + exit 1; + else + sleep 10 + count=$((count+10)) + fi + done +} + + +# --- Run --- + +main "$@" diff --git a/scripts/pipeline/runTest.sh b/scripts/pipeline/runTest.sh new file mode 100755 index 000000000..0e78aba08 --- /dev/null +++ b/scripts/pipeline/runTest.sh @@ -0,0 +1,90 @@ +#!/bin/bash +arch=$1 +source ./clusterWait.sh $arch +clusterurl="$ip:6443" + +echo "in directory" +pwd + +echo "running configure-ccluster.sh" +GITHUB_ACCESS_TOKEN=$(get_env git-token) +GITHUB_SCRIPT_URL="https://api.github.ibm.com/repos/websphere/operators/contents/scripts/configure-cluster/configure-cluster.sh" +curl -H "Authorization: token $GITHUB_ACCESS_TOKEN" -H "Accept: application/vnd.github.v3+json" "$GITHUB_SCRIPT_URL" | jq -r ".content" | base64 --decode > configure-cluster.sh +chmod +x configure-cluster.sh +ls -l configure-cluster.sh +echo "**** issuing oc login" +oc login --insecure-skip-tls-verify $clusterurl -u kubeadmin -p $token +echo "Open Shift Console:" +console=$(oc whoami --show-console) +echo $console +echo "*** after issuing oc login" +./configure-cluster.sh -k $(get_env ibmcloud-api-key-staging) -A + + +export GO_VERSION=$(get_env go-version) +make setup-go GO_RELEASE_VERSION=$GO_VERSION +export PATH=$PATH:/usr/local/go/bin +export INSTALL_MODE=$(get_env install-mode) +export ARCHITECTURE=$arch + +# OCP test +export PIPELINE_USERNAME=$(get_env ibmcloud-api-user) +export PIPELINE_PASSWORD=$(get_env ibmcloud-api-key-staging) +export PIPELINE_REGISTRY=$(get_env pipeline-registry) +export PIPELINE_OPERATOR_IMAGE=$(get_env pipeline-operator-image) +export DOCKER_USERNAME=$(get_env docker-username) +export DOCKER_PASSWORD=$(get_env docker-password) +#export CLUSTER_URL=$(get_env test-cluster-url) +export CLUSTER_URL=$clusterurl +#export CLUSTER_USER=$(get_env test-cluster-user kubeadmin) +export CLUSTER_TOKEN=$token +export RELEASE_TARGET=$(get_env branch) +export DEBUG_FAILURE=$(get_env debug-failure) + +# Kind test +export FYRE_USER=$(get_env 
fyre-user) +export FYRE_KEY=$(get_env fyre-key) +export FYRE_PASS=$(get_env fyre-pass) +export FYRE_PRODUCT_GROUP_ID=$(get_env fyre-product-group-id) + +cd ../.. +echo "directory before acceptance-test.sh" +pwd + +scripts/acceptance-test.sh +rc=$? + +echo "switching back to ebc-gateway-http directory" +cd scripts/pipeline/ebc-gateway-http + +if [[ "$rc" == 0 ]]; then + ./ebc_complete.sh +else + hours=$(get_env ebc_autocomplete_hours "6") + echo "Your acceptance test failed, the cluster will be retained for $hours hours." + echo "debug of cluster may be required, issue @ebc debug $rco_demand_id in #was-ebc channel to keep cluster for debug" + echo "issue @ebc debugcomplete $rco_demand_id when done debugging in #was-ebc channel " + echo "access console at: $console" + echo "credentials: kubeadmin/$token" + slack_users=$(get_env slack_users) + echo "slack_users=$slack_users" + eval "arr=($slack_users)" + for user in "${arr[@]}"; do + echo "user=$user" + curl -X POST -H 'Content-type: application/json' --data '{"text":"<'$user'> accceptance test failure see below "}' $(get_env slack_web_hook_url) + echo " " + done + pipeline_url="https://cloud.ibm.com/devops/pipelines/tekton/${PIPELINE_ID}/runs/${PIPELINE_RUN_ID}" + curl -X POST -H 'Content-type: application/json' --data '{"text":"Your acceptance test failed."}' $(get_env slack_web_hook_url) "3.4.0" ]; then + echo "Executing cv lint command: 'cv lint olm-bundle -o lintOverrides.yaml --container-tool docker ${BUNDLE_IMAGE}'" + $WORK_DIR/cv lint olm-bundle -o lintOverrides.yaml --container-tool docker $BUNDLE_IMAGE + else + echo "Executing cv lint command: 'cv lint -o lintOverrides.yaml operator" + $WORK_DIR/cv lint -o lintOverrides.yaml operator + fi + echo "===========================================================================================================================" + echo "===========================================================================================================================" } 
parse_args() { while [ $# -gt 0 ]; do case "$1" in + --bundle-image) + shift + readonly BUNDLE_IMAGE="${1}" + ;; --git-token) shift readonly GIT_TOKEN="${1}" diff --git a/scripts/pipeline/twistlock-scan.sh b/scripts/pipeline/twistlock-scan.sh deleted file mode 100755 index baf8b33f6..000000000 --- a/scripts/pipeline/twistlock-scan.sh +++ /dev/null @@ -1,41 +0,0 @@ - #!/bin/bash -e - - function install_twistlock() { - DEBIAN_FRONTEND=noninteractive apt-get -y update && \ - DEBIAN_FRONTEND=noninteractive apt-get -y install uuid-runtime file jq && \ - wget --no-check-certificate https://w3twistlock.sos.ibm.com/download/tt_latest.zip && \ - unzip -l tt_latest.zip | grep linux_x86_64/tt | awk '{print $4}' | xargs unzip -j tt_latest.zip -d /usr/local/bin - chmod +x /usr/local/bin/tt -} - -# Install Twistlock -install_twistlock - -IBMCLOUD_API_KEY=$(get_env ibmcloud-api-key) - -# loop through listed artifact images and scan each amd64 image -for artifact_image in $(list_artifacts); do - IMAGE_LOCATION=$(load_artifact $artifact_image name) - ARCH=$(load_artifact $artifact_image arch) - - echo "image from load_artifact:" $IMAGE_LOCATION - echo "arch:" $ARCH - - if [[ -z ${IMAGE_LOCATION} ]]; then - continue - fi - - if [[ "$ARCH" != "amd64" ]]; then - echo $arch " images not supported by twistlock scanning, skipping" - continue - fi - - # The "pull" in "pull-and-scan" is a remote action. The image will be pulled and scanned on a remote server, and - # the results will be dumped to file here. 
- - # twistlock command - tt images pull-and-scan ${IMAGE_LOCATION} --iam-api-key $IBMCLOUD_API_KEY -u "$(get_env twistlock-user-id):$(get_env twistlock-api-key)" -g "websphere" - - # save the artifact - for i in twistlock-scan-results*; do save_result scan-artifact ${i}; done -done diff --git a/scripts/pipeline/va_scan b/scripts/pipeline/va_scan deleted file mode 100755 index cb1093833..000000000 --- a/scripts/pipeline/va_scan +++ /dev/null @@ -1,248 +0,0 @@ -#!/usr/bin/env bash - - -_toolchain_read() { - jq -r "$1" "$TOOLCHAIN_CONFIG_JSON" | tr -d '\n' -} - -ibmcloud_login() { - local -r ibmcloud_api=$(get_env cr-va-ibmcloud-api "https://cloud.ibm.com") - - ibmcloud config --check-version false - # Use `cr-va-ibmcloud-api-key` if present, if not, fall back to `ibmcloud-api-key` - local SECRET_PATH="/config/ibmcloud-api-key" - if [[ -s "/config/cr-va-ibmcloud-api-key" ]]; then - SECRET_PATH="/config/cr-va-ibmcloud-api-key" - fi - ibmcloud login -a "$ibmcloud_api" -r "$TOOLCHAIN_REGION" --apikey @"$SECRET_PATH" - ibmcloud target -g "$(get_env dev-resource-group)" -} - - -ibmcloud_region_set() { - ibmcloud cr region-set "$1" - ibmcloud cr info -} - -ibmcloud_image_inspect() { - input_image=$1 - if [[ $input_image =~ ^cp. ]]; then - input_image=$(echo "$input_image" | cut -d . -f2-) - fi - - echo -e "Details for image: $input_image" - ibmcloud cr image-inspect "$input_image" -} - -find_registry_region() { - # Find the ibmcloud container registry region - # https://cloud.ibm.com/docs/services/Registry?topic=registry-registry_overview#registry_regions_local - if [[ $1 =~ ^registry\.[a-z]*.bluemix.net$ ]]; then - # deprecated domain name - REGISTRY_REGION=$(echo "$1" | awk -F. '{print $2}') - if [ "$REGISTRY_REGION" == "ng" ]; then - export REGISTRY_REGION="us-south" - fi - elif [[ $1 == icr.io ]]; then - export REGISTRY_REGION="global" - else - REGISTRY_REGION=$(echo "$1" | awk -F. 
'{print $1}') - if [ "$REGISTRY_REGION" == "jp" ]; then - export REGISTRY_REGION="ap-north" - elif [ "$REGISTRY_REGION" == "au" ]; then - export REGISTRY_REGION="ap-south" - elif [ "$REGISTRY_REGION" == "de" ]; then - export REGISTRY_REGION="eu-central" - elif [ "$REGISTRY_REGION" == "uk" ]; then - export REGISTRY_REGION="uk-south" - elif [ "$REGISTRY_REGION" == "us" ]; then - export REGISTRY_REGION="us-south" - elif [ "$REGISTRY_REGION" == "stg" ]; then - export REGISTRY_REGION="us-south" - elif [ "$REGISTRY_REGION" == "jp2" ]; then - export REGISTRY_REGION="jp-osa" - elif [ "$REGISTRY_REGION" == "fr2" ]; then - export REGISTRY_REGION="eu-fr2" - elif [ "$REGISTRY_REGION" == "ca" ]; then - export REGISTRY_REGION="ca-tor" - else - echo "No IBM Cloud Container Registry region found for the registry url $1">&2 - #exit 1 - fi - fi -} - -check_va_scan_result() { - name=$1 - image=$2 - digest=$3 - - local input_image_url - input_image_url=$(echo "$image" | awk -F: '{print $1}') - - # Parse the image input to find information (region, namespace, image name, tag & digest/sha) - local input_registry_url - input_registry_url=$(echo "$input_image_url" | awk -F/ '{print $1}') - - find_registry_region "$input_registry_url" - - # Log container registry to the appropriate region - retry 5 10 ibmcloud_region_set "$REGISTRY_REGION" - - exit_code=$? - - if [ $exit_code -ne 0 ]; then - echo "Error during the region set. There might be an ibmcloud outage.">&2 - printf "\nFor further information check the documentation: https://pages.github.ibm.com/one-pipeline/docs/#/troubleshooting?id=general-troubleshooting-methods\n" >&2 - printf "\n:Slack channel of the devops-compliance: https://ibm-cloudplatform.slack.com/archives/CFQHG5PP1\n" >&2 - fi - - local pipeline_image_url="$input_image_url@$digest" - - # inspect the image to ensure it exists - retry 5 10 ibmcloud_image_inspect "${pipeline_image_url}" - - exit_code=$? - - if [ $exit_code -ne 0 ]; then - echo "Error during image inspect. 
There might be an ibmcloud outage.">&2 - printf "\nFor further information check the documentation: https://pages.github.ibm.com/one-pipeline/docs/#/troubleshooting?id=general-troubleshooting-methods\n" >&2 - printf "\n:Slack channel of the devops-compliance: https://ibm-cloudplatform.slack.com/archives/CFQHG5PP1\n" >&2 - fi - - va_report_json="${VA_SCAN_DIR}/${name}_va-report.json" - - # Loop until the scan has been performed - echo -e "Checking vulnerabilities in image: ${pipeline_image_url}">&2 - - retry_count=$(get_env "va-scan-retry-count" 30) - retry_sleep=$(get_env "va-scan-retry-sleep" 10) - - for ((iter = 1; iter < retry_count; iter++)); do - set +e - status="" - ibmcloud cr va -o json "${pipeline_image_url}" >"${va_report_json}" 2>/dev/null - # ibmcloud cr va returns a non valid json output if image not yet scanned - if jq -r -e '.[0].status' "${va_report_json}" >/dev/null 2>&1; then - status=$(jq -r '.[0].status' "${va_report_json}") - fi - if [ -z "$status" ]; then - status="UNSCANNED" - fi - set -e - - echo "VA scan status is ${status}">&2 - - # Possible status from Vulnerability Advisor: OK, WARN, FAIL, UNSUPPORTED, INCOMPLETE, UNSCANNED - # cf https://cloud.ibm.com/apidocs/container-registry/va#get-the-vulnerability-assessment-for-the-list-of-r - if [[ ${status} != "INCOMPLETE" && ${status} != "UNSCANNED" ]]; then - # status is one of the terminated scan action - break the loop - break - fi - - echo -e "${iter} STATUS ${status} : A vulnerability report was not found for the specified image.">&2 - echo "Either the image doesn't exist or the scan hasn't completed yet. 
">&2 - echo "Waiting ${retry_sleep}s for scan to complete...">&2 - - sleep "$retry_sleep" - done - - set +e - - echo "Showing extended vulnerability assessment report for ${pipeline_image_url}">&2 - ibmcloud cr va -e "${pipeline_image_url}" || true - - if [ -z "$status" ]; then - status="UNSCANNED" - fi - set -e - - export VA_REPORT_JSON=$va_report_json - export STATUS=$status -} - - -start_va_scan() { - name=$1 - image=$2 - digest=$3 - - if [[ $PIPELINE_DEBUG == 1 ]]; then - pwd - env - trap env EXIT - set -x - fi - - source "${ONE_PIPELINE_PATH}/tools/retry" - - mkdir -p "${WORKSPACE}/cr_va" - export VA_SCAN_DIR="${WORKSPACE}/cr_va" - - export TOOLCHAIN_CONFIG_JSON="/toolchain/toolchain.json" - export REGISTRY_REGION - export TOOLCHAIN_REGION - TOOLCHAIN_REGION=$(_toolchain_read '.region_id' | awk -F: '{print $3}') - - BREAK_GLASS=$(get_env break_glass "") - if [[ -n $BREAK_GLASS ]]; then - echo "Break-Glass mode is on, skipping the rest of the task...">&2 - exit 3 - fi - - retry 5 10 ibmcloud_login - - exit_code=$? - - if [ $exit_code -ne 0 ]; then - echo "Error during the ibmcloud login. 
There might be an ibmcloud outage.">&2 - printf "For further information check the documentation: https://pages.github.ibm.com/one-pipeline/docs/#/troubleshooting?id=general-troubleshooting-methods\n" >&2 - printf "Slack channel of the devops-compliance: https://ibm-cloudplatform.slack.com/archives/CFQHG5PP1\n" >&2 - fi - - # - # prepare results and statuses to report - # - ARTIFACT_SCAN_RESULTS_JSON_PATH="${WORKSPACE}/artifact-scan-report.json" - echo "[]" | jq '' >"${ARTIFACT_SCAN_RESULTS_JSON_PATH}" - - VA_SCAN_STATUSES_PATH="${VA_SCAN_DIR}/va_scan_statuses" - set_env VA_SCAN_STATUSES_PATH "${VA_SCAN_DIR}/va_scan_statuses" - - # - # Iterate over artifacts and check their VA scan status - # - - export VA_REPORT_JSON - export STATUS - - check_va_scan_result "$name" "$image" "$digest" - - # - # collect statuses - # - result="0" - - if [[ ${STATUS} == "OK" ]] || [[ ${STATUS} == "UNSUPPORTED" ]] || [[ ${STATUS} == "WARN" ]]; then - echo "The vulnerability scan status is ${STATUS}">&2 - echo "success" >>"$VA_SCAN_STATUSES_PATH" - else - echo "ERROR: The vulnerability scan was not successful (status being ${STATUS}).">&2 - echo "failure" >>"$VA_SCAN_STATUSES_PATH" - result="1" - fi - - # - # collect scan artifacts into a single artifact JSON file - # - save_result scan-artifact "${VA_REPORT_JSON}" - - # - # store result and attachment for asset-based evidence locker - # - stage_name="image_vulnerability_scan" - save_artifact "${name}" "${stage_name}-result=${result}" - save_result "${name}-${stage_name}-attachments" "${VA_REPORT_JSON}" - - cat "${ARTIFACT_SCAN_RESULTS_JSON_PATH}" -} diff --git a/scripts/pipeline/wait.sh b/scripts/pipeline/wait.sh new file mode 100755 index 000000000..b3b0de508 --- /dev/null +++ b/scripts/pipeline/wait.sh @@ -0,0 +1,54 @@ +#!/usr/bin/env bash + type=$1 + namespace=${2:-default} + + MAX_RETRIES=99 + + count=0 + + echo "Waiting for ${type} ${namespace} to be ready..." + kubectl get ${type} -n ${namespace} >/dev/null + + while [ $? 
-ne 0 ]; do + if [ $count -eq $MAX_RETRIES ]; then + echo "Timeout and exit due to maximum retires reached." + return 1 + fi + + count=$((count+1)) + + echo "Unable to get ${type} ${namespace}: retry ${count} of ${MAX_RETRIES}." + sleep 5s + kubectl get ${type} -n ${namespace} + done + + echo "The ${type} ${namespace} is ready." + + if [[ "${type}" == *"deploy"* ]]; then + echo "Waiting for deployment ${name} pods to be ready..." + count=0 + replicas="$(kubectl get deploy -n ${namespace} -o=jsonpath='{.items[*].status.readyReplicas}')" + readyReplicas="$(kubectl get deploy -n ${namespace} -o=jsonpath='{.items[*].status.replicas}')" + echo "replicas: $replicas,readyReplicas: $readyReplicas; Retry ${count} of ${MAX_RETRIES}." + + while true; + do + if [ "$replicas" = "$readyReplicas" ]; then + echo "all deployments ready" + exit 0 + fi + if [ $count -eq $MAX_RETRIES ]; then + echo "Timeout and exit due to maximum retires reached." + exit 1 + fi + + count=$((count+1)) + + echo "replicas: $replicas,readyReplicas: $readyReplicas; Retry ${count} of ${MAX_RETRIES}." + sleep 5s + replicas="$(kubectl get deploy -n ${namespace} -o=jsonpath='{.items[*].status.readyReplicas}')" + readyReplicas="$(kubectl get deploy -n ${namespace} -o=jsonpath='{.items[*].status.replicas}')" + done + + echo "All pods ready for deployment ${name}." 
+ fi diff --git a/scripts/pipeline/whitesource_unified_agent_scan.sh b/scripts/pipeline/whitesource_unified_agent_scan.sh deleted file mode 100755 index 95a043bd1..000000000 --- a/scripts/pipeline/whitesource_unified_agent_scan.sh +++ /dev/null @@ -1,193 +0,0 @@ -#!/usr/bin/env bash - -source "${ONE_PIPELINE_PATH}/internal/tools/logging" -SCRIPT_RC=0 - -# -# Get required properties from the environment properties -# -WS_APIKEY=$(get_env whitesource-org-token "") -WS_USERKEY=$(get_env whitesource-user-key "") -WS_PRODUCTNAME=$(get_env whitesource-product-name "") -WS_PRODUCTTOKEN=$(get_env whitesource-product-token "") -WS_PROJECTNAME=$(get_env whitesource-project-name "") - -# Check that all required properties/keys/tokens are provided -if [ -z "$WS_APIKEY" ] || [ -z "$WS_USERKEY" ] || [ -z "$WS_PRODUCTNAME" ] || [ -z "$WS_PROJECTNAME" ]; then - error "'whitesource-org-token', 'whitesource-user-key', 'whitesource-product-name', and 'whitesource-project-name' are required properties." - SCRIPT_RC=1 -fi - -# get optional properties -WS_SERVER_URL=$(get_env whitesource-server-url "https://ibmets.whitesourcesoftware.com") -WS_PRINT_SCAN_RESULTS=$(get_env whitesource-print-scan-results "") -WS_JAR_URL=$(get_env wS_jar_url "https://unified-agent.s3.amazonaws.com/wss-unified-agent.jar") - -# If user overrode the whitesource server property, make sure it isn't an empty string -if [ -z "$WS_SERVER_URL" ]; then - error "'whitesource-server-url' cannot be empty." 
- SCRIPT_RC=1 -fi - -if ((SCRIPT_RC==0)); then - # Download the whitesource unified agent jar we will use to execute the scan - curl -LJO "$WS_JAR_URL" - - # Export environment variables required by the scanner - export WS_APIKEY - export WS_USERKEY - export WS_PRODUCTNAME - export WS_PROJECTNAME - export WS_SERVER_URL - export WS_WSS_URL=${WS_SERVER_URL}/agent - - # Create the base results directory relative to workspace - WHITESOURCE_SCAN_RESULTS_DIR=${WORKSPACE}/whitesource - mkdir -p "$WHITESOURCE_SCAN_RESULTS_DIR" - - # Set default scan status we will pass to collect-evidence - SCAN_STATUS="success" -fi - -if ((SCRIPT_RC==0)); then - # Iterate over repos that were registered to the pipeline by the save_repo of the pipelinectl tool. - while read -r REPO ; do - REPO_PATH="$(load_repo "${REPO}" path)" - REPO_URL="$(load_repo "${REPO}" url)" - - # WS_PROJECTTOKEN needs to be set AFTER the jar invocation, scan will fail if both project name and project token are set. - unset WS_PROJECTTOKEN - - EVIDENCE_PARAMS=( - --tool-type "whitesource" \ - --evidence-type "com.ibm.code_vulnerability_scan" \ - --asset-type "repo" \ - --asset-key "${REPO}" - ) - - collect-evidence ${EVIDENCE_PARAMS[@]} --status "pending" - - # Execute the scan - banner "Executing Whitesource Unified Agent scan against $REPO ($REPO_URL)" - WHITESOURCE_SCAN_LOG="${WHITESOURCE_SCAN_RESULTS_DIR}/$REPO_PATH-ws_scan_output.log" - SCAN_START_TIME=$SECONDS - java -jar wss-unified-agent.jar -d "$WORKSPACE/$REPO_PATH" > "$WHITESOURCE_SCAN_LOG" - SCAN_RC=$? - ELAPSED_TIME=$(( SECONDS - SCAN_START_TIME )) - debug " scan completed in $ELAPSED_TIME seconds" - - if ((SCAN_RC==0)); then - # - # Get the project token programmatically via API calls for the project name. - # Only do this once; once we have the project token variable set, don't execute this loop again. 
- # - PROJECTTOKEN="" - if [ -z "$PROJECTTOKEN" ]; then - body="{ - \"requestType\": \"getProductProjectTags\", - \"userKey\": \"${WS_USERKEY}\", - \"productToken\": \"${WS_PRODUCTTOKEN}\" - }" - - PROJECT_QUERY_RESULTS="${WHITESOURCE_SCAN_RESULTS_DIR}/projects.json" - PROJECT_QUERY_LOG="${WHITESOURCE_SCAN_RESULTS_DIR}/projects_query.log" - curl -X POST -H "Content-Type: application/json" -d "$body" "${WS_SERVER_URL}/api/v1.3" -o "$PROJECT_QUERY_RESULTS" >> "$PROJECT_QUERY_LOG" 2>&1 - - if [ -e "$PROJECT_QUERY_RESULTS" ]; then - NUM_PROJECT_RESULTS=$(jq '.projectTags | length' "$PROJECT_QUERY_RESULTS") - for (( RESULTS_ROW_NUM=0; RESULTS_ROW_NUM> "$WHITESOURCE_SCAN_LOG" 2>&1 - - if [ -e "$WHITESOURCE_SCAN_RESULTS" ]; then - - if [ -n "$WS_PRINT_SCAN_RESULTS" ]; then - banner "=== Scan results for $REPO ($REPO_URL) ===" - cat "$WHITESOURCE_SCAN_RESULTS" | jq - fi - - debug " saved scan results file $WHITESOURCE_SCAN_RESULTS" - EVIDENCE_PARAMS+=(--attachment "${WHITESOURCE_SCAN_RESULTS}") - - else - SCRIPT_RC=1 - error " Whitesource Unified Agent scan results could not be fetched" - banner "==================== SCAN LOG ====================" - cat "$WHITESOURCE_SCAN_LOG" - EVIDENCE_PARAMS+=(--attachment "${WHITESOURCE_SCAN_LOG}") - fi - else - # we were not able to query the project token - banner "==================== PROJECT QUERY LOG ====================" - cat "$PROJECT_QUERY_LOG" - banner "==================== SCAN LOG ====================" - cat "$WHITESOURCE_SCAN_LOG" - EVIDENCE_PARAMS+=(--attachment "${WHITESOURCE_SCAN_LOG}") - EVIDENCE_PARAMS+=(--attachment "${PROJECT_QUERY_LOG}") - fi - else - # scan returned a non-zero return code - SCRIPT_RC=$SCAN_RC - error " Whitesource Unified Agent scan returned exit code $SCAN_RC" - banner "==================== SCAN LOG ====================" - cat "$WHITESOURCE_SCAN_LOG" - EVIDENCE_PARAMS+=(--attachment "${WHITESOURCE_SCAN_LOG}") - fi - # - # report evidence using `collect-evidence` - # - - if ((SCRIPT_RC>0)); then - 
SCAN_STATUS="failure" - fi - - EVIDENCE_PARAMS+=( - --status "${SCAN_STATUS}" - ) - collect-evidence "${EVIDENCE_PARAMS[@]}" - - done < <(list_repos) -else - EVIDENCE_PARAMS=( - --tool-type "whitesource" \ - --evidence-type "com.ibm.code_vulnerability_scan" \ - --status "failure" - ) - collect-evidence "${EVIDENCE_PARAMS[@]}" -fi - -if ((SCRIPT_RC>0)); then - exit $SCRIPT_RC -fi \ No newline at end of file diff --git a/scripts/release-blocklist.txt b/scripts/release-blocklist.txt deleted file mode 100644 index d3141057a..000000000 --- a/scripts/release-blocklist.txt +++ /dev/null @@ -1,14 +0,0 @@ -v0.0.1 -v0.3.0 -v0.4.0 -v0.4.1 -v0.4.2 -v0.5.0 -v0.5.1 -v0.6.0 -v0.7.0 -v0.7.1 -v0.8.0 -v0.8.0-20211124-0830 -v0.8.0-20211124-1750 -v0.8.0-rc.1 \ No newline at end of file diff --git a/scripts/setup-kind-cluster.sh b/scripts/setup-kind-cluster.sh index e44272937..e13376520 100755 --- a/scripts/setup-kind-cluster.sh +++ b/scripts/setup-kind-cluster.sh @@ -31,8 +31,8 @@ install_dependencies() { ## Install kubectl if ! command -v kubectl &> /dev/null; then - echo "****** Installing kubectl v1.23.12..." - curl -Lo /usr/local/bin/kubectl https://storage.googleapis.com/kubernetes-release/release/v1.23.12/bin/linux/amd64/kubectl && chmod +x /usr/local/bin/kubectl + echo "****** Installing kubectl v1.24.2..." + curl -Lo /usr/local/bin/kubectl https://storage.googleapis.com/kubernetes-release/release/v1.24.2/bin/linux/amd64/kubectl && chmod +x /usr/local/bin/kubectl fi # Install kind @@ -77,7 +77,7 @@ create_kind_cluster() { if [[ -z "$(kind get clusters | grep e2e-cluster)" ]]; then # Create a cluster with the local registry enabled in containerd - cat << EOF | kind create cluster --name e2e-cluster --image kindest/node:v1.23.12 --config=- + cat << EOF | kind create cluster --name e2e-cluster --image kindest/node:v1.24.2 --config=- kind: Cluster apiVersion: kind.x-k8s.io/v1alpha4 nodes: