diff --git a/.github/workflows/continuous-integration-workflow.yml b/.github/workflows/continuous-integration-workflow.yml index 13201ee49..7e8b4ab4a 100644 --- a/.github/workflows/continuous-integration-workflow.yml +++ b/.github/workflows/continuous-integration-workflow.yml @@ -122,7 +122,7 @@ jobs: name: Download golangci-lint run: | curl -sSfL --output /tmp/golangci-lint.sh https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh - cat /tmp/golangci-lint.sh | sh -s -- -b $(go env GOPATH)/bin v1.46.2 + cat /tmp/golangci-lint.sh | sh -s -- -b $(go env GOPATH)/bin v1.49.0 - name: Run linter working-directory: jenkins/webhook-proxy @@ -201,7 +201,7 @@ jobs: name: Verify all Go tests pass linting uses: golangci/golangci-lint-action@v3 with: - version: v1.46.2 + version: v1.49.0 working-directory: tests args: --timeout=10m - diff --git a/CHANGELOG.md b/CHANGELOG.md index 884aee7d3..a7dca85c7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,13 @@ ## Unreleased +## [4.1.1] - 2022-11-24 + +- Fix CI/CD problems in Jenkins pipelines ([#1177](https://github.com/opendevstack/ods-core/pull/1177)) +- Fixes Python agent does not seems to have java in the path ([#685](https://github.com/opendevstack/ods-quickstarters/issues/685)) +- Removes existing differences between jenkins agent base image in Centos 7 and UBI 8 ([#1181](https://github.com/opendevstack/ods-core/pull/1181)) +- Upgrade to Java 11 Jenkins master and agents and increase logging to know why they sometimes die ([#1185](https://github.com/opendevstack/ods-core/pull/1185)) + ## [4.1] - 2022-11-17 - Create Dockerfile.rhel7 and Dockerfile.centos7 with respectives plugins.rhel7.txt and plugins.centos7.txt definitions ([1000](https://github.com/opendevstack/ods-core/issues/1000)) diff --git a/README.md b/README.md index fd2fe0ad2..99823a252 100644 --- a/README.md +++ b/README.md @@ -48,3 +48,12 @@ b) inside the [tests](tests) directory.

The tests can be started with `make test`.

# 1. [ODS Development Environment / ODS in a box](ods-devenv)
ODS also ships as Amazon AMI - ready to go. The scripts to create the AMI can be found in ods-devenv. These scripts can also be used to install a `developer` version of ODS on a plain Linux VM. Simply execute [bootstrap.sh](ods-devenv/scripts/bootstrap.sh) + +## Current AMI build logs +The log files contain color coding; they are best viewed using a tool supporting color coding, like tail. E.g.: + + +``` +# after untarring view the log file 'current' like so: +tail -fn +1 current +``` diff --git a/create-projects/create-cd-jenkins.sh b/create-projects/create-cd-jenkins.sh index adb33a67a..afed9add8 100755 --- a/create-projects/create-cd-jenkins.sh +++ b/create-projects/create-cd-jenkins.sh @@ -7,7 +7,7 @@ set -eu # As this script is executed within the context of Jenkins, which has some # env vars exposed (via the DeploymentConfig, but also from inside the image). # It might be surprising to have them alter what the script does without seeing -# them passed/set in the Jenkinsfile. That's why we reset all env vars here +# them passed/set in the Jenkinsfile_createBuildBot. That's why we reset all env vars here # and require them to be passed as parameters to the script. TAILOR="tailor" diff --git a/create-projects/create-projects.sh b/create-projects/create-projects.sh index 8de3d5fd1..37a5c3752 100755 --- a/create-projects/create-projects.sh +++ b/create-projects/create-projects.sh @@ -4,7 +4,7 @@ set -e # As this script is executed within the context of Jenkins, which has some # env vars exposed (via the DeploymentConfig, but also from inside the image). # It might be surprising to have them alter what the script does without seeing -# them passed/set in the Jenkinsfile. That's why we reset all env vars here +# them passed/set in the Jenkinsfile_createBuildBot. That's why we reset all env vars here # and require them to be passed as parameters to the script. 
PROJECT_ID="" diff --git a/jenkins/agent-base/Dockerfile.centos7 b/jenkins/agent-base/Dockerfile.centos7 index 48b8137c5..e5dff4a8e 100644 --- a/jenkins/agent-base/Dockerfile.centos7 +++ b/jenkins/agent-base/Dockerfile.centos7 @@ -10,29 +10,43 @@ ENV SONAR_SCANNER_VERSION=3.1.0.1141 \ HELM_PLUGIN_SECRETS_VERSION=3.3.5 \ GIT_LFS_VERSION=2.6.1 \ SKOPEO_VERSION=0.1.37-3 \ - OSTREE_VERSION=2018.5-1 + OSTREE_VERSION=2018.5-1 \ + JNLP_JAVA_OPTIONS="-XX:+UseG1GC -XX:+ExplicitGCInvokesConcurrent -XX:+ParallelRefProcEnabled -XX:+UseStringDeduplication -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Dsun.zip.disableMemoryMapping=true" ARG APP_DNS ARG SNYK_DISTRIBUTION_URL ARG AQUASEC_SCANNERCLI_URL -RUN yum -y install \ - openssl \ - && yum clean all \ - && rm -rf /var/cache/yum/* - -ENV JAVA_HOME=/usr/lib/jvm/jre +# Workaroud we use when running docker build behind proxy +# Basically we put the proxy certificates in certs folder +# COPY certs/* /etc/pki/ca-trust/source/anchors/ +# RUN update-ca-trust force-enable && update-ca-trust extract -RUN yum -y install java-1.8.0-openjdk-devel.x86_64 \ +COPY ensure_java_jre_is_adequate.sh /usr/local/bin/ +RUN rm -fv /etc/yum.repos.d/CentOS-Media.repo /etc/yum.repos.d/origin-local-release.repo \ + && ensure_java_jre_is_adequate.sh \ + && yum -y install openssl \ + && yum -y update \ && yum clean all \ && rm -rf /var/cache/yum/* +# Copy use java scripts. +COPY use-j*.sh /usr/local/bin/ +RUN chmod +x /usr/local/bin/use-j*.sh && \ + chmod ugo+s /usr/local/bin/use-j*.sh && \ + sh -c 'chmod ugo+s $(which alternatives)' && \ + ls -la /usr/local/bin/use-j*.sh && \ + echo "--- STARTS JDK 11 TESTS ---" && \ + use-j11.sh && \ + echo "--- ENDS JDK 11 TESTS ---" + COPY ./import_certs.sh /usr/local/bin/import_certs.sh -RUN import_certs.sh +COPY ./fix_java_certs_permissions.sh /usr/local/bin/fix_java_certs_permissions.sh +RUN import_certs.sh && fix_java_certs_permissions.sh # Install Sonar Scanner. 
RUN cd /tmp \ - && curl -LOv https://repo1.maven.org/maven2/org/sonarsource/scanner/cli/sonar-scanner-cli/${SONAR_SCANNER_VERSION}/sonar-scanner-cli-${SONAR_SCANNER_VERSION}.zip \ + && curl -sSLOv https://repo1.maven.org/maven2/org/sonarsource/scanner/cli/sonar-scanner-cli/${SONAR_SCANNER_VERSION}/sonar-scanner-cli-${SONAR_SCANNER_VERSION}.zip \ && unzip sonar-scanner-cli-${SONAR_SCANNER_VERSION}.zip \ && mv sonar-scanner-${SONAR_SCANNER_VERSION} /usr/local/sonar-scanner-cli \ && rm -rf sonar-scanner-cli-${SONAR_SCANNER_VERSION}.zip \ @@ -41,14 +55,14 @@ ENV PATH=/usr/local/sonar-scanner-cli/bin:$PATH # Add sq cnes report jar. RUN cd /tmp \ - && curl -Lv https://github.com/cnescatlab/sonar-cnes-report/releases/download/${CNES_REPORT_VERSION}/sonar-cnes-report-${CNES_REPORT_VERSION}.jar -o cnesreport.jar \ + && curl -sSLv https://github.com/cnescatlab/sonar-cnes-report/releases/download/${CNES_REPORT_VERSION}/sonar-cnes-report-${CNES_REPORT_VERSION}.jar -o cnesreport.jar \ && mkdir /usr/local/cnes \ && mv cnesreport.jar /usr/local/cnes/cnesreport.jar \ && chmod 777 /usr/local/cnes/cnesreport.jar # Install Tailor. RUN cd /tmp \ - && curl -LOv https://github.com/opendevstack/tailor/releases/download/v${TAILOR_VERSION}/tailor-linux-amd64 \ + && curl -sSLOv https://github.com/opendevstack/tailor/releases/download/v${TAILOR_VERSION}/tailor-linux-amd64 \ && mv tailor-linux-amd64 /usr/local/bin/tailor \ && chmod a+x /usr/local/bin/tailor \ && tailor version @@ -56,12 +70,11 @@ RUN cd /tmp \ # Install Helm. 
RUN cd /tmp \ && mkdir -p /tmp/helm \ - && curl -LO https://get.helm.sh/helm-v${HELM_VERSION}-linux-amd64.tar.gz \ + && curl -sSLO https://get.helm.sh/helm-v${HELM_VERSION}-linux-amd64.tar.gz \ && tar -zxvf helm-v${HELM_VERSION}-linux-amd64.tar.gz -C /tmp/helm \ && mv /tmp/helm/linux-amd64/helm /usr/local/bin/helm \ && chmod a+x /usr/local/bin/helm \ && helm version \ - && helm env \ && helm plugin install https://github.com/databus23/helm-diff --version v${HELM_PLUGIN_DIFF_VERSION} \ && helm plugin install https://github.com/jkroepke/helm-secrets --version v${HELM_PLUGIN_SECRETS_VERSION} \ && sops --version \ @@ -70,7 +83,7 @@ RUN cd /tmp \ # Install GIT-LFS extension https://git-lfs.github.com/. RUN cd /tmp \ && mkdir -p /tmp/git-lfs \ - && curl -LOv https://github.com/git-lfs/git-lfs/releases/download/v${GIT_LFS_VERSION}/git-lfs-linux-amd64-v${GIT_LFS_VERSION}.tar.gz \ + && curl -sSLOv https://github.com/git-lfs/git-lfs/releases/download/v${GIT_LFS_VERSION}/git-lfs-linux-amd64-v${GIT_LFS_VERSION}.tar.gz \ && tar -zxvf git-lfs-linux-amd64-v${GIT_LFS_VERSION}.tar.gz -C /tmp/git-lfs \ && bash /tmp/git-lfs/install.sh \ && git lfs version \ @@ -78,7 +91,7 @@ RUN cd /tmp \ # Optionally install snyk. RUN if [ -z $SNYK_DISTRIBUTION_URL ] ; then echo 'Skipping snyk installation!' ; else echo 'Installing snyk... getting binary from' $SNYK_DISTRIBUTION_URL \ - && curl -Lv $SNYK_DISTRIBUTION_URL --output snyk \ + && curl -sSLv $SNYK_DISTRIBUTION_URL --output snyk \ && mv snyk /usr/local/bin \ && chmod +rwx /usr/local/bin/snyk \ && mkdir -p $HOME/.config/configstore/ \ @@ -90,7 +103,7 @@ RUN if [ -z $SNYK_DISTRIBUTION_URL ] ; then echo 'Skipping snyk installation!' ; # Optionally install Aquasec. RUN if [ -z $AQUASEC_SCANNERCLI_URL ] ; then echo 'Skipping AquaSec installation!' ; else echo 'Installing AquaSec... 
getting binary from' $AQUASEC_SCANNERCLI_URL \ - && wget $AQUASEC_SCANNERCLI_URL -O aquasec \ + && curl -sSL $AQUASEC_SCANNERCLI_URL --output aquasec \ && mv aquasec /usr/local/bin \ && chmod +rwx /usr/local/bin/aquasec \ && echo 'AquaSec CLI version:' \ @@ -102,7 +115,14 @@ RUN if [ -z $AQUASEC_SCANNERCLI_URL ] ; then echo 'Skipping AquaSec installation COPY set_java_proxy.sh /tmp/set_java_proxy.sh RUN . /tmp/set_java_proxy.sh && echo $JAVA_OPTS -RUN mv /usr/local/bin/run-jnlp-client /usr/local/bin/openshift-run-jnlp-client +# The following line fix incorrect behaviours in the base image. +# It is setting the variable JAVA_TOOL_OPTIONS while it should not. +# Besides, we need to know if this variable has not been set. +# It is a problem very difficult to detect... +COPY fix_openshift_run_jnlp_client.sh /usr/local/bin/fix_openshift_run_jnlp_client.sh +RUN mv /usr/local/bin/run-jnlp-client /usr/local/bin/openshift-run-jnlp-client \ + && fix_openshift_run_jnlp_client.sh /usr/local/bin/openshift-run-jnlp-client + COPY ods-run-jnlp-client.sh /usr/local/bin/run-jnlp-client # Add skopeo. 
@@ -119,4 +139,3 @@ RUN mkdir -p /home/jenkins/.config && chmod -R g+w /home/jenkins/.config \ && mkdir -p /home/jenkins/.cache && chmod -R g+w /home/jenkins/.cache \ && mkdir -p /home/jenkins/.sonar && chmod -R g+w /home/jenkins/.sonar -RUN chmod g+w $JAVA_HOME/lib/security/cacerts diff --git a/jenkins/agent-base/Dockerfile.ubi8 b/jenkins/agent-base/Dockerfile.ubi8 index b1594f8d1..dcd157f0e 100644 --- a/jenkins/agent-base/Dockerfile.ubi8 +++ b/jenkins/agent-base/Dockerfile.ubi8 @@ -9,7 +9,7 @@ ENV SONAR_SCANNER_VERSION=3.1.0.1141 \ HELM_PLUGIN_DIFF_VERSION=3.3.2 \ HELM_PLUGIN_SECRETS_VERSION=3.3.5 \ GIT_LFS_VERSION=2.6.1 \ - JAVA_HOME=/usr/lib/jvm/jre + JNLP_JAVA_OPTIONS="-XX:+UseG1GC -XX:+ExplicitGCInvokesConcurrent -XX:+ParallelRefProcEnabled -XX:+UseStringDeduplication -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Dsun.zip.disableMemoryMapping=true" ARG APP_DNS ARG SNYK_DISTRIBUTION_URL @@ -18,17 +18,35 @@ ARG AQUASEC_SCANNERCLI_URL # Add UBI repositories. COPY yum.repos.d/ubi.repo /etc/yum.repos.d/ubi.repo +COPY ensure_java_jre_is_adequate.sh /usr/local/bin/ RUN cd /etc/yum.repos.d && rm -f localdev-* ci-rpm-mirrors.repo \ + && ensure_java_jre_is_adequate.sh \ && yum -y install make glibc-langpack-en openssl \ + && yum -y update \ && yum clean all \ && rm -rf /var/cache/yum/* +# +# WARNING: We do not install java 8 nor java 11 in this image because they are already intalled in it. +# + +# Copy use java scripts. 
+COPY use-j*.sh /usr/local/bin/ +RUN chmod +x /usr/local/bin/use-j*.sh && \ + chmod ugo+s /usr/local/bin/use-j*.sh && \ + sh -c 'chmod ugo+s $(which alternatives)' && \ + ls -la /usr/local/bin/use-j*.sh && \ + echo "--- STARTS JDK 11 TESTS ---" && \ + use-j11.sh && \ + echo "--- ENDS JDK 11 TESTS ---" + COPY ./import_certs.sh /usr/local/bin/import_certs.sh -RUN import_certs.sh +COPY ./fix_java_certs_permissions.sh /usr/local/bin/fix_java_certs_permissions.sh +RUN import_certs.sh && fix_java_certs_permissions.sh # Install Sonar Scanner. RUN cd /tmp \ - && curl -LO https://repo1.maven.org/maven2/org/sonarsource/scanner/cli/sonar-scanner-cli/${SONAR_SCANNER_VERSION}/sonar-scanner-cli-${SONAR_SCANNER_VERSION}.zip \ + && curl -sSLO https://repo1.maven.org/maven2/org/sonarsource/scanner/cli/sonar-scanner-cli/${SONAR_SCANNER_VERSION}/sonar-scanner-cli-${SONAR_SCANNER_VERSION}.zip \ && unzip sonar-scanner-cli-${SONAR_SCANNER_VERSION}.zip \ && mv sonar-scanner-${SONAR_SCANNER_VERSION} /usr/local/sonar-scanner-cli \ && rm -rf sonar-scanner-cli-${SONAR_SCANNER_VERSION}.zip \ @@ -37,14 +55,14 @@ ENV PATH=/usr/local/sonar-scanner-cli/bin:$PATH # Add sq cnes report jar. RUN cd /tmp \ - && curl -L https://github.com/cnescatlab/sonar-cnes-report/releases/download/${CNES_REPORT_VERSION}/sonar-cnes-report-${CNES_REPORT_VERSION}.jar -o cnesreport.jar \ + && curl -sSL https://github.com/cnescatlab/sonar-cnes-report/releases/download/${CNES_REPORT_VERSION}/sonar-cnes-report-${CNES_REPORT_VERSION}.jar -o cnesreport.jar \ && mkdir /usr/local/cnes \ && mv cnesreport.jar /usr/local/cnes/cnesreport.jar \ && chmod 777 /usr/local/cnes/cnesreport.jar # Install Tailor. 
RUN cd /tmp \ - && curl -LO https://github.com/opendevstack/tailor/releases/download/v${TAILOR_VERSION}/tailor-linux-amd64 \ + && curl -sSLO https://github.com/opendevstack/tailor/releases/download/v${TAILOR_VERSION}/tailor-linux-amd64 \ && mv tailor-linux-amd64 /usr/local/bin/tailor \ && chmod a+x /usr/local/bin/tailor \ && tailor version @@ -52,7 +70,7 @@ RUN cd /tmp \ # Install Helm. RUN cd /tmp \ && mkdir -p /tmp/helm \ - && curl -LO https://get.helm.sh/helm-v${HELM_VERSION}-linux-amd64.tar.gz \ + && curl -sSLO https://get.helm.sh/helm-v${HELM_VERSION}-linux-amd64.tar.gz \ && tar -zxvf helm-v${HELM_VERSION}-linux-amd64.tar.gz -C /tmp/helm \ && mv /tmp/helm/linux-amd64/helm /usr/local/bin/helm \ && chmod a+x /usr/local/bin/helm \ @@ -66,7 +84,7 @@ RUN cd /tmp \ # Install GIT-LFS extension https://git-lfs.github.com/. RUN cd /tmp \ && mkdir -p /tmp/git-lfs \ - && curl -LO https://github.com/git-lfs/git-lfs/releases/download/v${GIT_LFS_VERSION}/git-lfs-linux-amd64-v${GIT_LFS_VERSION}.tar.gz \ + && curl -sSLO https://github.com/git-lfs/git-lfs/releases/download/v${GIT_LFS_VERSION}/git-lfs-linux-amd64-v${GIT_LFS_VERSION}.tar.gz \ && tar -zxvf git-lfs-linux-amd64-v${GIT_LFS_VERSION}.tar.gz -C /tmp/git-lfs \ && bash /tmp/git-lfs/install.sh \ && git lfs version \ @@ -74,7 +92,7 @@ RUN cd /tmp \ # Optionally install snyk. RUN if [ -z $SNYK_DISTRIBUTION_URL ] ; then echo 'Skipping snyk installation!' ; else echo 'Installing snyk... getting binary from' $SNYK_DISTRIBUTION_URL \ - && curl -L $SNYK_DISTRIBUTION_URL --output snyk \ + && curl -sSL $SNYK_DISTRIBUTION_URL --output snyk \ && mv snyk /usr/local/bin \ && chmod +rwx /usr/local/bin/snyk \ && mkdir -p $HOME/.config/configstore/ \ @@ -86,7 +104,7 @@ RUN if [ -z $SNYK_DISTRIBUTION_URL ] ; then echo 'Skipping snyk installation!' ; # Optionally install Aquasec. RUN if [ -z $AQUASEC_SCANNERCLI_URL ] ; then echo 'Skipping AquaSec installation!' ; else echo 'Installing AquaSec... 
getting binary from' $AQUASEC_SCANNERCLI_URL \ - && wget $AQUASEC_SCANNERCLI_URL -O aquasec \ + && curl -sSL $AQUASEC_SCANNERCLI_URL --output aquasec \ && mv aquasec /usr/local/bin \ && chmod +rwx /usr/local/bin/aquasec \ && echo 'AquaSec CLI version:' \ @@ -99,7 +117,10 @@ COPY set_java_proxy.sh /tmp/set_java_proxy.sh RUN . /tmp/set_java_proxy.sh && echo $JAVA_OPTS # Customize entrypoint. -RUN mv /usr/local/bin/run-jnlp-client /usr/local/bin/openshift-run-jnlp-client +COPY fix_openshift_run_jnlp_client.sh /usr/local/bin/fix_openshift_run_jnlp_client.sh +RUN mv /usr/local/bin/run-jnlp-client /usr/local/bin/openshift-run-jnlp-client \ + && fix_openshift_run_jnlp_client.sh /usr/local/bin/openshift-run-jnlp-client + COPY ods-run-jnlp-client.sh /usr/local/bin/run-jnlp-client # Add skopeo. @@ -113,4 +134,3 @@ RUN mkdir -p /home/jenkins/.config && chmod -R g+w /home/jenkins/.config \ && mkdir -p /home/jenkins/.cache && chmod -R g+w /home/jenkins/.cache \ && mkdir -p /home/jenkins/.sonar && chmod -R g+w /home/jenkins/.sonar -RUN chmod g+w $JAVA_HOME/lib/security/cacerts diff --git a/jenkins/agent-base/ensure_java_jre_is_adequate.sh b/jenkins/agent-base/ensure_java_jre_is_adequate.sh new file mode 100755 index 000000000..e47617519 --- /dev/null +++ b/jenkins/agent-base/ensure_java_jre_is_adequate.sh @@ -0,0 +1,67 @@ +#!/bin/bash +set -eu -o pipefail + +ME="$(basename $0)" +JAVA_INSTALLED_PKGS_LOGS="/tmp/java_installed_pkgs.log" +JAVA_11_INSTALLED_PKGS_LOGS="/tmp/java_11_installed_pkgs.log" +rm -fv ${JAVA_INSTALLED_PKGS_LOGS} ${JAVA_11_INSTALLED_PKGS_LOGS} + +NEEDS_DEVEL=${1-""} +PKG_NAME_TAIL="headless" +if [ ! -z "${NEEDS_DEVEL}" ] && [ "" != "${NEEDS_DEVEL}" ]; then + NEEDS_DEVEL="true" + PKG_NAME_TAIL="devel" +else + NEEDS_DEVEL="false" + PKG_NAME_TAIL="headless" +fi + +echo "${ME}: Needs development packages? 
${NEEDS_DEVEL}" +echo " " +echo "${ME}: Listing versions of java installed: " +yum list installed | grep -i "\(java\|jre\)" | tee -a ${JAVA_INSTALLED_PKGS_LOGS} +touch ${JAVA_11_INSTALLED_PKGS_LOGS} +grep -i "java-11" ${JAVA_INSTALLED_PKGS_LOGS} > ${JAVA_11_INSTALLED_PKGS_LOGS} || echo "No java 11 packages found." + +NEEDS_INSTALLATION="true" +if [ -f ${JAVA_11_INSTALLED_PKGS_LOGS} ]; then + if grep -qi "${PKG_NAME_TAIL}" ${JAVA_11_INSTALLED_PKGS_LOGS} ; then + NEEDS_INSTALLATION="false" + fi +fi + +# We need devel package in masters to have jar binary. +if [ "true" == "${NEEDS_INSTALLATION}" ]; then + echo "${ME}:Java-11 is *not* installed. Installing..." + if [ "true" == "${NEEDS_DEVEL}" ]; then + yum -y install java-11-openjdk-devel + else + yum -y install java-11-openjdk-headless + fi +else + echo "${ME}: Java-11 is already installed." +fi + +if grep -qi "java-1.8" ${JAVA_INSTALLED_PKGS_LOGS} ; then + echo "${ME}: Java-8 is installed. Removing..." + yum -y remove java-1.8* +else + echo "${ME}: Java-8 is not installed. Correct." +fi + +rm -fv ${JAVA_INSTALLED_PKGS_LOGS} ${JAVA_11_INSTALLED_PKGS_LOGS} + +echo " " +echo "${ME}: Checking java tool versions: " +if [ "true" == "${NEEDS_DEVEL}" ]; then + jar --version +fi + +NO_JAVA_LINK="false" +java -version || NO_JAVA_LINK="true" +if [ "true" == "${NO_JAVA_LINK}" ]; then + JAVA_HOME_FOLDER=$(ls -lah /usr/lib/jvm | grep "java-11-openjdk-11.*\.x86_64" | awk '{print $NF}' | head -1) + JAVA_HOME="/usr/lib/jvm/${JAVA_HOME_FOLDER}" + alternatives --set java ${JAVA_HOME}/bin/java +fi +java -version diff --git a/jenkins/agent-base/fix_java_certs_permissions.sh b/jenkins/agent-base/fix_java_certs_permissions.sh new file mode 100755 index 000000000..5ec50efe6 --- /dev/null +++ b/jenkins/agent-base/fix_java_certs_permissions.sh @@ -0,0 +1,20 @@ +#!/bin/bash +set -eu + +# Initialize JAVA_HOME if not set. 
+JAVA_HOME=${JAVA_HOME:-""} + +if [ -f /etc/profile.d/set-default-java.sh ]; then + source /etc/profile.d/set-default-java.sh +else + echo "WARNING: Not setting default java version." +fi + +echo "Trying to setup correct permissions for cacerts folder... " +if [ ! -z "${JAVA_HOME}" ] && [ "" != "${JAVA_HOME}" ]; then + chown -c 1001:0 $JAVA_HOME/lib/security/cacerts + chmod -c g+w $JAVA_HOME/lib/security/cacerts +else + echo "WARNING: Cannot apply permissions 'chmod g+w' to JAVA_HOME/lib/security/cacerts " + echo "WARNING: JAVA_HOME=${JAVA_HOME}" +fi diff --git a/jenkins/agent-base/fix_openshift_run_jnlp_client.sh b/jenkins/agent-base/fix_openshift_run_jnlp_client.sh new file mode 100755 index 000000000..60038ecb0 --- /dev/null +++ b/jenkins/agent-base/fix_openshift_run_jnlp_client.sh @@ -0,0 +1,22 @@ +#!/bin/bash +set -eu -o pipefail + +FILEPATH=${1-"/usr/local/bin/openshift-run-jnlp-client"} + +if [ ! -f ${FILEPATH} ]; then + echo " " + echo "ERROR: File does not exist: ${FILEPATH}" + echo " " + exit 1 +fi + +sed -i 's|\#\!/bin/bash|\#\!/bin/bash -x|g' ${FILEPATH} +sed -i 's|^\(\s*\)set\s*-x\s*$|\1set -x\n\1echo "JNLP_JAVA_OPTIONS=\$JNLP_JAVA_OPTIONS"|g' ${FILEPATH} +sed -i "s|^\(\s*\)JAVA_TOOL_OPTIONS\s*=.*|\1JAVA_TOOL_OPTIONS=|g" ${FILEPATH} +sed -i 's|^\(\s*\)JAVA_GC_OPTS\s*=.*|\1JAVA_GC_OPTS=|g' ${FILEPATH} +sed -i 's|curl\s*-sS\s*|curl -sSLv |g' ${FILEPATH} + +set -x +grep -B 5 -A 5 -i '\(bash\|JAVA_TOOL_OPTIONS\|JAVA_GC_OPTS\|JNLP_JAVA_OPTIONS\|curl\)' ${FILEPATH} + + diff --git a/jenkins/agent-base/import_certs.sh b/jenkins/agent-base/import_certs.sh index 4a7977f7b..bcf771e75 100755 --- a/jenkins/agent-base/import_certs.sh +++ b/jenkins/agent-base/import_certs.sh @@ -1,6 +1,15 @@ #!/bin/bash set -eu +# Initialize JAVA_HOME if not set. +JAVA_HOME=${JAVA_HOME:-""} + +if [ -f /etc/profile.d/set-default-java.sh ]; then + source /etc/profile.d/set-default-java.sh +else + echo "WARNING: Not setting default java version." +fi + if [[ ! 
-z ${APP_DNS:=""} ]]; then echo "Setting up certificates from APP_DNS=${APP_DNS} ..."; \ diff --git a/jenkins/agent-base/ods-run-jnlp-client.sh b/jenkins/agent-base/ods-run-jnlp-client.sh index c63588260..00d648250 100755 --- a/jenkins/agent-base/ods-run-jnlp-client.sh +++ b/jenkins/agent-base/ods-run-jnlp-client.sh @@ -1,6 +1,17 @@ #!/bin/bash set -ue +# Initialize JAVA_HOME if not set. +JAVA_HOME=${JAVA_HOME:-""} + +if [ -f /etc/profile.d/set-default-java.sh ]; then + set -x + source /etc/profile.d/set-default-java.sh + set +x +else + echo "WARNING: Not setting default java version." +fi + # Openshift default CA. See https://docs.openshift.com/container-platform/3.11/dev_guide/secrets.html#service-serving-certificate-secrets SERVICEACCOUNT_CA='/var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt' if [[ -f $SERVICEACCOUNT_CA ]]; then diff --git a/jenkins/agent-base/use-j11.sh b/jenkins/agent-base/use-j11.sh new file mode 100644 index 000000000..07954e82f --- /dev/null +++ b/jenkins/agent-base/use-j11.sh @@ -0,0 +1,43 @@ +#!/bin/bash + +JAVA_HOME_FOLDER=$(ls -lah /usr/lib/jvm | grep "java-11-openjdk-11.*\.x86_64" | awk '{print $NF}' | head -1) +JAVA_VERSION="11" + +function msg_and_exit() { + echo "ERROR: ${1}" + exit 1 +} + +echo "Switching to java ${JAVA_VERSION}:" +JAVA_HOME="/usr/lib/jvm/${JAVA_HOME_FOLDER}" + +alternatives --set java ${JAVA_HOME}/bin/java || \ + msg_and_exit "Cannot configure java ${JAVA_VERSION} as the alternative to use for java." +java -version 2>&1 | grep -q "\s\+${JAVA_VERSION}" || msg_and_exit "Java version is not ${JAVA_VERSION}." + +if [ -x ${JAVA_HOME}/bin/javac ]; then + alternatives --set javac ${JAVA_HOME}/bin/javac || \ + msg_and_exit "Cannot configure javac ${JAVA_VERSION} as the alternative to use for javac." + javac -version 2>&1 | grep -q "\s\+${JAVA_VERSION}" || msg_and_exit "Javac version is not ${JAVA_VERSION}." 
+else + echo "WARNING: Not found binary for javac in path ${JAVA_HOME}/bin/javac " +fi + +java -version 2>&1 +if which 'javac'; then + javac -version 2>&1 +else + echo "WARNING: Binary javac is not available." +fi + +if [ -d ${JAVA_HOME}/bin/ ]; then + export JAVA_HOME +else + msg_and_exit "Cannot configure JAVA_HOME environment variable to ${JAVA_HOME}" +fi +echo "JAVA_HOME: $JAVA_HOME" + +rm -fv /etc/profile.d/set-default-java.sh +echo "export JAVA_HOME=${JAVA_HOME}" >> /etc/profile.d/set-default-java.sh +echo "export USE_JAVA_VERSION=java-11" >> /etc/profile.d/set-default-java.sh +chmod +x /etc/profile.d/set-default-java.sh diff --git a/jenkins/master/Dockerfile.centos7 b/jenkins/master/Dockerfile.centos7 index 4c298d69e..817ca451c 100644 --- a/jenkins/master/Dockerfile.centos7 +++ b/jenkins/master/Dockerfile.centos7 @@ -13,24 +13,34 @@ ENV TAILOR_VERSION=1.3.4 USER root -COPY ./import_certs.sh /usr/local/bin/import_certs.sh -RUN import_certs.sh +COPY ./scripts_for_usr-local-bin/* /usr/local/bin/ +RUN import_certs.sh \ + && rpm --import https://pkg.jenkins.io/redhat-stable/jenkins.io.key \ + && ensure_java_jre_is_adequate.sh master \ + && fix_openshift_scripts.sh \ + && yum -y update \ + && clean_yum_cache.sh + + +# To debug. Warning: too much logs if we enable it. +#RUN sed -i 's|\#\!\s*/bin/bash.*|\#\!/bin/bash -xeu|g' /usr/local/bin/install-plugins.sh \ +# && head -n 10 /usr/local/bin/install-plugins.sh # Copy configuration and plugins. 
COPY plugins.centos7.txt /opt/openshift/configuration/plugins.txt -COPY kube-slave-common.sh /usr/local/bin/kube-slave-common.sh RUN /usr/local/bin/install-plugins.sh /opt/openshift/configuration/plugins.txt \ && rm -r /opt/openshift/configuration/jobs/OpenShift* || true \ && touch /var/lib/jenkins/configured \ && mv /usr/libexec/s2i/run /usr/libexec/s2i/openshift-run COPY configuration/ /opt/openshift/configuration/ COPY ods-run.sh /usr/libexec/s2i/run +COPY logging.properties /var/lib/jenkins/ RUN chown :0 /etc/pki/java/cacerts && chmod ugo+w /etc/pki/java/cacerts # Install Tailor. RUN cd /tmp \ - && curl -LOv https://github.com/opendevstack/tailor/releases/download/v${TAILOR_VERSION}/tailor-linux-amd64 \ + && curl -sSLO https://github.com/opendevstack/tailor/releases/download/v${TAILOR_VERSION}/tailor-linux-amd64 \ && mv tailor-linux-amd64 /usr/local/bin/tailor \ && chmod a+x /usr/local/bin/tailor diff --git a/jenkins/master/Dockerfile.rhel7 b/jenkins/master/Dockerfile.rhel7 index 0fc33575f..f2549f23f 100644 --- a/jenkins/master/Dockerfile.rhel7 +++ b/jenkins/master/Dockerfile.rhel7 @@ -13,18 +13,22 @@ ENV TAILOR_VERSION=1.3.4 USER root -COPY ./import_certs.sh /usr/local/bin/import_certs.sh -RUN import_certs.sh +COPY ./scripts_for_usr-local-bin/* /usr/local/bin/ +RUN import_certs.sh \ + && rpm --import https://pkg.jenkins.io/redhat-stable/jenkins.io.key \ + && ensure_java_jre_is_adequate.sh master \ + && fix_openshift_scripts.sh \ + && clean_yum_cache.sh # Copy configuration and plugins. 
COPY plugins.rhel7.txt /opt/openshift/configuration/plugins.txt -COPY kube-slave-common.sh /usr/local/bin/kube-slave-common.sh RUN /usr/local/bin/install-plugins.sh /opt/openshift/configuration/plugins.txt \ && rm -r /opt/openshift/configuration/jobs/OpenShift* || true \ && touch /var/lib/jenkins/configured \ && mv /usr/libexec/s2i/run /usr/libexec/s2i/openshift-run COPY configuration/ /opt/openshift/configuration/ COPY ods-run.sh /usr/libexec/s2i/run +COPY logging.properties /var/lib/jenkins/ RUN chown :0 /etc/pki/java/cacerts && chmod ugo+w /etc/pki/java/cacerts diff --git a/jenkins/master/Dockerfile.ubi8 b/jenkins/master/Dockerfile.ubi8 index d6531ba23..e71724bbc 100644 --- a/jenkins/master/Dockerfile.ubi8 +++ b/jenkins/master/Dockerfile.ubi8 @@ -13,18 +13,24 @@ ENV TAILOR_VERSION=1.3.4 USER root -COPY ./import_certs.sh /usr/local/bin/import_certs.sh -RUN import_certs.sh +COPY ./scripts_for_usr-local-bin/* /usr/local/bin/ +RUN import_certs.sh \ + && rpm --import https://pkg.jenkins.io/redhat-stable/jenkins.io.key \ + && disable_yum_repository.sh /etc/yum.repos.d/ci-rpm-mirrors.repo \ + /etc/yum.repos.d/localdev-* /etc/yum.repos.d/epel.repo \ + && ensure_java_jre_is_adequate.sh master \ + && fix_openshift_scripts.sh \ + && clean_yum_cache.sh # Copy configuration and plugins. 
COPY plugins.ubi8.txt /opt/openshift/configuration/plugins.txt -COPY kube-slave-common.sh /usr/local/bin/kube-slave-common.sh RUN /usr/local/bin/install-plugins.sh /opt/openshift/configuration/plugins.txt \ && rm -r /opt/openshift/configuration/jobs/OpenShift* || true \ && touch /var/lib/jenkins/configured \ && mv /usr/libexec/s2i/run /usr/libexec/s2i/openshift-run COPY configuration/ /opt/openshift/configuration/ COPY ods-run.sh /usr/libexec/s2i/run +COPY logging.properties /var/lib/jenkins/ RUN chown :0 /etc/pki/java/cacerts && chmod ugo+w /etc/pki/java/cacerts diff --git a/jenkins/master/configuration/init.groovy.d/flow-durability-hint.groovy b/jenkins/master/configuration/init.groovy.d/flow-durability-hint.groovy index d99a61795..212ee002c 100644 --- a/jenkins/master/configuration/init.groovy.d/flow-durability-hint.groovy +++ b/jenkins/master/configuration/init.groovy.d/flow-durability-hint.groovy @@ -1,3 +1,4 @@ +import jenkins.model.Jenkins; import org.jenkinsci.plugins.workflow.flow.*; // See comments in https://github.com/opendevstack/ods-core/pull/1161 @@ -11,8 +12,9 @@ for (FlowDurabilityHint maybeHint : FlowDurabilityHint.values()) { println("\nPrevious value: ") println(GlobalDefaultFlowDurabilityLevel.getDefaultDurabilityHint()) +// https://javadoc.jenkins.io/jenkins/model/class-use/Jenkins.html Jenkins j = Jenkins.getInstanceOrNull() -def global_settings = j.getExtensionList(GlobalDefaultFlowDurabilityLevel.DescriptorImpl.class).get(0).durabilityHint = fdh; +j.getExtensionList(GlobalDefaultFlowDurabilityLevel.DescriptorImpl.class).get(0).durabilityHint = fdh; println("\nConfigured value: ") println(GlobalDefaultFlowDurabilityLevel.getDefaultDurabilityHint()) diff --git a/jenkins/master/logging.properties b/jenkins/master/logging.properties new file mode 100644 index 000000000..9b0c461c9 --- /dev/null +++ b/jenkins/master/logging.properties @@ -0,0 +1,11 @@ +handlers=java.util.logging.ConsoleHandler,java.util.logging.FileHandler + 
+java.util.logging.FileHandler.level=INFO +java.util.logging.FileHandler.formatter=java.util.logging.SimpleFormatter +java.util.logging.FileHandler.pattern=/var/log/jenkins/jenkins-master.log +java.util.logging.FileHandler.append=true +java.util.logging.FileHandler.limit=10000000 +java.util.logging.FileHandler.count=5 + +java.util.logging.ConsoleHandler.level=INFO +java.util.logging.ConsoleHandler.formatter=java.util.logging.SimpleFormatter diff --git a/jenkins/master/ods-run.sh b/jenkins/master/ods-run.sh index f2e373348..0697d4573 100755 --- a/jenkins/master/ods-run.sh +++ b/jenkins/master/ods-run.sh @@ -81,18 +81,36 @@ if [ -e "${JENKINS_HOME}/plugins" ]; then echo "Copy audit-trail plugin configuration ..." cp -n /opt/openshift/configuration/audit-trail.xml ${JENKINS_HOME}/audit-trail.xml + echo " " + echo "Plugins version already installed in Jenkins: " + ls -la "${JENKINS_HOME}/plugins/" + + echo " " echo "Enforcing plugin versions defined in the image ..." if [ "$(ls /opt/openshift/plugins/* 2>/dev/null)" ]; then echo "Copying $(ls /opt/openshift/plugins/* | wc -l) files to ${JENKINS_HOME} ..." for FILENAME in /opt/openshift/plugins/* ; do # also need to nuke the metadir; it will get properly populated on jenkins startup basefilename=`basename $FILENAME .jpi` - rm -rf "${JENKINS_HOME}/plugins/${basefilename}" - cp --remove-destination $FILENAME ${JENKINS_HOME}/plugins + rm -rfv "${JENKINS_HOME}/plugins/${basefilename}" + cp -v --remove-destination $FILENAME ${JENKINS_HOME}/plugins done rm -rf /opt/openshift/plugins fi fi -echo "Booting Jenkins ..." -/usr/libexec/s2i/openshift-run +echo " " +echo "Booting Jenkins ( /usr/libexec/s2i/openshift-run ) ..." +echo " " +JENKINS_ERROR="false" +/usr/libexec/s2i/openshift-run || JENKINS_ERROR="true" + +if [ "false" != "${JENKINS_ERROR}" ]; then + echo " " + echo "Jenkins exit code was not 0. Something went wrong." + echo "Waiting 10 secs, so the pod does not die before showing in its log all jenkins logs." 
+ echo " " + sleep 10 + exit 1 +fi + diff --git a/jenkins/master/scripts_for_usr-local-bin/clean_yum_cache.sh b/jenkins/master/scripts_for_usr-local-bin/clean_yum_cache.sh new file mode 100755 index 000000000..a4ec6657e --- /dev/null +++ b/jenkins/master/scripts_for_usr-local-bin/clean_yum_cache.sh @@ -0,0 +1,7 @@ +#!/bin/bash +set -eu -o pipefail + +ME="$(basename $0)" + +yum clean all || true +rm -rf /var/cache/yum/* || true diff --git a/jenkins/master/scripts_for_usr-local-bin/disable_yum_repository.sh b/jenkins/master/scripts_for_usr-local-bin/disable_yum_repository.sh new file mode 100755 index 000000000..df8cc2724 --- /dev/null +++ b/jenkins/master/scripts_for_usr-local-bin/disable_yum_repository.sh @@ -0,0 +1,13 @@ +#!/bin/bash +set -eu -o pipefail + +ME="$(basename $0)" + +for filepath in $@; do + if [ -f ${filepath} ]; then + sed -i 's|enabled\s*=\s*1|enabled=0|g' ${filepath} + grep --with-filename 'enabled\s*=' ${filepath} + else + echo "File does not exist: ${filepath}" + fi +done diff --git a/jenkins/master/scripts_for_usr-local-bin/ensure_java_jre_is_adequate.sh b/jenkins/master/scripts_for_usr-local-bin/ensure_java_jre_is_adequate.sh new file mode 100755 index 000000000..e47617519 --- /dev/null +++ b/jenkins/master/scripts_for_usr-local-bin/ensure_java_jre_is_adequate.sh @@ -0,0 +1,67 @@ +#!/bin/bash +set -eu -o pipefail + +ME="$(basename $0)" +JAVA_INSTALLED_PKGS_LOGS="/tmp/java_installed_pkgs.log" +JAVA_11_INSTALLED_PKGS_LOGS="/tmp/java_11_installed_pkgs.log" +rm -fv ${JAVA_INSTALLED_PKGS_LOGS} ${JAVA_11_INSTALLED_PKGS_LOGS} + +NEEDS_DEVEL=${1-""} +PKG_NAME_TAIL="headless" +if [ ! -z "${NEEDS_DEVEL}" ] && [ "" != "${NEEDS_DEVEL}" ]; then + NEEDS_DEVEL="true" + PKG_NAME_TAIL="devel" +else + NEEDS_DEVEL="false" + PKG_NAME_TAIL="headless" +fi + +echo "${ME}: Needs development packages? 
${NEEDS_DEVEL}" +echo " " +echo "${ME}: Listing versions of java installed: " +yum list installed | grep -i "\(java\|jre\)" | tee -a ${JAVA_INSTALLED_PKGS_LOGS} +touch ${JAVA_11_INSTALLED_PKGS_LOGS} +grep -i "java-11" ${JAVA_INSTALLED_PKGS_LOGS} > ${JAVA_11_INSTALLED_PKGS_LOGS} || echo "No java 11 packages found." + +NEEDS_INSTALLATION="true" +if [ -f ${JAVA_11_INSTALLED_PKGS_LOGS} ]; then + if grep -qi "${PKG_NAME_TAIL}" ${JAVA_11_INSTALLED_PKGS_LOGS} ; then + NEEDS_INSTALLATION="false" + fi +fi + +# We need devel package in masters to have jar binary. +if [ "true" == "${NEEDS_INSTALLATION}" ]; then + echo "${ME}:Java-11 is *not* installed. Installing..." + if [ "true" == "${NEEDS_DEVEL}" ]; then + yum -y install java-11-openjdk-devel + else + yum -y install java-11-openjdk-headless + fi +else + echo "${ME}: Java-11 is already installed." +fi + +if grep -qi "java-1.8" ${JAVA_INSTALLED_PKGS_LOGS} ; then + echo "${ME}: Java-8 is installed. Removing..." + yum -y remove java-1.8* +else + echo "${ME}: Java-8 is not installed. Correct." +fi + +rm -fv ${JAVA_INSTALLED_PKGS_LOGS} ${JAVA_11_INSTALLED_PKGS_LOGS} + +echo " " +echo "${ME}: Checking java tool versions: " +if [ "true" == "${NEEDS_DEVEL}" ]; then + jar --version +fi + +NO_JAVA_LINK="false" +java -version || NO_JAVA_LINK="true" +if [ "true" == "${NO_JAVA_LINK}" ]; then + JAVA_HOME_FOLDER=$(ls -lah /usr/lib/jvm | grep "java-11-openjdk-11.*\.x86_64" | awk '{print $NF}' | head -1) + JAVA_HOME="/usr/lib/jvm/${JAVA_HOME_FOLDER}" + alternatives --set java ${JAVA_HOME}/bin/java +fi +java -version diff --git a/jenkins/master/scripts_for_usr-local-bin/fix_openshift_scripts.sh b/jenkins/master/scripts_for_usr-local-bin/fix_openshift_scripts.sh new file mode 100755 index 000000000..41d040d44 --- /dev/null +++ b/jenkins/master/scripts_for_usr-local-bin/fix_openshift_scripts.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -eu -o pipefail + +ME="$(basename $0)" +echo "${ME}: INFO: Fixing openshift scripts..." 
+ +FILE_TO_MODIFY=${1-"/usr/libexec/s2i/run"} + +if [ -f ${FILE_TO_MODIFY} ]; then + sed -i 's|\#\!/bin/bash|\#\!/bin/bash -x|g' ${FILE_TO_MODIFY} + sed -i "s|^\s*JAVA_TOOL_OPTIONS\s*=.*| echo 'WARNING: JAVA_TOOL_OPTIONS env variable is UNSET.'|g" \ + ${FILE_TO_MODIFY} + sed -i "s|^\s*export\s*JAVA_TOOL_OPTIONS.*| echo 'WARNING: JAVA_TOOL_OPTIONS env variable is UNSET.'|g" \ + ${FILE_TO_MODIFY} + sed -i 's|^\(\s*\)JAVA_GC_OPTS\s*=.*|\1JAVA_GC_OPTS=|g' ${FILE_TO_MODIFY} + grep -B 3 -A 3 -i '\(bash\|JAVA_TOOL_OPTIONS\|JAVA_GC_OPTS\)' ${FILE_TO_MODIFY} +else + echo " " + echo "${ME}: WARNING: Could not modify file because it does not exist: ${FILE_TO_MODIFY} " + echo " " + echo " " +fi + +echo "${ME}: INFO: Fixed openshift scripts." + diff --git a/jenkins/master/import_certs.sh b/jenkins/master/scripts_for_usr-local-bin/import_certs.sh similarity index 100% rename from jenkins/master/import_certs.sh rename to jenkins/master/scripts_for_usr-local-bin/import_certs.sh diff --git a/jenkins/master/scripts_for_usr-local-bin/install_jenkins_lts.sh b/jenkins/master/scripts_for_usr-local-bin/install_jenkins_lts.sh new file mode 100755 index 000000000..659c53fdd --- /dev/null +++ b/jenkins/master/scripts_for_usr-local-bin/install_jenkins_lts.sh @@ -0,0 +1,45 @@ +#!/bin/bash +set -eu -o pipefail + +ME="$(basename $0)" +echo "${ME}: Upgrading Jenkins to latest LTS version available..." + +# sudo wget -O /etc/yum.repos.d/jenkins.repo https://pkg.jenkins.io/redhat-stable/jenkins.repo +# sudo rpm --import https://pkg.jenkins.io/redhat-stable/jenkins.io.key +# sudo yum upgrade +# Add required dependencies for the jenkins package +# sudo yum install java-11-openjdk +# sudo yum install jenkins +# sudo systemctl daemon-reload + +DEFAULT_TARGET="/usr/lib/jenkins/jenkins.war" +TARGET="${DEFAULT_TARGET}" + +curl -sSLO https://get.jenkins.io/war-stable/latest/jenkins.war + +if [ ! 
-f "${TARGET}" ]; then + echo "${ME}: File does not exist: ${TARGET}" + TARGET="$(find /usr/ -name jenkins.war)" + echo "${ME}: New target: ${TARGET}" +fi + +if [ -f "${TARGET}" ]; then + echo "${ME}: Upgrading Jenkins to latest LTS version... " + rm -fv ${TARGET} + mv -vf jenkins.war ${TARGET} + ls -lah ${TARGET} +else + echo "${ME}: ERROR: Cannot upgrade Jenkins version." + exit 1 +fi + +if [ ! -f "${DEFAULT_TARGET}" ]; then + DEFAULT_TARGET_FOLDER="$(dirname ${DEFAULT_TARGET})" + if [ ! -d ${DEFAULT_TARGET_FOLDER} ]; then + mkdir -pv ${DEFAULT_TARGET_FOLDER} + fi + cd ${DEFAULT_TARGET_FOLDER} && ln -sv ${TARGET} . +fi +ls -la ${DEFAULT_TARGET} ${TARGET} +echo "${ME}: INFO: Jenkins was upgraded to latest LTS version." + diff --git a/jenkins/master/kube-slave-common.sh b/jenkins/master/scripts_for_usr-local-bin/kube-slave-common.sh similarity index 100% rename from jenkins/master/kube-slave-common.sh rename to jenkins/master/scripts_for_usr-local-bin/kube-slave-common.sh diff --git a/jenkins/ocp-config/build/bc.yml b/jenkins/ocp-config/build/bc.yml index 063f1ce3a..99eb4e219 100644 --- a/jenkins/ocp-config/build/bc.yml +++ b/jenkins/ocp-config/build/bc.yml @@ -72,7 +72,7 @@ objects: labels: app: jenkins spec: - failedBuildsHistoryLimit: 5 + failedBuildsHistoryLimit: 20 nodeSelector: null output: to: @@ -117,7 +117,7 @@ objects: from: kind: DockerImage name: ${JENKINS_MASTER_BASE_FROM_IMAGE} - successfulBuildsHistoryLimit: 5 + successfulBuildsHistoryLimit: 20 - kind: BuildConfig apiVersion: v1 metadata: @@ -125,7 +125,7 @@ objects: labels: app: jenkins spec: - failedBuildsHistoryLimit: 5 + failedBuildsHistoryLimit: 20 nodeSelector: null output: to: @@ -164,7 +164,7 @@ objects: name: ${JENKINS_AGENT_BASE_FROM_IMAGE} dockerfilePath: ${JENKINS_AGENT_DOCKERFILE_PATH} type: Docker - successfulBuildsHistoryLimit: 5 + successfulBuildsHistoryLimit: 20 - apiVersion: v1 kind: BuildConfig metadata: diff --git a/jenkins/ocp-config/deploy/jenkins-master.yml 
b/jenkins/ocp-config/deploy/jenkins-master.yml index cfbfb1029..756cdc7fe 100644 --- a/jenkins/ocp-config/deploy/jenkins-master.yml +++ b/jenkins/ocp-config/deploy/jenkins-master.yml @@ -17,7 +17,7 @@ parameters: - name: JENKINS_ENABLE_OAUTH value: "true" - name: JENKINS_MEMORY_LIMIT - value: 6Gi + value: 7Gi - name: JENKINS_MEMORY_REQUEST value: 4Gi - name: JENKINS_CPU_LIMIT @@ -27,7 +27,7 @@ parameters: - name: JENKINS_VOLUME_CAPACITY value: 5Gi - name: JENKINS_JAVA_GC_OPTS - value: "-server -XX:NativeMemoryTracking=summary -XX:MaxRAMPercentage=90 -XX:+UseG1GC -XX:+ExplicitGCInvokesConcurrent -XX:+ParallelRefProcEnabled -XX:+UseStringDeduplication -XX:MaxMetaspaceSize=1g -XX:MetaspaceSize=256M -XX:SoftRefLRUPolicyMSPerMB=1 -XX:+UnlockExperimentalVMOptions -XX:+UnlockDiagnosticVMOptions -XX:G1SummarizeRSetStatsPeriod=1" + value: "-server -XX:NativeMemoryTracking=summary -XX:-UseContainerSupport -XX:MaxRAMPercentage=90 -XX:+UseG1GC -XX:+ExplicitGCInvokesConcurrent -XX:+ParallelRefProcEnabled -XX:+UseStringDeduplication -XX:MaxMetaspaceSize=1g -XX:MetaspaceSize=256M -XX:SoftRefLRUPolicyMSPerMB=1 -XX:+UnlockExperimentalVMOptions -XX:+UnlockDiagnosticVMOptions -XX:G1SummarizeRSetStatsPeriod=1 -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=/var/lib/jenkins" - name: JENKINS_JAVA_MAX_HEAP_PARAM value: "-Xms1024m -Xmx4g" - name: JENKINS_CONTAINER_HEAP_PERCENT diff --git a/jenkins/webhook-proxy/main.go b/jenkins/webhook-proxy/main.go index 64e302997..465d777d4 100644 --- a/jenkins/webhook-proxy/main.go +++ b/jenkins/webhook-proxy/main.go @@ -787,7 +787,11 @@ func getSecureClient() (*http.Client, error) { Certificates: []tls.Certificate{}, RootCAs: caCertPool, } - tlsConfig.BuildNameToCertificate() + // Deprecated. + // The Config.NameToCertificate field, which only supports associating a + // single certificate with a give name, is now deprecated and should be + // left as nil. 
https://go.dev/pkg/crypto/tls/ + // tlsConfig.BuildNameToCertificate() transport := &http.Transport{TLSClientConfig: tlsConfig} return &http.Client{Transport: transport, Timeout: 10 * time.Second}, nil } diff --git a/ods-devenv/packer/CentOS2ODSBox.json b/ods-devenv/packer/CentOS2ODSBox.json index ccd066999..5eab9f324 100644 --- a/ods-devenv/packer/CentOS2ODSBox.json +++ b/ods-devenv/packer/CentOS2ODSBox.json @@ -53,8 +53,8 @@ "echo 'Adding odsbox.pub to authorized keys'", "mkdir -p ~/.ssh", "chmod go-xrw ~/.ssh", - "mv ~/tmp/odsbox.pub ~/.ssh/odsbox.pub", - "mv ~/tmp/odsbox ~/.ssh/odsbox", + "mv -vf ~/tmp/odsbox.pub ~/.ssh/odsbox.pub", + "mv -vf ~/tmp/odsbox ~/.ssh/odsbox", "touch ~/.ssh/authorized_keys", "cat ~/.ssh/odsbox.pub >> ~/.ssh/authorized_keys", "chmod go-xrw ~/.ssh/*", diff --git a/ods-devenv/packer/configure_buildBot.sh b/ods-devenv/packer/configure_buildBot.sh new file mode 100644 index 000000000..96951c5a0 --- /dev/null +++ b/ods-devenv/packer/configure_buildBot.sh @@ -0,0 +1,23 @@ +#!/usr/bin/env bash + +set -euo pipefail + +echo "${0}" +echo -n "whoami: " +whoami + +# install aws cli +curl -sSL --retry 5 --retry-delay 5 --retry-max-time 300 "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "awscliv2.zip" +unzip awscliv2.zip +sudo ./aws/install + +mkdir -p bin logs opendevstack/builds opendevstack/packer_build_result tmp +cd opendevstack || exit 1 +git clone https://github.com/opendevstack/ods-core.git + +echo " " +echo "DONE." +echo " " + +echo "Disabled because can't work." 
+exit 1 diff --git a/ods-devenv/packer/create_ods_box_image.sh b/ods-devenv/packer/create_ods_box_image.sh index 8988df020..39212bcc0 100755 --- a/ods-devenv/packer/create_ods_box_image.sh +++ b/ods-devenv/packer/create_ods_box_image.sh @@ -1,10 +1,13 @@ #!/usr/bin/env bash +set -euo pipefail + aws_access_key= aws_secret_key= -# default public key to be added to the odsbox authorized_keys +# default public/private key to be added to the odsbox authorized_keys pub_key= +ssh_private_key_file_path= ods_branch=master @@ -12,8 +15,9 @@ s3_bucket_name= s3_upload_folder=image_upload output_directory=output-vmware-iso instance_type=m5ad.4xlarge - +build_folder= dryrun=false +PACKER_CONFIG_FILE=${PACKER_CONFIG_FILE-""} while [[ "$#" -gt 0 ]]; do case $1 in @@ -45,6 +49,12 @@ while [[ "$#" -gt 0 ]]; do --pub-key) pub_key="$2"; shift;; --pub-key=*) pub_key="${1#*=}";; + --priv-key) ssh_private_key_file_path="$2"; shift;; + --priv-key=*) ssh_private_key_file_path="${1#*=}";; + + --build-folder) build_folder="$2"; shift;; + --build-folder=*) build_folder="${1#*=}";; + --target) target="$2"; shift;; --dryrun) dryrun=true;; @@ -87,6 +97,7 @@ function display_usage() { echo " --aws-access-key AWS credentials" echo " --aws-secret-key AWS credentials" echo " --pub-key Public key to be added to the odsbox authorized servers" + echo " --priv-key Private key to be added to the odsbox and used to access the host" echo " --ods-branch branch to build ODS box against, e.g master" echo " --instance-type AWS EC2 instance type to run the AMI build on. Defaults to m5ad.4xlarge." echo " Options: t2.2xlarge, m5ad.4xlarge" @@ -193,6 +204,8 @@ function create_ods_box_ami() { echo "AWS_MAX_ATTEMPTS=${AWS_MAX_ATTEMPTS}" echo "AWS_POLL_DELAY_SECONDS=${AWS_POLL_DELAY_SECONDS}" echo "ods_branch=${ods_branch}" + echo "build_folder=${build_folder}" + echo " " if [[ "${dryrun}" == "true" ]] then @@ -205,31 +218,48 @@ function create_ods_box_ami() { echo -n '.' done echo "done." 
+ echo " " exit 0 - else - if [[ -z ${pub_key:=""} ]]; then - pub_key="ssh-tmp-key.pub" - ssh_private_key_file_path="./ssh-tmp-key" - echo "A public key was not provided... creating tmp ssh key ($pub_key)..." - ssh-keygen -t rsa -n "openshift@odsbox.lan" -C "openshift@odsbox.lan" -m PEM -P "" -f "${ssh_private_key_file_path}" - pwd - cat - cat ./ssh-tmp-key $pub_key - fi - - time packer build -on-error=ask \ - -var "aws_access_key=${aws_access_key}" \ - -var "aws_secret_key=${aws_secret_key}" \ - -var "ami_id=${ami_id}" \ - -var 'username=openshift' \ - -var 'password=openshift' \ - -var "name_tag=ODS Box $(date)" \ - -var "ods_branch=${ods_branch}" \ - -var "instance_type=${instance_type}" \ - -var "pub_key=${pub_key}" \ - -var "ssh_private_key_file_path=${ssh_private_key_file_path}" \ - ods-devenv/packer/CentOS2ODSBox.json fi + + if [[ -z ${pub_key:=""} ]]; then + pub_key="ssh-tmp-key.pub" + ssh_private_key_file_path="./ssh-tmp-key" + echo "A public key was not provided... creating tmp ssh key ($pub_key)..." + ssh-keygen -t rsa -n "openshift@odsbox.lan" -C "openshift@odsbox.lan" -m PEM -P "" -f "${ssh_private_key_file_path}" + pwd + sleep 2 + cat ./ssh-tmp-key $pub_key + sleep 2 + fi + + if [ -z "${PACKER_CONFIG_FILE}" ] || [ "" != "${PACKER_CONFIG_FILE}" ] || [ ! -f "${PACKER_CONFIG_FILE}" ]; then + PACKER_CONFIG_FILE="ods-devenv/packer/CentOS2ODSBox.json" + echo " " + echo "set PACKER_CONFIG_FILE=${PACKER_CONFIG_FILE}" + + fi + + echo " " + set -x + time packer build -on-error=ask \ + -var "aws_access_key=${aws_access_key}" \ + -var "aws_secret_key=${aws_secret_key}" \ + -var "ami_id=${ami_id}" \ + -var 'username=openshift' \ + -var 'password=openshift' \ + -var "name_tag=ODS Box $(date)" \ + -var "ods_branch=${ods_branch}" \ + -var "instance_type=${instance_type}" \ + -var "pub_key=${pub_key}" \ + -var "ssh_private_key_file_path=${ssh_private_key_file_path}" \ + ${PACKER_CONFIG_FILE} + if [ 0 -ne $? ]; then + set +x + echo "Error in packer build !!" 
+ exit 1 + fi + set +x } target="${target:-display_usage}" diff --git a/ods-devenv/packer/jenkins/JenkinsFile_createOdsBox b/ods-devenv/packer/jenkins/JenkinsFile_createOdsBox new file mode 100644 index 000000000..f33eaed53 --- /dev/null +++ b/ods-devenv/packer/jenkins/JenkinsFile_createOdsBox @@ -0,0 +1,105 @@ +def buildBadge = addEmbeddableBadgeConfiguration(id: "odsbuild", status: "started") + +pipeline { + + environment { + ODS_BRANCH = "experimental" + JOB_NAME = "ods-build-4_x-branch" + BUILD_FOLDER = "${WORKSPACE_TMP}/${BUILD_NUMBER}" + instance_type="m5ad.4xlarge" + } + + agent { + node('buildbot') + } + + + options { + ansiColor('xterm') + } + + + stages { + stage('ODS BuildBot: running AMI build!') { + + steps { + + script { + + try { + echoInfo "Setting build badge status!" + buildBadge.setStatus("running") + + GIT_COMMIT_REV = sh(returnStdout: true, script: "curl -sSL https://api.github.com/repos/opendevstack/ods-core/commits/${ODS_BRANCH} | jq .sha").trim() + GIT_COMMIT_REV = GIT_COMMIT_REV.length()>8 ? GIT_COMMIT_REV.substring(1,8) : GIT_COMMIT_REV + GIT_COMMIT_MESSAGE = sh(returnStdout: true, script: "curl -sSL https://api.github.com/repos/opendevstack/ods-core/commits/${ODS_BRANCH} | jq .commit.message").trim() + GIT_COMMIT_MESSAGE = GIT_COMMIT_MESSAGE.length()>25 ? GIT_COMMIT_MESSAGE.substring(1,25) : GIT_COMMIT_MESSAGE + BUILD_SUBJECT = "#${GIT_COMMIT_REV}: ${GIT_COMMIT_MESSAGE}..." + buildBadge.setSubject(BUILD_SUBJECT) + + + echoInfo "Started ODS AMI build number '${BUILD_NUMBER}' for branch: '${ODS_BRANCH}'" + echoInfo "BUILD_FOLDER = '${BUILD_FOLDER}'" + echoInfo "... logging some environment details" + sh 'whoami && pwd && ls -lart' + sh 'echo $PATH' + sh 'git version' + + echoInfo "... create temp build folder" + sh 'mkdir -p ${BUILD_FOLDER}' + echoInfo "... 
cloning ods-core && checkout branch ${ODS_BRANCH}" + sh 'cd ${BUILD_FOLDER} && git clone https://github.com/opendevstack/ods-core.git && cd ods-core && git checkout ${ODS_BRANCH}' + + echoInfo "... preparing temp build folder" + withCredentials([sshUserPrivateKey(credentialsId: "edp_slave_openshift_pub", keyFileVariable: 'ssh_public_key'), + sshUserPrivateKey(credentialsId: "edp_slave_openshift_202205", keyFileVariable: 'ssh_private_key')]) { + sh 'cp ${ssh_public_key} ${BUILD_FOLDER}/jenkins.pub' + sh 'cp ${ssh_private_key} ${BUILD_FOLDER}/jenkins.priv' + } + + withCredentials([usernamePassword(credentialsId: 'registry_credentials', usernameVariable: 'registry_user', passwordVariable: 'registry_token')]) { + withCredentials([[ $class: 'AmazonWebServicesCredentialsBinding', credentialsId: 'awsID', accessKeyVariable: 'AWS_ACCESS_KEY_ID', secretKeyVariable: 'AWS_SECRET_ACCESS_KEY']]) { + // This is safe, client and secret will be masked in the output + // echoInfo "aws client = $AWS_ACCESS_KEY_ID" + // echoInfo "aws secret = $AWS_SECRET_ACCESS_KEY" + echoInfo "... running packer build" + // sh 'export LOG_PATH=. && export PACKER_LOG=1 && export AWS_MAX_ATTEMPTS=400 && export AWS_POLL_DELAY_SECONDS=15 && cd ${BUILD_FOLDER} && pwd && ls -lart && date && log_file="${LOG_PATH}/build_$(echo "${ODS_BRANCH}" | tr "/" "_")_$(date +%Y%m%dT%H%M%S).log" && cd ods-core && time bash 2>&1 ods-devenv/packer/create_ods_box_image.sh --target create_ods_box_ami --aws-access-key "$AWS_ACCESS_KEY_ID" --aws-secret-key "$AWS_SECRET_ACCESS_KEY" --ods-branch "${ODS_BRANCH}" --instance-type ${instance_type}' + sh ''' + set -x + export LOG_PATH=. && export log_path=. 
&& export build_path="${HOME}/opendevstack/builds" && \ + export build_result_path="${HOME}/opendevstack/packer_build_result" && \ + export PACKER_LOG=1 && export AWS_MAX_ATTEMPTS=180 && export AWS_POLL_DELAY_SECONDS=60 && \ + cd ${BUILD_FOLDER} && pwd && ls -lart && date && \ + log_file="${LOG_PATH}/build_$(echo "${ODS_BRANCH}" | tr "/" "_")_$(date +%Y%m%dT%H%M%S).log" && \ + cd ods-core && time bash 2>&1 ods-devenv/packer/create_ods_box_image.sh --target create_ods_box_ami \ + --aws-access-key "$AWS_ACCESS_KEY_ID" --aws-secret-key "$AWS_SECRET_ACCESS_KEY" --ods-branch "${ODS_BRANCH}" \ + --instance-type "${instance_type}" --build-folder "${BUILD_FOLDER}" \ + --pub-key "${BUILD_FOLDER}/jenkins.pub" --priv-key "${BUILD_FOLDER}/jenkins.priv" + ''' + } + } + + echoInfo "...done!" + + buildBadge.setStatus("passing") + + } catch (Exception err) { + buildBadge.setStatus("failing") + + echoError "error: " + err + throw err + } + } + } + } + + } +} + +def echoInfo(msg){ + echo "\033[32m ${msg} \033[0m" +} + +def echoError(msg){ + echo "\033[31m ${msg} \033[0m" +} diff --git a/ods-devenv/packer/jenkins/JenkinsFile_createOdsBox_4_x b/ods-devenv/packer/jenkins/JenkinsFile_createOdsBox_4_x new file mode 100644 index 000000000..1d86254cf --- /dev/null +++ b/ods-devenv/packer/jenkins/JenkinsFile_createOdsBox_4_x @@ -0,0 +1,106 @@ +def buildBadge = addEmbeddableBadgeConfiguration(id: "odsbuild", status: "started") + +pipeline { + + environment { + ODS_BRANCH = "4.x" + JOB_NAME = "ods-build-4_x-branch" + BUILD_FOLDER = "${WORKSPACE_TMP}/${BUILD_NUMBER}" + instance_type="m5ad.4xlarge" + } + + agent { + node('buildbot') + } + + + options { + ansiColor('xterm') + } + + + stages { + stage('ODS BuildBot: running AMI build!') { + + steps { + + script { + + try { + echoInfo "Setting build badge status!" 
+ buildBadge.setStatus("running") + + GIT_COMMIT_REV = sh(returnStdout: true, script: "curl -sSL https://api.github.com/repos/opendevstack/ods-core/commits/${ODS_BRANCH} | jq .sha").trim() + GIT_COMMIT_REV = GIT_COMMIT_REV.length()>8 ? GIT_COMMIT_REV.substring(1,8) : GIT_COMMIT_REV + GIT_COMMIT_MESSAGE = sh(returnStdout: true, script: "curl -sSL https://api.github.com/repos/opendevstack/ods-core/commits/${ODS_BRANCH} | jq .commit.message").trim() + GIT_COMMIT_MESSAGE = GIT_COMMIT_MESSAGE.length()>25 ? GIT_COMMIT_MESSAGE.substring(1,25) : GIT_COMMIT_MESSAGE + BUILD_SUBJECT = "#${GIT_COMMIT_REV}: ${GIT_COMMIT_MESSAGE}..." + buildBadge.setSubject(BUILD_SUBJECT) + + + echoInfo "Started ODS AMI build number '${BUILD_NUMBER}' for branch: '${ODS_BRANCH}'" + echoInfo "BUILD_FOLDER = '${BUILD_FOLDER}'" + echoInfo "... logging some environment details" + sh 'whoami && pwd && ls -lart' + sh 'echo $PATH' + sh 'git version' + + echoInfo "... create temp build folder" + sh 'mkdir -p ${BUILD_FOLDER}' + echoInfo "... cloning ods-core && checkout branch ${ODS_BRANCH}" + sh 'cd ${BUILD_FOLDER} && git clone https://github.com/opendevstack/ods-core.git && cd ods-core && git checkout ${ODS_BRANCH}' + + echoInfo "... 
preparing temp build folder" + withCredentials([sshUserPrivateKey(credentialsId: "edp_slave_openshift_pub", keyFileVariable: 'ssh_public_key'), + sshUserPrivateKey(credentialsId: "edp_slave_openshift_202205", keyFileVariable: 'ssh_private_key')]) { + sh 'cp ${ssh_public_key} ${BUILD_FOLDER}/jenkins.pub' + sh 'cp ${ssh_private_key} ${BUILD_FOLDER}/jenkins.priv' + } + + withCredentials([usernamePassword(credentialsId: 'registry_credentials', usernameVariable: 'registry_user', passwordVariable: 'registry_token')]) { + withCredentials([[ $class: 'AmazonWebServicesCredentialsBinding', credentialsId: 'awsID', accessKeyVariable: 'AWS_ACCESS_KEY_ID', secretKeyVariable: 'AWS_SECRET_ACCESS_KEY']]) { + // This is safe, client and secret will be masked in the output + // echoInfo "aws client = $AWS_ACCESS_KEY_ID" + // echoInfo "aws secret = $AWS_SECRET_ACCESS_KEY" + echoInfo "... running packer build" + // sh 'export LOG_PATH=. && export PACKER_LOG=1 && export AWS_MAX_ATTEMPTS=400 && export AWS_POLL_DELAY_SECONDS=15 && cd ${BUILD_FOLDER} && pwd && ls -lart && date && log_file="${LOG_PATH}/build_$(echo "${ODS_BRANCH}" | tr "/" "_")_$(date +%Y%m%dT%H%M%S).log" && cd ods-core && time bash 2>&1 ods-devenv/packer/create_ods_box_image.sh --target create_ods_box_ami --aws-access-key "$AWS_ACCESS_KEY_ID" --aws-secret-key "$AWS_SECRET_ACCESS_KEY" --ods-branch "${ODS_BRANCH}" --instance-type ${instance_type}' + sh ''' + set -x + export LOG_PATH=. && export log_path=. 
&& export build_path="${HOME}/opendevstack/builds" && \ + export build_result_path="${HOME}/opendevstack/packer_build_result" && \ + export PACKER_LOG=1 && export AWS_MAX_ATTEMPTS=180 && export AWS_POLL_DELAY_SECONDS=60 && \ + cd ${BUILD_FOLDER} && pwd && ls -lart && date && \ + log_file="${LOG_PATH}/build_$(echo "${ODS_BRANCH}" | tr "/" "_")_$(date +%Y%m%dT%H%M%S).log" && \ + + cd ods-core && time bash 2>&1 ods-devenv/packer/create_ods_box_image.sh --target create_ods_box_ami \ + --aws-access-key "$AWS_ACCESS_KEY_ID" --aws-secret-key "$AWS_SECRET_ACCESS_KEY" --ods-branch "${ODS_BRANCH}" \ + --instance-type "${instance_type}" --build-folder "${BUILD_FOLDER}" \ + --pub-key "${BUILD_FOLDER}/jenkins.pub" --priv-key "${BUILD_FOLDER}/jenkins.priv" + ''' + } + } + + echoInfo "...done!" + + buildBadge.setStatus("passing") + + } catch (Exception err) { + buildBadge.setStatus("failing") + + echoError "error: " + err + throw err + } + } + } + } + + } +} + +def echoInfo(msg){ + echo "\033[32m ${msg} \033[0m" +} + +def echoError(msg){ + echo "\033[31m ${msg} \033[0m" +} diff --git a/ods-devenv/packer/jenkins/JenkinsFile_createOdsBox_experimental b/ods-devenv/packer/jenkins/JenkinsFile_createOdsBox_experimental new file mode 100644 index 000000000..a9e285afd --- /dev/null +++ b/ods-devenv/packer/jenkins/JenkinsFile_createOdsBox_experimental @@ -0,0 +1,105 @@ +def buildBadge = addEmbeddableBadgeConfiguration(id: "odsbuild", status: "started") + +pipeline { + + environment { + ODS_BRANCH = "experimental" + JOB_NAME = "ods-build-experimental-branch" + BUILD_FOLDER = "${WORKSPACE_TMP}/${BUILD_NUMBER}" + instance_type="m5ad.4xlarge" + } + + agent { + node('buildbot') + } + + + options { + ansiColor('xterm') + } + + + stages { + stage('ODS BuildBot: running AMI build!') { + + steps { + + script { + + try { + echoInfo "Setting build badge status!" 
+ buildBadge.setStatus("running") + + GIT_COMMIT_REV = sh(returnStdout: true, script: "curl -sSL https://api.github.com/repos/opendevstack/ods-core/commits/${ODS_BRANCH} | jq .sha").trim() + GIT_COMMIT_REV = GIT_COMMIT_REV.length()>8 ? GIT_COMMIT_REV.substring(1,8) : GIT_COMMIT_REV + GIT_COMMIT_MESSAGE = sh(returnStdout: true, script: "curl -sSL https://api.github.com/repos/opendevstack/ods-core/commits/${ODS_BRANCH} | jq .commit.message").trim() + GIT_COMMIT_MESSAGE = GIT_COMMIT_MESSAGE.length()>25 ? GIT_COMMIT_MESSAGE.substring(1,25) : GIT_COMMIT_MESSAGE + BUILD_SUBJECT = "#${GIT_COMMIT_REV}: ${GIT_COMMIT_MESSAGE}..." + buildBadge.setSubject(BUILD_SUBJECT) + + + echoInfo "Started ODS AMI build number '${BUILD_NUMBER}' for branch: '${ODS_BRANCH}'" + echoInfo "BUILD_FOLDER = '${BUILD_FOLDER}'" + echoInfo "... logging some environment details" + sh 'whoami && pwd && ls -lart' + sh 'echo $PATH' + sh 'git version' + + echoInfo "... create temp build folder" + sh 'mkdir -p ${BUILD_FOLDER}' + echoInfo "... cloning ods-core && checkout branch ${ODS_BRANCH}" + sh 'cd ${BUILD_FOLDER} && git clone https://github.com/opendevstack/ods-core.git && cd ods-core && git checkout ${ODS_BRANCH}' + + echoInfo "... 
preparing temp build folder" + withCredentials([sshUserPrivateKey(credentialsId: "edp_slave_openshift_pub", keyFileVariable: 'ssh_public_key'), + sshUserPrivateKey(credentialsId: "edp_slave_openshift_202205", keyFileVariable: 'ssh_private_key')]) { + sh 'cp ${ssh_public_key} ${BUILD_FOLDER}/jenkins.pub' + sh 'cp ${ssh_private_key} ${BUILD_FOLDER}/jenkins.priv' + } + + withCredentials([usernamePassword(credentialsId: 'registry_credentials', usernameVariable: 'registry_user', passwordVariable: 'registry_token')]) { + withCredentials([[ $class: 'AmazonWebServicesCredentialsBinding', credentialsId: 'awsID', accessKeyVariable: 'AWS_ACCESS_KEY_ID', secretKeyVariable: 'AWS_SECRET_ACCESS_KEY']]) { + // This is safe, client and secret will be masked in the output + // echoInfo "aws client = $AWS_ACCESS_KEY_ID" + // echoInfo "aws secret = $AWS_SECRET_ACCESS_KEY" + echoInfo "... running packer build" + // sh 'export LOG_PATH=. && export PACKER_LOG=1 && export AWS_MAX_ATTEMPTS=400 && export AWS_POLL_DELAY_SECONDS=15 && cd ${BUILD_FOLDER} && pwd && ls -lart && date && log_file="${LOG_PATH}/build_$(echo "${ODS_BRANCH}" | tr "/" "_")_$(date +%Y%m%dT%H%M%S).log" && cd ods-core && time bash 2>&1 ods-devenv/packer/create_ods_box_image.sh --target create_ods_box_ami --aws-access-key "$AWS_ACCESS_KEY_ID" --aws-secret-key "$AWS_SECRET_ACCESS_KEY" --ods-branch "${ODS_BRANCH}" --instance-type ${instance_type}' + sh ''' + set -x + export LOG_PATH=. && export log_path=. 
&& export build_path="${HOME}/opendevstack/builds" && \ + export build_result_path="${HOME}/opendevstack/packer_build_result" && \ + export PACKER_LOG=1 && export AWS_MAX_ATTEMPTS=180 && export AWS_POLL_DELAY_SECONDS=60 && \ + cd ${BUILD_FOLDER} && pwd && ls -lart && date && \ + log_file="${LOG_PATH}/build_$(echo "${ODS_BRANCH}" | tr "/" "_")_$(date +%Y%m%dT%H%M%S).log" && \ + cd ods-core && time bash 2>&1 ods-devenv/packer/create_ods_box_image.sh --target create_ods_box_ami \ + --aws-access-key "$AWS_ACCESS_KEY_ID" --aws-secret-key "$AWS_SECRET_ACCESS_KEY" --ods-branch "${ODS_BRANCH}" \ + --instance-type "${instance_type}" --build-folder "${BUILD_FOLDER}" \ + --pub-key "${BUILD_FOLDER}/jenkins.pub" --priv-key "${BUILD_FOLDER}/jenkins.priv" + ''' + } + } + + echoInfo "...done!" + + buildBadge.setStatus("passing") + + } catch (Exception err) { + buildBadge.setStatus("failing") + + echoError "error: " + err + throw err + } + } + } + } + + } +} + +def echoInfo(msg){ + echo "\033[32m ${msg} \033[0m" +} + +def echoError(msg){ + echo "\033[31m ${msg} \033[0m" +} diff --git a/ods-devenv/packer/jenkins/JenkinsFile_createOdsBox_master b/ods-devenv/packer/jenkins/JenkinsFile_createOdsBox_master new file mode 100644 index 000000000..9d826e50c --- /dev/null +++ b/ods-devenv/packer/jenkins/JenkinsFile_createOdsBox_master @@ -0,0 +1,105 @@ +def buildBadge = addEmbeddableBadgeConfiguration(id: "odsbuild", status: "started") + +pipeline { + + environment { + ODS_BRANCH = "master" + JOB_NAME = "ods-build-master-branch" + BUILD_FOLDER = "${WORKSPACE_TMP}/${BUILD_NUMBER}" + instance_type="m5ad.4xlarge" + } + + agent { + node('buildbot') + } + + + options { + ansiColor('xterm') + } + + + stages { + stage('ODS BuildBot: running AMI build!') { + + steps { + + script { + + try { + echoInfo "Setting build badge status!" 
+ buildBadge.setStatus("running") + + GIT_COMMIT_REV = sh(returnStdout: true, script: "curl -sSL https://api.github.com/repos/opendevstack/ods-core/commits/${ODS_BRANCH} | jq .sha").trim() + GIT_COMMIT_REV = GIT_COMMIT_REV.length()>8 ? GIT_COMMIT_REV.substring(1,8) : GIT_COMMIT_REV + GIT_COMMIT_MESSAGE = sh(returnStdout: true, script: "curl -sSL https://api.github.com/repos/opendevstack/ods-core/commits/${ODS_BRANCH} | jq .commit.message").trim() + GIT_COMMIT_MESSAGE = GIT_COMMIT_MESSAGE.length()>25 ? GIT_COMMIT_MESSAGE.substring(1,25) : GIT_COMMIT_MESSAGE + BUILD_SUBJECT = "#${GIT_COMMIT_REV}: ${GIT_COMMIT_MESSAGE}..." + buildBadge.setSubject(BUILD_SUBJECT) + + + echoInfo "Started ODS AMI build number '${BUILD_NUMBER}' for branch: '${ODS_BRANCH}'" + echoInfo "BUILD_FOLDER = '${BUILD_FOLDER}'" + echoInfo "... logging some environment details" + sh 'whoami && pwd && ls -lart' + sh 'echo $PATH' + sh 'git version' + + echoInfo "... create temp build folder" + sh 'mkdir -p ${BUILD_FOLDER}' + echoInfo "... cloning ods-core && checkout branch ${ODS_BRANCH}" + sh 'cd ${BUILD_FOLDER} && git clone https://github.com/opendevstack/ods-core.git && cd ods-core && git checkout ${ODS_BRANCH}' + + echoInfo "... 
preparing temp build folder" + withCredentials([sshUserPrivateKey(credentialsId: "edp_slave_openshift_pub", keyFileVariable: 'ssh_public_key'), + sshUserPrivateKey(credentialsId: "edp_slave_openshift_202205", keyFileVariable: 'ssh_private_key')]) { + sh 'cp ${ssh_public_key} ${BUILD_FOLDER}/jenkins.pub' + sh 'cp ${ssh_private_key} ${BUILD_FOLDER}/jenkins.priv' + } + + withCredentials([usernamePassword(credentialsId: 'registry_credentials', usernameVariable: 'registry_user', passwordVariable: 'registry_token')]) { + withCredentials([[ $class: 'AmazonWebServicesCredentialsBinding', credentialsId: 'awsID', accessKeyVariable: 'AWS_ACCESS_KEY_ID', secretKeyVariable: 'AWS_SECRET_ACCESS_KEY']]) { + // This is safe, client and secret will be masked in the output + // echoInfo "aws client = $AWS_ACCESS_KEY_ID" + // echoInfo "aws secret = $AWS_SECRET_ACCESS_KEY" + echoInfo "... running packer build" + // sh 'export LOG_PATH=. && export PACKER_LOG=1 && export AWS_MAX_ATTEMPTS=400 && export AWS_POLL_DELAY_SECONDS=15 && cd ${BUILD_FOLDER} && pwd && ls -lart && date && log_file="${LOG_PATH}/build_$(echo "${ODS_BRANCH}" | tr "/" "_")_$(date +%Y%m%dT%H%M%S).log" && cd ods-core && time bash 2>&1 ods-devenv/packer/create_ods_box_image.sh --target create_ods_box_ami --aws-access-key "$AWS_ACCESS_KEY_ID" --aws-secret-key "$AWS_SECRET_ACCESS_KEY" --ods-branch "${ODS_BRANCH}" --instance-type ${instance_type}' + sh ''' + set -x + export LOG_PATH=. && export log_path=. 
&& export build_path="${HOME}/opendevstack/builds" && \ + export build_result_path="${HOME}/opendevstack/packer_build_result" && \ + export PACKER_LOG=1 && export AWS_MAX_ATTEMPTS=180 && export AWS_POLL_DELAY_SECONDS=60 && \ + cd ${BUILD_FOLDER} && pwd && ls -lart && date && \ + log_file="${LOG_PATH}/build_$(echo "${ODS_BRANCH}" | tr "/" "_")_$(date +%Y%m%dT%H%M%S).log" && \ + cd ods-core && time bash 2>&1 ods-devenv/packer/create_ods_box_image.sh --target create_ods_box_ami \ + --aws-access-key "$AWS_ACCESS_KEY_ID" --aws-secret-key "$AWS_SECRET_ACCESS_KEY" --ods-branch "${ODS_BRANCH}" \ + --instance-type "${instance_type}" --build-folder "${BUILD_FOLDER}" \ + --pub-key "${BUILD_FOLDER}/jenkins.pub" --priv-key "${BUILD_FOLDER}/jenkins.priv" + ''' + } + } + + echoInfo "...done!" + + buildBadge.setStatus("passing") + + } catch (Exception err) { + buildBadge.setStatus("failing") + + echoError "error: " + err + throw err + } + } + } + } + + } +} + +def echoInfo(msg){ + echo "\033[32m ${msg} \033[0m" +} + +def echoError(msg){ + echo "\033[31m ${msg} \033[0m" +} diff --git a/ods-devenv/packer/jenkins/JenkinsFile_createOdsBox_ods_devenv b/ods-devenv/packer/jenkins/JenkinsFile_createOdsBox_ods_devenv new file mode 100644 index 000000000..37719e969 --- /dev/null +++ b/ods-devenv/packer/jenkins/JenkinsFile_createOdsBox_ods_devenv @@ -0,0 +1,105 @@ +def buildBadge = addEmbeddableBadgeConfiguration(id: "odsbuild", status: "started") + +pipeline { + + environment { + ODS_BRANCH = "feature/ods-devenv" + JOB_NAME = "ods-build-ods-devenv-branch" + BUILD_FOLDER = "${WORKSPACE_TMP}/${BUILD_NUMBER}" + instance_type="m5ad.4xlarge" + } + + agent { + node('buildbot') + } + + + options { + ansiColor('xterm') + } + + + stages { + stage('ODS BuildBot: running AMI build!') { + + steps { + + script { + + try { + echoInfo "Setting build badge status!" 
+ buildBadge.setStatus("running") + + GIT_COMMIT_REV = sh(returnStdout: true, script: "curl -sSL https://api.github.com/repos/opendevstack/ods-core/commits/${ODS_BRANCH} | jq .sha").trim() + GIT_COMMIT_REV = GIT_COMMIT_REV.length()>8 ? GIT_COMMIT_REV.substring(1,8) : GIT_COMMIT_REV + GIT_COMMIT_MESSAGE = sh(returnStdout: true, script: "curl -sSL https://api.github.com/repos/opendevstack/ods-core/commits/${ODS_BRANCH} | jq .commit.message").trim() + GIT_COMMIT_MESSAGE = GIT_COMMIT_MESSAGE.length()>25 ? GIT_COMMIT_MESSAGE.substring(1,25) : GIT_COMMIT_MESSAGE + BUILD_SUBJECT = "#${GIT_COMMIT_REV}: ${GIT_COMMIT_MESSAGE}..." + buildBadge.setSubject(BUILD_SUBJECT) + + + echoInfo "Started ODS AMI build number '${BUILD_NUMBER}' for branch: '${ODS_BRANCH}'" + echoInfo "BUILD_FOLDER = '${BUILD_FOLDER}'" + echoInfo "... logging some environment details" + sh 'whoami && pwd && ls -lart' + sh 'echo $PATH' + sh 'git version' + + echoInfo "... create temp build folder" + sh 'mkdir -p ${BUILD_FOLDER}' + echoInfo "... cloning ods-core && checkout branch ${ODS_BRANCH}" + sh 'cd ${BUILD_FOLDER} && git clone https://github.com/opendevstack/ods-core.git && cd ods-core && git checkout ${ODS_BRANCH}' + + echoInfo "... 
preparing temp build folder" + withCredentials([sshUserPrivateKey(credentialsId: "edp_slave_openshift_pub", keyFileVariable: 'ssh_public_key'), + sshUserPrivateKey(credentialsId: "edp_slave_openshift_202205", keyFileVariable: 'ssh_private_key')]) { + sh 'cp ${ssh_public_key} ${BUILD_FOLDER}/jenkins.pub' + sh 'cp ${ssh_private_key} ${BUILD_FOLDER}/jenkins.priv' + } + + withCredentials([usernamePassword(credentialsId: 'registry_credentials', usernameVariable: 'registry_user', passwordVariable: 'registry_token')]) { + withCredentials([[ $class: 'AmazonWebServicesCredentialsBinding', credentialsId: 'awsID', accessKeyVariable: 'AWS_ACCESS_KEY_ID', secretKeyVariable: 'AWS_SECRET_ACCESS_KEY']]) { + // This is safe, client and secret will be masked in the output + // echoInfo "aws client = $AWS_ACCESS_KEY_ID" + // echoInfo "aws secret = $AWS_SECRET_ACCESS_KEY" + echoInfo "... running packer build" + // sh 'export LOG_PATH=. && export PACKER_LOG=1 && export AWS_MAX_ATTEMPTS=400 && export AWS_POLL_DELAY_SECONDS=15 && cd ${BUILD_FOLDER} && pwd && ls -lart && date && log_file="${LOG_PATH}/build_$(echo "${ODS_BRANCH}" | tr "/" "_")_$(date +%Y%m%dT%H%M%S).log" && cd ods-core && time bash 2>&1 ods-devenv/packer/create_ods_box_image.sh --target create_ods_box_ami --aws-access-key "$AWS_ACCESS_KEY_ID" --aws-secret-key "$AWS_SECRET_ACCESS_KEY" --ods-branch "${ODS_BRANCH}" --instance-type ${instance_type}' + sh ''' + set -x + export LOG_PATH=. && export log_path=. 
&& export build_path="${HOME}/opendevstack/builds" && \ + export build_result_path="${HOME}/opendevstack/packer_build_result" && \ + export PACKER_LOG=1 && export AWS_MAX_ATTEMPTS=180 && export AWS_POLL_DELAY_SECONDS=60 && \ + cd ${BUILD_FOLDER} && pwd && ls -lart && date && \ + log_file="${LOG_PATH}/build_$(echo "${ODS_BRANCH}" | tr "/" "_")_$(date +%Y%m%dT%H%M%S).log" && \ + cd ods-core && time bash 2>&1 ods-devenv/packer/create_ods_box_image.sh --target create_ods_box_ami \ + --aws-access-key "$AWS_ACCESS_KEY_ID" --aws-secret-key "$AWS_SECRET_ACCESS_KEY" --ods-branch "${ODS_BRANCH}" \ + --instance-type "${instance_type}" --build-folder "${BUILD_FOLDER}" \ + --pub-key "${BUILD_FOLDER}/jenkins.pub" --priv-key "${BUILD_FOLDER}/jenkins.priv" + ''' + } + } + + echoInfo "...done!" + + buildBadge.setStatus("passing") + + } catch (Exception err) { + buildBadge.setStatus("failing") + + echoError "error: " + err + throw err + } + } + } + } + + } +} + +def echoInfo(msg){ + echo "\033[32m ${msg} \033[0m" +} + +def echoError(msg){ + echo "\033[31m ${msg} \033[0m" +} diff --git a/ods-devenv/packer/jenkins/Jenkinsfile_createBuildBot b/ods-devenv/packer/jenkins/Jenkinsfile_createBuildBot new file mode 100644 index 000000000..daf1e35f3 --- /dev/null +++ b/ods-devenv/packer/jenkins/Jenkinsfile_createBuildBot @@ -0,0 +1,121 @@ +def buildBadge = addEmbeddableBadgeConfiguration(id: "odsbuild", status: "started") + +pipeline { + + parameters { + string(name: 'ODS_BRANCH', defaultValue: 'experimental', description: "ods-core branch to checkout.") + } + + environment { + // ODS_BRANCH = "experimental" + JOB_NAME = "ods-build-${ODS_BRANCH}-branch" + BUILD_FOLDER = "${WORKSPACE_TMP}/${BUILD_NUMBER}" + instance_type = "t2.micro" + } + + agent { + node('edpBox') + } + + + options { + ansiColor('xterm') + } + + + stages { + stage('ODS BuildBot: running AMI build!') { + steps { + script { + try { + echoInfo "Setting build badge status!" 
+ buildBadge.setStatus("running") + + echoInfo "Started ODS AMI build number '${BUILD_NUMBER}' for branch: '${ODS_BRANCH}'" + echoInfo "BUILD_FOLDER = '${BUILD_FOLDER}'" + echoInfo "... logging some environment details" + sh 'whoami && pwd && ls -lart' + sh 'echo $PATH' + sh 'git version' + + echoInfo "... create temp build folder" + sh 'mkdir -p ${BUILD_FOLDER}' + echoInfo "... cloning ods-core && checkout branch ${ODS_BRANCH}" + sh 'cd ${BUILD_FOLDER} && git clone https://github.com/opendevstack/ods-core.git && cd ods-core && git checkout ${ODS_BRANCH}' + + echoInfo "... preparing temp build folder" + withCredentials([sshUserPrivateKey(credentialsId: "edp_slave_openshift_pub", keyFileVariable: 'ssh_public_key'), + sshUserPrivateKey(credentialsId: "edp_slave_openshift_202205", keyFileVariable: 'ssh_private_key')]) { + sh 'cp ${ssh_public_key} ${BUILD_FOLDER}/jenkins.pub' + sh 'cp ${ssh_private_key} ${BUILD_FOLDER}/jenkins.priv' + } + + withCredentials([usernamePassword(credentialsId: 'registry_credentials', usernameVariable: 'registry_user', passwordVariable: 'registry_token')]) { + withCredentials([[ $class: 'AmazonWebServicesCredentialsBinding', credentialsId: 'awsID', accessKeyVariable: 'AWS_ACCESS_KEY_ID', secretKeyVariable: 'AWS_SECRET_ACCESS_KEY']]) { + // This is safe, client and secret will be masked in the output + // echoInfo "aws client = $AWS_ACCESS_KEY_ID" + // echoInfo "aws secret = $AWS_SECRET_ACCESS_KEY" + echoInfo "... running packer build" + + // For building BOXES (Initial version): + // sh 'export LOG_PATH=. 
&& export PACKER_LOG=1 && export AWS_MAX_ATTEMPTS=400 && export AWS_POLL_DELAY_SECONDS=15 \ + // && cd ${BUILD_FOLDER} && pwd && ls -lart && date && log_file="${LOG_PATH}/build_$(echo "${ODS_BRANCH}" | tr "/" "_")_$(date +%Y%m%dT%H%M%S).log" \ + // && cd ods-core && time bash 2>&1 ods-devenv/packer/create_ods_box_image.sh --target create_ods_box_ami \ + // --aws-access-key "$AWS_ACCESS_KEY_ID" --aws-secret-key "$AWS_SECRET_ACCESS_KEY" --ods-branch "${ODS_BRANCH}" --instance-type ${instance_type}' + + // For building BOXES (Current version): + // set -x + // export LOG_PATH=. && export PACKER_LOG=1 && export AWS_MAX_ATTEMPTS=180 && export AWS_POLL_DELAY_SECONDS=60 && \ + // cd ${BUILD_FOLDER} && pwd && ls -lart && source ${BUILD_FOLDER}/.buildbotrc && date && \ + // log_file="${LOG_PATH}/build_$(echo "${ODS_BRANCH}" | tr "/" "_")_$(date +%Y%m%dT%H%M%S).log" && \ + // cd ods-core && time bash 2>&1 ods-devenv/packer/create_ods_box_image.sh --target create_ods_box_ami \ + // --aws-access-key "$AWS_ACCESS_KEY_ID" --aws-secret-key "$AWS_SECRET_ACCESS_KEY" --ods-branch "${ODS_BRANCH}" \ + // --instance-type ${instance_type} --pub-key ${BUILD_FOLDER}/jenkins.pub --priv-key ${BUILD_FOLDER}/jenkins.priv + + // For building buildBots (taken from documentation): + // export PACKER_LOG=1 && export AWS_MAX_ATTEMPTS=400 && export AWS_POLL_DELAY_SECONDS=15 && \ + // source ~/opendevstack/ods-core/ods-devenv/buildbot/scripts/.buildbotrc && date && cd ~/opendevstack/ods-core && \ + // time bash 2>&1 ods-devenv/packer/create_ods_box_image.sh --target create_ods_box_ami --aws-access-key "${aws_access_key}" \ + // --aws-secret-key "${aws_secret_access_key}" --ods-branch "${branch}" --instance-type ${instance_type} \ + // | tr "/" "_")_$(date +\%Y\%m\%dT\%H\%M\%S).log" + + // Now the current version for building buildBots: + sh ''' + set -x + export LOG_PATH=. 
&& export PACKER_LOG=1 && export AWS_MAX_ATTEMPTS=180 && export AWS_POLL_DELAY_SECONDS=60 && \ + export log_path="${HOME}/logs" && export build_path="${HOME}/opendevstack/builds" && + export build_result_path="${HOME}/opendevstack/packer_build_result" && + export log_file="${LOG_PATH}/build_$(echo "${ODS_BRANCH}" | tr "/" "_")_$(date +%Y%m%dT%H%M%S).log" && \ + export branch="${ODS_BRANCH}" && export instance_type="t2.micro" && + date && \ + cd ods-core && time bash 2>&1 ods-devenv/packer/create_ods_box_image.sh --target create_buildBot_ami \ + --aws-access-key "$AWS_ACCESS_KEY_ID" --aws-secret-key "$AWS_SECRET_ACCESS_KEY" --ods-branch "${ODS_BRANCH}" \ + --instance-type ${instance_type} --pub-key ${BUILD_FOLDER}/jenkins.pub --priv-key ${BUILD_FOLDER}/jenkins.priv + ''' + } + } + + echoInfo "...done!" + + buildBadge.setStatus("passing") + + } catch (Exception err) { + buildBadge.setStatus("failing") + + echoError "error: " + err + throw err + } + } + } + } + + } +} + +def echoInfo(msg){ + echo "\033[32m ${msg} \033[0m" +} + +def echoError(msg){ + echo "\033[31m ${msg} \033[0m" +} diff --git a/ods-devenv/scripts/create-base-ami-image.sh b/ods-devenv/scripts/create-base-ami-image.sh index 5d17444b6..de8f6f560 100755 --- a/ods-devenv/scripts/create-base-ami-image.sh +++ b/ods-devenv/scripts/create-base-ami-image.sh @@ -2,6 +2,16 @@ set -eu +echo " " +echo "Pre-Usage: " +echo "---------- " +echo "$ sudo yum -y install epel-release " +echo "$ sudo yum -y install curl jq" +echo "$ base64 --v ; jq --version ; curl --version " +echo "$ curl -sSL "https://api.github.com/repos/opendevstack/ods-core/contents/ods-devenv/scripts/create-base-ami-image.sh?ref=experimental" | jq -r ".content" | base64 --decode > create-base-ami-image.sh " +echo "$ chmod +x create-base-ami-image.sh " +echo "$ ./create-base-ami-image.sh [buildBot] " +echo " " echo "This script is in charge of configuring the base AMI image we use." read -p "Continue (y/n) ? 
" yn if [ -z "$yn" ] || [ "y" != "$yn" ]; then @@ -10,20 +20,12 @@ if [ -z "$yn" ] || [ "y" != "$yn" ]; then fi function general_configuration() { - sudo yum update -y - sudo yum install -y yum-utils epel-release https://repo.ius.io/ius-release-el7.rpm - sudo yum -y install https://packages.endpointdev.com/rhel/7/os/x86_64/endpoint-repo.x86_64.rpm - sudo yum -y install git gitk iproute lsof tigervnc-server remmina firewalld git2u-all glances golang jq tree htop etckeeper - - curl -LO https://dl.google.com/linux/chrome/rpm/stable/x86_64/google-chrome-stable-94.0.4606.81-1.x86_64.rpm - sudo yum install -y google-chrome-stable-94.0.4606.81-1.x86_64.rpm - rm -f google-chrome-stable-94.0.4606.81-1.x86_64.rpm - - sudo yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo - sudo yum -y install docker-ce-3:19.03.14-3.el7.x86_64 - sudo yum install -y centos-release-openshift-origin311 - sudo yum install -y origin-clients - sudo yum install -y https://s3.amazonaws.com/ec2-downloads-windows/SSMAgent/latest/linux_amd64/amazon-ssm-agent.rpm + sudo yum update -y || true + sudo yum install -y yum-utils epel-release https://repo.ius.io/ius-release-el7.rpm || true + sudo yum -y install https://packages.endpointdev.com/rhel/7/os/x86_64/endpoint-repo.x86_64.rpm || true + sudo yum -y install git iproute lsof git2u-all glances golang jq tree unzip || true + sudo yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo || true + sudo yum -y install docker-ce-3:19.03.14-3.el7.x86_64 || true sudo sed -i "s@.*PasswordAuthentication\ .*@PasswordAuthentication yes@g" /etc/ssh/sshd_config sudo sed -i "s@.*ChallengeResponseAuthentication\ .*@ChallengeResponseAuthentication yes@g" /etc/ssh/sshd_config @@ -32,25 +34,14 @@ function general_configuration() { sudo systemctl restart sshd sudo systemctl status sshd - sudo adduser openshift - echo -e "openshift\nopenshift" | sudo passwd openshift - sudo usermod -a -G wheel openshift - sudo 
sed -i 's/%wheel\s*ALL=(ALL)\s*ALL/%wheel ALL=(ALL) NOPASSWD: ALL/g' /etc/sudoers - - sudo usermod -a -G docker openshift - - # etckeeper - if [ ! -d /etc/.git ]; then - cd /etc/ - sudo etckeeper init - sudo etckeeper commit -m "Initial commit" - else - echo "WARNING: git repository in etc folder has been created before." + # Packer + sudo yum-config-manager --add-repo https://rpm.releases.hashicorp.com/RHEL/hashicorp.repo || true + if [ -f "/etc/yum.repos.d/hashicorp.repo" ]; then + echo "Disable hashicorp yum repo by default." + sudo sed -i 's@^\s*enabled\s*=.*$@enabled = 0@g' /etc/yum.repos.d/hashicorp.repo + grep -i 'enabled' /etc/yum.repos.d/hashicorp.repo fi - - # GUI: - sudo yum groupinstall -y "MATE Desktop" || echo "ERROR: Could not install mate desktop" - sudo yum groups -y install "GNOME Desktop" || echo "ERROR: Could not install gnome desktop" + sudo yum install -y --enablerepo hashicorp packer || true # JDK rm -fv /tmp/adoptopenjdk.repo || echo "ERROR: Could not remove file /tmp/adoptopenjdk.repo " @@ -61,12 +52,77 @@ function general_configuration() { echo "gpgcheck=1" >> /tmp/adoptopenjdk.repo echo "gpgkey=https://adoptopenjdk.jfrog.io/adoptopenjdk/api/gpg/key/public" >> /tmp/adoptopenjdk.repo - sudo mv /tmp/adoptopenjdk.repo /etc/yum.repos.d/adoptopenjdk.repo + sudo rm -fv /etc/yum.repos.d/adoptopenjdk.repo + sudo mv -vf /tmp/adoptopenjdk.repo /etc/yum.repos.d/adoptopenjdk.repo - sudo yum -y install adoptopenjdk-8-hotspot adoptopenjdk-11-hotspot adoptopenjdk-8-hotspot-jre adoptopenjdk-11-hotspot-jre + # No more in use: adoptopenjdk-8-hotspot adoptopenjdk-8-hotspot-jre + sudo yum -y install adoptopenjdk-11-hotspot adoptopenjdk-11-hotspot-jre || true sudo yum -y remove java-1.7.0-openjdk java-1.7.0-openjdk-headless \ java-1.8.0-openjdk.x86_64 java-1.8.0-openjdk-headless.x86_64 \ java-11-openjdk.x86_64 java-11-openjdk-headless.x86_64 || true + yum list installed | grep -i '\(openjdk\|jdk\|java\)' + + # Install + curl -sSL --retry 5 --retry-delay 5 
--retry-max-time 300 "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "awscliv2.zip" + unzip awscliv2.zip + sudo ./aws/install + sudo rm -fr awscliv2.zip ./aws/install +} + +function permissions_fixes() { + local PENDING="false" + grep -i openshift /etc/passwd || PENDING="true" + if [ "true" == "${PENDING}" ] ; then + sudo adduser openshift + echo -e "openshift\nopenshift" | sudo passwd openshift + fi + + PENDING="false" + grep -i openshift /etc/group | grep -i wheel || PENDING="true" + if [ "true" == "${PENDING}" ] ; then + sudo usermod -a -G wheel openshift + fi + + PENDING="false" + sudo grep -v "^\s*#" /etc/sudoers | grep -i "wheel" | grep -i "nopasswd" || PENDING="true" + if [ "true" == "${PENDING}" ] ; then + sudo sed -i 's/%wheel\s*ALL=(ALL)\s*ALL/%wheel ALL=(ALL) NOPASSWD: ALL/g' /etc/sudoers + fi + + PENDING="false" + grep -i openshift /etc/group | grep -i docker || PENDING="true" + if [ "true" == "${PENDING}" ] ; then + sudo usermod -a -G docker openshift + fi +} + +function configuration_extras() { + + # Tools not needed in buildBot + sudo yum -y install tigervnc-server remmina firewalld gitk htop etckeeper + + # etckeeper + if [ ! -d /etc/.git ]; then + cd /etc/ + sudo etckeeper init + sudo etckeeper commit -m "Initial commit" + else + echo "WARNING: git repository in etc folder has been created before." 
+ fi + + # OCP 3 + sudo yum install -y centos-release-openshift-origin311 + sudo yum install -y origin-clients + sudo yum install -y https://s3.amazonaws.com/ec2-downloads-windows/SSMAgent/latest/linux_amd64/amazon-ssm-agent.rpm + + # GUI: + sudo yum groupinstall -y "MATE Desktop" || echo "ERROR: Could not install mate desktop" + sudo yum groups -y install "GNOME Desktop" || echo "ERROR: Could not install gnome desktop" + + # Google Chrome + curl -LO https://dl.google.com/linux/chrome/rpm/stable/x86_64/google-chrome-stable-94.0.4606.81-1.x86_64.rpm + sudo yum install -y google-chrome-stable-94.0.4606.81-1.x86_64.rpm + rm -f google-chrome-stable-94.0.4606.81-1.x86_64.rpm } @@ -135,6 +191,7 @@ function setup_xrdp() { function fix_locales() { + sudo rm -fv /etc/profile.d/sh.local || true echo ' ' | sudo tee -a /etc/profile.d/sh.local echo 'export LC_ALL="en_US.UTF-8"' | sudo tee -a /etc/profile.d/sh.local echo 'export LC_CTYPE="en_US.UTF-8"' | sudo tee -a /etc/profile.d/sh.local @@ -142,13 +199,22 @@ function fix_locales() { echo 'export LANG="en_US.UTF-8"' | sudo tee -a /etc/profile.d/sh.local cd /etc - sudo git add . - sudo git commit -a -m "Configured centos locales." + if [ -d ".git" ]; then + sudo git add . + sudo git commit -a -m "Configured centos locales." + fi } general_configuration -setup_xrdp +permissions_fixes + +if [ -z "${1}" ] || [ "" == "${1}" ]; then + # No need for this ones if creating a buildBot ... 
+ configuration_extras + setup_xrdp +fi + fix_locales echo " " diff --git a/ods-devenv/scripts/deploy.sh b/ods-devenv/scripts/deploy.sh index 3232bffd7..a82f3dc86 100755 --- a/ods-devenv/scripts/deploy.sh +++ b/ods-devenv/scripts/deploy.sh @@ -73,6 +73,8 @@ function display_usage() { echo "${ME} --branch feature/ods-devenv --target install_docker" echo "${ME} --branch feature/ods-devenv --target startup_atlassian_bitbucket" echo "${ME} --branch task/upgrade-atlassian-stack --target atlassian_stack_reset" + echo "${ME} --target restart_ods # Does a full restart of ods services. Its cost is really high." + echo "${ME} --target check_ods_status # This ones restarts the service not working as expected." echo echo "Since several of the functions will require that other functions have prepared the system first," echo "the script provides utility functions like basic_vm_setup which will call functions in this" @@ -90,6 +92,10 @@ function configure_sshd_server() { sleep 5 echo "Showing sshd_config important settings: " sudo cat /etc/ssh/sshd_config | grep -v '\(^\s*#.*$\|^\s*$\)' + echo "Configure ssh client (used for git, etc)... 
" + sudo sed -i 's|^\ *\# \+IdentityFile\ \+\(.*\)|IdentityFile \1|g' /etc/ssh/ssh_config + grep -i 'IdentityFile' /etc/ssh/ssh_config + echo " " } function configure_sshd_openshift_keys() { @@ -133,6 +139,7 @@ function check_system_setup() { echo "alias startup_ods='/home/openshift/opendevstack/ods-core/ods-devenv/scripts/deploy.sh --target startup_ods'" echo "alias stop_ods='/home/openshift/opendevstack/ods-core/ods-devenv/scripts/deploy.sh --target stop_ods'" echo "alias restart_atlassian_suite='/home/openshift/opendevstack/ods-core/ods-devenv/scripts/deploy.sh --target restart_atlassian_suite'" + echo "alias restart_ods='/home/openshift/opendevstack/ods-core/ods-devenv/scripts/deploy.sh --target restart_ods'" } >> ~/.bashrc # suppress sudo timeout @@ -202,10 +209,24 @@ function check_system_setup() { # None ####################################### function setup_dnsmasq() { - echo "Setting up dnsmasq DNS service" + echo "setup_dnsmasq: Setting up dnsmasq DNS service" local dnsmasq_conf_path dnsmasq_conf_path="/etc/dnsmasq.conf" + if ! >/dev/null command -v dnsmasq + then + local already_installed="y" + sudo yum list installed 2>&1 | grep -iq 'dnsmasq' || already_installed="n" + if [ "y" != "$already_installed" ] ; then + sudo yum install -y dnsmasq + else + echo "Not installing dnsmasq because already installed." + fi + fi + + sudo systemctl stop dnsmasq || echo "WARNING: Could not stop service dnsmasq !!!" + sleep 10 + # tear down old running dnsmasq instances local job_id for job_id in $(ps -ef | grep dnsmasq | grep -v grep | grep -v setup_dnsmasq | awk -v col=2 '{print $2}') @@ -213,17 +234,12 @@ function setup_dnsmasq() { sudo kill -9 "${job_id}" || true done - if ! >/dev/null command -v dnsmasq - then - sudo yum install -y dnsmasq - fi - sudo systemctl start dnsmasq sleep 10 if ! sudo systemctl status dnsmasq | grep -q active then echo "dnsmasq startup appears to have failed." 
- exit + exit 1 else echo "dnsmasq service up and running" fi @@ -231,9 +247,9 @@ function setup_dnsmasq() { # if script runs for the 2nd time on a machine, backup dnsmasq.conf from orig if [[ -f "${dnsmasq_conf_path}.orig" ]] then - sudo cp "${dnsmasq_conf_path}.orig" "${dnsmasq_conf_path}" + sudo cp -vf "${dnsmasq_conf_path}.orig" "${dnsmasq_conf_path}" else - sudo cp "${dnsmasq_conf_path}" "${dnsmasq_conf_path}.orig" + sudo cp -vf "${dnsmasq_conf_path}" "${dnsmasq_conf_path}.orig" fi sudo sed -i "s|#domain-needed|domain-needed|" "${dnsmasq_conf_path}" @@ -255,10 +271,29 @@ function setup_dnsmasq() { # dnsmasq logs on stderr (?!) if ! 2>&1 dnsmasq --test | grep -q "dnsmasq: syntax check OK." then + echo " " echo "dnsmasq configuration failed. Please check ${dnsmasq_conf_path} and compare with ${dnsmasq_conf_path}.orig" + echo " " + sleep 2 + echo "File ${dnsmasq_conf_path}: " + echo " " + cat ${dnsmasq_conf_path} + echo " " + echo "File ${dnsmasq_conf_path}.orig: " + echo " " + cat ${dnsmasq_conf_path}.orig + echo " " + echo " " + sleep 2 + diff ${dnsmasq_conf_path} ${dnsmasq_conf_path}.orig || true + echo " " + echo " " + sleep 10 + # return 1 else echo "dnsmasq is ok with configuration changes." fi + echo " " sudo chattr -i /etc/resolv.conf @@ -468,6 +503,11 @@ function install_packages_yum_utils_epel_release() { function setup_rdp() { install_packages_yum_utils_epel_release + if ! sudo yum list installed 2>&1 | grep -iq xrdp ; then + sudo yum -y install xrdp || true + else + echo "Not installing xrdp because it was installed before." + fi sudo systemctl start xrdp || sudo systemctl status xrdp || echo "Error starting xrdp service..." sudo netstat -antup | grep xrdp || echo "Error checking if xrdp ports are listening for connections..." sudo systemctl enable xrdp || echo "No need to enable xrdp service in systemctl. " @@ -563,9 +603,22 @@ function startup_openshift_cluster() { echo "oc cluster up ..." 
oc cluster up --base-dir="${cluster_dir}" --insecure-skip-tls-verify=true --routing-suffix "ocp.odsbox.lan" --public-hostname "ocp.odsbox.lan" # Only if something fails, please... --loglevel=5 --server-loglevel=5 + if [ 0 -ne $? ]; then + echo "ERROR: Could not start oc cluster (oc cluster up)" + echo " " + exit 1 + fi + echo " " echo "Log into oc cluster with system:admin" oc login -u system:admin + if [ 0 -ne $? ]; then + echo "ERROR: Could not log into cluster with system:admin" + echo " " + exit 1 + fi + + wait_until_ocp_is_up || exit 1 } ####################################### @@ -685,14 +738,53 @@ function print_system_setup() { ###### function atlassian_stack_reset() { - echo "atlassian_stack_reset: " - echo "IMPORTANT: remove from /etc/hosts lines with references to jira and bitbucket before run this method" + echo " " + echo "atlassian_stack_reset: this functionality removes everything saved in mysql for jira, bitbucket and crowd," + echo "atlassian_stack_reset: and reloads the basic information they need to work." + echo "atlassian_stack_reset: IMPORTANT: Before running this method it is recommended to remove from /etc/hosts " + echo " all lines with references to mysql, jira, bitbucket and crowd " + echo " " + read -p "continue? y/n " yn + if [ "y" != "$yn" ] && [ "Y" != "$yn" ]; then + echo "Aborted by user request." 
+ exit 0 + fi - docker ps -a | grep -i "\(jira\|atlass\|bitbucket\)" | sed 's@[[:space:]]\+@ @g' | cut -d' ' -f1 | while read -r container_id ; - do - docker stop $container_id - docker rm $container_id - done + # Previously filtering by \(jira\|atlass\|bitbucket\) + # for container_name in ${atlassian_mysql_container_name} ; + # do + # docker ps -a | grep -i "${container_name}" + # docker ps -a | grep -i "${container_name}" | sed 's@[[:space:]]\+@ @g' | cut -d' ' -f1 | while read -r container_id ; + # do + # docker stop $container_id + # docker rm $container_id + # done + # done + + docker container stop "${atlassian_bitbucket_container_name}" || echo "Not found docker container ${atlassian_bitbucket_container_name}" + docker container stop "${atlassian_jira_container_name}" || echo "Not found docker container ${atlassian_jira_container_name}" + docker container stop "${atlassian_crowd_container_name}" || echo "Not found docker container ${atlassian_crowd_container_name}" + docker container stop "${atlassian_mysql_container_name}" || echo "Not found docker container ${atlassian_mysql_container_name}" + + docker container rm "${atlassian_bitbucket_container_name}" || echo "Not found docker container ${atlassian_bitbucket_container_name}" + docker container rm "${atlassian_jira_container_name}" || echo "Not found docker container ${atlassian_jira_container_name}" + docker container rm "${atlassian_crowd_container_name}" || echo "Not found docker container ${atlassian_crowd_container_name}" + docker container rm "${atlassian_mysql_container_name}" || echo "Not found docker container ${atlassian_mysql_container_name}" + + docker volume rm odsCrowdVolume || echo "Not found docker volume odsCrowdVolume " + rm -fR $HOME/jira_data ${HOME}/bitbucket_data ${HOME}/mysql_data || + sudo rm -fR $HOME/jira_data ${HOME}/bitbucket_data ${HOME}/mysql_data + + if [ -d $HOME/jira_data ] || [ -d ${HOME}/bitbucket_data ] || [ -d ${HOME}/mysql_data ]; then + echo "Could NOT remove 
folders $HOME/jira_data ${HOME}/bitbucket_data ${HOME}/mysql_data " + exit 1 + fi + + echo " " + echo "Now regenerating all pods needed for atlassian stack... " + echo " " + + prepare_atlassian_stack startup_and_follow_atlassian_mysql @@ -893,24 +985,48 @@ function fix_atlassian_mysql_loaded_data_checks() { ####################################### function startup_and_follow_atlassian_mysql() { startup_atlassian_mysql - echo -n "Waiting for mysqld to become available" - until [[ "$(docker inspect --format '{{.State.Health.Status}}' ${atlassian_mysql_container_name})" == 'healthy' ]] + follow_atlassian_mysql +} + +follow_atlassian_mysql() { + local retryMaxIn=${1:-120} + follow_container_health_status ${atlassian_mysql_container_name} ${retryMaxIn} +} + +follow_container_health_status() { + local container_name=${1} + local retryMaxIn=${2:-120} + local retryMax=$((retryMaxIn)) + local retryNum=0 + + echo "[STATUS CHECK] Testing if service in container ${container_name} is available (or waiting for it). Max retries: ${retryMax} " + echo -n "Working..." + until [[ "$(docker inspect --format '{{.State.Health.Status}}' ${container_name})" == 'healthy' ]] do + let retryNum+=1 + if [ ${retryMax} -le ${retryNum} ]; then + echo "[STATUS CHECK] ERROR: Maximum amount of retries reached looking for container ${container_name} to be ready: $retryNum / ${retryMax}" + sleep 1 + return 1 + fi + echo -n "." sleep 1 done - echo "mysqld up and running." + echo " " + echo "[STATUS CHECK] Service is available (healthy) in container ${container_name}" + echo " " + return 0 } function startup_and_follow_bitbucket() { startup_atlassian_bitbucket - echo -n "Waiting for bitbucket to become available" - until [[ "$(docker inspect --format '{{.State.Health.Status}}' ${atlassian_bitbucket_container_name})" == 'healthy' ]] - do - echo -n "." - sleep 1 - done - echo "bitbucket up and running." 
+ follow_bitbucket +} + +function follow_bitbucket() { + local retryMaxIn=${1:-120} + follow_container_health_status ${atlassian_bitbucket_container_name} ${retryMaxIn} } function startup_and_follow_jira() { @@ -948,6 +1064,7 @@ function configure_jira2crowd() { echo "Configure Jira against Crowd directory ..." # login to Jira curl -sS 'http://172.17.0.1:18080/login.jsp' \ + --retry 10 --retry-delay 5 --retry-max-time 60 --max-time 120 \ -b "${cookie_jar_path}" \ -c "${cookie_jar_path}" \ --data 'os_username=openshift&os_password=openshift&os_destination=&user_role=&atl_token=&login=Log+In' \ @@ -961,7 +1078,8 @@ function configure_jira2crowd() { # docker logs --details jira || echo "Problem getting docker logs of jira container !! " login_page_fn="/tmp/login-page-`date +%Y%m%d_%H%M%S`.log" - curl -sS --insecure --location --connect-timeout 30 --max-time 120 --retry-delay 5 --retry 5 --verbose \ + curl -sS --insecure --location --connect-timeout 30 --verbose \ + --retry 10 --retry-delay 5 --retry-max-time 60 --max-time 120 \ 'http://172.17.0.1:18080/' -u "openshift:openshift" --output ${login_page_fn} if [ ! -f ${login_page_fn} ]; then echo "WARNING: File with login page (${login_page_fn}) is EMPTY or does NOT exist !!! " @@ -972,6 +1090,7 @@ function configure_jira2crowd() { echo "Retrieving Jira xsrf atl_token to file ${atl_token_fn} ..." 
curl -sS --connect-timeout 30 --max-time 120 --retry-delay 5 --retry 5 --verbose \ 'http://172.17.0.1:18080/plugins/servlet/embedded-crowd/configure/new/' \ + --retry 10 --retry-delay 5 --retry-max-time 60 --max-time 120 \ -u "openshift:openshift" \ -b "${cookie_jar_path}" \ -c "${cookie_jar_path}" \ @@ -1009,6 +1128,7 @@ function configure_jira2crowd() { # WebSudo authentication - sign in as admin curl -sS 'http://172.17.0.1:18080/secure/admin/WebSudoAuthenticate.jspa' \ + --retry 10 --retry-delay 5 --retry-max-time 60 --max-time 120 \ -b "${cookie_jar_path}" \ -c "${cookie_jar_path}" \ --data "webSudoPassword=openshift&webSudoDestination=%2Fsecure%2Fadmin%2FViewApplicationProperties.jspa&webSudoIsPost=false&atl_token=${atl_token}" \ @@ -1021,6 +1141,7 @@ function configure_jira2crowd() { echo "Assuming crowd service listens at ${crowd_service_name}:8095" local crowd_directory_id crowd_directory_id=$(curl -sS 'http://172.17.0.1:18080/plugins/servlet/embedded-crowd/configure/crowd/' \ + --retry 10 --retry-delay 5 --retry-max-time 60 --max-time 120 \ -b "${cookie_jar_path}" \ -c "${cookie_jar_path}" \ --data "name=Crowd+Server&crowdServerUrl=http%3A%2F%2F${crowd_service_name}%3A8095%2Fcrowd%2F&applicationName=jira&applicationPassword=openshift&httpTimeout=&httpMaxConnections=&httpProxyHost=&httpProxyPort=&httpProxyUsername=&httpProxyPassword=&crowdPermissionOption=READ_ONLY&_nestedGroupsEnabled=visible&incrementalSyncEnabled=true&_incrementalSyncEnabled=visible&groupSyncOnAuthMode=ALWAYS&crowdServerSynchroniseIntervalInMin=60&save=Save+and+Test&atl_token=${atl_token}&directoryId=0" \ @@ -1033,6 +1154,7 @@ function configure_jira2crowd() { # sync bitbucket with crowd directory curl -sS "http://172.17.0.1:18080/plugins/servlet/embedded-crowd/directories/sync?directoryId=${crowd_directory_id}&atl_token=${atl_token}" \ + --retry 10 --retry-delay 5 --retry-max-time 60 --max-time 120 \ -b "${cookie_jar_path}" \ -c "${cookie_jar_path}" \ --compressed \ @@ -1041,6 +1163,74 
@@ function configure_jira2crowd() { rm "${cookie_jar_path}" } +# Not working as expected. To be removed. +function remove_jira_just_upgraded_message() { + FILES_PATH="${HOME}/tmp/remove_jira_just_upgraded_message" + rm -frv ${FILES_PATH} + mkdir -pv ${FILES_PATH} + + local cookie_jar_path="${FILES_PATH}/jira_cookie_jar.txt" + local errors_file="${FILES_PATH}/errors.txt" + local headers_file="${FILES_PATH}/headers.txt" + rm -fv ${errors_file} + + local jira_login_reply="${FILES_PATH}/q_jira_login_1.html" + rm -fv ${jira_login_reply} + echo "remove_jira_just_upgraded_message: Login to Jira: step 1" + curl -sSL --insecure 'http://172.17.0.1:18080/login.jsp' \ + -b "${cookie_jar_path}" \ + -c "${cookie_jar_path}" \ + --data 'os_username=openshift&os_password=openshift&os_destination=&user_role=&atl_token=&login=Log+In' \ + --compressed \ + --output ${jira_login_reply} --dump-header ${headers_file} --stderr ${errors_file} \ + || echo "Error in login step 1" | tee -a ${errors_file} + + echo "remove_jira_just_upgraded_message: Login to Jira: step 2" + local jira_login_page_fn="${FILES_PATH}/q_jira_login_2.html" + rm -fv ${jira_login_page_fn} + curl -sSL --insecure --connect-timeout 30 --max-time 120 --retry-delay 5 --retry 5 --verbose \ + 'http://172.17.0.1:18080/' -u "openshift:openshift" \ + --output ${jira_login_page_fn} --dump-header ${headers_file} --stderr ${errors_file} \ + || echo "Error in login step 2" | tee -a ${errors_file} + + echo "remove_jira_just_upgraded_message: PostUpgradeLandingPage" + local jira_postUpgradeLandingPage="${FILES_PATH}/q_jira_postUpgradeLandingPage.html" + rm -fv ${jira_postUpgradeLandingPage} + curl -sSL --insecure 'http://172.17.0.1:18080/secure/PostUpgradeLandingPage.jspa' \ + -b "${cookie_jar_path}" \ + -c "${cookie_jar_path}" \ + --compressed \ + --output ${jira_postUpgradeLandingPage} --dump-header ${headers_file} --stderr ${errors_file} \ + || echo "Error in jira_postUpgradeLandingPage" | tee -a ${errors_file} + # | pup --color + 
+ echo "remove_jira_just_upgraded_message: Check 1." + local jira_check1="${FILES_PATH}/q_jira_check1.html" + rm -fv ${jira_check1} + curl -sSL --insecure 'http://172.17.0.1:18080/secure/WelcomeToJIRA.jspa' \ + -b "${cookie_jar_path}" \ + -c "${cookie_jar_path}" \ + --compressed \ + --output ${jira_check1} --dump-header ${headers_file} --stderr ${errors_file} \ + || echo "Error in jira_check1" | tee -a ${errors_file} + # | pup --color + + echo "remove_jira_just_upgraded_message: Check 2." + local jira_check2="${FILES_PATH}/q_jira_check2.html" + rm -fv ${jira_check2} + curl -sSL --insecure 'http://172.17.0.1:18080/secure/BrowseProjects.jspa' \ + -b "${cookie_jar_path}" \ + -c "${cookie_jar_path}" \ + --compressed \ + --output ${jira_check2} --dump-header ${headers_file} --stderr ${errors_file} \ + || echo "Error in jira_check2" | tee -a ${errors_file} + # | pup --color + + echo " " + echo "DONE ?" + echo " " +} + ####################################### # When BitBucket and Crowd both are up and running, this function can be used # to configure a BitBucket directory service against Crowd. 
@@ -1360,48 +1550,92 @@ SJ+SA7YG9zthbLxRoBBEwIURQr5Zy1B8PonepyLz3UhL7kMVEs=X02q6' } function wait_until_atlassian_crowd_is_up() { - wait_until_http_svc_is_up "${atlassian_crowd_container_name}" "http://${atlassian_crowd_host}:${atlassian_crowd_port_internal}/" + local retryMax=${1:-20} + wait_until_http_svc_is_up "${atlassian_crowd_container_name}" "http://${atlassian_crowd_host}:${atlassian_crowd_port_internal}/" "${retryMax}" } function wait_until_atlassian_bitbucket_is_up() { - wait_until_http_svc_is_up "${atlassian_bitbucket_container_name}" "http://${atlassian_bitbucket_host}:${atlassian_bitbucket_port_internal}/" + local retryMax=${1:-20} + wait_until_http_svc_is_up "${atlassian_bitbucket_container_name}" "http://${atlassian_bitbucket_host}:${atlassian_bitbucket_port_internal}/" "${retryMax}" } function wait_until_atlassian_jira_is_up() { - wait_until_http_svc_is_up "${atlassian_jira_container_name}" "http://${atlassian_jira_host}:8080/" + local retryMax=${1:-20} + wait_until_http_svc_is_up "${atlassian_jira_container_name}" "http://${atlassian_jira_host}:8080/" "${retryMax}" } function wait_until_ocp_is_up() { - wait_until_http_svc_is_up "ocp" "https://ocp.odsbox.lan:8443/" + local retryMax=${1:-20} + wait_until_http_svc_is_up "ocp" "https://ocp.odsbox.lan:8443/" "${retryMax}" } function wait_until_http_svc_is_up() { - SVC_NAME="${1}" - SVC_HTTP_URL="${2}" - CURL_SVC_OUTPUT_FILE="/tmp/result-curl-svc-${SVC_NAME}-output" - CURL_SVC_HEADERS_FILE="/tmp/result-curl-svc-${SVC_NAME}-headers" + local SVC_NAME="${1}" + local SVC_HTTP_URL="${2}" + local CURL_SVC_OUTPUT_FILE="/tmp/result-curl-svc-${SVC_NAME}-output" + local CURL_SVC_HEADERS_FILE="/tmp/result-curl-svc-${SVC_NAME}-headers" + local CURL_LOGS_CHECK_SVC_FILE="/tmp/result-curl-svc-${SVC_NAME}-curlresult" + local retryMax=${3:-20} + + local RETURN_VALUE=0 + wait_until_http_svc_is_up_advanced "$SVC_NAME" "$SVC_HTTP_URL" "$CURL_SVC_OUTPUT_FILE" "$CURL_SVC_HEADERS_FILE" \ + "${CURL_LOGS_CHECK_SVC_FILE}" 
$retryMax || RETURN_VALUE=1 + if [ 0 -ne ${RETURN_VALUE} ]; then + echo "[STATUS CHECK] ERROR: Service is down and we cannot live without it: ${SVC_NAME}" + return 1 + fi + return 0 +} + +function wait_until_http_svc_is_up_advanced() { + local SVC_NAME="${1}" + local SVC_HTTP_URL="${2}" + local CURL_SVC_OUTPUT_FILE="${3}" + local CURL_SVC_HEADERS_FILE="${4}" + local CURL_LOGS_CHECK_SVC_FILE="${5}" + local retryMaxIn=${6:-20} + local retryMax=$((retryMaxIn)) echo " " local isUp="false" local retryNum=0 - local retryMax=100 + + if [ -f ${CURL_SVC_OUTPUT_FILE} ] || [ -f ${CURL_SVC_HEADERS_FILE} ]; then + echo "Removing files: rm -fv ${CURL_SVC_OUTPUT_FILE} ${CURL_SVC_HEADERS_FILE} " + rm -fv ${CURL_SVC_OUTPUT_FILE} ${CURL_SVC_HEADERS_FILE} ${CURL_LOGS_CHECK_SVC_FILE} || true + + if [ -f ${CURL_SVC_OUTPUT_FILE} ] || [ -f ${CURL_SVC_HEADERS_FILE} ] || [ -f ${CURL_LOGS_CHECK_SVC_FILE} ]; then + echo "Removing files (with sudo): rm -fv ${CURL_SVC_OUTPUT_FILE} ${CURL_SVC_HEADERS_FILE} ${CURL_LOGS_CHECK_SVC_FILE}" + sudo rm -fv ${CURL_SVC_OUTPUT_FILE} ${CURL_SVC_HEADERS_FILE} ${CURL_LOGS_CHECK_SVC_FILE} || true + fi + fi + while [ "true" != "${isUp}" ]; do - echo "Testing if service ${SVC_NAME} is up at \'${SVC_HTTP_URL}\'. Retry $retryNum / $retryMax " + echo "[STATUS CHECK] Testing if service ${SVC_NAME} is up at \'${SVC_HTTP_URL}\'. Retry $retryNum / $retryMax " let retryNum+=1 if [ ${retryMax} -le ${retryNum} ]; then - echo "Maximum amount of retries reached: $retryNum / $retryMax " + echo "[STATUS CHECK] WARNING: Maximum amount of retries reached: $retryNum / $retryMax " sleep 1 - exit 1 + return 1 fi if [ 0 -ne ${retryNum} ]; then sleep 10 fi - rm -fv ${CURL_SVC_OUTPUT_FILE} ${CURL_SVC_HEADERS_FILE} - if ! curl --insecure -sSL --dump-header ${CURL_SVC_HEADERS_FILE} ${SVC_HTTP_URL} -o ${CURL_SVC_OUTPUT_FILE} ; then + # Remove files from previous execution. 
+ rm -fv ${CURL_SVC_OUTPUT_FILE} ${CURL_SVC_HEADERS_FILE} ${CURL_LOGS_CHECK_SVC_FILE} || true + if [ -f ${CURL_SVC_OUTPUT_FILE} ] || [ -f ${CURL_SVC_HEADERS_FILE} ] || [ -f ${CURL_LOGS_CHECK_SVC_FILE} ]; then + echo "[STATUS CHECK] WARNING: Could NOT remove files ${CURL_SVC_OUTPUT_FILE} ${CURL_SVC_HEADERS_FILE} ${CURL_LOGS_CHECK_SVC_FILE}" + fi + + local CURL_RETURN_VAL=0 + curl --insecure -sSL --retry-delay 2 --retry-max-time 20 --retry 10 --dump-header ${CURL_SVC_HEADERS_FILE} \ + -o ${CURL_SVC_OUTPUT_FILE} ${SVC_HTTP_URL} > ${CURL_LOGS_CHECK_SVC_FILE} 2>&1 || CURL_RETURN_VAL=1 + if [ 0 -ne ${CURL_RETURN_VAL} ]; then echo "Curl replied != 0 for query to ${SVC_HTTP_URL} " echo "Checking if it was caused by a redirect... " + grep -i 'HTTP' ${CURL_LOGS_CHECK_SVC_FILE} || true fi if ! grep -q '^\s*HTTP/[0-9\.]*\s*200[\s]*' ${CURL_SVC_HEADERS_FILE} ; then @@ -1413,6 +1647,9 @@ function wait_until_http_svc_is_up() { isUp="true" done + # Removes tmp files used for checking. + rm -fv ${CURL_SVC_OUTPUT_FILE} ${CURL_SVC_HEADERS_FILE} ${CURL_LOGS_CHECK_SVC_FILE} || true + echo "Service ${SVC_NAME} is up at ${SVC_HTTP_URL}" echo " " return 0 @@ -1595,6 +1832,8 @@ function restart_atlassian_mysql() { inspect_mysql_ip echo "New MySQL container got ip ${atlassian_mysql_ip}. Registering with dns svc..." register_dns "${atlassian_mysql_container_name}" "${atlassian_mysql_ip}" + + follow_atlassian_mysql } ####################################### @@ -1833,7 +2072,8 @@ function create_configuration() { echo " " echo "Create the environment configuration and upload it to Bitbucket ods-configuration repository..." 
pwd - ods-setup/config.sh --verbose --bitbucket "http://openshift:openshift@${atlassian_bitbucket_host}:${atlassian_bitbucket_port_internal}" + # WARN: config.sh param '--verbose' activates bash set -x + ods-setup/config.sh --bitbucket "http://openshift:openshift@${atlassian_bitbucket_host}:${atlassian_bitbucket_port_internal}" pushd ../ods-configuration git init # keep ods-core.env.sample as a reference @@ -1913,7 +2153,8 @@ function create_configuration() { function install_ods_project() { echo " " echo "Installing ods project..." - ods-setup/setup-ods-project.sh --namespace ods --reveal-secrets --verbose --non-interactive + # WARN: setup-ods-project.sh param '--verbose' activates bash set -x + ods-setup/setup-ods-project.sh --namespace ods --reveal-secrets --non-interactive } ####################################### @@ -1928,15 +2169,18 @@ function install_ods_project() { function setup_nexus() { echo "make install-nexus: / apply-nexus:" pushd nexus/ocp-config - tailor apply --namespace "${NAMESPACE}" bc,is --non-interactive --verbose + tailor apply --namespace "${NAMESPACE}" bc,is --non-interactive + # --verbose popd echo "start-nexus-build:" - ocp-scripts/start-and-follow-build.sh --namespace "${NAMESPACE}" --build-config nexus --verbose + ocp-scripts/start-and-follow-build.sh --namespace "${NAMESPACE}" --build-config nexus + # --verbose echo "apply-nexus-deploy:" pushd nexus/ocp-config - tailor apply --namespace "${NAMESPACE}" --exclude bc,is --non-interactive --verbose + tailor apply --namespace "${NAMESPACE}" --exclude bc,is --non-interactive + # --verbose popd echo "make configure-nexus:" @@ -1947,7 +2191,8 @@ function setup_nexus() { nexus_port=$(oc -n ods get route nexus -ojsonpath='{.spec.port.targetPort}') nexus_port=${nexus_port%-*} # truncate -tcp from 8081-tcp - ./configure.sh --namespace ods --nexus="${nexus_url}" --insecure --verbose --admin-password openshift + ./configure.sh --namespace ods --nexus="${nexus_url}" --insecure --admin-password 
openshift + # --verbose popd } @@ -1965,11 +2210,13 @@ function setup_sonarqube() { sudo sysctl -w vm.max_map_count=262144 echo "apply-sonarqube-build:" pushd sonarqube/ocp-config - tailor apply --namespace ${NAMESPACE} bc,is --non-interactive --verbose + tailor apply --namespace ${NAMESPACE} bc,is --non-interactive + # --verbose popd echo "start-sonarqube-build:" - ocp-scripts/start-and-follow-build.sh --namespace ${NAMESPACE} --build-config sonarqube --verbose + ocp-scripts/start-and-follow-build.sh --namespace ${NAMESPACE} --build-config sonarqube + # --verbose return_value=$? if [[ "${return_value}" != "0" ]]; then echo "start-sonarqube-build failed." @@ -1978,7 +2225,8 @@ function setup_sonarqube() { echo "apply-sonarqube-deploy:" pushd sonarqube/ocp-config - tailor apply --namespace ${NAMESPACE} --exclude bc,is --non-interactive --verbose + tailor apply --namespace ${NAMESPACE} --exclude bc,is --non-interactive + # --verbose local sonarqube_url sonarqube_url=$(oc -n ${NAMESPACE} get route sonarqube --template 'http{{if .spec.tls}}s{{end}}://{{.spec.host}}') echo "Visit ${sonarqube_url}/setup to see if any update actions need to be taken." 
@@ -1986,11 +2234,12 @@ function setup_sonarqube() { echo "configure-sonarqube:" pushd sonarqube - ./configure.sh --sonarqube="${sonarqube_url}" --verbose --insecure \ + ./configure.sh --sonarqube="${sonarqube_url}" --insecure \ --pipeline-user openshift \ --pipeline-user-password openshift \ --admin-password openshift \ --write-to-config + # --verbose popd # retrieve sonar qube tokens from where configure.sh has put them @@ -2011,7 +2260,11 @@ function setup_sonarqube() { # None ####################################### function setup_jenkins() { - echo "Setting up Jenkins" + echo " " + echo "**********************" + echo "* Setting up Jenkins *" + echo "**********************" + echo " " oc policy add-role-to-user edit -z jenkins -n ${NAMESPACE} echo "make apply-jenkins-build:" @@ -2020,9 +2273,9 @@ function setup_jenkins() { popd echo "make start-jenkins-build:" - ocp-scripts/start-and-follow-build.sh --namespace ${NAMESPACE} --build-config jenkins-master --verbose & - ocp-scripts/start-and-follow-build.sh --namespace ${NAMESPACE} --build-config jenkins-agent-base --verbose & - ocp-scripts/start-and-follow-build.sh --namespace ${NAMESPACE} --build-config jenkins-webhook-proxy --verbose & + ocp-scripts/start-and-follow-build.sh --namespace ${NAMESPACE} --build-config jenkins-master --verbose + ocp-scripts/start-and-follow-build.sh --namespace ${NAMESPACE} --build-config jenkins-agent-base --verbose + ocp-scripts/start-and-follow-build.sh --namespace ${NAMESPACE} --build-config jenkins-webhook-proxy --verbose local fail_count=0 for job in $(jobs -p) @@ -2281,15 +2534,22 @@ function run_smoke_tests() { ./tests/scripts/free-unused-resources.sh fi - # buying extra time for the quickstarter tests - restart_atlassian_suite - echo -n "Waiting for bitbucket to become available" - until [[ $(docker inspect --format '{{.State.Health.Status}}' ${atlassian_bitbucket_container_name}) == 'healthy' ]] - do - echo -n "." - sleep 1 - done - echo "bitbucket up and running." 
+ # The license for the atlassian suite expires in 3h. + # The following lines (2) buys extra time for the quickstarter tests. + # restart_atlassian_suite + # follow_bitbucket + + # This is a better solution to the atlassian suite license expires problem: + if [ -x ./ods-devenv/scripts/restart-atlassian-suite-if-license-expires-in-less-than.sh ]; then + ./ods-devenv/scripts/restart-atlassian-suite-if-license-expires-in-less-than.sh --force-restart + else + echo " " + echo "ERROR: Could not find script ./ods-devenv/scripts/restart-atlassian-suite-if-license-expires-in-less-than.sh " + echo " " + fi + + # Do not understand why this was here. Prefer to check instead: + check_ods_status sleep 5 echo " " @@ -2324,26 +2584,19 @@ function startup_ods() { # for machines derived from legacy images and login-shells that do not source .bashrc export GOPROXY="https://goproxy.io,direct" # for sonarqube - echo "Setting vm.max_map_count=262144" + echo "startup_ods: Setting vm.max_map_count=262144" sudo sysctl -w vm.max_map_count=262144 setup_dnsmasq # restart and follow mysql restart_atlassian_mysql - printf "Waiting for mysqld to become available" - until [[ $(docker inspect --format '{{.State.Health.Status}}' ${atlassian_mysql_container_name}) == 'healthy' ]] - do - printf . - sleep 1 - done - echo "mysqld up and running." 
- restart_atlassian_suite local KUBEDNS_RESOLV_FILE - echo "setting kubedns in ${HOME}/openshift.local.clusterup/kubedns/resolv.conf" KUBEDNS_RESOLV_FILE="${HOME}/openshift.local.clusterup/kubedns/resolv.conf" + echo "startup_ods: Setting ocp kube dns in file ${KUBEDNS_RESOLV_FILE}" + echo " (adding a line with nameserver ${public_hostname})" local NEEDS_NEW_KUBEDNS_RESOLV_FILE NEEDS_NEW_KUBEDNS_RESOLV_FILE="false" @@ -2364,29 +2617,43 @@ function startup_ods() { cp -vf /etc/resolv.conf ${KUBEDNS_RESOLV_FILE} fi - cp -vf ${KUBEDNS_RESOLV_FILE} "${KUBEDNS_RESOLV_FILE}-backup-$(date +%Y%m%d-%H%M%S)" + KUBEDNS_RESOLV_FILE_BACKUP="${KUBEDNS_RESOLV_FILE}-backup-$(date +%Y%m%d-%H%M%S-%N)" + rm -fv ${KUBEDNS_RESOLV_FILE_BACKUP} || true + cp -vf ${KUBEDNS_RESOLV_FILE} ${KUBEDNS_RESOLV_FILE_BACKUP} sed -i "s|^nameserver.*$|nameserver ${public_hostname}|" ${KUBEDNS_RESOLV_FILE} - rm -fv ${KUBEDNS_RESOLV_FILE}.tmp + rm -fv ${KUBEDNS_RESOLV_FILE}.tmp || true cp -vf ${KUBEDNS_RESOLV_FILE} "${KUBEDNS_RESOLV_FILE}.tmp" - cat "${KUBEDNS_RESOLV_FILE}.tmp" | uniq > ${HOME}/openshift.local.clusterup/kubedns/resolv.conf + cat "${KUBEDNS_RESOLV_FILE}.tmp" | uniq > ${KUBEDNS_RESOLV_FILE} - if ! grep "nameserver ${public_hostname}" ${KUBEDNS_RESOLV_FILE} + if ! grep -q "nameserver ${public_hostname}" ${KUBEDNS_RESOLV_FILE} then - echo "ERROR: Could not update kubedns/resolv.conf ! (File ${HOME}/openshift.local.clusterup/kubedns/resolv.conf )" - return 1 + echo "nameserver ${public_hostname}" >> ${KUBEDNS_RESOLV_FILE} fi - echo "Contents of file ${HOME}/openshift.local.clusterup/kubedns/resolv.conf: " + echo " " + echo "Contents of file ${KUBEDNS_RESOLV_FILE}: " cat "${KUBEDNS_RESOLV_FILE}" echo " " + if ! grep -q "nameserver ${public_hostname}" ${KUBEDNS_RESOLV_FILE} + then + echo "ERROR: Cannot find in ocp resolv.conf file the line: nameserver ${public_hostname} " + echo "ERROR: Could not update kubedns/resolv.conf ! 
(File ${KUBEDNS_RESOLV_FILE} )" + return 1 + else + echo "startup_ods: Configured kube dns." + echo " " + fi + # allow for OpenShifts to be resolved within OpenShift network - echo "set iptables" - sudo iptables -I INPUT -p tcp --dport 443 -j ACCEPT + echo "startup_ods: set iptables" + sudo iptables -I INPUT -p tcp --dport 443 -j ACCEPT || true startup_openshift_cluster - wait_until_ocp_is_up + echo "startup_ods: SUCCESS." + echo " " + echo " " } function stop_ods() { @@ -2398,6 +2665,115 @@ function stop_ods() { oc cluster down } +function restart_ods() { + stop_ods + + sudo systemctl stop docker.service || sudo systemctl status docker.service + sudo systemctl start docker.service || sudo systemctl status docker.service + sudo systemctl status docker.service + + # Was in e2e tests repositories. Saved for future reference. + # sudo systemctl stop docker.socket + # sudo systemctl enable docker || true + # sudo systemctl start docker + + if ! startup_ods ; then + echo "restart_ods: ERROR." + echo " " + return 1 + fi + echo "restart_ods: SUCCESS." 
+ echo " " +} + +function check_ods_status() { + echo " " + echo " " + wait_until_ocp_is_up 10 || restart_ods + ## Better use restart_ods instead of startup_openshift_cluster + + # SonarQube, Provisioning app, Nexus + check_pods_and_restart_if_necessary 5 10 + + # Atlassian suite + follow_atlassian_mysql "30" || restart_atlassian_mysql + wait_until_atlassian_crowd_is_up 10 || restart_atlassian_crowd + wait_until_atlassian_bitbucket_is_up 10 || restart_atlassian_bitbucket + wait_until_atlassian_jira_is_up 10 || restart_atlassian_jira + + echo " " + echo "[STATUS CHECK] (check_ods_status) Result: SUCCESS" + echo " " +} + + +function check_pods_and_restart_if_necessary() { + local retryMaxIn=${1:-5} + local retryMaxHttpIn=${2:-10} + + check_pod_and_restart_if_necessary 'sonarqube' 'ods/sonarqube' 'https://sonarqube-ods.ocp.odsbox.lan/' \ + ${retryMaxIn} ${retryMaxHttpIn} || restart_ods + check_pod_and_restart_if_necessary 'prov-app' 'ods/ods-provisioning-app' 'https://prov-app-ods.ocp.odsbox.lan/' \ + ${retryMaxIn} ${retryMaxHttpIn} || restart_ods + check_pod_and_restart_if_necessary 'nexus' 'ods/nexus' 'https://nexus-ods.ocp.odsbox.lan/' \ + ${retryMaxIn} ${retryMaxHttpIn} || restart_ods + # https://jenkins-ods.ocp.odsbox.lan +} + +function check_pod_and_restart_if_necessary() { + + local SVC_NAME="${1}" + local SVC_POD_ID="${2}" + local SVC_HTTP_URL="${3}" + local CURL_SVC_OUTPUT_FILE="/tmp/result-curl-svc-${SVC_NAME}-output" + local CURL_SVC_HEADERS_FILE="/tmp/result-curl-svc-${SVC_NAME}-headers" + local CURL_LOGS_CHECK_SVC_FILE="/tmp/result-curl-svc-${SVC_NAME}-curlresult" + local retryMaxIn=${4:-5} + local retryMax=$((retryMaxIn)) + local retryMaxHttpIn=${5:-10} + local retVal=1 + + local retryNum=0 + while [ 0 -ne ${retVal} ]; do + + echo "[STATUS CHECK] Checking if pod in charge of service ${SVC_NAME} is up; stopping (so it restarts automatically) if not. 
Retry $retryNum / $retryMax " + + let retryNum+=1 + if [ ${retryMax} -le ${retryNum} ]; then + echo "[STATUS CHECK] ERROR: Maximum amount of retries reached: $retryNum / ${retryMax}" + echo "[STATUS CHECK] ERROR: We cannot live without service ${SVC_NAME}" + sleep 1 + return 1 + fi + + retVal=0 + wait_until_http_svc_is_up_advanced "$SVC_NAME" "$SVC_HTTP_URL" "$CURL_SVC_OUTPUT_FILE" "$CURL_SVC_HEADERS_FILE" \ + ${CURL_LOGS_CHECK_SVC_FILE} ${retryMaxHttpIn} || retVal=1 + + if [ 0 -ne ${retVal} ]; then + echo "[STATUS CHECK] WARNING: Stopping pod so it restarts automatically. Service: ${SVC_NAME} " + local docker_process_killed="false" + docker ps -a | grep -v 'Exited .* ago' | grep -i "${SVC_POD_ID}" | cut -d ' ' -f 1 | while read -r containerId ; + do + echo "Stopping $containerId" + docker stop $containerId + docker_process_killed="true" + done + + if [ "false" == "$docker_process_killed" ]; then + echo "No docker process found for pod ${SVC_NAME} with ID ${SVC_POD_ID} " + echo "Current docker pods: " + docker ps -a | grep -v 'Exited .* ago' || true + echo " " + return 1 + fi + fi + + done + return 0 + +} + function setup_aqua() { oc create configmap aqua --from-literal=registry=${aqua_registry} --from-literal=secretName=${aqua_secret_name} --from-literal=url=${aqua_url} --from-literal=nexusRepository=${aqua_nexus_repository} --from-literal=enabled=${aqua_enabled} -n ods } diff --git a/ods-devenv/scripts/import-certificate.sh b/ods-devenv/scripts/import-certificate.sh new file mode 100755 index 000000000..1b406f844 --- /dev/null +++ b/ods-devenv/scripts/import-certificate.sh @@ -0,0 +1,54 @@ +#!/bin/bash +set -eu +set -o pipefail + +ME="$(basename ${0})" + +function usage { + echo " " + echo "This script installs the certificate of a host to the centos 7 os trust store." 
+ echo "${ME}: usage: ${ME} [--url URL] [--port PORT]" + echo "${ME}: example: ${ME} --url ocp.odsbox.lan --port 8443" + echo " " +} + +target_url="ocp.odsbox.lan" +target_folder="/etc/pki/ca-trust/source/anchors/" +port="8443" + +while [[ "$#" -gt 0 ]]; do + case $1 in + + -v|--verbose) set -x;; + + -h|--help) usage; exit 0;; + + --url) target_url="$2"; shift;; + --url=*) target_url="${1#*=}";; + + --port) port="$2"; shift;; + --port=*) port="${1#*=}";; + + *) echo "Unknown parameter passed: $1"; exit 1;; +esac; shift; done + +echo " " + +hostname="$(echo "${target_url}" | sed 's|\.|_|g')" +target_file="/etc/pki/ca-trust/source/anchors/${hostname}.pem" +target_file_tmp="/tmp/${hostname}.pem" + +echo "openssl s_client -showcerts -host ${target_url} -port ${port}" +openssl s_client -showcerts -host ${target_url} -port ${port} > "${target_file_tmp}" + +echo " " +cat ${target_file_tmp} +echo " " +echo "Moving certificate to target file ${target_file}: " +sudo mv -vf ${target_file_tmp} ${target_file} +echo "Updating ca-trust store... 
" +sudo update-ca-trust +echo " " +echo "DONE" +echo " " + diff --git a/ods-devenv/scripts/restart-atlassian-suite-if-license-expires-in-less-than.sh b/ods-devenv/scripts/restart-atlassian-suite-if-license-expires-in-less-than.sh new file mode 100755 index 000000000..2dd441852 --- /dev/null +++ b/ods-devenv/scripts/restart-atlassian-suite-if-license-expires-in-less-than.sh @@ -0,0 +1,116 @@ +#!/usr/bin/env bash + +set -euo pipefail + +ME="$(basename ${0})" +echo " " + +# Initialize variables +HOURS_LEFT=0 +HOURS_ATLASSIAN_CAN_BE_UP=3 +ALLOW_ZERO_HOURS="false" +FORCE_RESTART="false"; +ASSUME_JUST_RESTARTED="false"; +LAST_RESTART_FILE_REGISTRY="/tmp/atlassian-suite-restarts-registry.log" +DEPLOY_SCRIPT="/home/openshift/opendevstack/ods-core/ods-devenv/scripts/deploy.sh" + +function usage() { + echo " " + echo "${ME}: usage: ${ME} [--hours-left X] [--assume-just-restarted] [--force-restart] [--allow-zero-hours] " + echo "${ME}: example: ${ME} --hours-left 1 " + echo " " +} + +function initializeLastRestartFileRegistry() { + if [ ! -f ${LAST_RESTART_FILE_REGISTRY} ]; then + touch ${LAST_RESTART_FILE_REGISTRY} + chmod 777 ${LAST_RESTART_FILE_REGISTRY} + fi +} + +function setCurrenTimeIfAssumeJustRestarted() { + local just_restarted=${1:-"false"} + local date_string="$(date +'%Y%m%d_%H%M%S')" + local reason="none" + + if [ "true" == "${ASSUME_JUST_RESTARTED}" ] || [ "true" == "${just_restarted}" ]; then + if [ "true" == "${ASSUME_JUST_RESTARTED}" ]; then + echo "${ME}: Setting current time to registry because assuming we just restarted the atlassian stack... " + reason="assuming just restarted" + else + echo "${ME}: Setting current time to registry because we just restarted the atlassian stack... 
" + reason="just restarted" + fi + echo "${date_string} ${reason}" >> ${LAST_RESTART_FILE_REGISTRY} + fi +} + +function checkIfWeStillHaveTime() { + local FOUND="" + local MAX_HOURS_SINCE_LAST_UPDATE=$((HOURS_ATLASSIAN_CAN_BE_UP - HOURS_LEFT)) + echo "MAX_HOURS_SINCE_LAST_UPDATE=${MAX_HOURS_SINCE_LAST_UPDATE} (${HOURS_ATLASSIAN_CAN_BE_UP} - ${HOURS_LEFT})" + + echo "Current date: $(date)" + if [ "false" == ${FORCE_RESTART} ]; then + FOUND=$(find ${LAST_RESTART_FILE_REGISTRY} -mmin -$((MAX_HOURS_SINCE_LAST_UPDATE * 60))) + echo "Found that the file was modified in last ${MAX_HOURS_SINCE_LAST_UPDATE}h ?? (empty next line if no): " + echo "${FOUND}" + ls -lah ${LAST_RESTART_FILE_REGISTRY} + fi + + if [ -z "${FOUND}" ] || [ "" == "${FOUND}" ]; then + if [ "true" == ${FORCE_RESTART} ]; then + echo " " + echo "Restart atlassian stack is forced !! " + echo " " + else + echo " " + echo "Since the registry file was *NOT* modified in the last ${MAX_HOURS_SINCE_LAST_UPDATE}h, " + echo "we restart the atlassian stack to have at least ${HOURS_LEFT} until next license issue." + echo " " + fi + + ${DEPLOY_SCRIPT} --target restart_atlassian_suite + setCurrenTimeIfAssumeJustRestarted "true" + echo " " + else + echo " " + echo "Since the registry file was modified in the last ${MAX_HOURS_SINCE_LAST_UPDATE}h, " + echo "we do *NOT* need to restart the stack." 
+ echo " " + fi + +} + +while [[ "$#" -gt 0 ]]; do + case $1 in + + -v|--verbose) set -x;; + + -h|--help) usage; exit 0;; + + --hours-left) HOURS_LEFT="$2"; echo "Hours we need the stack to not complaint about license issues: $HOURS_LEFT"; shift;; + + --assume-just-restarted) ASSUME_JUST_RESTARTED="true";; + + --force-restart) FORCE_RESTART="true";; + + --allow-zero-hours) ALLOW_ZERO_HOURS="true";; + + *) echo " "; echo "${ME}: ERROR: Unknown parameter passed: ${1}"; echo " "; exit 1;; +esac; shift; done + +if [ 0 -eq ${HOURS_LEFT} ] && [ "false" == "${ALLOW_ZERO_HOURS}" ] && [ "false" == "${ASSUME_JUST_RESTARTED}" ] && [ "false" == "${FORCE_RESTART}" ]; then + usage + echo " " + echo "${ME}: Please provide the amount of hours you can wait for the stack to restart." + echo " " + exit 1 +fi + +initializeLastRestartFileRegistry +setCurrenTimeIfAssumeJustRestarted +checkIfWeStillHaveTime + +echo " " +exit 0 diff --git a/tests/quickstarter-test.sh b/tests/quickstarter-test.sh index 9d8cb8a94..ff78c1db4 100755 --- a/tests/quickstarter-test.sh +++ b/tests/quickstarter-test.sh @@ -49,8 +49,8 @@ echo "${THIS_SCRIPT}: Running tests (${QUICKSTARTER}). 
Output will take a while echo " " # Should fix error " panic: test timed out after " -echo "${THIS_SCRIPT}: go test -v -count=1 -timeout 5h -parallel ${PARALLEL} github.com/opendevstack/ods-core/tests/quickstarter -args ${QUICKSTARTER}" -go test -v -count=1 -timeout 5h -parallel ${PARALLEL} github.com/opendevstack/ods-core/tests/quickstarter -args ${QUICKSTARTER} | tee test-quickstarter-results.txt 2>&1 +echo "${THIS_SCRIPT}: go test -v -count=1 -timeout 30h -parallel ${PARALLEL} github.com/opendevstack/ods-core/tests/quickstarter -args ${QUICKSTARTER}" +go test -v -count=1 -timeout 30h -parallel ${PARALLEL} github.com/opendevstack/ods-core/tests/quickstarter -args ${QUICKSTARTER} | tee test-quickstarter-results.txt 2>&1 exitcode="${PIPESTATUS[0]}" if [ -f test-quickstarter-results.txt ]; then go-junit-report < test-quickstarter-results.txt > test-quickstarter-report.xml diff --git a/tests/quickstarter/quickstarter_test.go b/tests/quickstarter/quickstarter_test.go index 3526675a6..2e7e72872 100644 --- a/tests/quickstarter/quickstarter_test.go +++ b/tests/quickstarter/quickstarter_test.go @@ -76,17 +76,8 @@ func TestQuickstarter(t *testing.T) { fmt.Printf("Running tests for quickstarter %s\n", quickstarterName) fmt.Printf("\n\n") - // Run cleanup operations to ensure we always have enough resources. - stdout, stderr, err := utils.RunScriptFromBaseDir( - "tests/scripts/free-unused-resources.sh", - []string{}, []string{}, - ) - - if err != nil { - t.Fatalf("Error cleaning up : \nStdOut: %s\nStdErr: %s\nErr: %s\n", stdout, stderr, err) - } else { - fmt.Printf("Cleaned cluster state.\n") - } + freeUnusedResources(t) + restartAtlassianSuiteIfLicenseExpiresInLessThan(t) // Run each quickstarter test in a subtest to avoid exiting early // when t.Fatal is used. @@ -245,6 +236,36 @@ func TestQuickstarter(t *testing.T) { } } +func freeUnusedResources(t *testing.T) { + + // Run cleanup operations to ensure we always have enough resources. 
+ stdout, stderr, err := utils.RunScriptFromBaseDir( + "tests/scripts/free-unused-resources.sh", + []string{}, []string{}, + ) + + if err != nil { + t.Fatalf("Error cleaning up : \nStdOut: %s\nStdErr: %s\nErr: %s\n", stdout, stderr, err) + } else { + fmt.Printf("Cleaned cluster state.\n") + } +} + +func restartAtlassianSuiteIfLicenseExpiresInLessThan(t *testing.T) { + + // Run cleanup operations to ensure we always have enough resources. + stdout, stderr, err := utils.RunScriptFromBaseDir( + "ods-devenv/scripts/restart-atlassian-suite-if-license-expires-in-less-than.sh", + []string{"--hours-left", "2"}, []string{}, + ) + + if err != nil { + t.Fatalf("Error cleaning up : \nStdOut: %s\nStdErr: %s\nErr: %s\n", stdout, stderr, err) + } else { + fmt.Printf("Checked if needed to restart atlassian suite.\n") + } +} + // collectTestableQuickstarters collects all subdirs of "dir" that contain // a "testdata" directory. func collectTestableQuickstarters(t *testing.T, dir string) []string { diff --git a/tests/scripts/free-unused-resources.sh b/tests/scripts/free-unused-resources.sh index adcbbe925..83890c00c 100755 --- a/tests/scripts/free-unused-resources.sh +++ b/tests/scripts/free-unused-resources.sh @@ -2,12 +2,15 @@ echo " " +ME=$(basename $0) + function clean_containers { echo "Removing docker containers no more used... " if docker ps -a | grep -q 'Exited .* ago' ; then - set -x + docker ps -a | grep 'Exited .* ago' + echo " " + echo "Removing docker containers: " docker ps -a | grep 'Exited .* ago' | sed 's/\s\+/ /g' | cut -d ' ' -f 1 | while read id; do echo "docker rm $id"; docker rm $id; done - set +x else echo "No docker containers to remove. " fi @@ -17,7 +20,7 @@ function clean_tests { echo "Removing tests projects no more used... " oc projects | grep '^\s*tes.*' | grep -v "${OMIT_TESTS_PROJECT}" | while read -r line; do if [ ! 
-z "$line" ]; then - echo "Removing project $line" + echo "Removing project ${line}: oc delete project $line " oc delete project $line || true else echo "No projects to remove" @@ -26,11 +29,12 @@ function clean_tests { } function clean_odsverify { + echo "Cleaning projects ODS__VERIFY... " if [ "true" == "$CLEAN_ODS_VERIFY" ]; then - echo "Cleaning ODS VERIFY projects..." + echo "Removing ODS VERIFY projects..." oc projects | grep '^\s*odsverify.*' | while read -r line; do if [ ! -z "$line" ]; then - echo "Removing project $line" + echo "Removing project ${line}: oc delete project $line " oc delete project $line || true else echo "No projects to remove" @@ -40,13 +44,14 @@ function clean_odsverify { } function clean_images { + echo "Cleaning OC images" + echo "oc adm prune images --keep-tag-revisions=1 --keep-younger-than=30m --confirm" oc adm prune images --keep-tag-revisions=1 --keep-younger-than=30m --confirm || true } function usage { - ME=$(basename $0) echo " " - echo "usage: ${ME} [--odsVerify] [--omitTestsProject tes22]" + echo "usage: ${ME} [--odsVerify] [--omitTests] [--omitTestsProject tes22]" echo " " } @@ -57,6 +62,7 @@ function echo_error() { OMIT_TESTS_PROJECT=none CLEAN_ODS_VERIFY="false" +CLEAN_TESTS="false" while [[ "$#" -gt 0 ]]; do case $1 in @@ -69,11 +75,19 @@ while [[ "$#" -gt 0 ]]; do --omitTestsProject) OMIT_TESTS_PROJECT="$2"; echo "Tests to omit: $OMIT_TESTS_PROJECT"; shift;; + --cleanTests) CLEAN_TESTS="true";; + *) echo_error "Unknown parameter passed: $1"; exit 1;; esac; shift; done clean_containers -clean_tests +if [ "true" == "${CLEAN_TESTS}" ]; then + clean_tests +else + echo " " + echo "${ME}: INFO: Not cleaning tests" + echo " " +fi clean_odsverify clean_images diff --git a/tests/scripts/print-jenkins-json-status.sh b/tests/scripts/print-jenkins-json-status.sh index 281b1f72a..07edddea2 100755 --- a/tests/scripts/print-jenkins-json-status.sh +++ b/tests/scripts/print-jenkins-json-status.sh @@ -2,6 +2,14 @@ set -eu set -o pipefail 
+OC_ERROR="false" oc get build $1 -n $2 \ -ojsonpath='{.metadata.annotations.openshift\.io/jenkins-status-json}' \ - | jq '[.stages[] | {stage: .name, status: .status}]' + | jq '[.stages[] | {stage: .name, status: .status}]' || OC_ERROR="true" + +if [ "false" != "${OC_ERROR}" ]; then + echo " " + echo "ERROR: Could not get oc build status named $1 in namespace $2 !!! " + echo " " +fi + diff --git a/tests/scripts/print-jenkins-log.sh b/tests/scripts/print-jenkins-log.sh index e4a7fd52d..3da6d75bc 100755 --- a/tests/scripts/print-jenkins-log.sh +++ b/tests/scripts/print-jenkins-log.sh @@ -2,12 +2,81 @@ set -eu set -o pipefail +JENKINS_LOG_FILE="jenkins-downloaded-log.txt" +JENKINS_SERVER_LOG_FILE="jenkins-server-log.txt" +OC_ERROR="false" +LOG_URL="http://localhost" +TOKEN="none" + +echo " " +echo " " +echo " " PROJECT=$1 BUILD_NAME=$2 -LOG_URL=$(oc -n ${PROJECT} get build ${BUILD_NAME} -o jsonpath='{.metadata.annotations.openshift\.io/jenkins-log-url}') +ME="$(basename $0)[${BUILD_NAME}]" +echo "${ME}: Project: ${PROJECT} BuildName: ${BUILD_NAME} " +echo " " + +LOG_URL=$(oc -n ${PROJECT} get build ${BUILD_NAME} -o jsonpath='{.metadata.annotations.openshift\.io/jenkins-log-url}' || echo "OC_ERROR" ) + +echo " " +echo "${ME}: Jenkins log url: ${LOG_URL}" +echo " " +if [ "OC_ERROR" == "${LOG_URL}" ]; then + OC_ERROR="true" + TOKEN="OC_ERROR" +else + TOKEN=$(oc -n ${PROJECT} get sa/jenkins --template='{{range .secrets}}{{ .name }} {{end}}' | xargs -n 1 oc -n ${PROJECT} get secret --template='{{ if .data.token }}{{ .data.token }}{{end}}' | head -n 1 | base64 -d -) +fi + +if [ -f ${JENKINS_LOG_FILE} ]; then + rm -fv ${JENKINS_LOG_FILE} || echo "Problem removing existing log file (${JENKINS_LOG_FILE})." +fi + +echo "${ME}: Retrieving logs from url: ${LOG_URL}" +curl --insecure -sSL --header "Authorization: Bearer ${TOKEN}" ${LOG_URL} > ${JENKINS_LOG_FILE} || \ + echo "${ME}: Error retrieving jenkins logs of job run in ${BUILD_NAME} with curl." 
+ +# | xargs -n 1 echo "${BUILD_NAME}: " || \ +# echo "Error retrieving jenkins logs of job run in ${BUILD_NAME} with curl." + +NO_JOB_LOGS="true" +echo " " +echo " " +# Appends current ${BUILD_NAME} to each log line. Improves readability. +while read -r line; do + if [ ! -z "$line" ] && [ "" != "${line}" ]; then + NO_JOB_LOGS="false" + fi + echo -e "${BUILD_NAME}: $line "; +done < ${JENKINS_LOG_FILE} + +echo " " +sleep 5 + +if [ -f ${JENKINS_SERVER_LOG_FILE} ]; then + rm -fv ${JENKINS_SERVER_LOG_FILE} || \ + echo "${ME}: Problem removing existing log file (${JENKINS_SERVER_LOG_FILE})." +fi + +BAD_SERVER_LOGS="false" +if grep -q 'Still waiting to schedule task' ${JENKINS_LOG_FILE} ; then + if ! grep -q 'Finished: SUCCESS' ${JENKINS_LOG_FILE} ; then + BAD_SERVER_LOGS="true" + fi +fi + echo " " -echo "Jenkins log url: ${LOG_URL}" +echo "${ME}: NO_JOB_LOGS=${NO_JOB_LOGS}" +# echo "${ME}: NO_SERVER_LOGS=${NO_SERVER_LOGS}" +echo "${ME}: BAD_SERVER_LOGS=${BAD_SERVER_LOGS}" echo " " -TOKEN=$(oc -n ${PROJECT} get sa/jenkins --template='{{range .secrets}}{{ .name }} {{end}}' | xargs -n 1 oc -n ${PROJECT} get secret --template='{{ if .data.token }}{{ .data.token }}{{end}}' | head -n 1 | base64 -d -) +if [ "true" == "${NO_JOB_LOGS}" ] || [ "true" == "${BAD_SERVER_LOGS}" ]; then + echo " " + echo "${ME}: ERROR: Logs retrieved are not good enough." + echo " " + # WARNING: If we exit 1, the whole process aborts !!! 
+ # exit 1 +fi -curl --insecure -sS --header "Authorization: Bearer ${TOKEN}" ${LOG_URL} +exit 0 diff --git a/tests/smoke-test.sh b/tests/smoke-test.sh index 5ed7db5e0..efb605975 100755 --- a/tests/smoke-test.sh +++ b/tests/smoke-test.sh @@ -24,8 +24,8 @@ fi sleep 5 echo " " -echo "${THIS_SCRIPT}: go test -v -count=1 -timeout 140m github.com/opendevstack/ods-core/tests/smoketest " -go test -v -count=1 -timeout 140m github.com/opendevstack/ods-core/tests/smoketest | tee test-smoketest-results.txt 2>&1 +echo "${THIS_SCRIPT}: go test -v -count=1 -timeout 30h github.com/opendevstack/ods-core/tests/smoketest " +go test -v -count=1 -timeout 30h github.com/opendevstack/ods-core/tests/smoketest | tee test-smoketest-results.txt 2>&1 exit_code=$? echo "${THIS_SCRIPT}: return value: ${exit_code}" diff --git a/tests/smoketest/provision-api_test.go b/tests/smoketest/provision-api_test.go index c6ec0941c..299a6e456 100644 --- a/tests/smoketest/provision-api_test.go +++ b/tests/smoketest/provision-api_test.go @@ -8,6 +8,7 @@ import ( "runtime" "strings" "testing" + "time" "github.com/opendevstack/ods-core/tests/utils" projectClientV1 "github.com/openshift/client-go/project/clientset/versioned/typed/project/v1" @@ -58,6 +59,8 @@ func TestVerifyOdsProjectProvisionThruProvisionApi(t *testing.T) { values["ODS_NAMESPACE"], exJob.FullBuildName, ) if err != nil { + time.Sleep(10 * time.Second) + fmt.Printf("Error retrieving jenkins build stages for build: %s\n", projectName) t.Fatal(err) } fmt.Printf("Jenkins stages: \n'%s'\n", stages) diff --git a/tests/utils/jenkins.go b/tests/utils/jenkins.go index 76f22b103..c484c17e7 100644 --- a/tests/utils/jenkins.go +++ b/tests/utils/jenkins.go @@ -93,6 +93,7 @@ func RetrieveJenkinsBuildStagesForBuild(jenkinsNamespace string, buildName strin fmt.Printf("Getting stages for build: %s in project: %s\n", buildName, jenkinsNamespace) + fmt.Printf("To get more info, use print-jenkins-log.sh %s %s \n", jenkinsNamespace, buildName) config, err := 
GetOCClient() if err != nil { @@ -125,15 +126,18 @@ func RetrieveJenkinsBuildStagesForBuild(jenkinsNamespace string, buildName strin } } } else { - fmt.Printf("Waiting (%d/%d) for build to complete: %s. Current status: %s\n", count, max, buildName, build.Status.Phase) - fmt.Printf("To get more info, use print-jenkins-log.sh %s %s \n", jenkinsNamespace, buildName) + fmt.Printf("Waiting for build of %s to complete (%d/%d). Current status: %s\n", buildName, count, max, build.Status.Phase) } count++ } + buildSeemsToBeComplete := "true" + errorGettingInfoNeeded := "false" + // in case the the build was sort of never really started - get the jenkins pod log, maybe there // is a plugin / sync problem? - if build.Status.Phase == v1.BuildPhaseNew || build.Status.Phase == v1.BuildPhasePending { + if build.Status.Phase == v1.BuildPhaseNew || build.Status.Phase == v1.BuildPhasePending || build.Status.Phase == v1.BuildPhaseRunning { + buildSeemsToBeComplete = "false" // get the jenkins pod log stdoutJPod, stderrJPod, errJPod := RunScriptFromBaseDir( "tests/scripts/print-jenkins-pod-log.sh", @@ -141,38 +145,46 @@ func RetrieveJenkinsBuildStagesForBuild(jenkinsNamespace string, buildName strin jenkinsNamespace, }, []string{}) if errJPod != nil { - fmt.Printf("Error getting jenkins pod logs: %s\nerr:%s", errJPod, stderrJPod) + fmt.Printf("Error getting jenkins pod logs using "+ + "tests/scripts/print-jenkins-pod-log.sh: %s\nerr:%s", + errJPod, stderrJPod) + errorGettingInfoNeeded = "true" } else { fmt.Printf("Jenkins pod logs: \n%s \nerr:%s", stdoutJPod, stderrJPod) } } + fmt.Printf("Build seems to be complete ? 
: %s \n", buildSeemsToBeComplete) + // get the jenkins run build log stdout, stderr, err := RunScriptFromBaseDir( "tests/scripts/print-jenkins-log.sh", []string{ jenkinsNamespace, buildName, + buildSeemsToBeComplete, }, []string{}) if err != nil { - return "", fmt.Errorf( - "Could not execute tests/scripts/print-jenkins-log.sh\n - err:%s\n - stderr:%s", + fmt.Printf("ERROR: Could not get Jenkins logs using "+ + "tests/scripts/print-jenkins-log.sh\n - err:%s\n - stderr:%s", err, - stderr, - ) + stderr) + errorGettingInfoNeeded = "true" } // print in any case, otherwise when err != nil no logs are shown - fmt.Printf("buildlog: %s\n%s", buildName, stdout) - - // still running, or we could not find it ... - if count >= max { - return "", fmt.Errorf( - "Timeout during build: %s\nStdOut: %s\nStdErr: %s", - buildName, - stdout, - stderr) + fmt.Printf("[Jenkins buildlog]: buildName: %s\n%s", buildName, stdout) + + problematicSubString := "Still waiting to schedule task" + exceptionProblematicSubString := "Finished: SUCCESS" + if len(stdout) > 0 && (strings.Contains(stdout, problematicSubString)) { + if !strings.Contains(stdout, exceptionProblematicSubString) { + fmt.Printf("Jenkins log contains problematic substring ( %s ) and "+ + " it does not contain exception case string: ( %s ) \n", problematicSubString, + exceptionProblematicSubString) + errorGettingInfoNeeded = "true" + } } // get (executed) jenkins stages from run - the caller can compare against the golden record @@ -184,8 +196,35 @@ func RetrieveJenkinsBuildStagesForBuild(jenkinsNamespace string, buildName strin }, []string{}) if err != nil { - return "", fmt.Errorf("Error getting jenkins stages for: %s\rError: %s, %s, %s", + fmt.Printf("ERROR: Problem getting jenkins stages for: %s\rError: %s, %s, %s", buildName, err, stdout, stderr) + errorGettingInfoNeeded = "true" + } + + // print in any case, otherwise when err != nil no logs are shown + fmt.Printf("[get oc build status]: buildName: %s\n%s", buildName, 
stdout) + + problematicSubString2 := "ERROR: Could not get oc build status named" + if len(stdout) > 0 && (strings.Contains(stdout, problematicSubString2)) { + errorGettingInfoNeeded = "true" + } + + // still running, or we could not find it ... + if count >= max { + return "", fmt.Errorf( + "Timeout during build: %s\nStdOut: %s\nStdErr: %s", + buildName, + stdout, + stderr) + } + + if (errorGettingInfoNeeded == "true") || (buildSeemsToBeComplete == "false") { + if buildSeemsToBeComplete == "false" { + fmt.Printf("ERROR: Something went wrong. Look for the word ERROR above.") + fmt.Printf("ERROR: Sleeping for 20h to allow manual intervention.") + time.Sleep(20 * time.Hour) + } + return "", fmt.Errorf("ERROR: Something went wrong. Look for the word ERROR above.") } return stdout, nil diff --git a/tests/utils/provisioning.go b/tests/utils/provisioning.go index bb92aac53..d12076658 100644 --- a/tests/utils/provisioning.go +++ b/tests/utils/provisioning.go @@ -70,6 +70,7 @@ func (api *ProvisionAPI) CreateProject() ([]byte, error) { return nil, fmt.Errorf("Could not read response file?!, %s", err) } fmt.Printf("Provision results: %s\n", string(log)) + fmt.Printf("-----\n") return log, nil } @@ -130,5 +131,6 @@ func (api *ProvisionAPI) CreateComponent() ([]byte, error) { return nil, fmt.Errorf("Could not read response file?!, %w", err) } fmt.Printf("Provision results: %s\n", string(log)) + fmt.Printf("-----\n") return log, nil } diff --git a/tests/verify.sh b/tests/verify.sh index 8efd585fc..9f551a061 100755 --- a/tests/verify.sh +++ b/tests/verify.sh @@ -15,7 +15,8 @@ fi if [ -f test-verify-results.txt ]; then rm test-verify-results.txt fi -go test -v -count=1 -timeout 60m github.com/opendevstack/ods-core/tests/ods-verify | tee test-verify-results.txt 2>&1 +echo "go test -v -count=1 -timeout 30h github.com/opendevstack/ods-core/tests/ods-verify" +go test -v -count=1 -timeout 30h github.com/opendevstack/ods-core/tests/ods-verify | tee test-verify-results.txt 2>&1 
exitcode=$? if [ -f test-verify-results.txt ]; then set -e