diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index d6038ff951..9a8675a013 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -25,18 +25,32 @@ jobs: strategy: fail-fast: false matrix: - java_version: [11, 17, 22] + java_version: [17, 23] steps: - - name: Environment - run: env | sort - - name: Checkout uses: actions/checkout@v4 with: fetch-depth: 1 submodules: true + - name: Get the commit message + id: get_commit_message + run: | + if [ "${{ github.event_name }}" = "pull_request" ]; then + echo "GitHub event=pull_request" + COMMIT_SHA=${{ github.event.pull_request.head.sha }} + COMMIT_MESSAGE=$(curl -s \ + -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ + https://api.github.com/repos/${{ github.repository }}/commits/$COMMIT_SHA | jq -r '.commit.message') + echo "Commit message=$COMMIT_MESSAGE" | head -n 1 + echo "commit_message=$COMMIT_MESSAGE" | head -n 1 >> $GITHUB_OUTPUT + else + echo "GitHub event=${{ github.event_name }}" + echo "Commit message=${{ github.event.head_commit.message }}" | head -n 1 + echo "commit_message=${{ github.event.head_commit.message }}" | head -n 1 >> $GITHUB_OUTPUT + fi + - name: Get changed files id: changed-files uses: tj-actions/changed-files@v43 @@ -76,6 +90,7 @@ jobs: - name: Test if: steps.changed-files.outputs.any_changed == 'true' run: | + env | sort # configure test env if [[ "$GOOGLE_SECRET" ]]; then echo $GOOGLE_SECRET | base64 -d > $PWD/google_credentials.json @@ -106,16 +121,17 @@ jobs: outputs: any_changed: ${{ steps.changed-files.outputs.any_changed }} + commit_message: ${{ steps.get_commit_message.outputs.commit_message }} test: - if: ${{ !contains(github.event.head_commit.message, '[ci fast]') && needs.build.outputs.any_changed == 'true' }} + if: ${{ !contains(needs.build.outputs.commit_message, '[ci fast]') && needs.build.outputs.any_changed == 'true' }} needs: build runs-on: ubuntu-latest timeout-minutes: 90 strategy: fail-fast: false matrix: - java_version: [11, 21] + java_version: [17, 23] test_mode: ["test_integration", "test_docs", "test_aws", "test_azure", "test_google", "test_wave"] steps: - name: Checkout @@ -142,6 +158,12 @@ jobs: - name: Run tests run: | + env | sort + # configure test env + if [[ "$GOOGLE_SECRET" ]]; then + echo $GOOGLE_SECRET | base64 -d > $PWD/google_credentials.json + export GOOGLE_APPLICATION_CREDENTIALS=$PWD/google_credentials.json + fi cat $HOME/.nextflow/scm make clean assemble install bash test-ci.sh @@ -173,5 +195,51 @@ jobs: with: name: report-${{ matrix.test_mode }}-jdk-${{ matrix.java_version }} path: | - validation/** + validation/**/* + validation/**/.* integration-tests.tar + + test-e2e: + if: ${{ contains(needs.build.outputs.commit_message,'[e2e stage]') || contains(needs.build.outputs.commit_message,'[e2e prod]') }} + needs: build + runs-on: ubuntu-latest + timeout-minutes: 10 + permissions: + actions: write # Allow writing to actions + contents: write # Allow writing to repository contents + steps: + - name: Checkout + uses: actions/checkout@v4 + with: + fetch-depth: 1 + submodules: true + + - name: Setup Java 17 + uses: actions/setup-java@v4 + with: + java-version: 17 + distribution: 'temurin' + architecture: x64 + cache: gradle + + - name: Setup env + run: | + wget -q -O wave https://github.com/seqeralabs/wave-cli/releases/download/v1.4.1/wave-1.4.1-linux-x86_64 + chmod +x wave + mv wave /usr/local/bin/ + echo "COMMIT_MESSAGE=\"${{ needs.build.outputs.commit_message }}\"" >> $GITHUB_ENV + + - name : Docker Login to Seqera public CR + 
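For reference, the commit-message lookup performed by the `Get the commit message` step above reduces to the following standalone sketch; the repository slug and SHA are placeholders, and `GITHUB_TOKEN`/`GITHUB_OUTPUT` are assumed to be injected by the Actions runner:

```bash
# Sketch of the pull_request branch of the "Get the commit message" step
# (placeholders throughout; not part of the patch itself).
COMMIT_SHA="0000000"                # hypothetical PR head SHA
REPO="nextflow-io/nextflow"         # hypothetical repository slug
COMMIT_MESSAGE=$(curl -s \
  -H "Authorization: token $GITHUB_TOKEN" \
  "https://api.github.com/repos/$REPO/commits/$COMMIT_SHA" | jq -r '.commit.message')
# Keep only the subject line: a multi-line value would break the key=value
# record format of $GITHUB_OUTPUT, and the downstream jobs only match
# single-line tags such as [ci fast] or [e2e stage] in this output.
echo "commit_message=$COMMIT_MESSAGE" | head -n 1 >> "$GITHUB_OUTPUT"
```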
uses : docker/login-action@v3 + with : + registry : "public.cr.seqera.io" + username : "public-cr-admin" + password : ${{ secrets.SEQERA_PUBLIC_CR_PASSWORD }} + + - name: Launch tests + run: | + cd test-e2e + bash run.sh + env: + GITHUB_TOKEN: ${{ secrets.AUTOMATION_GITHUB_TOKEN }} + GRADLE_OPTS: '-Dorg.gradle.daemon=false' diff --git a/.github/workflows/generate-send-dependencies.yml b/.github/workflows/generate-send-dependencies.yml new file mode 100644 index 0000000000..639b28a7ad --- /dev/null +++ b/.github/workflows/generate-send-dependencies.yml @@ -0,0 +1,27 @@ +name: Generate and submit dependency graph for nextflow +on: + push: + branches: ['master'] + +permissions: + contents: write + +jobs: + dependency-submission: + runs-on: ubuntu-latest + strategy: + matrix: + components: ["nextflow", "plugins:nf-google", "plugins:nf-amazon", "plugins:nf-azure", "plugins:nf-cloudcache", "plugins:nf-codecommit", "plugins:nf-console", "plugins:nf-tower", "plugins:nf-wave"] + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-java@v4 + with: + distribution: temurin + java-version: 21 + + - name: Generate and submit dependency graph for nextflow + uses: gradle/actions/dependency-submission@v4 + with: + dependency-resolution-task: ":${{ matrix.components }}:dependencies" + additional-arguments: "--configuration runtimeClasspath" + dependency-graph: generate-and-submit diff --git a/.github/workflows/seqeradocs-changelog.yml b/.github/workflows/seqeradocs-changelog.yml new file mode 100644 index 0000000000..f41058fe71 --- /dev/null +++ b/.github/workflows/seqeradocs-changelog.yml @@ -0,0 +1,62 @@ +name: Push release change log to Seqera Docs + +on: + release: + types: [published] + workflow_dispatch: + inputs: + release_name: + description: "Release version (e.g. 1.0.0)" + required: true + release_body: + description: "Release changelog content" + required: true + +jobs: + update-docs: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + + - name: Clone seqeralabs/docs + run: | + git clone https://github.com/seqeralabs/docs.git seqeralabs-docs + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + + - name: Create changelog file + run: | + mkdir -p seqeralabs-docs/changelog/nextflow + cat << EOF > seqeralabs-docs/changelog/nextflow/${{ github.event.release.name || inputs.release_name }}.mdx + --- + title: Nextflow ${{ github.event.release.name || inputs.release_name }} + date: $(date +%Y-%m-%d) + tags: [nextflow] + --- + + ${{ github.event.release.body || inputs.release_body }} + EOF + + - uses: actions/create-github-app-token@v1 + id: generate-token + with: + app-id: ${{ secrets.DOCS_BOT_APP_ID }} + private-key: ${{ secrets.DOCS_BOT_APP_PRIVATE_KEY }} + owner: seqeralabs + repositories: docs + + - name: Create Pull Request + uses: peter-evans/create-pull-request@v7 + with: + token: ${{ steps.generate-token.outputs.token }} + branch-token: ${{ steps.generate-token.outputs.token }} + path: seqeralabs-docs + commit-message: "Changelog: Nextflow ${{ github.event.release.name || inputs.release_name }}" + title: "Changelog: Nextflow ${{ github.event.release.name || inputs.release_name }}" + body: | + This PR adds the changelog for Nextflow ${{ github.event.release.name || inputs.release_name }} to the Seqera documentation. + + This is an automated PR created from the Nextflow repository. 
+ branch: changelog-nextflow-${{ github.event.release.name || inputs.release_name }} + base: master + delete-branch: true diff --git a/VERSION b/VERSION index d4c0237907..2b70d664e4 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -24.07.0-edge +24.11.0-edge diff --git a/build.gradle b/build.gradle index a0c72f271a..33c367f0e0 100644 --- a/build.gradle +++ b/build.gradle @@ -15,7 +15,7 @@ */ plugins { - id "io.codearte.nexus-staging" version "0.21.2" + id "io.codearte.nexus-staging" version "0.30.0" id 'java' id 'idea' } @@ -73,12 +73,12 @@ allprojects { } compileJava { - options.release.set(11) + options.release.set(17) } tasks.withType(GroovyCompile) { - sourceCompatibility = '11' - targetCompatibility = '11' + sourceCompatibility = '17' + targetCompatibility = '17' } idea { @@ -109,8 +109,8 @@ allprojects { // Documentation required libraries groovyDoc 'org.fusesource.jansi:jansi:2.4.0' - groovyDoc "org.apache.groovy:groovy-groovydoc:4.0.22" - groovyDoc "org.apache.groovy:groovy-ant:4.0.22" + groovyDoc "org.apache.groovy:groovy-groovydoc:4.0.24" + groovyDoc "org.apache.groovy:groovy-ant:4.0.24" } test { @@ -147,7 +147,6 @@ allprojects { // Required to run tests on Java 9 and higher in compatibility mode tasks.withType(Test) { jvmArgs ([ - '-Dorg.spockframework.mock.ignoreByteBuddy=true', '--enable-preview', '--add-opens=java.base/java.lang=ALL-UNNAMED', '--add-opens=java.base/java.io=ALL-UNNAMED', diff --git a/buildSrc/build.gradle b/buildSrc/build.gradle index fa35d4112b..888b0de54a 100644 --- a/buildSrc/build.gradle +++ b/buildSrc/build.gradle @@ -11,7 +11,7 @@ version = "1.0.1" group = "io.nextflow" dependencies { - implementation ('com.amazonaws:aws-java-sdk-s3:1.12.129') + implementation ('com.amazonaws:aws-java-sdk-s3:1.12.777') implementation 'com.google.code.gson:gson:2.10.1' } diff --git a/changelog.txt b/changelog.txt index 33b8299300..54fba39ad4 100644 --- a/changelog.txt +++ b/changelog.txt @@ -1,10 +1,155 @@ NEXTFLOW CHANGE-LOG =================== +24.11.0-edge - 3 Dec 2024 +- Add GHA to submit dependencies to dependabot (#5440) [80395a6d] +- Add NotFoundException to retry condition for Google Batch [aa4d19cc] +- Add Rahel Hirsch to run name generator (#5442) [ff2bc6ae] +- Add `env()` function (#5506) [fa0e8e0f] +- Add more scientists to run name generator (#5447) [38d9eda0] +- Add `singularity.libraryDir` to config page (#5498) [b5e31bb0] +- Add RepositoryProvider.revision now public property (#5500) [f0a4c526] +- Deprecate process `shell` block (#5508) [6f527551] +- Detecting errors in data unstaging (#5345) [3c8e602d] +- Disable virtual threads on CI tests [ci slip] [69d07dbc] +- Fix Fargate warning on memory check (#5475) [bdf0ad00] +- Fix `isContainerReady` when wave is disabled (#5509) [3215afa8] +- Fix missing wave response (#5547) [ee252173] +- Fix overlapping conda lock file (#5540) [9248c04d] +- Fix overlapping conda lock exception (#5489) [eaaeb3de] +- Fix possible deadlock in dynamic `maxRetry` resolution (#5474) [25bbb621] +- Fix Wave GCP integration test (#5490) [ad56c89b] +- Fixing bug when execution with stub and no stub defined (#5473) [f7fd56db] +- Fix Incorrect CPU value in Azure example (#5549) [fc5e2c2a] +- Improve docs for using the GPU accelerator directive (#5488) [4b908524] +- Improve groupTuple docs with scatter/gather example (#5520) [b5c63a9f] +- Prevent NPE with null AWS Batch response [12fc1d60] +- Target Java 17 as minimal Java version (#5045) [0140f954] +- Update 'nexus-staging' plugin to latest version (#5462) [07934513] +- Update gradle 
'shadow' plugin version to 8.3.5 (#5463) [2a5f15f0]
+- Update install docs to reflect change from 'all' to 'dist' (#5496) [c9115659]
+- Update process snippets to comply with strict syntax (#5526) [be1694bf]
+- Update Wave dependencies [09ccd295]
+- Bump aws sdk 1.12.777 (#5458) [8bad0b4b]
+- Bump bouncycastle to jdk18on:1.78.1 (#5467) [cd8c385f]
+- Bump com.azure:azure-identity from 1.11.3 to 1.12.2 (#5449) [cb70f1df]
+- Bump commons-io:2.15.1 [767e4a0a]
+- Bump groovy 4.0.24 [dd71ad31]
+- Bump netty-common to version 4.1.115.Final [d1bbd3d0]
+- Bump nf-amazon@2.10.0 [2b653b07]
+- Bump nf-azure@1.11.0 [6af7198d]
+- Bump nf-google@1.16.0 [9494f970]
+- Bump nf-google@1.8.0 [7e2c8d82]
+- Bump protobuf-java:3.25.5 to nf-google [488b7906]
+
+24.10.2 - 27 Nov 2024
+- Prevent NPE with null AWS Batch response [3d491934]
+- Fix overlapping conda lock file (#5540) [df66deaa]
+- Fix missing wave response (#5547) [eb85cda8]
+- Bump nf-wave@1.7.4 [93d09404]
+- Bump nf-amazon@2.9.2 [469a35dd]
+
+24.10.1 - 18 Nov 2024
+- Fix overlapping file lock exception (#5489) [a2566d54]
+- Fix isContainerReady when wave is disabled (#5509) [c69e3711]
+- Bump nf-wave@1.7.3 [e7709a0f]
+- Bump nf-azure@1.10.2 [54496ac4]
+- Bump nf-amazon@2.9.1 [fa227933]
+- Bump netty-common to version 4.1.115.Final [90623c1e]
+
+24.10.0 - 27 Oct 2024
+- Add `manifest.contributors` config option (#5322) [cf0f9690]
+- Add wave mirror and scan config [92e69776]
+- Add wave mirror vs module bundles conflicts warning [b37a8a5b]
+- Add Workflow output definition (second preview) (#5185) [ea128463]
+- Demote azure batch task status log level to trace (#5416) [d6c684bb]
+- Document formal script syntax (#5336) [a48ac589]
+- Add Expose trace record meta info in the task context (#5402) [ffdd3819]
+- Fix NextflowMeta compilation error [500f49c0]
+- Fix XFileSystem serialization issue (#5411) [711864fd]
+- Fix closure rendering in yaml and json config (#5408) [d3a85ceb]
+- Fix failing docs snippet (#5430) [a818f4b8]
+- Fix http files stream (#5405) [e2e stage] [718dcbe6]
+- Fix nf-tower plugin to upload logs when early failures (#5434) [4222442a]
+- Fix support for micromamba (#4302) [12431302]
+- Harmonise documentation for hybrid cloud execution (#5362) [a69407d2]
+- Prevent NPE when containerInfo is null [368a266a]
+- Remove invalid equals char (#5414) [7f39e6ed]
+- Bump nf-azure@1.10.1 [f67e157a]
+- Bump nf-tower@1.9.3 [7a572a12]
+- Bump nf-wave@1.7.2 [6524d8dc]
+
+24.09.2-edge - 14 Oct 2024
+- Add Google LS deprecation notice (#5400) [0ee1d9bc]
+- Add hybrid workflow documentation for Azure (#5361) [f81f27de]
+- Add more out formats to config command (#5399) [8acdb6e8]
+- Change scan.levels to scan.allowedLevels (#5401) [88a1b1b5]
+- Fix inspect concretize option [0ee29a87]
+- Manage moduleBinaries feature flag in the pipeline script (#5348) [dd5fd20f]
+- Bump nf-wave@1.7.1 [94273967]
+- Bump nf-google@1.15.2 [fab01d58]
+
+24.09.1-edge - 13 Oct 2024
+- Add retry policy in Channel.fromSRA factory (#5389) [fb1c8b2e]
+- Add retry policy to Google batch describe task (#5356) [64bb5a92]
+- Add support for Wave container status API (#5384) [873703ad] [9ed18a88]
+- Fix Identify default git branch when downloading pipelines (#3593) (#5375) [4f74fc37]
+- Fix issue when retrying with stub (#5359) [324b611f]
+- Fix include aborted in the ProgressRecord total count (#5365) [8eb0c393]
+- Redirect logs to stderr during app bootstrap (#5206) [94918105]
+- Remove unused reportsOpts (#5379) [e794e868]
+- Sunsetting Spack support in Wave [3a54cb3b] [7bbba67b]
+- Bump nf-google@1.15.1 [4204435e]
+- Bump nf-wave@1.7.0 [cdecb3f5]
+
+24.09.0-edge - 2 Oct 2024
+- Add Fusion version pinning info to Fusion config scope (#5294) [be1cac37]
+- Add container tip in the task error report (#5299) [62e26043]
+- Add support for Java 23 (#5329) [6e10c372]
+- Add Platform workflow prefix in AWS Batch job names (#5318) [42dd4ba8]
+- Fix AWS spot attempts with zero value (#5331) [bac2da12]
+- Fix Azure Fusion env misses credentials when no key or SAS provided (#5328) [e11382c8]
+- Fix CI build taking only the commit msg header [9e702c4d]
+- Fix Inconsistency with camelCase and kebab-case config params #4702 (solve #4033) [349f4f6f]
+- Fix XPath default de-serializer issue (#5339) [290f2d07]
+- Fix failOnIgnore causes task monitor to stop submitting tasks (#5293) [d6870335]
+- Fix large mermaid diagrams (#5302) [a46edfa0]
+- Improve Wave build timeout handling (#5304) [05bef7e4]
+- Include additional fields to manifest (#5314) [33fab52d]
+- Remove `mini` from Flux submit command (#5229) [a0f69025]
+- Update container handling with charliecloud (#5300) [8e6068db]
+- Update Documentation structure (#4766) [25e5eba0]
+- Update nextflow install docs (#5198) [7c453815]
+- Bump groovy 4.0.23 (#5303) [fe3e3ac7]
+- Bump nf-wave@1.6.0 [179093dc]
+- Bump nf-azure@1.10.0 [41d37fa8]
+- Bump nf-amazon@2.9.0 [e38980fb]
+
+24.08.0-edge - 4 Sep 2024
+- Add Google Batch warning for conflicting disk image config (#5279) [96cb57cb]
+- Add support for Google Batch user specified boot images (#5268) [0aaa6482]
+- Disable AWS spot retry (#5215) [f28fcb25]
+- Disable Google Batch automatic spot retries (#5223) [aad21533]
+- Disable automatic detection of virtual threads (#5270) [b3ba2c2d]
+- Fix missing .command.env when eval is used and task runs on a cloud env [4a6b54aa]
+- Fix job array syntax for PBS/Torque executor (#5281) [d59f5fae]
+- Fix k8s client status cond is possible null in podState (#5264) [46672415]
+- Fix non-deterministic behaviour of join operator with bag array as key (#5189) [e7dc0d69]
+- Fix stage retry on corrupted HTTP downloads (#5275) [bf0cd326]
+- Support Azure Managed Identities in Fusion configuration logic (#5278) [a0bf8b40]
+- Use public.cr.seqera.io in place of AWS ECR [5a01f277]
+- Wave client logs improvement [5a37e617]
+- Bump amazoncorretto:21-al2023 [59aed581]
+- Bump nf-wave@1.5.1 [97c4e08f]
+- Bump nf-google@1.15.0 [24133f2a]
+- Bump nf-azure@1.9.0 [29f49ba7]
+- Bump nf-amazon@2.8.0 [bbc3adca]
+
 24.07.0-edge - 8 Aug 2024
 - Add runtime error for missing channel factory (#5170) [1f9210ab]
 - Apply k8s.cpuLimits to kuberun driver pod (#5160) [4300adf1]
 - Await build completion for all Wave containers [2b8117e9]
-- Deprecate module addParams() and params() (#5200) [ci fast] [82c97f8c]
+- Deprecate module addParams() and params() (#5200) [82c97f8c]
+- Remove capsule launcher dependencies (#3395) [f15e4246]
 - Fix AWS Cloudwatch access when using custom log group name [30195838]
 - Fix Invalid AWS Fargate CPUs usage error reporting [d9c50e59]
 - Fix Prevent AwS Batch retry the job execution when the container does not exist [4e218f22]
@@ -60,7 +205,7 @@ NEXTFLOW CHANGE-LOG
 - Bump jgit 6.10.0 [4cf6b9f7]

 24.04.3 - 9 Jul 2024
-- Add ability to override failOnError setting default via env variable (#5117) [ci fast] [6852429c]
+- Add ability to override failOnError setting default via env variable (#5117) [6852429c]
 - Fix normalization of consecutive slashes in uri path (#5114) [3f366b7e]
 - Fix executions hangs on finalisation exception (#5070)
[4c207c23] - Bump nf-google@1.13.2-patch1 [55ec5ec5] diff --git a/docker-scratch/Dockerfile b/docker-scratch/Dockerfile deleted file mode 100644 index c2919602af..0000000000 --- a/docker-scratch/Dockerfile +++ /dev/null @@ -1,11 +0,0 @@ -FROM amazoncorretto:17-al2023 -COPY .nextflow /.nextflow -COPY nextflow /usr/bin/nextflow -ENV NXF_HOME=/.nextflow -RUN nextflow info -RUN NXF_PLUGINS_DEFAULT=false nextflow plugin install nf-tower,nf-wave,nf-cloudcache,nf-azure,nf-google,nf-amazon,xpack-amzn,xpack-google,nf-cloudcache - -#FROM scratch -#COPY --from=0 /.nextflow /.nextflow -#COPY --from=0 /usr/bin/nextflow /usr/bin/nextflow - diff --git a/docker-scratch/make.sh b/docker-scratch/make.sh deleted file mode 100644 index 8217e6b44b..0000000000 --- a/docker-scratch/make.sh +++ /dev/null @@ -1,58 +0,0 @@ -# -# Copyright 2013-2024, Seqera Labs -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# - -# cleanup -rm -rf .nextflow && mkdir .nextflow -# copy nextflow dependencies -(cd .. -./gradlew compile assemble -BUILD_PACK=1 ./gradlew installScratch publishToMavenLocal -Dmaven.repo.local=${PWD}/docker-scratch/.nextflow/capsule/deps/ -) - -# copy nextflow launcher script -cp ../nextflow . && chmod +x nextflow -cp ../modules/nextflow/src/main/resources/META-INF/build-info.properties . -source build-info.properties - -if [ -z "$version" ]; then - echo "Error: version is empty or missing"; exit 1 -fi -if [ -z "$build" ]; then - echo "Error: build is empty or missing"; exit 1 -fi -if [ -z "$commitId" ]; then - echo "Error: commitId is empty or missing"; exit 1 -fi - - -TAG=${version}-${commitId} -repository=${repository:-'docker.io/pditommaso/nf-launcher-dev'} -image=${repository}:${TAG} -base=${base:-'docker.io/pditommaso/nf-lancher:j17-base'} - -docker buildx build \ - --no-cache \ - --platform linux/amd64 \ - --output=type=docker \ - --progress=plain \ - --tag ${image} \ - --build-arg TARGETPLATFORM=linux/amd64 \ - . - -#launcher=$(wave -i ${base} --include ${image} --config-env NXF_HOME=/.nextflow) -# -#echo $launcher diff --git a/docker/Makefile b/docker/Makefile index 894e568577..e65294f7fe 100644 --- a/docker/Makefile +++ b/docker/Makefile @@ -24,14 +24,10 @@ build-arm: dist/docker/arm64 docker buildx build --platform linux/arm64 --output=type=docker --progress=plain --tag nextflow/nextflow:${version} --build-arg TARGETPLATFORM=linux/arm64 . 
release: build - docker tag nextflow/nextflow:${version} nextflow/nextflow:latest docker push nextflow/nextflow:${version} - docker push nextflow/nextflow:latest # - docker tag nextflow/nextflow:${version} public.ecr.aws/seqera-labs/nextflow:${version} - docker tag nextflow/nextflow:${version} public.ecr.aws/seqera-labs/nextflow:latest - docker push public.ecr.aws/seqera-labs/nextflow:${version} - docker push public.ecr.aws/seqera-labs/nextflow:latest + docker tag nextflow/nextflow:${version} public.cr.seqera.io/nextflow/nextflow:${version} + docker push public.cr.seqera.io/nextflow/nextflow:${version} #Static builds can now be found at: # diff --git a/docs/Dockerfile b/docs/Dockerfile index 967b660bd3..a961249faf 100644 --- a/docs/Dockerfile +++ b/docs/Dockerfile @@ -1,6 +1,4 @@ -FROM mambaorg/micromamba:1.3.1 - -MAINTAINER Ben Sherman +FROM mambaorg/micromamba:1.5.8 RUN micromamba install --yes --name base --channel conda-forge \ make=4.3 \ diff --git a/docs/_static/dag.mmd b/docs/_static/dag.mmd index 0963e2c106..989a536ae9 100644 --- a/docs/_static/dag.mmd +++ b/docs/_static/dag.mmd @@ -1,3 +1,17 @@ +%%{ + init: { + 'theme': 'base', + 'themeVariables': { + 'primaryColor': '#B6ECE2', + 'primaryTextColor': '#160F26', + 'primaryBorderColor': '#065647', + 'lineColor': '#545555', + 'clusterBkg': '#BABCBD22', + 'clusterBorder': '#DDDEDE', + 'fontFamily': 'arial' + } + } +}%% flowchart TB subgraph " " v0["Channel.fromFilePairs"] diff --git a/docs/_static/report-resource-memory.png b/docs/_static/report-resource-memory.png deleted file mode 100644 index 9460c5b343..0000000000 Binary files a/docs/_static/report-resource-memory.png and /dev/null differ diff --git a/docs/_templates/layout.html b/docs/_templates/layout.html index f928363914..ad62698389 100644 --- a/docs/_templates/layout.html +++ b/docs/_templates/layout.html @@ -10,7 +10,7 @@ @@ -21,7 +21,7 @@ {% endblock %} diff --git a/docs/amazons3.md b/docs/amazons3.md index 41bcb407fa..6da7ac4b0d 100644 --- a/docs/amazons3.md +++ b/docs/amazons3.md @@ -1,6 +1,6 @@ (amazons3-page)= -# AWS S3 storage +# Amazon S3 Nextflow includes support for AWS S3 storage. Files stored in an S3 bucket can be accessed transparently in your pipeline script like any other file in the local file system. @@ -16,11 +16,11 @@ s3://my-bucket/data/sequences.fa The usual file operations can be applied to a path handle with the above notation. For example, the content of an S3 file can be printed as follows: -```groovy +```nextflow println file('s3://my-bucket/data/sequences.fa').text ``` -See the {ref}`script-file-io` section to learn more about available file operations. +See {ref}`working-with-files` and the {ref}`stdlib-types-path` reference to learn more about available file operations. ## Security credentials diff --git a/docs/aws.md b/docs/aws.md index abecc989ed..3de628e3cc 100644 --- a/docs/aws.md +++ b/docs/aws.md @@ -1,6 +1,6 @@ (aws-page)= -# AWS Cloud +# Amazon Web Services :::{tip} This page describes how to manually set up and use Nextflow with AWS Cloud. 
@@ -46,53 +46,54 @@ Minimal permissions policies to be attached to the AWS account used by Nextflow - To use AWS Batch: ```json - "batch:DescribeJobQueues" "batch:CancelJob" - "batch:SubmitJob" - "batch:ListJobs" "batch:DescribeComputeEnvironments" - "batch:TerminateJob" + "batch:DescribeJobDefinitions" + "batch:DescribeJobQueues" "batch:DescribeJobs" + "batch:ListJobs" "batch:RegisterJobDefinition" - "batch:DescribeJobDefinitions" + "batch:SubmitJob" + "batch:TagResource" + "batch:TerminateJob" ``` - To view [EC2](https://aws.amazon.com/ec2/) instances: ```json - "ecs:DescribeTasks" + "ec2:DescribeInstanceAttribute" "ec2:DescribeInstances" + "ec2:DescribeInstanceStatus" "ec2:DescribeInstanceTypes" - "ec2:DescribeInstanceAttribute" "ecs:DescribeContainerInstances" - "ec2:DescribeInstanceStatus" + "ecs:DescribeTasks" ``` - To pull container images from [ECR](https://aws.amazon.com/ecr/) repositories: ```json - "ecr:GetAuthorizationToken" "ecr:BatchCheckLayerAvailability" - "ecr:GetDownloadUrlForLayer" - "ecr:GetRepositoryPolicy" - "ecr:DescribeRepositories" - "ecr:ListImages" - "ecr:DescribeImages" "ecr:BatchGetImage" + "ecr:DescribeImages" + "ecr:DescribeImageScanFindings" + "ecr:DescribeRepositories" + "ecr:GetAuthorizationToken" + "ecr:GetDownloadUrlForLayer" "ecr:GetLifecyclePolicy" "ecr:GetLifecyclePolicyPreview" + "ecr:GetRepositoryPolicy" + "ecr:ListImages" "ecr:ListTagsForResource" - "ecr:DescribeImageScanFindings" ``` :::{note} If you are running Fargate or Fargate Spot, you may need the following policies in addition to the listed above: ```json + "ec2:DescribeSubnets" "ecs:CreateCluster" "ecs:DeleteCluster" "ecs:DescribeClusters" "ecs:ListClusters" - "ec2:DescribeSubnets" ``` ::: @@ -252,7 +253,7 @@ Container options may be passed in long form (e.g `--option value`) or short for Few examples: -```groovy +```nextflow containerOptions '--tmpfs /run:rw,noexec,nosuid,size=128 --tmpfs /app:ro,size=64' containerOptions '-e MYVAR1 --env MYVAR2=foo2 --env MYVAR3=foo3 --memory-swap 3240000 --memory-swappiness 20 --shm-size 16000000' @@ -408,20 +409,6 @@ To do that, first create a **Job Definition** in the AWS Console (or by other me process.container = 'job-definition://your-job-definition-name' ``` -### Pipeline execution - -The pipeline can be launched either in a local computer or an EC2 instance. The latter is suggested for heavy or long-running workloads. - -Pipeline input data can be stored either locally or in an [S3](https://aws.amazon.com/s3/) bucket. The pipeline execution must specify an S3 bucket to store intermediate results with the `-bucket-dir` (`-b`) command line option. For example: - -```bash -nextflow run my-pipeline -bucket-dir s3://my-bucket/some/path -``` - -:::{warning} -The bucket path should include at least a top level directory name, e.g. `s3://my-bucket/work` rather than `s3://my-bucket`. -::: - ### Hybrid workloads Nextflow allows the use of multiple executors in the same workflow application. This feature enables the deployment of hybrid workloads in which some jobs are executed in the local computer or local computing cluster and some jobs are offloaded to AWS Batch. @@ -429,13 +416,6 @@ Nextflow allows the use of multiple executors in the same workflow application. To enable this feature, use one or more {ref}`config-process-selectors` in your Nextflow configuration to apply the AWS Batch configuration to the subset of processes that you want to offload. 
For example: ```groovy -aws { - region = 'eu-west-1' - batch { - cliPath = '/home/ec2-user/miniconda/bin/aws' - } -} - process { withLabel: bigTask { executor = 'awsbatch' @@ -443,9 +423,27 @@ process { container = 'my/image:tag' } } + +aws { + region = 'eu-west-1' +} ``` -With the above configuration, processes with the `bigTask` {ref}`process-label` will run on AWS Batch, while the remaining processes with run in the local computer. +With the above configuration, processes with the `bigTask` {ref}`process-label` will run on AWS Batch, while the remaining processes will run in the local computer. + +Then launch the pipeline with the -bucket-dir option to specify an AWS S3 path for the jobs computed with AWS Batch and, optionally, the -work-dir to specify the local storage for the jobs computed locally: + +```bash +nextflow run diff --git a/modules/nextflow/src/main/resources/nextflow/executor/command-run.txt b/modules/nextflow/src/main/resources/nextflow/executor/command-run.txt index 2bf34b617a..26cbf6a829 100644 --- a/modules/nextflow/src/main/resources/nextflow/executor/command-run.txt +++ b/modules/nextflow/src/main/resources/nextflow/executor/command-run.txt @@ -99,7 +99,13 @@ nxf_fs_fcp() { } on_exit() { - exit_status=${nxf_main_ret:=$?} + ## Capture possible errors. + ## Can be caused either by the task script, unstage script or after script if defined + local last_err=$? + ## capture the task error first or fallback to unstage error + local exit_status=${nxf_main_ret:=0} + [[ ${exit_status} -eq 0 && ${nxf_unstage_ret:=0} -ne 0 ]] && exit_status=${nxf_unstage_ret:=0} + [[ ${exit_status} -eq 0 && ${last_err} -ne 0 ]] && exit_status=${last_err} printf -- $exit_status {{exit_file}} set +u {{cleanup_cmd}} @@ -121,13 +127,26 @@ nxf_stage() { {{stage_inputs}} } -nxf_unstage() { +nxf_unstage_outputs() { true - {{unstage_controls}} - [[ ${nxf_main_ret:=0} != 0 ]] && return {{unstage_outputs}} } +nxf_unstage_controls() { + true + {{unstage_controls}} +} + +nxf_unstage() { + ## Deactivate fast failure to allow uploading stdout and stderr files later + if [[ ${nxf_main_ret:=0} == 0 ]]; then + ## Data unstaging redirecting stdout and stderr with append mode + (set -e -o pipefail; (nxf_unstage_outputs | tee -a {{stdout_file}}) 3>&1 1>&2 2>&3 | tee -a {{stderr_file}}) + nxf_unstage_ret=$? 
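+ ## Note on the redirections above: `3>&1 1>&2 2>&3` swaps stdout and stderr
+ ## around the outer pipe, so the inner `tee -a` appends the unstage stdout to
+ ## {{stdout_file}} while the outer `tee -a` appends the unstage stderr to
+ ## {{stderr_file}}. `set -e -o pipefail` makes any failure inside the subshell
+ ## surface in `$?`, captured here as nxf_unstage_ret; on_exit() then folds it
+ ## into the task exit code, with the main task error taking precedence over an
+ ## unstage error, which in turn takes precedence over any later command failure.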
+ fi + nxf_unstage_controls +} + nxf_main() { trap on_exit EXIT trap on_term TERM INT USR2 diff --git a/modules/nextflow/src/main/resources/nextflow/executor/command-trace.txt b/modules/nextflow/src/main/resources/nextflow/executor/command-trace.txt index 8b5b2195f6..cf9d0b5d77 100644 --- a/modules/nextflow/src/main/resources/nextflow/executor/command-trace.txt +++ b/modules/nextflow/src/main/resources/nextflow/executor/command-trace.txt @@ -114,26 +114,28 @@ nxf_mem_watch() { done ## result struct: pid %mem vmem rss peak_vmem peak_rss - echo "%mem=${nxf_stat_ret[1]}" >> $trace_file - echo "vmem=${nxf_stat_ret[2]}" >> $trace_file - echo "rss=${nxf_stat_ret[3]}" >> $trace_file - echo "peak_vmem=${nxf_stat_ret[4]}" >> $trace_file - echo "peak_rss=${nxf_stat_ret[5]}" >> $trace_file - echo "vol_ctxt=${nxf_stat_ret[6]}" >> $trace_file - echo "inv_ctxt=${nxf_stat_ret[7]}" >> $trace_file + printf "%s\n" \ + "%mem=${nxf_stat_ret[1]}" \ + "vmem=${nxf_stat_ret[2]}" \ + "rss=${nxf_stat_ret[3]}" \ + "peak_vmem=${nxf_stat_ret[4]}" \ + "peak_rss=${nxf_stat_ret[5]}" \ + "vol_ctxt=${nxf_stat_ret[6]}" \ + "inv_ctxt=${nxf_stat_ret[7]}" >> "$trace_file" || >&2 echo "Error: Failed to append to file: $trace_file" } nxf_write_trace() { - echo "nextflow.trace/v2" > $trace_file - echo "realtime=$wall_time" >> $trace_file - echo "%cpu=$ucpu" >> $trace_file - echo "cpu_model=$cpu_model" >> $trace_file - echo "rchar=${io_stat1[0]}" >> $trace_file - echo "wchar=${io_stat1[1]}" >> $trace_file - echo "syscr=${io_stat1[2]}" >> $trace_file - echo "syscw=${io_stat1[3]}" >> $trace_file - echo "read_bytes=${io_stat1[4]}" >> $trace_file - echo "write_bytes=${io_stat1[5]}" >> $trace_file + printf "%s\n" \ + "nextflow.trace/v2" \ + "realtime=$wall_time" \ + "%cpu=$ucpu" \ + "cpu_model=$cpu_model" \ + "rchar=${io_stat1[0]}" \ + "wchar=${io_stat1[1]}" \ + "syscr=${io_stat1[2]}" \ + "syscw=${io_stat1[3]}" \ + "read_bytes=${io_stat1[4]}" \ + "write_bytes=${io_stat1[5]}" > "$trace_file" || >&2 echo "Error: Failed to write to file: $trace_file" } nxf_trace_mac() { @@ -199,16 +201,17 @@ nxf_trace_linux() { local wall_time=$((end_millis-start_millis)) [ $NXF_DEBUG = 1 ] && echo "+++ STATS %CPU=$ucpu TIME=$wall_time I/O=${io_stat1[*]}" - echo "nextflow.trace/v2" > $trace_file - echo "realtime=$wall_time" >> $trace_file - echo "%cpu=$ucpu" >> $trace_file - echo "cpu_model=$cpu_model" >> $trace_file - echo "rchar=${io_stat1[0]}" >> $trace_file - echo "wchar=${io_stat1[1]}" >> $trace_file - echo "syscr=${io_stat1[2]}" >> $trace_file - echo "syscw=${io_stat1[3]}" >> $trace_file - echo "read_bytes=${io_stat1[4]}" >> $trace_file - echo "write_bytes=${io_stat1[5]}" >> $trace_file + printf "%s\n" \ + "nextflow.trace/v2" \ + "realtime=$wall_time" \ + "%cpu=$ucpu" \ + "cpu_model=$cpu_model" \ + "rchar=${io_stat1[0]}" \ + "wchar=${io_stat1[1]}" \ + "syscr=${io_stat1[2]}" \ + "syscw=${io_stat1[3]}" \ + "read_bytes=${io_stat1[4]}" \ + "write_bytes=${io_stat1[5]}" > "$trace_file" || >&2 echo "Error: Failed to write to file: $trace_file" ## join nxf_mem_watch [ -e /proc/$mem_proc ] && eval "echo 'DONE' >&$mem_fd" || true diff --git a/modules/nextflow/src/test/groovy/nextflow/ChannelTest.groovy b/modules/nextflow/src/test/groovy/nextflow/ChannelTest.groovy index e954c4bbcc..c7b45a6395 100644 --- a/modules/nextflow/src/test/groovy/nextflow/ChannelTest.groovy +++ b/modules/nextflow/src/test/groovy/nextflow/ChannelTest.groovy @@ -990,4 +990,23 @@ class ChannelTest extends Specification { } + def 'should not fail when setting SRA correct properties' () { 
+ given: + def id = 'SRR389222' + def retryPolicy = [maxAttempts: 2] + + when: + def result = Channel.fromSRA(id, apiKey: '1234', retryPolicy: retryPolicy, cache: false, max: 10, protocol: 'http') + then: + result != null + + } + + def 'should fail when SRA incorrect property' () { + when: + def result = Channel.fromSRA('SRR389222', incorrectKey: '1234') + then: + thrown(IllegalArgumentException) + } + } diff --git a/modules/nextflow/src/test/groovy/nextflow/NextflowTest.groovy b/modules/nextflow/src/test/groovy/nextflow/NextflowTest.groovy index c32a461ecb..f49db65779 100644 --- a/modules/nextflow/src/test/groovy/nextflow/NextflowTest.groovy +++ b/modules/nextflow/src/test/groovy/nextflow/NextflowTest.groovy @@ -35,6 +35,17 @@ class NextflowTest extends Specification { System.getenv('CI_GROOVY_VERSION') == GroovySystem.getVersion() } + def 'should get an environment variable' () { + given: + SysEnv.push(FOO: 'FOO_VALUE') + + expect: + Nextflow.env('FOO') == 'FOO_VALUE' + + cleanup: + SysEnv.pop() + } + def testFile() { expect: Nextflow.file('file.log').toFile() == new File('file.log').canonicalFile diff --git a/modules/nextflow/src/test/groovy/nextflow/SessionTest.groovy b/modules/nextflow/src/test/groovy/nextflow/SessionTest.groovy index fbcb3469e6..edcf06c4ff 100644 --- a/modules/nextflow/src/test/groovy/nextflow/SessionTest.groovy +++ b/modules/nextflow/src/test/groovy/nextflow/SessionTest.groovy @@ -601,16 +601,22 @@ class SessionTest extends Specification { } + @Unroll def 'should get module binaries status'() { given: - def session = new Session(CONFIG) + def session = new Session() + NextflowMeta.instance.moduleBinaries(MODE) expect: session.enableModuleBinaries() == EXPECTED - + + cleanup: + NextflowMeta.instance.moduleBinaries(false) + where: - CONFIG | EXPECTED - [:] | false - [nextflow:[enable:[moduleBinaries: true]]] | true + MODE | EXPECTED + false | false + true | true + } } diff --git a/modules/nextflow/src/test/groovy/nextflow/cli/CmdConfigTest.groovy b/modules/nextflow/src/test/groovy/nextflow/cli/CmdConfigTest.groovy index 232332b52b..a2f97b7496 100644 --- a/modules/nextflow/src/test/groovy/nextflow/cli/CmdConfigTest.groovy +++ b/modules/nextflow/src/test/groovy/nextflow/cli/CmdConfigTest.groovy @@ -90,6 +90,7 @@ class CmdConfigTest extends Specification { .stripIndent().leftTrim() } + def 'should canonical notation' () { given: @@ -196,6 +197,86 @@ class CmdConfigTest extends Specification { } + def 'should print config using json' () { + given: + ByteArrayOutputStream buffer + ConfigObject config + def cmd = new CmdConfig(sort: true) + + when: + buffer = new ByteArrayOutputStream() + + config = new ConfigObject() + config.process.executor = 'slurm' + config.process.queue = 'long' + config.docker.enabled = true + config.dummy = new ConfigObject() // <-- empty config object should not be print + config.mail.from = 'yo@mail.com' + config.mail.smtp.host = 'mail.com' + config.mail.smtp.port = 25 + config.mail.smtp.user = 'yo' + + cmd.printJson0(config, buffer) + then: + buffer.toString() == '''\ + { + "docker": { + "enabled": true + }, + "mail": { + "from": "yo@mail.com", + "smtp": { + "host": "mail.com", + "port": 25, + "user": "yo" + } + }, + "process": { + "executor": "slurm", + "queue": "long" + } + } + ''' + .stripIndent() + } + + def 'should print config using yaml' () { + given: + ByteArrayOutputStream buffer + ConfigObject config + def cmd = new CmdConfig(sort: true) + + when: + buffer = new ByteArrayOutputStream() + + config = new ConfigObject() + 
config.process.executor = 'slurm' + config.process.queue = 'long' + config.docker.enabled = true + config.dummy = new ConfigObject() // <-- empty config object should not be print + config.mail.from = 'yo@mail.com' + config.mail.smtp.host = 'mail.com' + config.mail.smtp.port = 25 + config.mail.smtp.user = 'yo' + + cmd.printYaml0(config, buffer) + then: + buffer.toString() == '''\ + docker: + enabled: true + mail: + from: yo@mail.com + smtp: + host: mail.com + port: 25 + user: yo + process: + executor: slurm + queue: long + ''' + .stripIndent() + } + def 'should parse config file' () { given: def folder = Files.createTempDirectory('test') @@ -409,4 +490,101 @@ class CmdConfigTest extends Specification { folder?.deleteDir() SysEnv.pop() } + + def 'should render json config output' () { + given: + def folder = Files.createTempDirectory('test') + def CONFIG = folder.resolve('nextflow.config') + + CONFIG.text = ''' + manifest { + author = 'me' + mainScript = 'foo.nf' + } + + process { + cpus = 4 + queue = 'cn-el7' + memory = { 10.GB } + ext.other = { 10.GB * task.attempt } + } + ''' + def buffer = new ByteArrayOutputStream() + // command definition + def cmd = new CmdConfig(outputFormat: 'json') + cmd.launcher = new Launcher(options: new CliOptions(config: [CONFIG.toString()])) + cmd.stdout = buffer + cmd.args = [ '.' ] + + when: + cmd.run() + + then: + buffer.toString() == ''' + { + "manifest": { + "author": "me", + "mainScript": "foo.nf" + }, + "process": { + "cpus": 4, + "queue": "cn-el7", + "memory": "{ 10.GB }", + "ext": { + "other": "{ 10.GB * task.attempt }" + } + } + } + ''' + .stripIndent().leftTrim() + + cleanup: + folder.deleteDir() + } + + def 'should render yaml config output' () { + given: + def folder = Files.createTempDirectory('test') + def CONFIG = folder.resolve('nextflow.config') + + CONFIG.text = ''' + manifest { + author = 'me' + mainScript = 'foo.nf' + } + + process { + cpus = 4 + queue = 'cn-el7' + memory = { 10.GB } + ext.other = { 10.GB * task.attempt } + } + ''' + def buffer = new ByteArrayOutputStream() + // command definition + def cmd = new CmdConfig(outputFormat: 'yaml') + cmd.launcher = new Launcher(options: new CliOptions(config: [CONFIG.toString()])) + cmd.stdout = buffer + cmd.args = [ '.' 
] + + when: + cmd.run() + + then: + buffer.toString() == ''' + manifest: + author: me + mainScript: foo.nf + process: + cpus: 4 + queue: cn-el7 + memory: '{ 10.GB }' + ext: + other: '{ 10.GB * task.attempt }' + ''' + .stripIndent().leftTrim() + + cleanup: + folder.deleteDir() + } } diff --git a/modules/nextflow/src/test/groovy/nextflow/cli/CmdInfoTest.groovy b/modules/nextflow/src/test/groovy/nextflow/cli/CmdInfoTest.groovy index ab2be742bd..e8949c946d 100644 --- a/modules/nextflow/src/test/groovy/nextflow/cli/CmdInfoTest.groovy +++ b/modules/nextflow/src/test/groovy/nextflow/cli/CmdInfoTest.groovy @@ -71,6 +71,7 @@ class CmdInfoTest extends Specification { screen.contains(" main script : main.nf") screen.contains(" revisions : ") screen.contains(" * master (default)") + !screen.contains(" HEAD") } def 'should print json info' () { @@ -89,11 +90,12 @@ class CmdInfoTest extends Specification { json.repository == "https://github.com/nextflow-io/hello" json.localPath == "$tempDir/nextflow-io/hello" json.manifest.mainScript == 'main.nf' - json.manifest.defaultBranch == 'master' + json.manifest.defaultBranch == null json.revisions.current == 'master' json.revisions.master == 'master' json.revisions.branches.size()>1 json.revisions.branches.any { it.name == 'master' } + !json.revisions.branches.any { it.name == 'HEAD' } json.revisions.tags.size()>1 json.revisions.tags.any { it.name == 'v1.1' } @@ -115,11 +117,12 @@ class CmdInfoTest extends Specification { json.repository == "https://github.com/nextflow-io/hello" json.localPath == "$tempDir/nextflow-io/hello" json.manifest.mainScript == 'main.nf' - json.manifest.defaultBranch == 'master' + json.manifest.defaultBranch == null json.revisions.current == 'master' json.revisions.master == 'master' json.revisions.branches.size()>1 json.revisions.branches.any { it.name == 'master' } + !json.revisions.branches.any { it.name == 'HEAD' } json.revisions.tags.size()>1 json.revisions.tags.any { it.name == 'v1.1' } diff --git a/modules/nextflow/src/test/groovy/nextflow/cli/CmdInspectTest.groovy b/modules/nextflow/src/test/groovy/nextflow/cli/CmdInspectTest.groovy deleted file mode 100644 index 5f76ecf033..0000000000 --- a/modules/nextflow/src/test/groovy/nextflow/cli/CmdInspectTest.groovy +++ /dev/null @@ -1,54 +0,0 @@ -/* - * Copyright 2013-2024, Seqera Labs - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -package nextflow.cli - - -import spock.lang.Specification -import spock.lang.Unroll -/** - * - * @author Paolo Di Tommaso - */ -class CmdInspectTest extends Specification { - - @Unroll - def 'should ask for confirmation' () { - given: - def cmd = Spy(new CmdInspect(concretize: CONCRETIZE)) - Map wave - - when: - wave = WAVE - cmd.checkWaveConfig(wave) - then: - wave == EXPECTED - - where: - WAVE | CONCRETIZE | EXPECTED - [:] | false | [:] - [:] | true | [:] - and: - [enabled:true] | false | [enabled:true] - [enabled:true] | true | [enabled:true] - and: - [enabled:true, freeze: true] | false | [enabled:true, freeze:true, dryRun: true] - [enabled:true, freeze: true] | true | [enabled:true, freeze:true, dryRun: false] - - } - -} diff --git a/modules/nextflow/src/test/groovy/nextflow/cli/CmdRunTest.groovy b/modules/nextflow/src/test/groovy/nextflow/cli/CmdRunTest.groovy index 426890ef99..2de2397181 100644 --- a/modules/nextflow/src/test/groovy/nextflow/cli/CmdRunTest.groovy +++ b/modules/nextflow/src/test/groovy/nextflow/cli/CmdRunTest.groovy @@ -19,6 +19,7 @@ package nextflow.cli import java.nio.file.Files +import nextflow.NextflowMeta import nextflow.SysEnv import nextflow.config.ConfigMap import nextflow.exception.AbortOperationException @@ -94,6 +95,36 @@ class CmdRunTest extends Specification { [:] | /x.y\.z/ | 'Hola' | ['x': ['y.z': 'Hola']] } + def 'should convert cli params from kebab case to camel case' () { + + when: + def params = [:] + CmdRun.addParam0(params, 'alphaBeta', 1) + CmdRun.addParam0(params, 'alpha-beta', 10) + then: + params['alphaBeta'] == 10 + !params.containsKey('alpha-beta') + + when: + params = [:] + CmdRun.addParam0(params, 'aaa-bbb-ccc', 1) + CmdRun.addParam0(params, 'aaaBbbCcc', 10) + then: + params['aaaBbbCcc'] == 10 + !params.containsKey('aaa-bbb-ccc') + + } + + def 'should convert kebab case to camel case' () { + + expect: + CmdRun.kebabToCamelCase('a') == 'a' + CmdRun.kebabToCamelCase('A') == 'A' + CmdRun.kebabToCamelCase('a-b-c-') == 'aBC' + CmdRun.kebabToCamelCase('aa-bb-cc') == 'aaBbCc' + CmdRun.kebabToCamelCase('alpha-beta-delta') == 'alphaBetaDelta' + CmdRun.kebabToCamelCase('Alpha-Beta-delta') == 'AlphaBetaDelta' + } @Unroll def 'should check run name #STR' () { @@ -392,4 +423,55 @@ class CmdRunTest extends Specification { and: !warning } + + @Unroll + def 'should detect moduleBinaries' () { + given: + NextflowMeta.instance.moduleBinaries(INITIAL) + CmdRun.detectModuleBinaryFeature(new ConfigMap(CONFIG)) + + expect: + NextflowMeta.instance.isModuleBinariesEnabled() == EXPECTED + + cleanup: + NextflowMeta.instance.moduleBinaries(false) + + where: + INITIAL | CONFIG | EXPECTED + true | [nextflow: [enable: [ moduleBinaries: true ]]] | true + false | [nextflow: [enable: [ moduleBinaries: true ]]] | true + false | [nextflow: [enable: [ moduleBinaries: false ]]] | false + true | [nextflow: [enable: [ moduleBinaries: false ]]] | true + false | [:] | false + true | [:] | true + } + + @Unroll + def 'should detect strict mode' () { + given: + NextflowMeta.instance.strictMode(INITIAL) + CmdRun.detectStrictFeature(new ConfigMap(CONFIG), ENV) + + expect: + NextflowMeta.instance.isStrictModeEnabled() == EXPECTED + + cleanup: + NextflowMeta.instance.strictMode(false) + + where: + INITIAL | CONFIG | ENV | EXPECTED + true | [nextflow: [enable: [ strict: true ]]] | [:] | true + false | [nextflow: [enable: [ strict: true ]]] | [:] | true + false | [nextflow: [enable: [ strict: false ]]] | [:] | false + true | [nextflow: [enable: [ strict: false ]]] | [:] | 
true + false | [:] | [:] | false + true | [:] | [:] | true + true | [nextflow: [enable: [ strict: true ]]] | [NXF_ENABLE_STRICT: true ] | true + false | [nextflow: [enable: [ strict: true ]]] | [NXF_ENABLE_STRICT: true ] | true + false | [nextflow: [enable: [ strict: false ]]] | [NXF_ENABLE_STRICT: true ] | false + true | [nextflow: [enable: [ strict: false ]]] | [NXF_ENABLE_STRICT: true ] | true + false | [:] | [NXF_ENABLE_STRICT: true ] | true + true | [:] | [NXF_ENABLE_STRICT: true ] | true + + } } diff --git a/modules/nextflow/src/test/groovy/nextflow/conda/CondaCacheTest.groovy b/modules/nextflow/src/test/groovy/nextflow/conda/CondaCacheTest.groovy index 9e1f0c5ad4..f6b7fac271 100644 --- a/modules/nextflow/src/test/groovy/nextflow/conda/CondaCacheTest.groovy +++ b/modules/nextflow/src/test/groovy/nextflow/conda/CondaCacheTest.groovy @@ -89,6 +89,7 @@ class CondaCacheTest extends Specification { def cache = Spy(CondaCache) def BASE = Paths.get('/conda/envs') def ENV = folder.resolve('foo.yml') + def hash = CondaCache.sipHash(ENV) ENV.text = ''' channels: - bioconda @@ -99,13 +100,12 @@ class CondaCacheTest extends Specification { - bwa=0.7.15 ''' .stripIndent(true) // https://issues.apache.org/jira/browse/GROOVY-9423 - when: def prefix = cache.condaPrefixPath(ENV.toString()) then: 1 * cache.isYamlFilePath(ENV.toString()) 1 * cache.getCacheDir() >> BASE - prefix.toString() == '/conda/envs/foo-9416240708c49c4e627414b46a743664' + prefix.toString() == "/conda/envs/env-${hash}-9416240708c49c4e627414b46a743664" cleanup: folder?.deleteDir() @@ -118,6 +118,7 @@ class CondaCacheTest extends Specification { def cache = Spy(CondaCache) def BASE = Paths.get('/conda/envs') def ENV = Files.createTempFile('test','.yml') + def hash = CondaCache.sipHash(ENV) ENV.text = ''' name: my-env-1.1 channels: @@ -135,7 +136,7 @@ class CondaCacheTest extends Specification { then: 1 * cache.isYamlFilePath(ENV.toString()) 1 * cache.getCacheDir() >> BASE - prefix.toString() == '/conda/envs/my-env-1.1-e7fafe40ca966397a2c0d9bed7181aa7' + prefix.toString() == "/conda/envs/env-${hash}-e7fafe40ca966397a2c0d9bed7181aa7" } @@ -146,6 +147,7 @@ class CondaCacheTest extends Specification { def cache = Spy(CondaCache) def BASE = Paths.get('/conda/envs') def ENV = folder.resolve('bar.txt') + def hash = CondaCache.sipHash(ENV) ENV.text = ''' star=2.5.4a bwa=0.7.15 @@ -159,7 +161,7 @@ class CondaCacheTest extends Specification { 1 * cache.isYamlFilePath(ENV.toString()) 1 * cache.isTextFilePath(ENV.toString()) 1 * cache.getCacheDir() >> BASE - prefix.toString() == '/conda/envs/bar-8a4aa7db8ddb8ce4eb4d450d4814a437' + prefix.toString() == "/conda/envs/env-${hash}-8a4aa7db8ddb8ce4eb4d450d4814a437" cleanup: folder?.deleteDir() @@ -195,9 +197,8 @@ class CondaCacheTest extends Specification { when: // the prefix directory exists ==> no conda command is executed - def result = cache.createLocalCondaEnv(ENV) + def result = cache.createLocalCondaEnv(ENV, PREFIX) then: - 1 * cache.condaPrefixPath(ENV) >> PREFIX 0 * cache.isYamlFilePath(ENV) 0 * cache.runCommand(_) result == PREFIX @@ -222,9 +223,8 @@ class CondaCacheTest extends Specification { when: // the prefix directory exists ==> no mamba command is executed - def result = cache.createLocalCondaEnv(ENV) + def result = cache.createLocalCondaEnv(ENV, PREFIX) then: - 1 * cache.condaPrefixPath(ENV) >> PREFIX 0 * cache.isYamlFilePath(ENV) 0 * cache.runCommand(_) result == PREFIX @@ -240,6 +240,32 @@ class CondaCacheTest extends Specification { } + def 'should create a conda environment - 
using micromamba' () { + + given: + def ENV = 'bwa=1.1.1' + def PREFIX = Files.createTempDirectory('foo') + def cache = Spy(new CondaCache(useMicromamba: true)) + + when: + // the prefix directory exists ==> no mamba command is executed + def result = cache.createLocalCondaEnv(ENV, PREFIX) + then: + 0 * cache.isYamlFilePath(ENV) + 0 * cache.runCommand(_) + result == PREFIX + + when: + PREFIX.deleteDir() + result = cache.createLocalCondaEnv0(ENV, PREFIX) + then: + 1 * cache.isYamlFilePath(ENV) + 0 * cache.makeAbsolute(_) + 1 * cache.runCommand("micromamba create --yes --quiet --prefix $PREFIX $ENV") >> null + result == PREFIX + + } + def 'should create a conda environment using mamba and remote lock file' () { given: @@ -249,9 +275,8 @@ class CondaCacheTest extends Specification { when: // the prefix directory exists ==> no mamba command is executed - def result = cache.createLocalCondaEnv(ENV) + def result = cache.createLocalCondaEnv(ENV, PREFIX) then: - 1 * cache.condaPrefixPath(ENV) >> PREFIX 0 * cache.isYamlFilePath(ENV) 0 * cache.runCommand(_) result == PREFIX @@ -265,6 +290,31 @@ class CondaCacheTest extends Specification { 1 * cache.runCommand("mamba env create --prefix $PREFIX --file $ENV") >> null result == PREFIX + } + def 'should create a conda environment using micromamba and remote lock file' () { + + given: + def ENV = 'http://foo.com/some/file-lock.yml' + def PREFIX = Files.createTempDirectory('foo') + def cache = Spy(new CondaCache(useMicromamba: true)) + + when: + // the prefix directory exists ==> no mamba command is executed + def result = cache.createLocalCondaEnv(ENV, PREFIX) + then: + 0 * cache.isYamlFilePath(ENV) + 0 * cache.runCommand(_) + result == PREFIX + + when: + PREFIX.deleteDir() + result = cache.createLocalCondaEnv0(ENV, PREFIX) + then: + 1 * cache.isYamlFilePath(ENV) + 0 * cache.makeAbsolute(_) + 1 * cache.runCommand("micromamba env create --yes --prefix $PREFIX --file $ENV") >> null + result == PREFIX + } def 'should create conda env with options' () { @@ -301,6 +351,23 @@ class CondaCacheTest extends Specification { result == PREFIX } + def 'should create conda env with options - using micromamba' () { + given: + def ENV = 'bwa=1.1.1' + def PREFIX = Paths.get('/foo/bar') + and: + def cache = Spy(new CondaCache(useMicromamba: true, createOptions: '--this --that')) + + when: + def result = cache.createLocalCondaEnv0(ENV, PREFIX) + then: + 1 * cache.isYamlFilePath(ENV) + 1 * cache.isTextFilePath(ENV) + 0 * cache.makeAbsolute(_) + 1 * cache.runCommand("micromamba create --this --that --yes --quiet --prefix $PREFIX $ENV") >> null + result == PREFIX + } + def 'should create conda env with channels' () { given: def ENV = 'bwa=1.1.1' @@ -336,6 +403,24 @@ class CondaCacheTest extends Specification { } + def 'should create a conda env with a yaml file - using micromamba' () { + + given: + def ENV = 'foo.yml' + def PREFIX = Paths.get('/conda/envs/my-env') + def cache = Spy(new CondaCache(useMicromamba: true)) + + when: + def result = cache.createLocalCondaEnv0(ENV, PREFIX) + then: + 1 * cache.isYamlFilePath(ENV) + 0 * cache.isTextFilePath(ENV) + 1 * cache.makeAbsolute(ENV) >> Paths.get('/usr/base').resolve(ENV) + 1 * cache.runCommand( "micromamba env create --yes --prefix $PREFIX --file /usr/base/foo.yml" ) >> null + result == PREFIX + + } + def 'should create a conda env with a text file' () { given: @@ -355,6 +440,25 @@ class CondaCacheTest extends Specification { } + def 'should create a conda env with a text file - using micromamba' () { + + given: + def ENV = 
'foo.txt' + def PREFIX = Paths.get('/conda/envs/my-env') + and: + def cache = Spy(new CondaCache(useMicromamba: true, createOptions: '--this --that')) + + when: + def result = cache.createLocalCondaEnv0(ENV, PREFIX) + then: + 1 * cache.isYamlFilePath(ENV) + 1 * cache.isTextFilePath(ENV) + 1 * cache.makeAbsolute(ENV) >> Paths.get('/usr/base').resolve(ENV) + 1 * cache.runCommand( "micromamba create --this --that --yes --quiet --prefix $PREFIX --file /usr/base/foo.txt" ) >> null + result == PREFIX + + } + def 'should get options from the config' () { when: diff --git a/modules/nextflow/src/test/groovy/nextflow/config/ConfigParserTest.groovy b/modules/nextflow/src/test/groovy/nextflow/config/ConfigParserTest.groovy index ebe6f02e47..a128f0fd36 100644 --- a/modules/nextflow/src/test/groovy/nextflow/config/ConfigParserTest.groovy +++ b/modules/nextflow/src/test/groovy/nextflow/config/ConfigParserTest.groovy @@ -16,8 +16,6 @@ package nextflow.config -import spock.lang.Ignore - import java.nio.file.Files import java.nio.file.NoSuchFileException import java.nio.file.Path @@ -26,11 +24,12 @@ import com.sun.net.httpserver.Headers import com.sun.net.httpserver.HttpExchange import com.sun.net.httpserver.HttpHandler import com.sun.net.httpserver.HttpServer +import nextflow.SysEnv import nextflow.exception.ConfigParseException -import spock.lang.Specification - import nextflow.util.Duration import nextflow.util.MemoryUnit +import spock.lang.Ignore +import spock.lang.Specification /** * @@ -38,6 +37,23 @@ import nextflow.util.MemoryUnit */ class ConfigParserTest extends Specification { + def 'should get an environment variable' () { + given: + SysEnv.push(MAX_CPUS: '1') + + when: + def CONFIG = ''' + process.cpus = env('MAX_CPUS') + ''' + def config = new ConfigParser().parse(CONFIG) + + then: + config.process.cpus == '1' + + cleanup: + SysEnv.pop() + } + def 'should parse plugins id' () { given: def CONFIG = ''' diff --git a/modules/nextflow/src/test/groovy/nextflow/config/ManifestTest.groovy b/modules/nextflow/src/test/groovy/nextflow/config/ManifestTest.groovy index d189b28f28..2860cb3c3e 100644 --- a/modules/nextflow/src/test/groovy/nextflow/config/ManifestTest.groovy +++ b/modules/nextflow/src/test/groovy/nextflow/config/ManifestTest.groovy @@ -16,7 +16,10 @@ package nextflow.config +import nextflow.exception.AbortOperationException import spock.lang.Specification + +import static nextflow.config.Manifest.ContributionType /** * * @author Paolo Di Tommaso @@ -26,38 +29,126 @@ class ManifestTest extends Specification { def 'should check manifest object' () { given: - def MAN = [author: 'pablo', nextflowVersion: '1.2.3', name: 'foo'] - + def MAP = [ + author: 'pablo', + contributors: [ + [ + name: 'Alice', + affiliation: 'University', + email: 'alice@university.edu', + contribution: ['author', 'maintainer'], + orcid: 'https://orcid.org/0000-0000-0000-0000' + ], + [ + name: 'Bob', + affiliation: 'Company', + email: 'bob@company.com', + contribution: ['contributor'], + ] + ], + nextflowVersion: '1.2.3', + name: 'foo', + organization: 'My Organization', + icon: 'icon.png', + docsUrl: 'https://docs.io', + license: 'Apache v2' + ] when: - def manifest = new Manifest(MAN) + def manifest = new Manifest(MAP) then: - manifest.with { - author == 'pablo' - nextflowVersion == '1.2.3' - name == 'foo' - } + manifest.author == 'pablo' + manifest.contributors.size() == 2 + manifest.contributors[0].name == 'Alice' + manifest.contributors[0].affiliation == 'University' + manifest.contributors[0].email == 
'alice@university.edu' + manifest.contributors[0].contribution == [ContributionType.AUTHOR, ContributionType.MAINTAINER] as Set + manifest.contributors[0].orcid == 'https://orcid.org/0000-0000-0000-0000' + manifest.contributors[1].name == 'Bob' + manifest.contributors[1].affiliation == 'Company' + manifest.contributors[1].email == 'bob@company.com' + manifest.contributors[1].contribution == [ContributionType.CONTRIBUTOR] as Set + manifest.nextflowVersion == '1.2.3' + manifest.name == 'foo' + manifest.organization == 'My Organization' + manifest.icon == 'icon.png' + manifest.docsUrl == 'https://docs.io' + manifest.license == 'Apache v2' } def 'should check empty manifest' () { - // check empty manifest when: def manifest = new Manifest(new ConfigObject()) then: - manifest.with { - homePage == null - defaultBranch == 'master' - description == null - author == null - mainScript == 'main.nf' - gitmodules == null - nextflowVersion == null - version == null - name == null - } + manifest.homePage == null + manifest.defaultBranch == null + manifest.description == null + manifest.author == null + manifest.contributors == [] + manifest.mainScript == 'main.nf' + manifest.gitmodules == null + manifest.nextflowVersion == null + manifest.version == null + manifest.name == null + manifest.docsUrl == null + manifest.organization == null + manifest.icon == null + manifest.license == null + + } + def 'should convert manifest to map' () { + + when: + def MAP = [ + name: 'Alice', + affiliation: 'University', + email: 'alice@university.edu', + contribution: ['author', 'maintainer'], + orcid: 'https://orcid.org/0000-0000-0000-0000' + ] + then: + new Manifest.Contributor(MAP).toMap() == [ + name: 'Alice', + affiliation: 'University', + email: 'alice@university.edu', + github: null, + contribution: ['author', 'maintainer'], + orcid: 'https://orcid.org/0000-0000-0000-0000' + ] } + def 'should throw error on invalid manifest' () { + when: + def manifest = new Manifest([ + contributors: [ 'Alice' ] + ]) + manifest.contributors + then: + thrown(AbortOperationException) + + when: + manifest = new Manifest([ + contributors: [[ + name: 'Alice', + contribution: 'author' + ]] + ]) + manifest.contributors + then: + thrown(AbortOperationException) + + when: + manifest = new Manifest([ + contributors: [[ + name: 'Alice', + contribution: [ 'owner' ] + ]] + ]) + manifest.contributors + then: + thrown(AbortOperationException) + } } diff --git a/modules/nextflow/src/test/groovy/nextflow/container/CharliecloudBuilderTest.groovy b/modules/nextflow/src/test/groovy/nextflow/container/CharliecloudBuilderTest.groovy index 896f7a71c0..ba8e1b77bb 100644 --- a/modules/nextflow/src/test/groovy/nextflow/container/CharliecloudBuilderTest.groovy +++ b/modules/nextflow/src/test/groovy/nextflow/container/CharliecloudBuilderTest.groovy @@ -36,102 +36,124 @@ class CharliecloudBuilderTest extends Specification { def path2 = Paths.get('/bar/data/file2') expect: - new CharliecloudBuilder('/cacheDir/img/busybox') + new CharliecloudBuilder('/cacheDir/busybox') .build() - .runCommand == 'ch-convert -i ch-image --storage /cacheDir busybox "$NXF_TASK_WORKDIR"/container_busybox && ch-run --unset-env="*" -c "$NXF_TASK_WORKDIR" --set-env -w -b "$NXF_TASK_WORKDIR" "$NXF_TASK_WORKDIR"/container_busybox --' + .runCommand == 'ch-run --unset-env="*" -c "$NXF_TASK_WORKDIR" --set-env --write-fake -b "$NXF_TASK_WORKDIR" /cacheDir/busybox --' - new CharliecloudBuilder('/cacheDir/img/busybox') + new CharliecloudBuilder('/cacheDir/busybox') + .params(writeFake: 
false) + .build() + .runCommand == 'ch-run --unset-env="*" -c "$NXF_TASK_WORKDIR" --set-env -w -b "$NXF_TASK_WORKDIR" /cacheDir/busybox --' + + new CharliecloudBuilder('/cacheDir/busybox') + .params(writeFake: false) + .params(readOnlyInputs: true) + .build() + .runCommand == 'ch-run --unset-env="*" -c "$NXF_TASK_WORKDIR" --set-env -b "$NXF_TASK_WORKDIR" /cacheDir/busybox --' + + new CharliecloudBuilder('/cacheDir/busybox') .params(runOptions: '-j --no-home') .build() - .runCommand == 'ch-convert -i ch-image --storage /cacheDir busybox "$NXF_TASK_WORKDIR"/container_busybox && ch-run --unset-env="*" -c "$NXF_TASK_WORKDIR" --set-env -w -b "$NXF_TASK_WORKDIR" -j --no-home "$NXF_TASK_WORKDIR"/container_busybox --' + .runCommand == 'ch-run --unset-env="*" -c "$NXF_TASK_WORKDIR" --set-env --write-fake -b "$NXF_TASK_WORKDIR" -j --no-home /cacheDir/busybox --' - new CharliecloudBuilder('/cacheDir/img/busybox') + new CharliecloudBuilder('/cacheDir/busybox') .params(temp: '/foo') .build() - .runCommand == 'ch-convert -i ch-image --storage /cacheDir busybox "$NXF_TASK_WORKDIR"/container_busybox && ch-run --unset-env="*" -c "$NXF_TASK_WORKDIR" --set-env -w -b /foo:/tmp -b "$NXF_TASK_WORKDIR" "$NXF_TASK_WORKDIR"/container_busybox --' + .runCommand == 'ch-run --unset-env="*" -c "$NXF_TASK_WORKDIR" --set-env --write-fake -b /foo:/tmp -b "$NXF_TASK_WORKDIR" /cacheDir/busybox --' - new CharliecloudBuilder('/cacheDir/img/busybox') + new CharliecloudBuilder('/cacheDir/busybox') .addEnv('X=1') .addEnv(ALPHA:'aaa', BETA: 'bbb') .build() - .runCommand == 'ch-convert -i ch-image --storage /cacheDir busybox "$NXF_TASK_WORKDIR"/container_busybox && ch-run --unset-env="*" -c "$NXF_TASK_WORKDIR" --set-env -w --set-env=X=1 --set-env=ALPHA=aaa --set-env=BETA=bbb -b "$NXF_TASK_WORKDIR" "$NXF_TASK_WORKDIR"/container_busybox --' + .runCommand == 'ch-run --unset-env="*" -c "$NXF_TASK_WORKDIR" --set-env --write-fake --set-env=X=1 --set-env=ALPHA=aaa --set-env=BETA=bbb -b "$NXF_TASK_WORKDIR" /cacheDir/busybox --' - new CharliecloudBuilder('/cacheDir/img/ubuntu') + new CharliecloudBuilder('/cacheDir/ubuntu') .addMount(path1) .build() - .runCommand == 'ch-convert -i ch-image --storage /cacheDir ubuntu "$NXF_TASK_WORKDIR"/container_ubuntu && ch-run --unset-env="*" -c "$NXF_TASK_WORKDIR" --set-env -w -b /foo/data/file1 -b "$NXF_TASK_WORKDIR" "$NXF_TASK_WORKDIR"/container_ubuntu --' + .runCommand == 'ch-run --unset-env="*" -c "$NXF_TASK_WORKDIR" --set-env --write-fake -b /foo/data/file1 -b "$NXF_TASK_WORKDIR" /cacheDir/ubuntu --' - new CharliecloudBuilder('/cacheDir/img/ubuntu') + new CharliecloudBuilder('/cacheDir/ubuntu') .addMount(path1) .addMount(path2) .build() - .runCommand == 'ch-convert -i ch-image --storage /cacheDir ubuntu "$NXF_TASK_WORKDIR"/container_ubuntu && ch-run --unset-env="*" -c "$NXF_TASK_WORKDIR" --set-env -w -b /foo/data/file1 -b /bar/data/file2 -b "$NXF_TASK_WORKDIR" "$NXF_TASK_WORKDIR"/container_ubuntu --' + .runCommand == 'ch-run --unset-env="*" -c "$NXF_TASK_WORKDIR" --set-env --write-fake -b /foo/data/file1 -b /bar/data/file2 -b "$NXF_TASK_WORKDIR" /cacheDir/ubuntu --' } def db_file = Paths.get('/home/db') def 'should get run command' () { when: - def cmd = new CharliecloudBuilder('/cacheDir/img/ubuntu') + def cmd = new CharliecloudBuilder('/cacheDir/ubuntu') .build() .getRunCommand() then: - cmd == 'ch-convert -i ch-image --storage /cacheDir ubuntu "$NXF_TASK_WORKDIR"/container_ubuntu && ch-run --unset-env="*" -c "$NXF_TASK_WORKDIR" --set-env -w -b "$NXF_TASK_WORKDIR" 
"$NXF_TASK_WORKDIR"/container_ubuntu --' + cmd == 'ch-run --unset-env="*" -c "$NXF_TASK_WORKDIR" --set-env --write-fake -b "$NXF_TASK_WORKDIR" /cacheDir/ubuntu --' when: - cmd = new CharliecloudBuilder('/cacheDir/img/ubuntu') - .params(useSquash: 'true') + cmd = new CharliecloudBuilder('/cacheDir/ubuntu') + .params(writeFake: 'true') .build() .getRunCommand() then: - cmd == 'ch-convert -i ch-image --storage /cacheDir ubuntu "$NXF_TASK_WORKDIR"/container_ubuntu.squashfs && ch-run --unset-env="*" -c "$NXF_TASK_WORKDIR" --set-env -w -b "$NXF_TASK_WORKDIR" "$NXF_TASK_WORKDIR"/container_ubuntu.squashfs --' + cmd == 'ch-run --unset-env="*" -c "$NXF_TASK_WORKDIR" --set-env --write-fake -b "$NXF_TASK_WORKDIR" /cacheDir/ubuntu --' when: - cmd = new CharliecloudBuilder('/cacheDir/img/ubuntu') - .params(writeFake: 'true') + cmd = new CharliecloudBuilder('/cacheDir/ubuntu') + .params(writeFake: 'false') .build() .getRunCommand() then: - cmd == 'ch-run --unset-env="*" -c "$NXF_TASK_WORKDIR" --set-env --write-fake -w -b "$NXF_TASK_WORKDIR" ubuntu --' + cmd == 'ch-run --unset-env="*" -c "$NXF_TASK_WORKDIR" --set-env -w -b "$NXF_TASK_WORKDIR" /cacheDir/ubuntu --' when: - cmd = new CharliecloudBuilder('/cacheDir/img/ubuntu') + cmd = new CharliecloudBuilder('/cacheDir/ubuntu') .params(entry:'/bin/sh') .build() .getRunCommand('bwa --this --that file.fastq') then: - cmd == 'ch-convert -i ch-image --storage /cacheDir ubuntu "$NXF_TASK_WORKDIR"/container_ubuntu && ch-run --unset-env="*" -c "$NXF_TASK_WORKDIR" --set-env -w -b "$NXF_TASK_WORKDIR" "$NXF_TASK_WORKDIR"/container_ubuntu -- /bin/sh -c "bwa --this --that file.fastq"' + cmd == 'ch-run --unset-env="*" -c "$NXF_TASK_WORKDIR" --set-env --write-fake -b "$NXF_TASK_WORKDIR" /cacheDir/ubuntu -- /bin/sh -c "bwa --this --that file.fastq"' when: - cmd = new CharliecloudBuilder('/cacheDir/img/ubuntu') + cmd = new CharliecloudBuilder('/cacheDir/ubuntu') .params(entry:'/bin/sh') .params(readOnlyInputs: 'true') .build() .getRunCommand('bwa --this --that file.fastq') then: - cmd == 'ch-convert -i ch-image --storage /cacheDir ubuntu "$NXF_TASK_WORKDIR"/container_ubuntu && ch-run --unset-env="*" -c "$NXF_TASK_WORKDIR" --set-env -b "$NXF_TASK_WORKDIR" "$NXF_TASK_WORKDIR"/container_ubuntu -- /bin/sh -c "bwa --this --that file.fastq"' + cmd == 'ch-run --unset-env="*" -c "$NXF_TASK_WORKDIR" --set-env --write-fake -b "$NXF_TASK_WORKDIR" /cacheDir/ubuntu -- /bin/sh -c "bwa --this --that file.fastq"' when: - cmd = new CharliecloudBuilder('/cacheDir/img/ubuntu') + cmd = new CharliecloudBuilder('/cacheDir/ubuntu') .params(entry:'/bin/sh') .params(readOnlyInputs: 'false') .build() .getRunCommand('bwa --this --that file.fastq') then: - cmd == 'ch-convert -i ch-image --storage /cacheDir ubuntu "$NXF_TASK_WORKDIR"/container_ubuntu && ch-run --unset-env="*" -c "$NXF_TASK_WORKDIR" --set-env -w -b "$NXF_TASK_WORKDIR" "$NXF_TASK_WORKDIR"/container_ubuntu -- /bin/sh -c "bwa --this --that file.fastq"' + cmd == 'ch-run --unset-env="*" -c "$NXF_TASK_WORKDIR" --set-env --write-fake -b "$NXF_TASK_WORKDIR" /cacheDir/ubuntu -- /bin/sh -c "bwa --this --that file.fastq"' when: - cmd = new CharliecloudBuilder('/cacheDir/img/ubuntu') + cmd = new CharliecloudBuilder('/cacheDir/ubuntu') .params(entry:'/bin/sh') + .params(readOnlyInputs: 'false') + .params(writeFake: 'false') + .build() + .getRunCommand('bwa --this --that file.fastq') + then: + cmd == 'ch-run --unset-env="*" -c "$NXF_TASK_WORKDIR" --set-env -w -b "$NXF_TASK_WORKDIR" /cacheDir/ubuntu -- /bin/sh -c "bwa --this --that file.fastq"' + + 
when: + cmd = new CharliecloudBuilder('/cacheDir/ubuntu') + .params(entry:'/bin/sh') + .params(readOnlyInputs: 'true') + .params(writeFake: 'false') .addMount(db_file) .addMount(db_file) - .params(readOnlyInputs: 'true') .build().getRunCommand('bwa --this --that file.fastq') then: - cmd == 'ch-convert -i ch-image --storage /cacheDir ubuntu "$NXF_TASK_WORKDIR"/container_ubuntu && ch-run --unset-env="*" -c "$NXF_TASK_WORKDIR" --set-env -b /home -b "$NXF_TASK_WORKDIR" "$NXF_TASK_WORKDIR"/container_ubuntu -- /bin/sh -c "bwa --this --that file.fastq"' + cmd == 'ch-run --unset-env="*" -c "$NXF_TASK_WORKDIR" --set-env -b /home -b "$NXF_TASK_WORKDIR" /cacheDir/ubuntu -- /bin/sh -c "bwa --this --that file.fastq"' when: - cmd = new CharliecloudBuilder('/cacheDir/img/ubuntu') + cmd = new CharliecloudBuilder('/cacheDir/ubuntu') .params(entry:'/bin/sh') .addMount(db_file) .addMount(db_file) @@ -139,7 +161,7 @@ class CharliecloudBuilderTest extends Specification { .build() .getRunCommand('bwa --this --that file.fastq') then: - cmd == 'ch-convert -i ch-image --storage /cacheDir ubuntu "$NXF_TASK_WORKDIR"/container_ubuntu && ch-run --unset-env="*" -c "$NXF_TASK_WORKDIR" --set-env -w -b /home/db -b "$NXF_TASK_WORKDIR" "$NXF_TASK_WORKDIR"/container_ubuntu -- /bin/sh -c "bwa --this --that file.fastq"' + cmd == 'ch-run --unset-env="*" -c "$NXF_TASK_WORKDIR" --set-env --write-fake -b /home/db -b "$NXF_TASK_WORKDIR" /cacheDir/ubuntu -- /bin/sh -c "bwa --this --that file.fastq"' } @Unroll diff --git a/modules/nextflow/src/test/groovy/nextflow/container/CharliecloudCacheTest.groovy b/modules/nextflow/src/test/groovy/nextflow/container/CharliecloudCacheTest.groovy index 5207af9a1a..2098526d82 100644 --- a/modules/nextflow/src/test/groovy/nextflow/container/CharliecloudCacheTest.groovy +++ b/modules/nextflow/src/test/groovy/nextflow/container/CharliecloudCacheTest.groovy @@ -47,7 +47,7 @@ class CharliecloudCacheTest extends Specification { 'foo:bar' | 'foo+bar' } - def 'should return the cache dir from the config file' () { + def 'should return the cache dir from the config file'() { given: def dir = Files.createTempDirectory('test') @@ -63,7 +63,7 @@ class CharliecloudCacheTest extends Specification { dir.deleteDir() } - def 'should return the cache dir from the environment' () { + def 'should return the cache dir from the environment'() { given: def dir = Files.createTempDirectory('test') @@ -79,6 +79,84 @@ class CharliecloudCacheTest extends Specification { dir.deleteDir() } + def 'should use CH_IMAGE_STORAGE over cacheDir'() { + + given: + def dir = Files.createTempDirectory('test') + and: + def cacheDir = dir.resolve('nxf.ch') + and: + def charliecloudCacheDir = dir.resolve('charliecloud') + + when: + def cache = new CharliecloudCache([cacheDir: "$cacheDir"] as ContainerConfig, [CH_IMAGE_STORAGE: "$charliecloudCacheDir"]) + + then: + cache.getCacheDir() == charliecloudCacheDir + + cleanup: + dir.deleteDir() + } + + def 'should use CH_IMAGE_STORAGE over NXF_CHARLIECLOUD_CACHEDIR'() { + + given: + def dir = Files.createTempDirectory('test') + and: + def cacheDir = dir.resolve('nxf.ch') + and: + def charliecloudCacheDir = dir.resolve('charliecloud') + + when: + def cache = new CharliecloudCache(GroovyMock(ContainerConfig), [NXF_CHARLIECLOUD_CACHEDIR: "$cacheDir", CH_IMAGE_STORAGE: "$charliecloudCacheDir"]) + + then: + cache.getCacheDir() == charliecloudCacheDir + + cleanup: + dir.deleteDir() + } + + def 'should throw exception: cacheDir and CH_IMAGE_STORAGE are the same'() { + + given: + def dir = 
Files.createTempDirectory('test') + and: + def cacheDir = dir.resolve('nxf.ch') + + when: + def cache = new CharliecloudCache([cacheDir: "$cacheDir", writeFake: 'false'] as ContainerConfig, [CH_IMAGE_STORAGE: "$cacheDir"]) + and: + cache.getCacheDir() + + then: + def e = thrown(Exception) + e.message == "`charliecloud.cacheDir` configuration parameter must be different from env variable `CH_IMAGE_STORAGE`" + + cleanup: + dir.deleteDir() + } + + def 'should throw exception: NXF_CHARLIECLOUD_CACHEDIR and CH_IMAGE_STORAGE are the same'() { + + given: + def dir = Files.createTempDirectory('test') + and: + def cacheDir = dir.resolve('nxf.ch') + + when: + def cache = new CharliecloudCache([writeFake: 'false'] as ContainerConfig, [ NXF_CHARLIECLOUD_CACHEDIR: "$cacheDir", CH_IMAGE_STORAGE: "$cacheDir" ]) + and: + cache.getCacheDir() + + then: + def e = thrown(Exception) + e.message == "`NXF_CHARLIECLOUD_CACHEDIR` env variable must be different from env variable `CH_IMAGE_STORAGE`" + + cleanup: + dir.deleteDir() + } + def 'should run ch-image pull command'() { given: @@ -131,7 +209,7 @@ class CharliecloudCacheTest extends Specification { @Ignore @Timeout(1) - def 'should pull a charliecloud image' () { + def 'should pull a charliecloud image'() { given: def IMAGE = 'busybox:latest' diff --git a/modules/nextflow/src/test/groovy/nextflow/container/inspect/ContainersInspectorTest.groovy b/modules/nextflow/src/test/groovy/nextflow/container/inspect/ContainersInspectorTest.groovy index 6cba96dd92..2da436e431 100644 --- a/modules/nextflow/src/test/groovy/nextflow/container/inspect/ContainersInspectorTest.groovy +++ b/modules/nextflow/src/test/groovy/nextflow/container/inspect/ContainersInspectorTest.groovy @@ -50,7 +50,7 @@ class ContainersInspectorTest extends Specification { ] when: - def observer = new ContainersInspector(dag) + def observer = new ContainersInspector(dag, false) then: observer.getContainers() == [ 'proc1': 'container1', @@ -67,20 +67,20 @@ class ContainersInspectorTest extends Specification { ] when: - def result = new ContainersInspector(dag) + def result = new ContainersInspector(dag, false) .withFormat('json') .renderContainers() then: result == '''\ { "processes": [ - { - "name": "proc2", - "container": "container2" - }, { "name": "proc1", "container": "container1" + }, + { + "name": "proc2", + "container": "container2" } ] } @@ -96,13 +96,13 @@ class ContainersInspectorTest extends Specification { ] when: - def result = new ContainersInspector(dag) + def result = new ContainersInspector(dag,false) .withFormat('config') .renderContainers() then: result == '''\ - process { withName: 'proc2' { container = 'container2' } } process { withName: 'proc1' { container = 'container1' } } + process { withName: 'proc2' { container = 'container2' } } '''.stripIndent(true) } diff --git a/modules/nextflow/src/test/groovy/nextflow/datasource/SraExplorerTest.groovy b/modules/nextflow/src/test/groovy/nextflow/datasource/SraExplorerTest.groovy index d251873a3a..acad4be7cb 100644 --- a/modules/nextflow/src/test/groovy/nextflow/datasource/SraExplorerTest.groovy +++ b/modules/nextflow/src/test/groovy/nextflow/datasource/SraExplorerTest.groovy @@ -16,6 +16,8 @@ package nextflow.datasource +import dev.failsafe.FailsafeException + import java.nio.file.Files import java.nio.file.Path @@ -242,4 +244,35 @@ class SraExplorerTest extends Specification { result == '1bc' } + def 'should detect retry errors' () { + given: + def ex = new IOException("Server returned HTTP response code: " + ERROR +" for URL: 
https://dummy.url") + + expect: + SraExplorer.containsErrorCodes(ex.getLocalizedMessage(), SraExplorer.RETRY_CODES) == EXPECTED + + where: + ERROR | EXPECTED + '404' | false + '429' | true + + } + def 'should retry on errors' () { + given: + def ex = new IOException("Server returned HTTP response code: 429 for URL: https://dummy.url") + def slurper = new SraExplorer(null, [retryPolicy: [maxAttempts: 2]]) + def retries = 0 + + when: + slurper.runWithRetry{ + retries ++ + throw ex + } + + then: + def e = thrown(FailsafeException) + e.cause.message == ex.message + retries == 2 + } + } diff --git a/modules/nextflow/src/test/groovy/nextflow/datasource/SraRetryConfigTest.groovy b/modules/nextflow/src/test/groovy/nextflow/datasource/SraRetryConfigTest.groovy new file mode 100644 index 0000000000..61fc9c1339 --- /dev/null +++ b/modules/nextflow/src/test/groovy/nextflow/datasource/SraRetryConfigTest.groovy @@ -0,0 +1,44 @@ +/* + * Copyright 2013-2024, Seqera Labs + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package nextflow.datasource + +import nextflow.util.Duration +import spock.lang.Specification + +/** + * + * @author Jorge Ejarque + */ +class SraRetryConfigTest extends Specification { + + def 'should create retry config'() { + + expect: + new SraRetryConfig().delay == Duration.of('500ms') + new SraRetryConfig().maxDelay == Duration.of('30s') + new SraRetryConfig().maxAttempts == 3 + new SraRetryConfig().jitter == 0.25d + + and: + new SraRetryConfig([maxAttempts: 20]).maxAttempts == 20 + new SraRetryConfig([delay: '1s']).delay == Duration.of('1s') + new SraRetryConfig([maxDelay: '1m']).maxDelay == Duration.of('1m') + new SraRetryConfig([jitter: '0.5']).jitter == 0.5d + + } +} diff --git a/modules/nextflow/src/test/groovy/nextflow/executor/AbstractGridExecutorTest.groovy b/modules/nextflow/src/test/groovy/nextflow/executor/AbstractGridExecutorTest.groovy index 244f26c359..8269e89077 100644 --- a/modules/nextflow/src/test/groovy/nextflow/executor/AbstractGridExecutorTest.groovy +++ b/modules/nextflow/src/test/groovy/nextflow/executor/AbstractGridExecutorTest.groovy @@ -32,11 +32,11 @@ class AbstractGridExecutorTest extends Specification { def 'should remove invalid chars from name' () { given: - def task = new TaskRun(name: 'task 90 (foo:bar/baz)') + def task = new TaskRun(name: 'task 90 = (foo:bar/baz)') def exec = [:] as AbstractGridExecutor expect: - exec.getJobNameFor(task) == 'nf-task_90_(foo_bar_baz)' + exec.getJobNameFor(task) == 'nf-task_90___(foo_bar_baz)' } diff --git a/modules/nextflow/src/test/groovy/nextflow/executor/BashWrapperBuilderTest.groovy b/modules/nextflow/src/test/groovy/nextflow/executor/BashWrapperBuilderTest.groovy index e301a376c8..3b93c4c360 100644 --- a/modules/nextflow/src/test/groovy/nextflow/executor/BashWrapperBuilderTest.groovy +++ b/modules/nextflow/src/test/groovy/nextflow/executor/BashWrapperBuilderTest.groovy @@ -559,7 +559,7 @@ class BashWrapperBuilderTest extends Specification { binding.unstage_outputs == '''\ IFS=$'\\n' for name in 
$(eval "ls -1d test.bam test.bai" | sort | uniq); do - nxf_fs_copy "$name" /work/dir || true + nxf_fs_copy "$name" /work/dir done unset IFS '''.stripIndent().rightTrim() @@ -576,7 +576,7 @@ class BashWrapperBuilderTest extends Specification { binding.unstage_outputs == '''\ IFS=$'\\n' for name in $(eval "ls -1d test.bam test.bai" | sort | uniq); do - nxf_fs_move "$name" /another/dir || true + nxf_fs_move "$name" /another/dir done unset IFS '''.stripIndent().rightTrim() @@ -781,7 +781,24 @@ class BashWrapperBuilderTest extends Specification { # conda environment source $(conda info --json | awk '/conda_prefix/ { gsub(/"|,/, "", $2); print $2 }')/bin/activate /some/conda/env/foo '''.stripIndent() + } + + def 'should create micromamba activate snippet' () { + when: + def binding = newBashWrapperBuilder().makeBinding() + then: + binding.conda_activate == null + binding.containsKey('conda_activate') + + when: + def CONDA = Paths.get('/some/conda/env/foo') + binding = newBashWrapperBuilder([condaEnv: CONDA, 'useMicromamba': true]).makeBinding() + then: + binding.conda_activate == '''\ + # conda environment + eval "$(micromamba shell hook --shell bash)" && micromamba activate /some/conda/env/foo + '''.stripIndent() } def 'should create spack activate snippet' () { @@ -1320,6 +1337,46 @@ class BashWrapperBuilderTest extends Specification { } + def 'should get unstage control script'(){ + given: + BashWrapperBuilder builder + when: + builder = newBashWrapperBuilder() + then: + builder.getUnstageControls() == '''\ + cp .command.out /work/dir/.command.out || true + cp .command.err /work/dir/.command.err || true + '''.stripIndent() + + + when: + builder = newBashWrapperBuilder(statsEnabled: true) + then: + builder.getUnstageControls() == '''\ + cp .command.out /work/dir/.command.out || true + cp .command.err /work/dir/.command.err || true + cp .command.trace /work/dir/.command.trace || true + '''.stripIndent() + + when: + builder = newBashWrapperBuilder(outputEnvNames: ['some-data']) + then: + builder.getUnstageControls() == '''\ + cp .command.out /work/dir/.command.out || true + cp .command.err /work/dir/.command.err || true + cp .command.env /work/dir/.command.env || true + '''.stripIndent() + + when: + builder = newBashWrapperBuilder(outputEvals: [some:'data']) + then: + builder.getUnstageControls() == '''\ + cp .command.out /work/dir/.command.out || true + cp .command.err /work/dir/.command.err || true + cp .command.env /work/dir/.command.env || true + '''.stripIndent() + } + def 'should create wrapper with podman' () { when: diff --git a/modules/nextflow/src/test/groovy/nextflow/executor/FluxExecutorTest.groovy b/modules/nextflow/src/test/groovy/nextflow/executor/FluxExecutorTest.groovy index 3cc3a73396..95a90e65bc 100644 --- a/modules/nextflow/src/test/groovy/nextflow/executor/FluxExecutorTest.groovy +++ b/modules/nextflow/src/test/groovy/nextflow/executor/FluxExecutorTest.groovy @@ -72,27 +72,27 @@ class FluxExecutorTest extends Specification { then: // Flux doesn't have script headers executor.getHeaders(task) == '' - executor.getSubmitCommandLine(task, Paths.get('/some/path/job.sh')) == ['flux', 'mini', 'submit', '--setattr=cwd=/work/path', '--job-name="nf-my_task"', '--output=/work/path/.command.log', '--queue=delta', '/bin/bash', 'job.sh'] + executor.getSubmitCommandLine(task, Paths.get('/some/path/job.sh')) == ['flux', 'submit', '--setattr=cwd=/work/path', '--job-name="nf-my_task"', '--output=/work/path/.command.log', '--queue=delta', '/bin/bash', 'job.sh'] when: task.config = new 
TaskConfig() task.config.time = '1m' then: - executor.getSubmitCommandLine(task, Paths.get('/some/path/job.sh')) == ['flux', 'mini', 'submit', '--setattr=cwd=/work/path', '--job-name="nf-my_task"', '--output=/work/path/.command.log', '--time-limit=01', '/bin/bash', 'job.sh'] + executor.getSubmitCommandLine(task, Paths.get('/some/path/job.sh')) == ['flux', 'submit', '--setattr=cwd=/work/path', '--job-name="nf-my_task"', '--output=/work/path/.command.log', '--time-limit=01', '/bin/bash', 'job.sh'] when: task.config = new TaskConfig() task.config.time = '1h' task.config.clusterOptions = '--tasks-per-node=4' then: - executor.getSubmitCommandLine(task, Paths.get('/some/path/job.sh')) == ['flux', 'mini', 'submit', '--setattr=cwd=/work/path', '--job-name="nf-my_task"', '--output=/work/path/.command.log', '--time-limit=60', '--tasks-per-node=4', '/bin/bash', 'job.sh'] + executor.getSubmitCommandLine(task, Paths.get('/some/path/job.sh')) == ['flux', 'submit', '--setattr=cwd=/work/path', '--job-name="nf-my_task"', '--output=/work/path/.command.log', '--time-limit=60', '--tasks-per-node=4', '/bin/bash', 'job.sh'] when: task.config = new TaskConfig() task.config.time = '1h' task.config.clusterOptions = '--tasks-per-node=4 --cpus-per-node=4' then: - executor.getSubmitCommandLine(task, Paths.get('/some/path/job.sh')) == ['flux', 'mini', 'submit', '--setattr=cwd=/work/path', '--job-name="nf-my_task"', '--output=/work/path/.command.log', '--time-limit=60', '--tasks-per-node=4', '--cpus-per-node=4', '/bin/bash', 'job.sh'] + executor.getSubmitCommandLine(task, Paths.get('/some/path/job.sh')) == ['flux', 'submit', '--setattr=cwd=/work/path', '--job-name="nf-my_task"', '--output=/work/path/.command.log', '--time-limit=60', '--tasks-per-node=4', '--cpus-per-node=4', '/bin/bash', 'job.sh'] } @@ -111,9 +111,9 @@ class FluxExecutorTest extends Specification { task.name = 'my task' task.workDir = Paths.get('/work/path') task.config = new TaskConfig() - + expect: - executor.getSubmitCommandLine(task, Paths.get('/some/path/job.sh')) == ['flux', 'mini', 'submit', '--setattr=cwd=/work/path', '--job-name="nf-my_task"', '/bin/bash', 'job.sh'] + executor.getSubmitCommandLine(task, Paths.get('/some/path/job.sh')) == ['flux', 'submit', '--setattr=cwd=/work/path', '--job-name="nf-my_task"', '/bin/bash', 'job.sh'] } @@ -135,7 +135,7 @@ class FluxExecutorTest extends Specification { task.index = 21 task.config = new TaskConfig() then: - executor.getSubmitCommandLine(task, Paths.get('/some/path/job.sh')) == ['flux', 'mini', 'submit', '--setattr=cwd="/work/some\\ data/path"', '--job-name="nf-my_task"', '--output="/work/some\\ data/path/.command.log"', '/bin/bash', 'job.sh'] + executor.getSubmitCommandLine(task, Paths.get('/some/path/job.sh')) == ['flux', 'submit', '--setattr=cwd="/work/some\\ data/path"', '--job-name="nf-my_task"', '--output="/work/some\\ data/path/.command.log"', '/bin/bash', 'job.sh'] } @@ -160,8 +160,8 @@ class FluxExecutorTest extends Specification { result['ƒ6upwy2MY3'] == AbstractGridExecutor.QueueStatus.RUNNING result['ƒ6upcbFjvf'] == AbstractGridExecutor.QueueStatus.HOLD result['ƒ6uon2RGVV'] == AbstractGridExecutor.QueueStatus.PENDING - result['ƒ6upwy2MY4'] == AbstractGridExecutor.QueueStatus.DONE - result['ƒ6upcbFjvh'] == AbstractGridExecutor.QueueStatus.DONE + result['ƒ6upwy2MY4'] == AbstractGridExecutor.QueueStatus.DONE + result['ƒ6upcbFjvh'] == AbstractGridExecutor.QueueStatus.DONE } def testQueueStatusCommand() { diff --git a/modules/nextflow/src/test/groovy/nextflow/executor/PbsExecutorTest.groovy 
b/modules/nextflow/src/test/groovy/nextflow/executor/PbsExecutorTest.groovy index b279c7eee2..174a7b3b12 100644 --- a/modules/nextflow/src/test/groovy/nextflow/executor/PbsExecutorTest.groovy +++ b/modules/nextflow/src/test/groovy/nextflow/executor/PbsExecutorTest.groovy @@ -172,7 +172,7 @@ class PbsExecutorTest extends Specification { } then: executor.getHeaders(taskArray) == ''' - #PBS -J 0-4 + #PBS -t 0-4 #PBS -N nf-task_name #PBS -o /dev/null #PBS -j oe @@ -321,7 +321,7 @@ class PbsExecutorTest extends Specification { given: def executor = Spy(PbsExecutor) expect: - executor.getArrayIndexName() == 'PBS_ARRAY_INDEX' + executor.getArrayIndexName() == 'PBS_ARRAYID' executor.getArrayIndexStart() == 0 } diff --git a/modules/nextflow/src/test/groovy/nextflow/executor/SimpleFileCopyStrategyTest.groovy b/modules/nextflow/src/test/groovy/nextflow/executor/SimpleFileCopyStrategyTest.groovy index 29cbb35697..6361e4f394 100644 --- a/modules/nextflow/src/test/groovy/nextflow/executor/SimpleFileCopyStrategyTest.groovy +++ b/modules/nextflow/src/test/groovy/nextflow/executor/SimpleFileCopyStrategyTest.groovy @@ -270,7 +270,7 @@ class SimpleFileCopyStrategyTest extends Specification { script == ''' IFS=$'\\n' for name in $(eval "ls -1d simple.txt my/path/file.bam" | sort | uniq); do - nxf_fs_copy "$name" /target/work\\ dir || true + nxf_fs_copy "$name" /target/work\\ dir done unset IFS ''' @@ -293,7 +293,7 @@ class SimpleFileCopyStrategyTest extends Specification { script == ''' IFS=$'\\n' for name in $(eval "ls -1d simple.txt my/path/file.bam" | sort | uniq); do - nxf_fs_move "$name" /target/store || true + nxf_fs_move "$name" /target/store done unset IFS ''' @@ -315,7 +315,7 @@ class SimpleFileCopyStrategyTest extends Specification { script == ''' IFS=$'\\n' for name in $(eval "ls -1d simple.txt my/path/file.bam" | sort | uniq); do - nxf_fs_rsync "$name" /target/work\\'s || true + nxf_fs_rsync "$name" /target/work\\'s done unset IFS ''' diff --git a/modules/nextflow/src/test/groovy/nextflow/extension/JoinOpTest.groovy b/modules/nextflow/src/test/groovy/nextflow/extension/JoinOpTest.groovy index 9893d9a797..673686fdf9 100644 --- a/modules/nextflow/src/test/groovy/nextflow/extension/JoinOpTest.groovy +++ b/modules/nextflow/src/test/groovy/nextflow/extension/JoinOpTest.groovy @@ -21,7 +21,9 @@ import nextflow.Channel import nextflow.Global import nextflow.Session import nextflow.exception.AbortOperationException +import nextflow.util.ArrayBag import spock.lang.Specification + /** * * @author Paolo Di Tommaso @@ -207,6 +209,35 @@ class JoinOpTest extends Specification { } + def 'should be able to use identical ArrayBags join key' () { + given: + def key1 = new ArrayBag(["key"]) + def key2 = new ArrayBag(["key"]) + def ch1 = Channel.of([key1, "foo"]) + def ch2 = Channel.of([key2, "bar"]) + + when: + def op = new JoinOp(ch1 as DataflowReadChannel, ch2 as DataflowReadChannel) + List result = op.apply().toList().getVal() + + then: + !result.isEmpty() + } + + def 'should differentiate nonidentical ArrayBags join key' () { + given: + def key1 = new ArrayBag(["key", "key", "quay"]) + def key2 = new ArrayBag(["quay", "quay", "key"]) + def ch1 = Channel.of([key1, "foo"]) + def ch2 = Channel.of([key2, "bar"]) + + when: + def op = new JoinOp(ch1 as DataflowReadChannel, ch2 as DataflowReadChannel) + List result = op.apply().toList().getVal() + + then: + result.isEmpty() + } def 'should not fail on mismatches' () { given: diff --git a/modules/nextflow/src/test/groovy/nextflow/processor/LocalPollingMonitorTest.groovy 
b/modules/nextflow/src/test/groovy/nextflow/processor/LocalPollingMonitorTest.groovy index 54110fd475..e6f6150ce5 100644 --- a/modules/nextflow/src/test/groovy/nextflow/processor/LocalPollingMonitorTest.groovy +++ b/modules/nextflow/src/test/groovy/nextflow/processor/LocalPollingMonitorTest.groovy @@ -96,6 +96,7 @@ class LocalPollingMonitorTest extends Specification { def handler = Mock(TaskHandler) handler.getTask() >> { task } handler.canForkProcess() >> true + handler.isReady() >> true expect: monitor.canSubmit(handler) == true @@ -141,6 +142,7 @@ class LocalPollingMonitorTest extends Specification { def handler = Mock(TaskHandler) handler.getTask() >> { task } handler.canForkProcess() >> true + handler.isReady() >> true expect: monitor.canSubmit(handler) == true diff --git a/modules/nextflow/src/test/groovy/nextflow/processor/ParallelPollingMonitorTest.groovy b/modules/nextflow/src/test/groovy/nextflow/processor/ParallelPollingMonitorTest.groovy index 5fc43aee69..76701ec5e6 100644 --- a/modules/nextflow/src/test/groovy/nextflow/processor/ParallelPollingMonitorTest.groovy +++ b/modules/nextflow/src/test/groovy/nextflow/processor/ParallelPollingMonitorTest.groovy @@ -104,6 +104,7 @@ class ParallelPollingMonitorTest extends Specification { def result = mon.canSubmit(handler) then: handler.canForkProcess() >> FORK + handler.isReady() >> true and: result == EXPECTED diff --git a/modules/nextflow/src/test/groovy/nextflow/processor/TaskBeanTest.groovy b/modules/nextflow/src/test/groovy/nextflow/processor/TaskBeanTest.groovy index c576bef6bb..4cfcb2a1fd 100644 --- a/modules/nextflow/src/test/groovy/nextflow/processor/TaskBeanTest.groovy +++ b/modules/nextflow/src/test/groovy/nextflow/processor/TaskBeanTest.groovy @@ -19,6 +19,7 @@ package nextflow.processor import java.nio.file.Paths import nextflow.Session +import nextflow.conda.CondaConfig import nextflow.container.ContainerConfig import nextflow.executor.Executor import nextflow.script.ProcessConfig @@ -69,6 +70,7 @@ class TaskBeanTest extends Specification { task.getEnvironment() >> [alpha: 'one', beta: 'xxx', gamma: 'yyy'] task.getContainer() >> 'busybox:latest' task.getContainerConfig() >> [docker: true, registry: 'x'] + task.getCondaConfig() >> new CondaConfig([useMicromamba:true], [:]) when: def bean = new TaskBean(task) @@ -99,6 +101,8 @@ class TaskBeanTest extends Specification { bean.stageInMode == 'link' bean.stageOutMode == 'rsync' + bean.useMicromamba == true + } def 'should clone task bean' () { diff --git a/modules/nextflow/src/test/groovy/nextflow/processor/TaskConfigTest.groovy b/modules/nextflow/src/test/groovy/nextflow/processor/TaskConfigTest.groovy index a51d0fc15c..e2a09b1fdf 100644 --- a/modules/nextflow/src/test/groovy/nextflow/processor/TaskConfigTest.groovy +++ b/modules/nextflow/src/test/groovy/nextflow/processor/TaskConfigTest.groovy @@ -157,8 +157,8 @@ class TaskConfigTest extends Specification { where: value | expected - null | 0 - 0 | 0 + null | 1 + 0 | 1 1 | 1 '3' | 3 10 | 10 @@ -171,8 +171,8 @@ class TaskConfigTest extends Specification { when: config = new TaskConfig() then: - config.maxRetries == 0 - config.getMaxRetries() == 0 + config.maxRetries == 1 + config.getMaxRetries() == 1 config.getErrorStrategy() == ErrorStrategy.TERMINATE when: diff --git a/modules/nextflow/src/test/groovy/nextflow/processor/TaskHandlerTest.groovy b/modules/nextflow/src/test/groovy/nextflow/processor/TaskHandlerTest.groovy index df6b065bc9..5bea39a948 100644 --- 
a/modules/nextflow/src/test/groovy/nextflow/processor/TaskHandlerTest.groovy +++ b/modules/nextflow/src/test/groovy/nextflow/processor/TaskHandlerTest.groovy @@ -249,4 +249,18 @@ class TaskHandlerTest extends Specification { TaskStatus.RUNNING | false | false | true | true | false TaskStatus.COMPLETED| false | false | false | false | true } + + @Unroll + def 'should include the tower prefix'() { + given: + def name = 'job_1' + + expect: + TaskHandler.prependWorkflowPrefix(name, ENV) == EXPECTED + + where: + ENV | EXPECTED + [:] | "job_1" + [TOWER_WORKFLOW_ID: '1234'] | "tw-1234-job_1" + } } diff --git a/modules/nextflow/src/test/groovy/nextflow/processor/TaskRunTest.groovy b/modules/nextflow/src/test/groovy/nextflow/processor/TaskRunTest.groovy index 9f0cd50875..6f1e988899 100644 --- a/modules/nextflow/src/test/groovy/nextflow/processor/TaskRunTest.groovy +++ b/modules/nextflow/src/test/groovy/nextflow/processor/TaskRunTest.groovy @@ -402,7 +402,6 @@ class TaskRunTest extends Specification { } def 'should resolve the task body script' () { - given: def task = new TaskRun() task.processor = [:] as TaskProcessor @@ -412,7 +411,7 @@ class TaskRunTest extends Specification { * plain task script */ when: - task.resolve(new BodyDef({-> 'Hello'}, 'Hello', 'script')) + task.resolveBody(new BodyDef({-> 'Hello'}, 'Hello', 'script')) then: task.script == 'Hello' task.source == 'Hello' @@ -422,7 +421,7 @@ class TaskRunTest extends Specification { */ when: task.context = new TaskContext(Mock(Script),[x: 'world'],'foo') - task.resolve(new BodyDef({-> "Hello ${x}"}, 'Hello ${x}', 'script')) + task.resolveBody(new BodyDef({-> "Hello ${x}"}, 'Hello ${x}', 'script')) then: task.script == 'Hello world' task.source == 'Hello ${x}' @@ -448,7 +447,7 @@ class TaskRunTest extends Specification { */ when: task.context = new TaskContext(Mock(Script),VARS,'foo') - task.resolve(new BodyDef(body, 'cat ${one}\nhead ${many}', 'script')) + task.resolveBody(new BodyDef(body, 'cat ${one}\nhead ${many}', 'script')) then: task.script == ''' cat a\\ b.txt @@ -481,7 +480,7 @@ class TaskRunTest extends Specification { when: task.context = new TaskContext(script,local,'foo') task.config = new TaskConfig().setContext(task.context) - task.resolve(new BodyDef({-> '$BASH_VAR !{nxf_var} - !{params.var_no}'}, '', 'shell')) // <-- note: 'shell' type + task.resolveBody(new BodyDef({-> '$BASH_VAR !{nxf_var} - !{params.var_no}'}, '', 'shell')) // <-- note: 'shell' type then: task.script == '$BASH_VAR YES - NO' task.source == '' @@ -493,7 +492,7 @@ class TaskRunTest extends Specification { task.context = new TaskContext(Mock(Script),[nxf_var: '>interpolated value<'],'foo') task.config = new TaskConfig().setContext(task.context) task.config.placeholder = '#' as char - task.resolve(new BodyDef({-> '$BASH_VAR #{nxf_var}'}, '$BASH_VAR #{nxf_var}', 'shell')) // <-- note: 'shell' type + task.resolveBody(new BodyDef({-> '$BASH_VAR #{nxf_var}'}, '$BASH_VAR #{nxf_var}', 'shell')) // <-- note: 'shell' type then: task.script == '$BASH_VAR >interpolated value<' task.source == '$BASH_VAR #{nxf_var}' @@ -517,7 +516,7 @@ class TaskRunTest extends Specification { task.config = new TaskConfig().setContext(task.context) when: - task.resolve( new BodyDef({-> template(my_file)}, 'template($file)', 'script')) + task.resolveBody( new BodyDef({-> template(my_file)}, 'template($file)', 'script')) then: task.script == 'echo Ciao mondo' task.source == 'echo ${say_hello}' @@ -542,7 +541,7 @@ class TaskRunTest extends Specification { task.config = new 
TaskConfig().setContext(task.context) when: - task.resolve( new BodyDef({-> template(my_file)}, 'template($file)', 'shell')) + task.resolveBody( new BodyDef({-> template(my_file)}, 'template($file)', 'shell')) then: task.script == 'echo $HOME ~ Foo bar' task.source == 'echo $HOME ~ !{user_name}' @@ -568,7 +567,7 @@ class TaskRunTest extends Specification { task.config.placeholder = '#' as char when: - task.resolve( new BodyDef({-> template(my_file)}, 'template($file)', 'shell')) + task.resolveBody( new BodyDef({-> template(my_file)}, 'template($file)', 'shell')) then: task.script == 'echo $HOME ~ Foo bar' task.source == 'echo $HOME ~ #{user_name}' @@ -750,7 +749,7 @@ class TaskRunTest extends Specification { task.context = GroovyMock(TaskContext) when: - task.resolve(new BodyDef({-> "Hello ${x}"}, 'Hello ${x}', 'script')) + task.resolveBody(new BodyDef({-> "Hello ${x}"}, 'Hello ${x}', 'script')) and: def vars = task.getVariableNames() then: @@ -769,7 +768,7 @@ class TaskRunTest extends Specification { task.config = new TaskConfig() when: - task.resolve(new BodyDef({-> 'Hello !{foo} !{bar} !{input_x}'}, 'Hello..', 'shell')) + task.resolveBody(new BodyDef({-> 'Hello !{foo} !{bar} !{input_x}'}, 'Hello..', 'shell')) and: def vars = task.getVariableNames() then: @@ -793,7 +792,7 @@ class TaskRunTest extends Specification { task.config = new TaskConfig() when: - task.resolve(new BodyDef({-> template }, 'Hello..', 'script')) + task.resolveBody(new BodyDef({-> template }, 'Hello..', 'script')) and: def vars = task.getVariableNames() then: @@ -822,7 +821,7 @@ class TaskRunTest extends Specification { * plain task script */ when: - task.resolve(dryRun) + task.resolveStub(dryRun) then: task.script == 'echo Hello world' task.source == 'command source' @@ -891,4 +890,55 @@ class TaskRunTest extends Specification { expect: !task.isArray() } + + def 'should resolve task body' () { + given: + def task = Spy(TaskRun) + task.processor = Mock(TaskProcessor) { + getSession()>>Mock(Session) { getStubRun() >> false} + } + and: + def body = Mock(BodyDef) + + when: + task.resolve(body) + then: + 1 * task.resolveBody(body) >> null + 0 * task.resolveStub(_) >> null + } + + def 'should resolve task body when no stub' () { + given: + def task = Spy(TaskRun) + task.processor = Mock(TaskProcessor) { + getSession()>>Mock(Session) { getStubRun() >> true} + } + task.config = Mock(TaskConfig) { getStubBlock()>> null } + and: + def body = Mock(BodyDef) + + when: + task.resolve(body) + then: + 1 * task.resolveBody(body) >> null + 0 * task.resolveStub(_) >> null + } + + def 'should resolve task stub' () { + given: + def body = Mock(BodyDef) + def stub = Mock(TaskClosure) + and: + def task = Spy(TaskRun) + task.config = Mock(TaskConfig) { getStubBlock()>>stub } + task.processor = Mock(TaskProcessor) { + getSession()>>Mock(Session) { getStubRun() >> true} + } + + when: + task.resolve(body) + then: + 1 * task.resolveStub(stub) >> null + 0 * task.resolveBody(_) >> null + } } diff --git a/modules/nextflow/src/test/groovy/nextflow/scm/AssetManagerTest.groovy b/modules/nextflow/src/test/groovy/nextflow/scm/AssetManagerTest.groovy index aa8bf1467c..ac59065c25 100644 --- a/modules/nextflow/src/test/groovy/nextflow/scm/AssetManagerTest.groovy +++ b/modules/nextflow/src/test/groovy/nextflow/scm/AssetManagerTest.groovy @@ -414,7 +414,7 @@ class AssetManagerTest extends Specification { then: holder.getMainScriptName() == 'main.nf' holder.getHomePage() == 'https://github.com/foo/bar' - holder.manifest.getDefaultBranch() == 'master' + 
holder.manifest.getDefaultBranch() == null holder.manifest.getDescription() == null } @@ -601,4 +601,54 @@ class AssetManagerTest extends Specification { noExceptionThrown() } + @Requires({System.getenv('NXF_GITHUB_ACCESS_TOKEN')}) + def 'should identify default branch when downloading repo'() { + + given: + def folder = tempDir.getRoot() + def token = System.getenv('NXF_GITHUB_ACCESS_TOKEN') + def manager = new AssetManager().build('nextflow-io/socks', [providers: [github: [auth: token]]]) + + when: + // simulate calling `nextflow run nextflow-io/socks` without specifying a revision + manager.download() + manager.checkout(null) + then: + folder.resolve('nextflow-io/socks/.git').isDirectory() + manager.getCurrentRevision() == 'main' + + when: + manager.download() + then: + noExceptionThrown() + } + + @Requires({System.getenv('NXF_GITHUB_ACCESS_TOKEN')}) + def 'can filter remote branches'() { + given: + def folder = tempDir.getRoot() + def token = System.getenv('NXF_GITHUB_ACCESS_TOKEN') + def manager = new AssetManager().build('nextflow-io/hello', [providers: [github: [auth: token]]]) + manager.download() + def branches = manager.getBranchList() + + when: + def remote_head = branches.find { it.name == 'refs/remotes/origin/HEAD' } + then: + remote_head != null + !AssetManager.isRemoteBranch(remote_head) + + when: + def remote_master = branches.find { it.name == 'refs/remotes/origin/master' } + then: + remote_master != null + AssetManager.isRemoteBranch(remote_master) + + when: + def local_master = branches.find { it.name == 'refs/heads/master' } + then: + local_master != null + !AssetManager.isRemoteBranch(local_master) + } + } diff --git a/modules/nextflow/src/test/groovy/nextflow/scm/AzureRepositoryProviderTest.groovy b/modules/nextflow/src/test/groovy/nextflow/scm/AzureRepositoryProviderTest.groovy index 00e7cc74f0..decec42c1d 100644 --- a/modules/nextflow/src/test/groovy/nextflow/scm/AzureRepositoryProviderTest.groovy +++ b/modules/nextflow/src/test/groovy/nextflow/scm/AzureRepositoryProviderTest.groovy @@ -80,7 +80,7 @@ class AzureRepositoryProviderTest extends Specification { def obj = new ProviderConfig('azurerepos', config.providers.azurerepos as ConfigObject) expect: - new AzureRepositoryProvider('ORGANIZATION/PROJECT/hello', obj).getRepositoryUrl() == 'https://dev.azure.com/ORGANIZATION/PROJECT' + new AzureRepositoryProvider('ORGANIZATION/PROJECT/hello', obj).getRepositoryUrl() == 'https://dev.azure.com/ORGANIZATION/PROJECT/hello' } diff --git a/modules/nextflow/src/test/groovy/nextflow/script/OutputDslTest.groovy b/modules/nextflow/src/test/groovy/nextflow/script/OutputDslTest.groovy index 3828ae5fc4..265672fd3f 100644 --- a/modules/nextflow/src/test/groovy/nextflow/script/OutputDslTest.groovy +++ b/modules/nextflow/src/test/groovy/nextflow/script/OutputDslTest.groovy @@ -18,15 +18,23 @@ class OutputDslTest extends Specification { def 'should publish workflow outputs'() { given: def root = Files.createTempDirectory('test') + def outputDir = root.resolve('results') def workDir = root.resolve('work') def work1 = workDir.resolve('ab/1234'); Files.createDirectories(work1) def work2 = workDir.resolve('cd/5678'); Files.createDirectories(work2) def file1 = work1.resolve('file1.txt'); file1.text = 'Hello' def file2 = work2.resolve('file2.txt'); file2.text = 'world' - def target = root.resolve('results') and: def session = Mock(Session) { - getConfig() >> [:] + getConfig() >> [ + workflow: [ + output: [ + mode: 'symlink', + overwrite: true + ] + ] + ] + getOutputDir() >> outputDir 
getWorkDir() >> workDir } Global.session = session @@ -48,9 +56,6 @@ class OutputDslTest extends Specification { SysEnv.push(NXF_FILE_ROOT: root.toString()) when: - dsl.directory('results') - dsl.mode('symlink') - dsl.overwrite(true) dsl.target('bar') { path('barbar') index { @@ -67,74 +72,69 @@ class OutputDslTest extends Specification { } then: - target.resolve('foo/file1.txt').text == 'Hello' - target.resolve('barbar/file2.txt').text == 'world' - target.resolve('barbar/index.csv').text == """\ - "file2","${target}/barbar/file2.txt" + outputDir.resolve('foo/file1.txt').text == 'Hello' + outputDir.resolve('barbar/file2.txt').text == 'world' + outputDir.resolve('barbar/index.csv').text == """\ + "file2","${outputDir}/barbar/file2.txt" """.stripIndent() and: - 1 * session.notifyFilePublish(target.resolve('foo/file1.txt'), file1) - 1 * session.notifyFilePublish(target.resolve('barbar/file2.txt'), file2) - 1 * session.notifyFilePublish(target.resolve('barbar/index.csv')) + 1 * session.notifyFilePublish(outputDir.resolve('foo/file1.txt'), file1) + 1 * session.notifyFilePublish(outputDir.resolve('barbar/file2.txt'), file2) + 1 * session.notifyFilePublish(outputDir.resolve('barbar/index.csv')) cleanup: SysEnv.pop() root?.deleteDir() } - def 'should set options' () { + def 'should set target dsl' () { when: - def dsl1 = new OutputDsl() + def dsl1 = new OutputDsl.TargetDsl() then: - dsl1.@defaults == [:] + dsl1.getOptions() == [:] when: - def dsl2 = new OutputDsl() + def dsl2 = new OutputDsl.TargetDsl() and: dsl2.contentType('simple/text') + dsl2.enabled(true) dsl2.ignoreErrors(true) dsl2.mode('someMode') dsl2.overwrite(true) dsl2.storageClass('someClass') dsl2.tags([foo:'1',bar:'2']) - dsl2.enabled(true) then: - dsl2.@defaults == [ + dsl2.getOptions() == [ contentType:'simple/text', + enabled: true, ignoreErrors: true, mode: 'someMode', overwrite: true, storageClass: 'someClass', - tags: [foo:'1',bar:'2'], - enabled: true + tags: [foo:'1',bar:'2'] ] } - def 'should set target dsl' () { + def 'should set index dsl' () { when: - def dsl1 = new OutputDsl.TargetDsl() + def dsl1 = new OutputDsl.IndexDsl() then: dsl1.getOptions() == [:] when: - def dsl2 = new OutputDsl.TargetDsl() + def dsl2 = new OutputDsl.IndexDsl() + def mapper = { v -> v } and: - dsl2.contentType('simple/text') - dsl2.ignoreErrors(true) - dsl2.mode('someMode') - dsl2.overwrite(true) - dsl2.storageClass('someClass') - dsl2.tags([foo:'1',bar:'2']) - dsl2.enabled(true) + dsl2.header(true) + dsl2.mapper(mapper) + dsl2.path('path') + dsl2.sep(',') then: dsl2.getOptions() == [ - contentType:'simple/text', - ignoreErrors: true, - mode: 'someMode', - overwrite: true, - storageClass: 'someClass', - tags: [foo:'1',bar:'2'], - enabled: true + header: true, + mapper: mapper, + path: 'path', + sep: ',' ] } diff --git a/modules/nextflow/src/test/groovy/nextflow/script/ProcessConfigTest.groovy b/modules/nextflow/src/test/groovy/nextflow/script/ProcessConfigTest.groovy index bad0784768..59dd323c7f 100644 --- a/modules/nextflow/src/test/groovy/nextflow/script/ProcessConfigTest.groovy +++ b/modules/nextflow/src/test/groovy/nextflow/script/ProcessConfigTest.groovy @@ -47,7 +47,7 @@ class ProcessConfigTest extends Specification { expect: config.shell == ['/bin/bash','-ue'] config.cacheable - config.maxRetries == 0 + config.maxRetries == 1 config.maxErrors == -1 config.errorStrategy == ErrorStrategy.TERMINATE } diff --git a/modules/nextflow/src/test/groovy/nextflow/script/ScriptBindingTest.groovy 
b/modules/nextflow/src/test/groovy/nextflow/script/ScriptBindingTest.groovy index e65f0c10ac..59ca5c6ecc 100644 --- a/modules/nextflow/src/test/groovy/nextflow/script/ScriptBindingTest.groovy +++ b/modules/nextflow/src/test/groovy/nextflow/script/ScriptBindingTest.groovy @@ -75,65 +75,30 @@ class ScriptBindingTest extends Specification { } - def 'should convert hyphen separated string to camel case' () { - - expect: - ScriptBinding.ParamsMap.hyphenToCamelCase('a') == 'a' - ScriptBinding.ParamsMap.hyphenToCamelCase('A') == 'A' - ScriptBinding.ParamsMap.hyphenToCamelCase('a-b-c-') == 'aBC' - ScriptBinding.ParamsMap.hyphenToCamelCase('aa-bb-cc') == 'aaBbCc' - ScriptBinding.ParamsMap.hyphenToCamelCase('alpha-beta-delta') == 'alphaBetaDelta' - ScriptBinding.ParamsMap.hyphenToCamelCase('Alpha-Beta-delta') == 'AlphaBetaDelta' - - } - - def 'should convert camel case string to hyphen separated' () { - - expect: - ScriptBinding.ParamsMap.camelCaseToHyphen('alphaBetaDelta') == 'alpha-beta-delta' - ScriptBinding.ParamsMap.camelCaseToHyphen('AlphaBetaDelta') == 'Alpha-beta-delta' - ScriptBinding.ParamsMap.camelCaseToHyphen('Field1') == 'Field1' - ScriptBinding.ParamsMap.camelCaseToHyphen('FieldUno') == 'Field-uno' - ScriptBinding.ParamsMap.camelCaseToHyphen('FieldUNO') == 'Field-UNO' - ScriptBinding.ParamsMap.camelCaseToHyphen('FieldA') == 'Field-A' - ScriptBinding.ParamsMap.camelCaseToHyphen('FieldAB') == 'Field-AB' - ScriptBinding.ParamsMap.camelCaseToHyphen('FieldAb') == 'Field-ab' - - } - def 'should put an entry in the params map' () { when: def map = new ScriptBinding.ParamsMap() map['alphaBeta'] = 1 map['alphaBeta'] = 2 - map['alpha-beta'] = 3 - then: map['alphaBeta'] == 1 - map['alpha-beta'] == 1 - when: map = new ScriptBinding.ParamsMap() - map['aaa-bbb-ccc'] = 1 map['aaaBbbCcc'] = 10 map['AaaBbbCcc'] = 20 - then: - map['aaaBbbCcc'] == 1 - map['aaa-bbb-ccc'] == 1 - + map['aaaBbbCcc'] == 10 + map['AaaBbbCcc'] == 20 when: map = new ScriptBinding.ParamsMap() map['field1'] = 1 map['field2'] = 2 map['Field2'] = 3 - then: map['field1'] == 1 - map['field-1'] == null map['field2'] == 2 map['Field2'] == 3 @@ -151,7 +116,6 @@ class ScriptBindingTest extends Specification { then: map.alpha == 0 map.alphaBeta == 0 - map.'alpha-beta' == 0 map.delta == 2 map.gamma == 3 @@ -160,7 +124,6 @@ class ScriptBindingTest extends Specification { then: copy.foo == 1 copy.alphaBeta == 4 - copy.'alpha-beta' == 4 copy.delta == 2 copy.gamma == 3 copy.omega == 9 @@ -168,46 +131,12 @@ class ScriptBindingTest extends Specification { // source does not change map.alpha == 0 map.alphaBeta == 0 - map.'alpha-beta' == 0 map.delta == 2 map.gamma == 3 !map.containsKey('omega') } - def 'should wrap a string value with quote character' () { - - expect: - ScriptBinding.ParamsMap.wrap('hello',null) == 'hello' - ScriptBinding.ParamsMap.wrap('hello','"') == '"hello"' - ScriptBinding.ParamsMap.wrap('hello world',null) == '"hello world"' - ScriptBinding.ParamsMap.wrap('hello world',"'") == "'hello world'" - ScriptBinding.ParamsMap.wrap('hello"world',"'") == "'hello\"world'" - ScriptBinding.ParamsMap.wrap('hello"world',null) == '"hello\\"world"' - - } - - def 'should return a command line formatted string'() { - - when: - def params = new ScriptBinding.ParamsMap('foo-bar':1) - then: - params.size() == 2 - params.fooBar == 1 - params.'foo-bar' == 1 - params.all() == '--foo-bar 1' - - expect: - new ScriptBinding.ParamsMap(x:1).all() == '--x 1' - new ScriptBinding.ParamsMap(x:1, y: 2).all() == '--x 1 --y 2' - new ScriptBinding.ParamsMap(x:1, 
y: 2).all(sep:'=') == '--x=1 --y=2' - new ScriptBinding.ParamsMap(x:1, y: 2).all(sep:'=', prefix:'-') == '-x=1 -y=2' - new ScriptBinding.ParamsMap(x:1, y: 'hello world').all() == '--x 1 --y "hello world"' - new ScriptBinding.ParamsMap(x:1, y: 'hello world').all(quote:"'") == '--x \'1\' --y \'hello world\'' - new ScriptBinding.ParamsMap(x:1, y: "O'Connors").all(quote:"'") == "--x '1' --y 'O\\'Connors'" - - } - def 'should get the variable name giving the value'() { given: diff --git a/modules/nextflow/src/test/groovy/nextflow/trace/ProgressRecordTest.groovy b/modules/nextflow/src/test/groovy/nextflow/trace/ProgressRecordTest.groovy index e9541d2dda..e8fecc07b5 100644 --- a/modules/nextflow/src/test/groovy/nextflow/trace/ProgressRecordTest.groovy +++ b/modules/nextflow/src/test/groovy/nextflow/trace/ProgressRecordTest.groovy @@ -60,6 +60,7 @@ class ProgressRecordTest extends Specification { def FAILED =5 def CACHED =6 def STORED =7 + def ABORTED = 8 and: def rec = new ProgressRecord(10, 'foo') @@ -71,10 +72,11 @@ class ProgressRecordTest extends Specification { rec.failed =FAILED rec.cached =CACHED rec.stored =STORED + rec.aborted = ABORTED then: rec.getCompletedCount() == SUCCEEDED+ FAILED+ CACHED+ STORED - rec.getTotalCount() == PENDING+ SUBMITTED+ RUNNING + SUCCEEDED+ FAILED+ CACHED+ STORED + rec.getTotalCount() == PENDING+ SUBMITTED+ RUNNING + SUCCEEDED+ FAILED+ CACHED+ STORED+ ABORTED } diff --git a/modules/nextflow/src/test/groovy/nextflow/util/ArrayBagTest.groovy b/modules/nextflow/src/test/groovy/nextflow/util/ArrayBagTest.groovy index 0eb91521a6..b4e0b8efb4 100644 --- a/modules/nextflow/src/test/groovy/nextflow/util/ArrayBagTest.groovy +++ b/modules/nextflow/src/test/groovy/nextflow/util/ArrayBagTest.groovy @@ -74,4 +74,35 @@ class ArrayBagTest extends Specification { String.valueOf(bag) == '[1, 2, 3]' } + def 'hashCode should be invariant to order' () { + given: + def bag1 = new ArrayBag([1,2,3]) + def bag2 = new ArrayBag([3,1,2]) + def bag3 = new ArrayBag([4,1,2]) + + expect: + bag1.hashCode() == bag2.hashCode() + bag1.hashCode() != bag3.hashCode() + + /** + * NOTE!!! 
equality cannot be checked due to groovy overriding the equals implementation + * see {@link ArrayBag#equals(java.lang.Object)} + */ + } + + def 'should access map entry using bag as key' () { + given: + def bag1 = new ArrayBag([1,2,3]) + def bag2 = new ArrayBag([3,1,2]) + def bag3 = new ArrayBag([4,1,2]) + and: + def map = [(bag1):'foo'] + + expect: + map.get(bag1) == 'foo' + map.get(bag2) == 'foo' + map.get(bag3) == null + + } + } diff --git a/modules/nextflow/src/test/groovy/nextflow/util/ConfigHelperTest.groovy b/modules/nextflow/src/test/groovy/nextflow/util/ConfigHelperTest.groovy index b92ec8c308..985038ca1f 100644 --- a/modules/nextflow/src/test/groovy/nextflow/util/ConfigHelperTest.groovy +++ b/modules/nextflow/src/test/groovy/nextflow/util/ConfigHelperTest.groovy @@ -19,6 +19,7 @@ package nextflow.util import java.nio.file.Files import java.nio.file.Paths +import nextflow.config.ConfigClosurePlaceholder import spock.lang.Specification import spock.lang.Unroll @@ -245,6 +246,100 @@ class ConfigHelperTest extends Specification { } + def 'should render config as json' () { + given: + def config = new ConfigObject() + config.process.queue = 'long' + config.process.executor = 'slurm' + config.process.memory = new ConfigClosurePlaceholder('{ 1.GB }') + config.docker.enabled = true + config.zeta.'quoted-attribute'.foo = 1 + + when: + def result = ConfigHelper.toJsonString(config, true) + then: + result == ''' + { + "docker": { + "enabled": true + }, + "process": { + "executor": "slurm", + "memory": "{ 1.GB }", + "queue": "long" + }, + "zeta": { + "quoted-attribute": { + "foo": 1 + } + } + } + '''.stripIndent().trim() + + + when: + result = ConfigHelper.toJsonString(config, false) + then: + result == ''' + { + "process": { + "queue": "long", + "executor": "slurm", + "memory": "{ 1.GB }" + }, + "docker": { + "enabled": true + }, + "zeta": { + "quoted-attribute": { + "foo": 1 + } + } + } + '''.stripIndent().trim() + } + + def 'should render config as yaml' () { + given: + def config = new ConfigObject() + config.process.queue = 'long' + config.process.executor = 'slurm' + config.process.memory = new ConfigClosurePlaceholder('{ 1.GB }') + config.docker.enabled = true + config.zeta.'quoted-attribute'.foo = 1 + + when: + def result = ConfigHelper.toYamlString(config, true) + then: + result == '''\ + docker: + enabled: true + process: + executor: slurm + memory: '{ 1.GB }' + queue: long + zeta: + quoted-attribute: + foo: 1 + '''.stripIndent() + + + when: + result = ConfigHelper.toYamlString(config, false) + then: + result == '''\ + process: + queue: long + executor: slurm + memory: '{ 1.GB }' + docker: + enabled: true + zeta: + quoted-attribute: + foo: 1 + '''.stripIndent() + } + def 'should verify valid identifiers' () { expect: diff --git a/modules/nextflow/src/test/groovy/nextflow/util/KryoHelperTest.groovy b/modules/nextflow/src/test/groovy/nextflow/util/KryoHelperTest.groovy index eea8009514..35c3ef6352 100644 --- a/modules/nextflow/src/test/groovy/nextflow/util/KryoHelperTest.groovy +++ b/modules/nextflow/src/test/groovy/nextflow/util/KryoHelperTest.groovy @@ -165,6 +165,16 @@ class KryoHelperTest extends Specification { KryoHelper.deserialize(buffer).toUri() == new URI('http://host.com/foo.txt') } + def 'should serialise xfilesystem' () { + when: + def uri = new URI('https://host.com/path/foo.txt') + def fs = FileHelper.getOrCreateFileSystemFor(new URI('https://host.com/path/foo.txt')) + def fsBuffer = KryoHelper.serialize(fs) + then: + 
KryoHelper.deserialize(fsBuffer).getClass().getName() == 'nextflow.file.http.XFileSystem' + KryoHelper.deserialize(fsBuffer).getPath("/path/foo.txt").toUri() == uri + } + @EqualsAndHashCode static class Foo implements SerializableMarker { String foo diff --git a/modules/nextflow/src/test/resources/nextflow/executor/test-bash-wrapper-with-trace.txt b/modules/nextflow/src/test/resources/nextflow/executor/test-bash-wrapper-with-trace.txt index ef1380e7cf..5aef2f4795 100644 --- a/modules/nextflow/src/test/resources/nextflow/executor/test-bash-wrapper-with-trace.txt +++ b/modules/nextflow/src/test/resources/nextflow/executor/test-bash-wrapper-with-trace.txt @@ -98,26 +98,28 @@ nxf_mem_watch() { count=$((count+1)) done - echo "%mem=${nxf_stat_ret[1]}" >> $trace_file - echo "vmem=${nxf_stat_ret[2]}" >> $trace_file - echo "rss=${nxf_stat_ret[3]}" >> $trace_file - echo "peak_vmem=${nxf_stat_ret[4]}" >> $trace_file - echo "peak_rss=${nxf_stat_ret[5]}" >> $trace_file - echo "vol_ctxt=${nxf_stat_ret[6]}" >> $trace_file - echo "inv_ctxt=${nxf_stat_ret[7]}" >> $trace_file + printf "%s\n" \ + "%mem=${nxf_stat_ret[1]}" \ + "vmem=${nxf_stat_ret[2]}" \ + "rss=${nxf_stat_ret[3]}" \ + "peak_vmem=${nxf_stat_ret[4]}" \ + "peak_rss=${nxf_stat_ret[5]}" \ + "vol_ctxt=${nxf_stat_ret[6]}" \ + "inv_ctxt=${nxf_stat_ret[7]}" >> "$trace_file" || >&2 echo "Error: Failed to append to file: $trace_file" } nxf_write_trace() { - echo "nextflow.trace/v2" > $trace_file - echo "realtime=$wall_time" >> $trace_file - echo "%cpu=$ucpu" >> $trace_file - echo "cpu_model=$cpu_model" >> $trace_file - echo "rchar=${io_stat1[0]}" >> $trace_file - echo "wchar=${io_stat1[1]}" >> $trace_file - echo "syscr=${io_stat1[2]}" >> $trace_file - echo "syscw=${io_stat1[3]}" >> $trace_file - echo "read_bytes=${io_stat1[4]}" >> $trace_file - echo "write_bytes=${io_stat1[5]}" >> $trace_file + printf "%s\n" \ + "nextflow.trace/v2" \ + "realtime=$wall_time" \ + "%cpu=$ucpu" \ + "cpu_model=$cpu_model" \ + "rchar=${io_stat1[0]}" \ + "wchar=${io_stat1[1]}" \ + "syscr=${io_stat1[2]}" \ + "syscw=${io_stat1[3]}" \ + "read_bytes=${io_stat1[4]}" \ + "write_bytes=${io_stat1[5]}" > "$trace_file" || >&2 echo "Error: Failed to write to file: $trace_file" } nxf_trace_mac() { @@ -173,16 +175,17 @@ nxf_trace_linux() { local wall_time=$((end_millis-start_millis)) [ $NXF_DEBUG = 1 ] && echo "+++ STATS %CPU=$ucpu TIME=$wall_time I/O=${io_stat1[*]}" - echo "nextflow.trace/v2" > $trace_file - echo "realtime=$wall_time" >> $trace_file - echo "%cpu=$ucpu" >> $trace_file - echo "cpu_model=$cpu_model" >> $trace_file - echo "rchar=${io_stat1[0]}" >> $trace_file - echo "wchar=${io_stat1[1]}" >> $trace_file - echo "syscr=${io_stat1[2]}" >> $trace_file - echo "syscw=${io_stat1[3]}" >> $trace_file - echo "read_bytes=${io_stat1[4]}" >> $trace_file - echo "write_bytes=${io_stat1[5]}" >> $trace_file + printf "%s\n" \ + "nextflow.trace/v2" \ + "realtime=$wall_time" \ + "%cpu=$ucpu" \ + "cpu_model=$cpu_model" \ + "rchar=${io_stat1[0]}" \ + "wchar=${io_stat1[1]}" \ + "syscr=${io_stat1[2]}" \ + "syscw=${io_stat1[3]}" \ + "read_bytes=${io_stat1[4]}" \ + "write_bytes=${io_stat1[5]}" > "$trace_file" || >&2 echo "Error: Failed to write to file: $trace_file" [ -e /proc/$mem_proc ] && eval "echo 'DONE' >&$mem_fd" || true wait $mem_proc 2>/dev/null || true @@ -270,7 +273,10 @@ nxf_fs_fcp() { } on_exit() { - exit_status=${nxf_main_ret:=$?} + local last_err=$? 
+ local exit_status=${nxf_main_ret:=0} + [[ ${exit_status} -eq 0 && ${nxf_unstage_ret:=0} -ne 0 ]] && exit_status=${nxf_unstage_ret:=0} + [[ ${exit_status} -eq 0 && ${last_err} -ne 0 ]] && exit_status=${last_err} printf -- $exit_status > {{folder}}/.exitcode set +u exit $exit_status @@ -289,9 +295,20 @@ nxf_stage() { true } -nxf_unstage() { +nxf_unstage_outputs() { + true +} + +nxf_unstage_controls() { true - [[ ${nxf_main_ret:=0} != 0 ]] && return +} + +nxf_unstage() { + if [[ ${nxf_main_ret:=0} == 0 ]]; then + (set -e -o pipefail; (nxf_unstage_outputs | tee -a .command.out) 3>&1 1>&2 2>&3 | tee -a .command.err) + nxf_unstage_ret=$? + fi + nxf_unstage_controls } nxf_main() { diff --git a/modules/nextflow/src/test/resources/nextflow/executor/test-bash-wrapper.txt b/modules/nextflow/src/test/resources/nextflow/executor/test-bash-wrapper.txt index f465b5b9b4..3bb4f34fe5 100644 --- a/modules/nextflow/src/test/resources/nextflow/executor/test-bash-wrapper.txt +++ b/modules/nextflow/src/test/resources/nextflow/executor/test-bash-wrapper.txt @@ -81,7 +81,10 @@ nxf_fs_fcp() { } on_exit() { - exit_status=${nxf_main_ret:=$?} + local last_err=$? + local exit_status=${nxf_main_ret:=0} + [[ ${exit_status} -eq 0 && ${nxf_unstage_ret:=0} -ne 0 ]] && exit_status=${nxf_unstage_ret:=0} + [[ ${exit_status} -eq 0 && ${last_err} -ne 0 ]] && exit_status=${last_err} printf -- $exit_status > {{folder}}/.exitcode set +u exit $exit_status @@ -100,9 +103,20 @@ nxf_stage() { true } -nxf_unstage() { +nxf_unstage_outputs() { + true +} + +nxf_unstage_controls() { true - [[ ${nxf_main_ret:=0} != 0 ]] && return +} + +nxf_unstage() { + if [[ ${nxf_main_ret:=0} == 0 ]]; then + (set -e -o pipefail; (nxf_unstage_outputs | tee -a .command.out) 3>&1 1>&2 2>&3 | tee -a .command.err) + nxf_unstage_ret=$? + fi + nxf_unstage_controls } nxf_main() { diff --git a/modules/nextflow/src/testFixtures/groovy/test/MockHelpers.groovy b/modules/nextflow/src/testFixtures/groovy/test/MockHelpers.groovy index 5a0c0c1c39..51a298dbd6 100644 --- a/modules/nextflow/src/testFixtures/groovy/test/MockHelpers.groovy +++ b/modules/nextflow/src/testFixtures/groovy/test/MockHelpers.groovy @@ -175,7 +175,7 @@ class MockTaskHandler extends TaskHandler { task.code.call() } status = TaskStatus.COMPLETED - task.processor.finalizeTask(task) + task.processor.finalizeTask(this) } @Override diff --git a/modules/nf-commons/build.gradle b/modules/nf-commons/build.gradle index 2a5c6ec708..951bf44f23 100644 --- a/modules/nf-commons/build.gradle +++ b/modules/nf-commons/build.gradle @@ -26,8 +26,8 @@ sourceSets { dependencies { api "ch.qos.logback:logback-classic:1.4.14" - api "org.apache.groovy:groovy:4.0.22" - api "org.apache.groovy:groovy-nio:4.0.22" + api "org.apache.groovy:groovy:4.0.24" + api "org.apache.groovy:groovy-nio:4.0.24" api "commons-lang:commons-lang:2.6" api 'com.google.guava:guava:33.0.0-jre' api 'org.pf4j:pf4j:3.12.0' diff --git a/modules/nf-commons/src/main/nextflow/file/CopyMoveHelper.java b/modules/nf-commons/src/main/nextflow/file/CopyMoveHelper.java index 7c978e65d2..e5b3c13d8a 100644 --- a/modules/nf-commons/src/main/nextflow/file/CopyMoveHelper.java +++ b/modules/nf-commons/src/main/nextflow/file/CopyMoveHelper.java @@ -41,6 +41,10 @@ * Helper class to handle copy/move files and directories */ public class CopyMoveHelper { + /** + * True if currently performing a copy of a foreign file. 
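+ * Set by copyFile() while streaming a foreign (remote) source, so that filesystem
+ * providers can apply extra integrity checks during staging, e.g. validating the
+ * expected content length of an HTTP download.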
+ */ + public static final ThreadLocal&lt;Boolean&gt; IN_FOREIGN_COPY = new ThreadLocal<>(); private static Logger log = LoggerFactory.getLogger(CopyMoveHelper.class); @@ -81,14 +85,16 @@ private static CopyOption[] convertMoveToCopyOptions(CopyOption... options) private static void copyFile(Path source, Path target, boolean foreign, CopyOption... options) throws IOException { - if( !foreign ) { source.getFileSystem().provider().copy(source, target, options); return; } + IN_FOREIGN_COPY.set(true); try (InputStream in = Files.newInputStream(source)) { Files.copy(in, target); + } finally { + IN_FOREIGN_COPY.set(false); } } diff --git a/modules/nf-httpfs/build.gradle b/modules/nf-httpfs/build.gradle index ba7e9462f6..29fc92922c 100644 --- a/modules/nf-httpfs/build.gradle +++ b/modules/nf-httpfs/build.gradle @@ -30,12 +30,12 @@ sourceSets { dependencies { api project(':nf-commons') api "ch.qos.logback:logback-classic:1.4.14" - api "org.apache.groovy:groovy:4.0.22" - api "org.apache.groovy:groovy-nio:4.0.22" + api "org.apache.groovy:groovy:4.0.24" + api "org.apache.groovy:groovy-nio:4.0.24" api("com.esotericsoftware.kryo:kryo:2.24.0") { exclude group: 'com.esotericsoftware.minlog', module: 'minlog' } /* testImplementation inherited from top gradle build file */ - testImplementation "org.apache.groovy:groovy-json:4.0.22" // needed by wiremock + testImplementation "org.apache.groovy:groovy-json:4.0.24" // needed by wiremock testImplementation ('com.github.tomakehurst:wiremock:1.57') { exclude module: 'groovy-all' } testImplementation ('com.github.tomjankes:wiremock-groovy:0.2.0') { exclude module: 'groovy-all' } diff --git a/modules/nf-httpfs/src/main/nextflow/file/http/FixedInputStream.groovy b/modules/nf-httpfs/src/main/nextflow/file/http/FixedInputStream.groovy new file mode 100644 index 0000000000..3f6e100af4 --- /dev/null +++ b/modules/nf-httpfs/src/main/nextflow/file/http/FixedInputStream.groovy @@ -0,0 +1,73 @@ +/* + * Copyright 2013-2024, Seqera Labs + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ * + */ + +package nextflow.file.http + +import groovy.transform.CompileStatic + +/** + * Implements a {@link FilterInputStream} that checks that the expected number of bytes has been + * read when the stream is closed, throwing an error otherwise + * + * @author Paolo Di Tommaso + */ +@CompileStatic +class FixedInputStream extends FilterInputStream { + + private final long length + private long bytesRead + + FixedInputStream(InputStream inputStream, long len) { + super(inputStream) + this.length = len + } + + @Override + int read() throws IOException { + final result = super.read() + if( result!=-1 ) + bytesRead++ + return result + } + + @Override + int read(byte[] b, int off, int len) throws IOException { + final result = super.read(b, off, len) + if( result!=-1 ) + bytesRead += result + return result + } + + @Override + long skip(long n) throws IOException { + long skipped = super.skip(n) + bytesRead += skipped + return skipped + } + + @Override + int available() throws IOException { + super.available() + } + + @Override + void close() throws IOException { + if( bytesRead != length ) + throw new IOException("Read data length does not match expected size - bytes read: ${bytesRead}; expected: ${length}") + super.close() + } +} diff --git a/modules/nf-httpfs/src/main/nextflow/file/http/XFileSystem.groovy b/modules/nf-httpfs/src/main/nextflow/file/http/XFileSystem.groovy index d62455dff7..3e47953af7 100644 --- a/modules/nf-httpfs/src/main/nextflow/file/http/XFileSystem.groovy +++ b/modules/nf-httpfs/src/main/nextflow/file/http/XFileSystem.groovy @@ -44,6 +44,11 @@ class XFileSystem extends FileSystem { private URI base + /* + * Only needed to prevent serialization issues - see https://github.com/nextflow-io/nextflow/issues/5208 + */ + protected XFileSystem(){} + XFileSystem(XFileSystemProvider provider, URI base) { this.provider = provider this.base = base diff --git a/modules/nf-httpfs/src/main/nextflow/file/http/XFileSystemProvider.groovy b/modules/nf-httpfs/src/main/nextflow/file/http/XFileSystemProvider.groovy index e673a930c1..8bfdaae560 100644 --- a/modules/nf-httpfs/src/main/nextflow/file/http/XFileSystemProvider.groovy +++ b/modules/nf-httpfs/src/main/nextflow/file/http/XFileSystemProvider.groovy @@ -16,6 +16,8 @@ package nextflow.file.http +import nextflow.file.CopyMoveHelper + import static nextflow.file.http.XFileSystemConfig.* import java.nio.ByteBuffer @@ -245,7 +247,11 @@ abstract class XFileSystemProvider extends FileSystemProvider { } final conn = toConnection(path) - final stream = new BufferedInputStream(conn.getInputStream()) + final length = conn.getContentLengthLong() + final target = length>0 + ? new FixedInputStream(conn.getInputStream(),length) + : conn.getInputStream() + final stream = new BufferedInputStream(target) new SeekableByteChannel() { @@ -346,7 +352,12 @@ abstract class XFileSystemProvider extends FileSystemProvider { } } - return toConnection(path).getInputStream() + final conn = toConnection(path) + final length = conn.getContentLengthLong() + // only apply the FixedInputStream check if staging files + return length>0 && CopyMoveHelper.IN_FOREIGN_COPY.get() + ? 
new FixedInputStream(conn.getInputStream(), length) + : conn.getInputStream() } /** diff --git a/modules/nf-httpfs/src/main/nextflow/file/http/XPath.groovy b/modules/nf-httpfs/src/main/nextflow/file/http/XPath.groovy index 4a3ddb5164..d2c7401734 100644 --- a/modules/nf-httpfs/src/main/nextflow/file/http/XPath.groovy +++ b/modules/nf-httpfs/src/main/nextflow/file/http/XPath.groovy @@ -45,6 +45,11 @@ class XPath implements Path { private String query + /* + * Only needed to prevent serialization issues - see https://github.com/nextflow-io/nextflow/issues/5208 + */ + protected XPath(){} + XPath(XFileSystem fs, String path) { this(fs, path, EMPTY) } diff --git a/modules/nf-httpfs/src/test/nextflow/file/http/FixedInputStreamTest.groovy b/modules/nf-httpfs/src/test/nextflow/file/http/FixedInputStreamTest.groovy new file mode 100644 index 0000000000..96ae2134b1 --- /dev/null +++ b/modules/nf-httpfs/src/test/nextflow/file/http/FixedInputStreamTest.groovy @@ -0,0 +1,79 @@ +/* + * Copyright 2013-2024, Seqera Labs + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package nextflow.file.http + +import spock.lang.Specification + +/** + * + * @author Paolo Di Tommaso + */ +class FixedInputStreamTest extends Specification { + + def 'should read byte by byte' () { + given: + def bytes = "Hello world".bytes + def stream = new FixedInputStream(new ByteArrayInputStream(bytes), bytes.length) + + when: + def ch + def result = new StringBuilder() + while( (ch=stream.read())!=-1 ) + result.append(ch as char) + and: + stream.close() + then: + noExceptionThrown() + result.toString() == 'Hello world' + } + + def 'should read byte buffer' () { + given: + def bytes = "Hello world".bytes + def stream = new FixedInputStream(new ByteArrayInputStream(bytes), bytes.length) + + when: + def buffer = new byte[5] + def result = new StringBuilder() + def c + while( (c=stream.read(buffer))!=-1 ) { + for( int i=0; i<c; i++ ) + result.append(buffer[i] as char) + } + and: + stream.close() + then: + noExceptionThrown() + result.toString() == 'Hello world' + } - connection.getInputStream() >> new ByteArrayInputStream('Hello world'.bytes) + connection.getInputStream() >> new ByteArrayInputStream(DATA.bytes) + connection.getContentLengthLong() >> DATA.size() and: stream.text == 'Hello world' } diff --git a/nextflow b/nextflow index 5fa211ed10..c89ca7617d 100755 --- a/nextflow +++ b/nextflow @@ -15,7 +15,7 @@ # limitations under the License. [[ "$NXF_DEBUG" == 'x' ]] && set -x -NXF_VER=${NXF_VER:-'24.07.0-edge'} +NXF_VER=${NXF_VER:-'24.11.0-edge'} NXF_ORG=${NXF_ORG:-'nextflow-io'} NXF_HOME=${NXF_HOME:-$HOME/.nextflow} NXF_PROT=${NXF_PROT:-'https'} @@ -313,7 +313,7 @@ if [ ! 
-x "$JAVA_CMD" ] ; then JAVA_CMD="$JAVA_HOME/bin/java" fi elif [ -x /usr/libexec/java_home ]; then - JAVA_CMD="$(/usr/libexec/java_home -v 11+ 2>/dev/null)/bin/java" || JAVA_CMD=java + JAVA_CMD="$(/usr/libexec/java_home -v 17+ 2>/dev/null)/bin/java" || JAVA_CMD=java else JAVA_CMD="$(which java)" || JAVA_CMD=java fi @@ -344,14 +344,8 @@ else fi major=${BASH_REMATCH[1]} minor=${BASH_REMATCH[2]} - # legacy version - Java 7/8 only - if [ $major -eq 0 ] && [ $minor -lt 26 ]; then - version_check="^(1.7|1.8)" - version_message="Java 7 or 8" - else - version_check="^(1.8|9|10|11|12|13|14|15|16|17|18|19|20|21|22)" - version_message="Java 8 or later (up to 22)" - fi + version_check="^(17|18|19|20|21|22|23)" + version_message="Java 17 or later (up to 23)" if [[ ! $JAVA_VER =~ $version_check ]]; then echo_red "ERROR: Cannot find Java or it's a wrong version -- please make sure that $version_message is installed" if [[ "$NXF_JAVA_HOME" ]]; then @@ -361,9 +355,6 @@ else fi exit 1 fi - if [[ ! $JAVA_VER =~ ^(11|12|13|14|15|16|17|18|19|20|21|22) ]]; then - echo_yellow "NOTE: Nextflow is not tested with Java $JAVA_VER -- It's recommended the use of version 11 up to 22\n" - fi mkdir -p "$(dirname "$JAVA_KEY")" [[ -f $JAVA_VER ]] && echo $JAVA_VER > "$JAVA_KEY" fi @@ -389,7 +380,6 @@ if [[ $NXF_LEGACY_LAUNCHER ]]; then [[ "$CAPSULE_LOG" ]] && JAVA_OPTS+=(-Dcapsule.log=$CAPSULE_LOG) [[ "$CAPSULE_RESET" ]] && JAVA_OPTS+=(-Dcapsule.reset=true) fi -[[ "$JAVA_VER" =~ ^(21|22) ]] && [[ ! "$NXF_ENABLE_VIRTUAL_THREADS" ]] && NXF_ENABLE_VIRTUAL_THREADS=true [[ "$cmd" != "run" && "$cmd" != "node" ]] && JAVA_OPTS+=(-XX:+TieredCompilation -XX:TieredStopAtLevel=1) [[ "$NXF_OPTS" ]] && JAVA_OPTS+=($NXF_OPTS) [[ "$NXF_CLASSPATH" ]] && export NXF_CLASSPATH @@ -462,33 +452,29 @@ else [[ "$NXF_JVM_ARGS" ]] && launcher+=($NXF_JVM_ARGS) [[ "$remote_debug" ]] && launcher+=(-agentlib:jdwp=transport=dt_socket,server=y,suspend=y,address=$NXF_REMOTE_DEBUG_PORT) - if [[ "$JAVA_VER" =~ ^(9|10|11|12|13|14|15|16|17|18|19|20|21|22) ]]; then - launcher+=(--add-opens=java.base/java.lang=ALL-UNNAMED) - launcher+=(--add-opens=java.base/java.io=ALL-UNNAMED) - launcher+=(--add-opens=java.base/java.nio=ALL-UNNAMED) - launcher+=(--add-opens=java.base/java.net=ALL-UNNAMED) - launcher+=(--add-opens=java.base/java.util=ALL-UNNAMED) - launcher+=(--add-opens=java.base/java.util.concurrent.locks=ALL-UNNAMED) - launcher+=(--add-opens=java.base/java.util.concurrent.atomic=ALL-UNNAMED) - launcher+=(--add-opens=java.base/java.nio.file.spi=ALL-UNNAMED) - launcher+=(--add-opens=java.base/sun.nio.ch=ALL-UNNAMED) - launcher+=(--add-opens=java.base/sun.nio.fs=ALL-UNNAMED) - launcher+=(--add-opens=java.base/sun.net.www.protocol.http=ALL-UNNAMED) - launcher+=(--add-opens=java.base/sun.net.www.protocol.https=ALL-UNNAMED) - launcher+=(--add-opens=java.base/sun.net.www.protocol.ftp=ALL-UNNAMED) - launcher+=(--add-opens=java.base/sun.net.www.protocol.file=ALL-UNNAMED) - launcher+=(--add-opens=java.base/jdk.internal.misc=ALL-UNNAMED) - launcher+=(--add-opens=java.base/jdk.internal.vm=ALL-UNNAMED) - launcher+=(--add-opens=java.base/java.util.regex=ALL-UNNAMED) - if [[ "$NXF_ENABLE_VIRTUAL_THREADS" == 'true' ]]; then - if [[ "$JAVA_VER" =~ ^(19|20) ]]; then launcher+=(--enable-preview) - elif [[ ! 
"$JAVA_VER" =~ ^(21|22) ]]; then die "Virtual threads require Java 19 or later - current version $JAVA_VER" - fi + launcher+=(--add-opens=java.base/java.lang=ALL-UNNAMED) + launcher+=(--add-opens=java.base/java.io=ALL-UNNAMED) + launcher+=(--add-opens=java.base/java.nio=ALL-UNNAMED) + launcher+=(--add-opens=java.base/java.net=ALL-UNNAMED) + launcher+=(--add-opens=java.base/java.util=ALL-UNNAMED) + launcher+=(--add-opens=java.base/java.util.concurrent.locks=ALL-UNNAMED) + launcher+=(--add-opens=java.base/java.util.concurrent.atomic=ALL-UNNAMED) + launcher+=(--add-opens=java.base/java.nio.file.spi=ALL-UNNAMED) + launcher+=(--add-opens=java.base/sun.nio.ch=ALL-UNNAMED) + launcher+=(--add-opens=java.base/sun.nio.fs=ALL-UNNAMED) + launcher+=(--add-opens=java.base/sun.net.www.protocol.http=ALL-UNNAMED) + launcher+=(--add-opens=java.base/sun.net.www.protocol.https=ALL-UNNAMED) + launcher+=(--add-opens=java.base/sun.net.www.protocol.ftp=ALL-UNNAMED) + launcher+=(--add-opens=java.base/sun.net.www.protocol.file=ALL-UNNAMED) + launcher+=(--add-opens=java.base/jdk.internal.misc=ALL-UNNAMED) + launcher+=(--add-opens=java.base/jdk.internal.vm=ALL-UNNAMED) + launcher+=(--add-opens=java.base/java.util.regex=ALL-UNNAMED) + if [[ "$NXF_ENABLE_VIRTUAL_THREADS" == 'true' ]]; then + if [[ "$JAVA_VER" =~ ^(19|20) ]]; then launcher+=(--enable-preview) + elif [[ ! "$JAVA_VER" =~ ^(21|22|23) ]]; then die "Virtual threads require Java 19 or later - current version $JAVA_VER" fi - launcher+=("${cmd_tail[@]}") - else - launcher+=("${cmd_tail[@]}") fi + launcher+=("${cmd_tail[@]}") # create the launch file only when using the legacy launcher (capsule) if [[ $NXF_LEGACY_LAUNCHER ]]; then diff --git a/nextflow.md5 b/nextflow.md5 index 763bb14a5c..deb7f7a40a 100644 --- a/nextflow.md5 +++ b/nextflow.md5 @@ -1 +1 @@ -de590cf568c61fa941e4013d7f407902 +66995c4139ebcd17bf99f17d9dd030d1 diff --git a/nextflow.sha1 b/nextflow.sha1 index 36b7629f1e..ccadbdc25f 100644 --- a/nextflow.sha1 +++ b/nextflow.sha1 @@ -1 +1 @@ -3cc9902cd2713262f5fb29d652b749ef5676b3ed +cdbb67bdb21c0e63fb48aabb8b168c12a31fa5b3 diff --git a/nextflow.sha256 b/nextflow.sha256 index b5f27a0a76..f4e9ccb3bd 100644 --- a/nextflow.sha256 +++ b/nextflow.sha256 @@ -1 +1 @@ -19515441f324cd32e272352b056d800293b639e1bfcc455257b4192115029831 +69a86852c52dcfa7662407c46d16f05bd3dec16e0c505c2a2f71ccc56219d631 diff --git a/packing.gradle b/packing.gradle index f52825ed65..07e404c0d9 100644 --- a/packing.gradle +++ b/packing.gradle @@ -168,7 +168,7 @@ task installLauncher(type: Copy, dependsOn: ['pack']) { task installScratch(type: Copy, dependsOn: ['pack']) { from "$releaseDir/nextflow-$version-one.jar" - into "${rootProject.projectDir}/docker-scratch/.nextflow/framework/$version/" + into "${rootProject.projectDir}/test-e2e/.nextflow/framework/$version/" } /* diff --git a/plugins/nf-amazon/build.gradle b/plugins/nf-amazon/build.gradle index 08d2adccca..e8a8c75eeb 100644 --- a/plugins/nf-amazon/build.gradle +++ b/plugins/nf-amazon/build.gradle @@ -38,24 +38,27 @@ dependencies { compileOnly 'org.pf4j:pf4j:3.12.0' api ('javax.xml.bind:jaxb-api:2.4.0-b180830.0359') - api ('com.amazonaws:aws-java-sdk-s3:1.12.766') - api ('com.amazonaws:aws-java-sdk-ec2:1.12.766') - api ('com.amazonaws:aws-java-sdk-batch:1.12.766') - api ('com.amazonaws:aws-java-sdk-iam:1.12.766') - api ('com.amazonaws:aws-java-sdk-ecs:1.12.766') - api ('com.amazonaws:aws-java-sdk-logs:1.12.766') - api ('com.amazonaws:aws-java-sdk-codecommit:1.12.766') - api ('com.amazonaws:aws-java-sdk-sts:1.12.766') - api 
('com.amazonaws:aws-java-sdk-ses:1.12.766') + api ('com.amazonaws:aws-java-sdk-s3:1.12.777') + api ('com.amazonaws:aws-java-sdk-ec2:1.12.777') + api ('com.amazonaws:aws-java-sdk-batch:1.12.777') + api ('com.amazonaws:aws-java-sdk-iam:1.12.777') + api ('com.amazonaws:aws-java-sdk-ecs:1.12.777') + api ('com.amazonaws:aws-java-sdk-logs:1.12.777') + api ('com.amazonaws:aws-java-sdk-codecommit:1.12.777') + api ('com.amazonaws:aws-java-sdk-sts:1.12.777') + api ('com.amazonaws:aws-java-sdk-ses:1.12.777') api ('software.amazon.awssdk:sso:2.26.26') api ('software.amazon.awssdk:ssooidc:2.26.26') constraints { api 'com.fasterxml.jackson.core:jackson-databind:2.12.7.1' } + + // address security vulnerabilities + runtimeOnly 'io.netty:netty-common:4.1.115.Final' testImplementation(testFixtures(project(":nextflow"))) testImplementation project(':nextflow') - testImplementation "org.apache.groovy:groovy:4.0.22" - testImplementation "org.apache.groovy:groovy-nio:4.0.22" + testImplementation "org.apache.groovy:groovy:4.0.24" + testImplementation "org.apache.groovy:groovy-nio:4.0.24" } diff --git a/plugins/nf-amazon/changelog.txt b/plugins/nf-amazon/changelog.txt index 911ac7d8f1..d9fc1213e9 100644 --- a/plugins/nf-amazon/changelog.txt +++ b/plugins/nf-amazon/changelog.txt @@ -1,5 +1,21 @@ nf-amazon changelog =================== +2.10.0 - 3 Dec 2024 +- Detecting errors in data unstaging (#5345) [3c8e602d] +- Prevent NPE with null AWS Batch response [12fc1d60] +- Fix Fargate warning on memory check (#5475) [bdf0ad00] +- Bump groovy 4.0.24 [dd71ad31] +- Bump aws sdk 1.12.777 (#5458) [8bad0b4b] +- Bump netty-common to version 4.1.115.Final [d1bbd3d0] + +2.9.0 - 2 Oct 2024 +- Add Platform workflow prefix in AWS Batch job names (#5318) [e2e test] [42dd4ba8] +- Fix AWS spot attempts with zero value (#5331) [ci fast] [bac2da12] +- Bump groovy 4.0.23 (#5303) [ci fast] [fe3e3ac7] + +2.8.0 - 4 Sep 2024 +- Disable AWS spot retry (#5215) [f28fcb25] + 2.7.0 - 5 Aug 2024 - More robust parsing of shm-size containerOptions (#5177) [b56802a3] - Fix AWS Cloudwatch access when using custom log group name [30195838] diff --git a/plugins/nf-amazon/src/main/nextflow/cloud/aws/batch/AwsBatchTaskHandler.groovy b/plugins/nf-amazon/src/main/nextflow/cloud/aws/batch/AwsBatchTaskHandler.groovy index bf97bf976a..c96b6b33e5 100644 --- a/plugins/nf-amazon/src/main/nextflow/cloud/aws/batch/AwsBatchTaskHandler.groovy +++ b/plugins/nf-amazon/src/main/nextflow/cloud/aws/batch/AwsBatchTaskHandler.groovy @@ -60,6 +60,7 @@ import groovy.transform.CompileStatic import groovy.transform.Memoized import groovy.util.logging.Slf4j import nextflow.BuildInfo +import nextflow.SysEnv import nextflow.cloud.types.CloudMachineInfo import nextflow.container.ContainerNameValidator import nextflow.exception.ProcessException @@ -112,7 +113,7 @@ class AwsBatchTaskHandler extends TaskHandler implements BatchHandler&lt;String,JobDetail&gt; { - private Map&lt;String,String&gt; environment + private Map&lt;String,String&gt; environment = Map.of() final static private Map jobDefinitions = [:] @@ -134,7 +135,7 @@ class AwsBatchTaskHandler extends TaskHandler implements BatchHandler&lt;String,JobDetail&gt; { return result.size()>128 ? 
result.substring(0,128) : result } - protected CloudMachineInfo getMachineInfo() { if( machineInfo ) return machineInfo @@ -927,7 +932,7 @@ class AwsBatchTaskHandler extends TaskHandler implements BatchHandler&lt;String,JobDetail&gt; { diff --git a/plugins/nf-amazon/src/resources/META-INF/MANIFEST.MF b/plugins/nf-amazon/src/resources/META-INF/MANIFEST.MF --- a/plugins/nf-amazon/src/resources/META-INF/MANIFEST.MF +++ b/plugins/nf-amazon/src/resources/META-INF/MANIFEST.MF @@ -1,6 +1,6 @@ Manifest-Version: 1.0 Plugin-Class: nextflow.cloud.aws.AmazonPlugin Plugin-Id: nf-amazon -Plugin-Version: 2.7.0 +Plugin-Version: 2.10.0 Plugin-Provider: Seqera Labs Plugin-Requires: >=24.04.4 diff --git a/plugins/nf-amazon/src/test/nextflow/cloud/aws/batch/AwsBatchTaskHandlerTest.groovy b/plugins/nf-amazon/src/test/nextflow/cloud/aws/batch/AwsBatchTaskHandlerTest.groovy index 2363152e02..44455a7a11 100644 --- a/plugins/nf-amazon/src/test/nextflow/cloud/aws/batch/AwsBatchTaskHandlerTest.groovy +++ b/plugins/nf-amazon/src/test/nextflow/cloud/aws/batch/AwsBatchTaskHandlerTest.groovy @@ -66,6 +66,7 @@ class AwsBatchTaskHandlerTest extends Specification { expect: handler.normalizeJobName('foo') == 'foo' handler.normalizeJobName('foo (12)') == 'foo_12' + handler.normalizeJobName('foo-12') == 'foo-12' when: def looong = '012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789' @@ -98,7 +99,7 @@ class AwsBatchTaskHandlerTest extends Specification { 1 * handler.getJobDefinition(task) >> 'job-def:1' 1 * handler.getEnvironmentVars() >> [VAR_FOO, VAR_BAR] - req.getJobName() == 'batchtask' + req.getJobName() == 'batch-task' req.getJobQueue() == 'queue1' req.getJobDefinition() == 'job-def:1' req.getContainerOverrides().getResourceRequirements().find { it.type=='VCPU'}.getValue() == '4' @@ -119,7 +120,7 @@ class AwsBatchTaskHandlerTest extends Specification { 1 * handler.getJobDefinition(task) >> 'job-def:1' 1 * handler.getEnvironmentVars() >> [VAR_FOO, VAR_BAR] - req.getJobName() == 'batchtask' + req.getJobName() == 'batch-task' req.getJobQueue() == 'queue1' req.getJobDefinition() == 'job-def:1' req.getContainerOverrides().getResourceRequirements().find { it.type=='VCPU'}.getValue() == '4' @@ -149,7 +150,7 @@ class AwsBatchTaskHandlerTest extends Specification { 1 * handler.getJobDefinition(task) >> 'job-def:1' 1 * handler.getEnvironmentVars() >> [] - req.getJobName() == 'batchtask' + req.getJobName() == 'batch-task' req.getJobQueue() == 'queue1' req.getJobDefinition() == 'job-def:1' req.getContainerOverrides().getResourceRequirements().find { it.type=='VCPU'}.getValue() == '4' @@ -166,7 +167,7 @@ class AwsBatchTaskHandlerTest extends Specification { 1 * handler.getJobDefinition(task) >> 'job-def:1' 1 * handler.getEnvironmentVars() >> [] - req2.getJobName() == 'batchtask' + req2.getJobName() == 'batch-task' req2.getJobQueue() == 'queue1' req2.getJobDefinition() == 'job-def:1' req2.getContainerOverrides().getResourceRequirements().find { it.type=='VCPU'}.getValue() == '4' @@ -233,7 +234,7 @@ class AwsBatchTaskHandlerTest extends Specification { 1 * handler.getJobQueue(task) >> 'queue1' 1 * handler.getJobDefinition(task) >> 'job-def:1' and: - req.getJobName() == 'batchtask' + req.getJobName() == 'batch-task' req.getJobQueue() == 'queue1' req.getJobDefinition() == 'job-def:1' req.getTimeout() == null @@ -250,7 +251,7 @@ class AwsBatchTaskHandlerTest extends Specification { 1 * handler.getJobQueue(task) >> 'queue2' 1 * handler.getJobDefinition(task) >> 'job-def:2' and: - req.getJobName() == 'batchtask' + req.getJobName() == 'batch-task' req.getJobQueue() == 'queue2' req.getJobDefinition() == 'job-def:2' // minimal allowed timeout is 60 seconds @@ -269,7 +270,7 @@ class AwsBatchTaskHandlerTest extends Specification { 1 * handler.getJobQueue(task) >> 'queue3' 1 * handler.getJobDefinition(task) >> 'job-def:3' and: - req.getJobName() == 'batchtask' + req.getJobName() == 'batch-task'
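// dashes are now preserved by job name normalization (see the normalizeJobName checks above),
// hence the expected name is 'batch-task' rather than the previously collapsed 'batchtask'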
req.getJobQueue() == 'queue3' req.getJobDefinition() == 'job-def:3' // minimal allowed timeout is 60 seconds @@ -300,7 +301,7 @@ class AwsBatchTaskHandlerTest extends Specification { 1 * handler.getJobQueue(task) >> 'queue1' 1 * handler.getJobDefinition(task) >> 'job-def:1' and: - req.getJobName() == 'batchtask' + req.getJobName() == 'batch-task' req.getJobQueue() == 'queue1' req.getJobDefinition() == 'job-def:1' // no error `retry` error strategy is defined by NF, use `maxRetries` to set Batch attempts @@ -1014,7 +1015,7 @@ class AwsBatchTaskHandlerTest extends Specification { 1 * handler.getJobDefinition(task) >> 'job-def:1' 1 * handler.getEnvironmentVars() >> [VAR_FOO, VAR_BAR] - req.getJobName() == 'batchtask' + req.getJobName() == 'batch-task' req.getJobQueue() == 'queue1' req.getJobDefinition() == 'job-def:1' req.getContainerOverrides().getResourceRequirements().find { it.type=='VCPU'}.getValue() == '4' @@ -1103,4 +1104,25 @@ class AwsBatchTaskHandlerTest extends Specification { 'job1' | 'job1' 'job1:task2' | 'job1' } + + def 'should get job name' () { + given: + def handler = Spy(new AwsBatchTaskHandler(environment: ENV)) + def task = Mock(TaskRun) + + when: + def result = handler.getJobName(task) + then: + task.getName() >> NAME + and: + result == EXPECTED + + where: + ENV | NAME | EXPECTED + [:] | 'foo' | 'foo' + [TOWER_WORKFLOW_ID: '12345'] | 'foo' | 'tw-12345-foo' + [TOWER_WORKFLOW_ID: '12345'] | 'foo(12)' | 'tw-12345-foo12' + + } } diff --git a/plugins/nf-amazon/src/test/nextflow/executor/BashWrapperBuilderWithS3Test.groovy b/plugins/nf-amazon/src/test/nextflow/executor/BashWrapperBuilderWithS3Test.groovy index 3e213444cb..4f90e22aa2 100644 --- a/plugins/nf-amazon/src/test/nextflow/executor/BashWrapperBuilderWithS3Test.groovy +++ b/plugins/nf-amazon/src/test/nextflow/executor/BashWrapperBuilderWithS3Test.groovy @@ -58,7 +58,7 @@ class BashWrapperBuilderWithS3Test extends Specification { binding.unstage_outputs == '''\ IFS=$'\n' for name in $(eval "ls -1d test.bam test.bai bla\ nk.txt" | sort | uniq); do - nxf_s3_upload $name s3://some/buck\ et || true + nxf_s3_upload $name s3://some/buck\ et done unset IFS '''.stripIndent().rightTrim() diff --git a/plugins/nf-azure/build.gradle b/plugins/nf-azure/build.gradle index adee9807e5..2116355076 100644 --- a/plugins/nf-azure/build.gradle +++ b/plugins/nf-azure/build.gradle @@ -43,12 +43,15 @@ dependencies { exclude group: 'org.slf4j', module: 'slf4j-api' exclude group: 'com.google.guava', module: 'guava' } - api('com.azure:azure-identity:1.11.3') { + api('com.azure:azure-identity:1.12.2') { exclude group: 'org.slf4j', module: 'slf4j-api' } + // address security vulnerabilities + runtimeOnly 'io.netty:netty-common:4.1.115.Final' + testImplementation(testFixtures(project(":nextflow"))) testImplementation project(':nextflow') - testImplementation "org.apache.groovy:groovy:4.0.22" - testImplementation "org.apache.groovy:groovy-nio:4.0.22" + testImplementation "org.apache.groovy:groovy:4.0.24" + testImplementation "org.apache.groovy:groovy-nio:4.0.24" } diff --git a/plugins/nf-azure/changelog.txt b/plugins/nf-azure/changelog.txt index 3fba12494f..780fda1082 100644 --- a/plugins/nf-azure/changelog.txt +++ b/plugins/nf-azure/changelog.txt @@ -1,5 +1,22 @@ nf-azure changelog =================== +1.11.0 - 3 Dec 2024 +- Detecting errors in data unstaging (#5345) [3c8e602d] +- Bump netty-common to version 4.1.115.Final [d1bbd3d0] +- Bump groovy 4.0.24 [dd71ad31] +- Bump 
com.azure:azure-identity from 1.11.3 to 1.12.2 (#5449) [cb70f1df] +- Target Java 17 as minimal Java version (#5045) [0140f954] + +1.10.1 - 27 Oct 2024 +- Demote azure batch task status log level to trace (#5416) [ci skip] [d6c684bb] + +1.10.0 - 2 Oct 2024 +- Fix Azure Fusion env misses credentials when no key or SAS provided (#5328) [e11382c8] +- Bump groovy 4.0.23 (#5303) [ci fast] [fe3e3ac7] + +1.9.0 - 4 Sep 2024 +- Support Azure Managed Identities in Fusion configuration logic (#5278) [a0bf8b40] + 1.8.1 - 5 Aug 2024 - Bump pf4j to version 3.12.0 [96117b9a] diff --git a/plugins/nf-azure/src/main/nextflow/cloud/azure/batch/AzBatchTaskHandler.groovy b/plugins/nf-azure/src/main/nextflow/cloud/azure/batch/AzBatchTaskHandler.groovy index 7aa7a1430e..b359a87cb7 100644 --- a/plugins/nf-azure/src/main/nextflow/cloud/azure/batch/AzBatchTaskHandler.groovy +++ b/plugins/nf-azure/src/main/nextflow/cloud/azure/batch/AzBatchTaskHandler.groovy @@ -98,7 +98,7 @@ class AzBatchTaskHandler extends TaskHandler implements FusionAwareTask { // note, include complete status otherwise it hangs if the task // completes before reaching this check final running = state==BatchTaskState.RUNNING || state==BatchTaskState.COMPLETED - log.debug "[AZURE BATCH] Task status $task.name taskId=$taskKey; running=$running" + log.trace "[AZURE BATCH] Task status $task.name taskId=$taskKey; running=$running" if( running ) this.status = TaskStatus.RUNNING return running diff --git a/plugins/nf-azure/src/main/nextflow/cloud/azure/config/AzActiveDirectoryOpts.groovy b/plugins/nf-azure/src/main/nextflow/cloud/azure/config/AzActiveDirectoryOpts.groovy index 462c6c8203..bc05264806 100644 --- a/plugins/nf-azure/src/main/nextflow/cloud/azure/config/AzActiveDirectoryOpts.groovy +++ b/plugins/nf-azure/src/main/nextflow/cloud/azure/config/AzActiveDirectoryOpts.groovy @@ -16,6 +16,7 @@ package nextflow.cloud.azure.config import groovy.transform.CompileStatic +import nextflow.SysEnv import nextflow.cloud.azure.nio.AzFileSystemProvider /** @@ -26,18 +27,15 @@ import nextflow.cloud.azure.nio.AzFileSystemProvider @CompileStatic class AzActiveDirectoryOpts { - private Map sysEnv - String servicePrincipalId String servicePrincipalSecret String tenantId AzActiveDirectoryOpts(Map config, Map env = null) { assert config != null - this.sysEnv = env == null ? 
new HashMap(System.getenv()) : env - this.servicePrincipalId = config.servicePrincipalId ?: sysEnv.get('AZURE_CLIENT_ID') - this.servicePrincipalSecret = config.servicePrincipalSecret ?: sysEnv.get('AZURE_CLIENT_SECRET') - this.tenantId = config.tenantId ?: sysEnv.get('AZURE_TENANT_ID') + this.servicePrincipalId = config.servicePrincipalId ?: SysEnv.get('AZURE_CLIENT_ID') + this.servicePrincipalSecret = config.servicePrincipalSecret ?: SysEnv.get('AZURE_CLIENT_SECRET') + this.tenantId = config.tenantId ?: SysEnv.get('AZURE_TENANT_ID') } Map getEnv() { diff --git a/plugins/nf-azure/src/main/nextflow/cloud/azure/config/AzRegistryOpts.groovy b/plugins/nf-azure/src/main/nextflow/cloud/azure/config/AzRegistryOpts.groovy index 16bb25548b..386571cf2d 100644 --- a/plugins/nf-azure/src/main/nextflow/cloud/azure/config/AzRegistryOpts.groovy +++ b/plugins/nf-azure/src/main/nextflow/cloud/azure/config/AzRegistryOpts.groovy @@ -16,8 +16,8 @@ package nextflow.cloud.azure.config - import groovy.transform.CompileStatic +import nextflow.SysEnv /** * Model Azure Batch registry config settings from nextflow config file @@ -27,8 +27,6 @@ import groovy.transform.CompileStatic @CompileStatic class AzRegistryOpts { - private Map sysEnv - String server String userName String password @@ -37,12 +35,11 @@ class AzRegistryOpts { this(Collections.emptyMap()) } - AzRegistryOpts(Map config, Map env=null) { + AzRegistryOpts(Map config, Map env=SysEnv.get()) { assert config!=null - this.sysEnv = env==null ? new HashMap(System.getenv()) : env this.server = config.server ?: 'docker.io' - this.userName = config.userName ?: sysEnv.get('AZURE_REGISTRY_USER_NAME') - this.password = config.password ?: sysEnv.get('AZURE_REGISTRY_PASSWORD') + this.userName = config.userName ?: env.get('AZURE_REGISTRY_USER_NAME') + this.password = config.password ?: env.get('AZURE_REGISTRY_PASSWORD') } boolean isConfigured() { diff --git a/plugins/nf-azure/src/main/nextflow/cloud/azure/config/AzStorageOpts.groovy b/plugins/nf-azure/src/main/nextflow/cloud/azure/config/AzStorageOpts.groovy index 1bfecfb780..d3e6bb3259 100644 --- a/plugins/nf-azure/src/main/nextflow/cloud/azure/config/AzStorageOpts.groovy +++ b/plugins/nf-azure/src/main/nextflow/cloud/azure/config/AzStorageOpts.groovy @@ -17,6 +17,7 @@ package nextflow.cloud.azure.config import groovy.transform.CompileStatic +import nextflow.SysEnv import nextflow.cloud.azure.batch.AzHelper import nextflow.cloud.azure.nio.AzFileSystemProvider import nextflow.util.Duration @@ -28,7 +29,6 @@ import nextflow.util.Duration @CompileStatic class AzStorageOpts { - private Map sysEnv String accountKey String accountName String sasToken @@ -36,12 +36,11 @@ class AzStorageOpts { Map fileShares - AzStorageOpts(Map config, Map env=null) { + AzStorageOpts(Map config, Map env=SysEnv.get()) { assert config!=null - this.sysEnv = env==null ? new HashMap(System.getenv()) : env - this.accountKey = config.accountKey ?: sysEnv.get('AZURE_STORAGE_ACCOUNT_KEY') - this.accountName = config.accountName ?: sysEnv.get('AZURE_STORAGE_ACCOUNT_NAME') - this.sasToken = config.sasToken ?: sysEnv.get('AZURE_STORAGE_SAS_TOKEN') + this.accountKey = config.accountKey ?: env.get('AZURE_STORAGE_ACCOUNT_KEY') + this.accountName = config.accountName ?: env.get('AZURE_STORAGE_ACCOUNT_NAME') + this.sasToken = config.sasToken ?: env.get('AZURE_STORAGE_SAS_TOKEN') this.tokenDuration = (config.tokenDuration as Duration) ?: Duration.of('48h') this.fileShares = parseFileShares(config.fileShares instanceof Map ? 
config.fileShares as Map : Collections.emptyMap()) @@ -63,10 +62,4 @@ class AzStorageOpts { } return result } - - synchronized String getOrCreateSasToken() { - if( !sasToken ) - sasToken = AzHelper.generateAccountSas(accountName, accountKey, tokenDuration) - return sasToken - } } diff --git a/plugins/nf-azure/src/main/nextflow/cloud/azure/fusion/AzFusionEnv.groovy b/plugins/nf-azure/src/main/nextflow/cloud/azure/fusion/AzFusionEnv.groovy index 188e411c29..a4b9e8e344 100644 --- a/plugins/nf-azure/src/main/nextflow/cloud/azure/fusion/AzFusionEnv.groovy +++ b/plugins/nf-azure/src/main/nextflow/cloud/azure/fusion/AzFusionEnv.groovy @@ -17,34 +17,72 @@ package nextflow.cloud.azure.fusion +import groovy.util.logging.Slf4j +import nextflow.Global +import nextflow.cloud.azure.batch.AzHelper import groovy.transform.CompileStatic import nextflow.cloud.azure.config.AzConfig import nextflow.fusion.FusionConfig import nextflow.fusion.FusionEnv import org.pf4j.Extension + /** * Implement environment provider for Azure specific variables - * + * * @author Paolo Di Tommaso */ @Extension @CompileStatic +@Slf4j class AzFusionEnv implements FusionEnv { @Override Map getEnvironment(String scheme, FusionConfig config) { - if( scheme!='az' ) - return Collections.emptyMap() + if (scheme != 'az') { + return Collections.emptyMap() + } - final cfg = AzConfig.config.storage() + final cfg = AzConfig.config final result = new LinkedHashMap(10) - if( !cfg.accountName ) - throw new IllegalArgumentException("Missing Azure storage account name") - if( !cfg.sasToken && !cfg.accountKey ) - throw new IllegalArgumentException("Missing Azure storage SAS token") - - result.AZURE_STORAGE_ACCOUNT = cfg.accountName - result.AZURE_STORAGE_SAS_TOKEN = cfg.getOrCreateSasToken() + + if (!cfg.storage().accountName) { + throw new IllegalArgumentException("Missing Azure Storage account name") + } + + if (cfg.storage().accountKey && cfg.storage().sasToken) { + throw new IllegalArgumentException("Azure Storage Access key and SAS token detected. Only one is allowed") + } + + result.AZURE_STORAGE_ACCOUNT = cfg.storage().accountName + // In theory, generating an impromptu SAS token for authentication methods other than + // `azure.storage.sasToken` should not be necessary, because those methods should already allow sufficient + // access for normal operation. Nevertheless, #5287 heavily implies that failing to do so causes the Azure + // Storage plugin or Fusion to fail. In any case, it may be possible to remove this in the future. + result.AZURE_STORAGE_SAS_TOKEN = getOrCreateSasToken() + return result } + + /** + * Return the SAS token if it is defined in the configuration, otherwise generate one based on the requested + * authentication method. + */ + synchronized String getOrCreateSasToken() { + + final cfg = AzConfig.config + + // If a SAS token is already defined in the configuration, just return it + if (cfg.storage().sasToken) { + return cfg.storage().sasToken + } + + // For Active Directory and Managed Identity, we cannot generate an *account* SAS token, but we can generate + // a *container* SAS token for the work directory.
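+ // (the resulting token is scoped to the blob container that holds the pipeline work directory)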
+ if (cfg.activeDirectory().isConfigured() || cfg.managedIdentity().isConfigured()) { + return AzHelper.generateContainerSasWithActiveDirectory(Global.session.workDir, cfg.storage().tokenDuration) + } + + // Shared Key authentication can use an account SAS token + return AzHelper.generateAccountSasWithAccountKey(Global.session.workDir, cfg.storage().tokenDuration) + } } diff --git a/plugins/nf-azure/src/main/nextflow/cloud/azure/nio/AzFileSystem.groovy b/plugins/nf-azure/src/main/nextflow/cloud/azure/nio/AzFileSystem.groovy index e890dbb559..41a1b62d4a 100644 --- a/plugins/nf-azure/src/main/nextflow/cloud/azure/nio/AzFileSystem.groovy +++ b/plugins/nf-azure/src/main/nextflow/cloud/azure/nio/AzFileSystem.groovy @@ -527,8 +527,15 @@ class AzFileSystem extends FileSystem { * @param cond A predicate that determines when a retry should be triggered * @return The {@link dev.failsafe.RetryPolicy} instance */ - @Memoized protected RetryPolicy retryPolicy(Predicate cond) { + // this is needed because apparently bytebuddy used by testing framework is not able + // to handle properly this method signature using both generics and `@Memoized` annotation. + // therefore the `@Memoized` has been moved to the inner method invocation + return (RetryPolicy) retryPolicy0(cond) + } + + @Memoized + protected RetryPolicy retryPolicy0(Predicate cond) { final cfg = AzConfig.getConfig().retryConfig() final listener = new EventListener() { @Override @@ -536,7 +543,7 @@ class AzFileSystem extends FileSystem { log.debug("Azure I/O exception - attempt: ${event.attemptCount}; cause: ${event.lastFailure?.message}") } } - return RetryPolicy.builder() + return RetryPolicy.builder() .handleIf(cond) .withBackoff(cfg.delay.toMillis(), cfg.maxDelay.toMillis(), ChronoUnit.MILLIS) .withMaxAttempts(cfg.maxAttempts) diff --git a/plugins/nf-azure/src/resources/META-INF/MANIFEST.MF b/plugins/nf-azure/src/resources/META-INF/MANIFEST.MF index 939940fc14..1ebcbf274f 100644 --- a/plugins/nf-azure/src/resources/META-INF/MANIFEST.MF +++ b/plugins/nf-azure/src/resources/META-INF/MANIFEST.MF @@ -1,6 +1,6 @@ Manifest-Version: 1.0 Plugin-Class: nextflow.cloud.azure.AzurePlugin Plugin-Id: nf-azure -Plugin-Version: 1.8.1 +Plugin-Version: 1.11.0 Plugin-Provider: Seqera Labs Plugin-Requires: >=24.04.4 diff --git a/plugins/nf-azure/src/test/nextflow/cloud/azure/batch/AzBatchServiceTest.groovy b/plugins/nf-azure/src/test/nextflow/cloud/azure/batch/AzBatchServiceTest.groovy index 660291859a..eceeb644b2 100644 --- a/plugins/nf-azure/src/test/nextflow/cloud/azure/batch/AzBatchServiceTest.groovy +++ b/plugins/nf-azure/src/test/nextflow/cloud/azure/batch/AzBatchServiceTest.groovy @@ -10,6 +10,7 @@ import com.azure.identity.ManagedIdentityCredential import com.google.common.hash.HashCode import nextflow.Global import nextflow.Session +import nextflow.SysEnv import nextflow.cloud.azure.config.AzConfig import nextflow.cloud.azure.config.AzManagedIdentityOpts import nextflow.cloud.azure.config.AzPoolOpts @@ -31,6 +32,14 @@ class AzBatchServiceTest extends Specification { static long _1GB = 1024 * 1024 * 1024 + def setup() { + SysEnv.push([:]) // <-- clear the system host env + } + + def cleanup() { + SysEnv.pop() // <-- restore the system host env + } + def 'should make job id'() { given: def task = Mock(TaskRun) { diff --git a/plugins/nf-azure/src/test/nextflow/cloud/azure/fusion/AzFusionEnvTest.groovy b/plugins/nf-azure/src/test/nextflow/cloud/azure/fusion/AzFusionEnvTest.groovy new file mode 100644 index 0000000000..79774e9a9a --- /dev/null +++ 
b/plugins/nf-azure/src/test/nextflow/cloud/azure/fusion/AzFusionEnvTest.groovy @@ -0,0 +1,199 @@ +/* + * Copyright 2013-2024, Seqera Labs + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package nextflow.cloud.azure.fusion + +import nextflow.Global +import nextflow.Session +import nextflow.SysEnv +import nextflow.fusion.FusionConfig + +import spock.lang.Specification + +/** + * + * @author Alberto Miranda + */ +class AzFusionEnvTest extends Specification { + + def setup() { + SysEnv.push([:]) // <-- clear the system host env + } + + def cleanup() { + SysEnv.pop() // <-- restore the system host env + } + + def 'should return empty env'() { + given: + def provider = new AzFusionEnv() + when: + def env = provider.getEnvironment('aws', Mock(FusionConfig)) + then: + env == Collections.emptyMap() + } + + def 'should return env environment with SAS token config when accountKey is provided'() { + given: + def NAME = 'myaccount' + def KEY = 'myaccountkey' + Global.session = Mock(Session) { + getConfig() >> [azure: [storage: [accountName: NAME, accountKey: KEY]]] + } + + when: + def config = Mock(FusionConfig) + def fusionEnv = Spy(AzFusionEnv) + 1 * fusionEnv.getOrCreateSasToken() >> 'generatedSasToken' + def env = fusionEnv.getEnvironment('az', config) + + then: + env.AZURE_STORAGE_ACCOUNT == NAME + env.AZURE_STORAGE_SAS_TOKEN + env.size() == 2 + } + + def 'should return env environment with SAS token config when a Service Principal is provided'() { + given: + def NAME = 'myaccount' + def CLIENT_ID = 'myclientid' + def CLIENT_SECRET = 'myclientsecret' + def TENANT_ID = 'mytenantid' + Global.session = Mock(Session) { + getConfig() >> [ + azure: [ + activeDirectory: [ + servicePrincipalId: CLIENT_ID, + servicePrincipalSecret: CLIENT_SECRET, + tenantId: TENANT_ID + ], + storage: [ + accountName: NAME + ] + ] + ] + } + + when: + def config = Mock(FusionConfig) + def fusionEnv = Spy(AzFusionEnv) + 1 * fusionEnv.getOrCreateSasToken() >> 'generatedSasToken' + def env = fusionEnv.getEnvironment('az', config) + + then: + env.AZURE_STORAGE_ACCOUNT == NAME + env.AZURE_STORAGE_SAS_TOKEN == 'generatedSasToken' + env.size() == 2 + } + + def 'should return env environment with SAS token config when a user-assigned Managed Identity is provided'() { + given: + def NAME = 'myaccount' + def CLIENT_ID = 'myclientid' + Global.session = Mock(Session) { + getConfig() >> [ + azure: [ + managedIdentity: [ + clientId: CLIENT_ID, + ], + storage: [ + accountName: NAME + ] + ] + ] + } + + when: + def config = Mock(FusionConfig) + def fusionEnv = Spy(AzFusionEnv) + 1 * fusionEnv.getOrCreateSasToken() >> 'generatedSasToken' + def env = fusionEnv.getEnvironment('az', config) + + then: + env.AZURE_STORAGE_ACCOUNT == NAME + env.AZURE_STORAGE_SAS_TOKEN == 'generatedSasToken' + env.size() == 2 + } + + def 'should return env environment with SAS token config when a system-assigned Managed Identity is provided'() { + given: + def NAME = 'myaccount' + Global.session = Mock(Session) { + getConfig() >> [ + azure: [ + 
managedIdentity: [ + system: true + ], + storage: [ + accountName: NAME + ] + ] + ] + } + + when: + def config = Mock(FusionConfig) + def fusionEnv = Spy(AzFusionEnv) + 1 * fusionEnv.getOrCreateSasToken() >> 'generatedSasToken' + def env = fusionEnv.getEnvironment('az', config) + + then: + env.AZURE_STORAGE_ACCOUNT == NAME + env.AZURE_STORAGE_SAS_TOKEN == 'generatedSasToken' + env.size() == 2 + } + + def 'should return env environment with SAS token config when a sasToken is provided'() { + given: + Global.session = Mock(Session) { + getConfig() >> [azure: [storage: [accountName: 'x1', sasToken: 'y1']]] + } + and: + + when: + def config = Mock(FusionConfig) + def env = new AzFusionEnv().getEnvironment('az', config) + then: + env == [AZURE_STORAGE_ACCOUNT: 'x1', AZURE_STORAGE_SAS_TOKEN: 'y1'] + + } + + def 'should throw an exception when missing Azure Storage account name'() { + given: + Global.session = Mock(Session) { + getConfig() >> [azure: [storage: [sasToken: 'y1']]] + } + when: + def config = Mock(FusionConfig) + def env = new AzFusionEnv().getEnvironment('az', Mock(FusionConfig)) + then: + thrown(IllegalArgumentException) + } + + def 'should throw an exception when both account key and SAS token are present'() { + given: + Global.session = Mock(Session) { + getConfig() >> [azure: [storage: [accountName: 'x1', accountKey: 'y1', sasToken: 'z1']]] + } + when: + def config = Mock(FusionConfig) + def env = new AzFusionEnv().getEnvironment('az', Mock(FusionConfig)) + then: + thrown(IllegalArgumentException) + } + +} diff --git a/plugins/nf-azure/src/test/nextflow/executor/BashWrapperBuilderWithAzTest.groovy b/plugins/nf-azure/src/test/nextflow/executor/BashWrapperBuilderWithAzTest.groovy index eb72d25163..1969345af5 100644 --- a/plugins/nf-azure/src/test/nextflow/executor/BashWrapperBuilderWithAzTest.groovy +++ b/plugins/nf-azure/src/test/nextflow/executor/BashWrapperBuilderWithAzTest.groovy @@ -47,7 +47,7 @@ class BashWrapperBuilderWithAzTest extends Specification { binding.unstage_outputs == """\ IFS=\$'\\n' for name in \$(eval "ls -1d test.bam test.bai" | sort | uniq); do - nxf_az_upload \$name '${AzHelper.toHttpUrl(target)}' || true + nxf_az_upload \$name '${AzHelper.toHttpUrl(target)}' done unset IFS """.stripIndent().rightTrim() diff --git a/plugins/nf-cloudcache/build.gradle b/plugins/nf-cloudcache/build.gradle index 8bd3fcc870..7cfa07de4b 100644 --- a/plugins/nf-cloudcache/build.gradle +++ b/plugins/nf-cloudcache/build.gradle @@ -35,7 +35,7 @@ dependencies { compileOnly 'org.pf4j:pf4j:3.12.0' testImplementation(testFixtures(project(":nextflow"))) - testImplementation "org.apache.groovy:groovy:4.0.22" - testImplementation "org.apache.groovy:groovy-nio:4.0.22" + testImplementation "org.apache.groovy:groovy:4.0.24" + testImplementation "org.apache.groovy:groovy-nio:4.0.24" } diff --git a/plugins/nf-codecommit/build.gradle b/plugins/nf-codecommit/build.gradle index 8ae6e2e2cc..b0f16607e6 100644 --- a/plugins/nf-codecommit/build.gradle +++ b/plugins/nf-codecommit/build.gradle @@ -38,10 +38,10 @@ dependencies { compileOnly 'org.pf4j:pf4j:3.12.0' api ('javax.xml.bind:jaxb-api:2.4.0-b180830.0359') - api ('com.amazonaws:aws-java-sdk-codecommit:1.12.766') + api ('com.amazonaws:aws-java-sdk-codecommit:1.12.777') testImplementation(testFixtures(project(":nextflow"))) testImplementation project(':nextflow') - testImplementation "org.apache.groovy:groovy:4.0.22" - testImplementation "org.apache.groovy:groovy-nio:4.0.22" + testImplementation "org.apache.groovy:groovy:4.0.24" + testImplementation 
"org.apache.groovy:groovy-nio:4.0.24" } diff --git a/plugins/nf-console/build.gradle b/plugins/nf-console/build.gradle index e84430a4c1..64184c2b80 100644 --- a/plugins/nf-console/build.gradle +++ b/plugins/nf-console/build.gradle @@ -38,13 +38,13 @@ dependencies { compileOnly 'org.pf4j:pf4j:3.12.0' api("org.apache.groovy:groovy-console:4.0.21-patch.2") { transitive=false } - api("org.apache.groovy:groovy-swing:4.0.22") { transitive=false } + api("org.apache.groovy:groovy-swing:4.0.24") { transitive=false } // this is required by 'groovy-console' api("com.github.javaparser:javaparser-core:3.25.8") testImplementation(testFixtures(project(":nextflow"))) testImplementation project(':nextflow') - testImplementation "org.apache.groovy:groovy:4.0.22" - testImplementation "org.apache.groovy:groovy-nio:4.0.22" + testImplementation "org.apache.groovy:groovy:4.0.24" + testImplementation "org.apache.groovy:groovy-nio:4.0.24" } diff --git a/plugins/nf-google/build.gradle b/plugins/nf-google/build.gradle index 9b768f8c57..954d4963ea 100644 --- a/plugins/nf-google/build.gradle +++ b/plugins/nf-google/build.gradle @@ -39,15 +39,15 @@ dependencies { api 'com.google.apis:google-api-services-lifesciences:v2beta-rev20210527-1.31.5' api 'com.google.auth:google-auth-library-oauth2-http:0.18.0' - api 'com.google.cloud:google-cloud-batch:0.29.0' - api 'com.google.cloud:google-cloud-logging:3.8.0' + api 'com.google.cloud:google-cloud-batch:0.53.0' + api 'com.google.cloud:google-cloud-logging:3.20.6' api 'com.google.cloud:google-cloud-nio:0.124.8' api 'com.google.cloud:google-cloud-storage:2.9.3' api 'com.google.code.gson:gson:2.10.1' testImplementation(testFixtures(project(":nextflow"))) - testImplementation "org.apache.groovy:groovy:4.0.22" - testImplementation "org.apache.groovy:groovy-nio:4.0.22" + testImplementation "org.apache.groovy:groovy:4.0.24" + testImplementation "org.apache.groovy:groovy-nio:4.0.24" } test { diff --git a/plugins/nf-google/changelog.txt b/plugins/nf-google/changelog.txt index 5b8a54a4dc..f7d23811e6 100644 --- a/plugins/nf-google/changelog.txt +++ b/plugins/nf-google/changelog.txt @@ -1,5 +1,23 @@ nf-google changelog =================== +1.16.0 - 3 Dec 2024 +- Detecting errors in data unstaging (#5345) [3c8e602d] +- Bump bouncycastle to jdk18on:1.78.1 (#5467) [cd8c385f] +- Bump groovy 4.0.24 [dd71ad31] +- Bump protobuf-java:3.25.5 to nf-google [488b7906] +- Add NotFoundException to retry condition for Google Batch [aa4d19cc] + +1.15.2 - 14 Oct 2024 +- Add Google LS deprecation notice (#5400) [0ee1d9bc] + +1.15.1 - 13 Oct 2024 +- Add retry policy to google batch describe task (#5356) [64bb5a92] + +1.15.0 - 4 Sep 2024 +- Add Google Batch warning when for conflicting disk image config (#5279) [ci fast] [96cb57cb] +- Add support for Google Batch used specified boot images (#5268) [0aaa6482] +- Disable Google Batch automatic spot retries (#5223) [aad21533] + 1.14.0 - 5 Aug 2024 - Bump pf4j to version 3.12.0 [96117b9a] - Make Google Batch auto retry codes configurable (#5148) [e562ce06] diff --git a/plugins/nf-google/src/main/nextflow/cloud/google/batch/GoogleBatchTaskHandler.groovy b/plugins/nf-google/src/main/nextflow/cloud/google/batch/GoogleBatchTaskHandler.groovy index f897438b1c..b965c5d8c9 100644 --- a/plugins/nf-google/src/main/nextflow/cloud/google/batch/GoogleBatchTaskHandler.groovy +++ b/plugins/nf-google/src/main/nextflow/cloud/google/batch/GoogleBatchTaskHandler.groovy @@ -61,7 +61,9 @@ import nextflow.util.TestOnly @CompileStatic class GoogleBatchTaskHandler extends TaskHandler 
implements FusionAwareTask { - private static Pattern EXIT_CODE_REGEX = ~/exit code 500(\d\d)/ + private static final Pattern EXIT_CODE_REGEX = ~/exit code 500(\d\d)/ + + private static final Pattern BATCH_ERROR_REGEX = ~/Batch Error: code/ private GoogleBatchExecutor executor @@ -99,6 +101,11 @@ class GoogleBatchTaskHandler extends TaskHandler implements FusionAwareTask { private volatile long timestamp + /** + * A flag to indicate that the job has failed without launching any tasks + */ + private volatile boolean noTaskJobfailure + GoogleBatchTaskHandler(TaskRun task, GoogleBatchExecutor executor) { super(task) this.client = executor.getClient() @@ -302,6 +309,9 @@ class GoogleBatchTaskHandler extends TaskHandler implements FusionAwareTask { if( task.config.getDisk() ) log.warn1 'Process directive `disk` ignored because an instance template was specified' + if( executor.config.getBootDiskImage() ) + log.warn1 'Config option `google.batch.bootDiskImage` ignored because an instance template was specified' + if( executor.config.cpuPlatform ) log.warn1 'Config option `google.batch.cpuPlatform` ignored because an instance template was specified' @@ -331,6 +341,9 @@ class GoogleBatchTaskHandler extends TaskHandler implements FusionAwareTask { instancePolicyOrTemplate.setInstallGpuDrivers(true) } + if( executor.config.getBootDiskImage() ) + instancePolicy.setBootDisk( AllocationPolicy.Disk.newBuilder().setImage( executor.config.getBootDiskImage() ) ) + if( fusionEnabled() && !disk ) { disk = new DiskResource(request: '375 GB', type: 'local-ssd') log.debug "[GOOGLE BATCH] Process `${task.lazyName()}` - adding local volume as fusion scratch: $disk" @@ -438,9 +451,10 @@ class GoogleBatchTaskHandler extends TaskHandler implements FusionAwareTask { */ protected String getTaskState() { final tasks = client.listTasks(jobId) - if( !tasks.iterator().hasNext() ) - return 'PENDING' - + if( !tasks.iterator().hasNext() ) { + // if there are no tasks checks the job status + return checkJobStatus() + } final now = System.currentTimeMillis() final delta = now - timestamp; if( !taskState || delta >= 1_000) { @@ -461,6 +475,21 @@ class GoogleBatchTaskHandler extends TaskHandler implements FusionAwareTask { return taskState } + protected String checkJobStatus() { + final jobStatus = client.getJobStatus(jobId) + final newState = jobStatus?.state as String + if (newState) { + taskState = newState + timestamp = System.currentTimeMillis() + if (newState == "FAILED") { + noTaskJobfailure = true + } + return taskState + } else { + return "PENDING" + } + } + static private final List RUNNING_OR_COMPLETED = ['RUNNING', 'SUCCEEDED', 'FAILED'] static private final List COMPLETED = ['SUCCEEDED', 'FAILED'] @@ -503,13 +532,14 @@ class GoogleBatchTaskHandler extends TaskHandler implements FusionAwareTask { protected Throwable getJobError() { try { - final status = client.getTaskStatus(jobId, taskId) - final eventsCount = status.getStatusEventsCount() - final lastEvent = eventsCount > 0 ? status.getStatusEvents(eventsCount - 1) : null + final events = noTaskJobfailure + ? 
client.getJobStatus(jobId).getStatusEventsList() + : client.getTaskStatus(jobId, taskId).getStatusEventsList() + final lastEvent = events?.get(events.size() - 1) log.debug "[GOOGLE BATCH] Process `${task.lazyName()}` - last event: ${lastEvent}; exit code: ${lastEvent?.taskExecution?.exitCode}" final error = lastEvent?.description - if( error && EXIT_CODE_REGEX.matcher(error).find() ) { + if( error && (EXIT_CODE_REGEX.matcher(error).find() || BATCH_ERROR_REGEX.matcher(error).find()) ) { return new ProcessException(error) } } diff --git a/plugins/nf-google/src/main/nextflow/cloud/google/batch/client/BatchClient.groovy b/plugins/nf-google/src/main/nextflow/cloud/google/batch/client/BatchClient.groovy index 7c0fe3cfb7..ffc3da905a 100644 --- a/plugins/nf-google/src/main/nextflow/cloud/google/batch/client/BatchClient.groovy +++ b/plugins/nf-google/src/main/nextflow/cloud/google/batch/client/BatchClient.groovy @@ -22,12 +22,14 @@ import java.util.function.Predicate import com.google.api.gax.core.CredentialsProvider import com.google.api.gax.rpc.FixedHeaderProvider +import com.google.api.gax.rpc.NotFoundException import com.google.api.gax.rpc.UnavailableException import com.google.auth.Credentials import com.google.cloud.batch.v1.BatchServiceClient import com.google.cloud.batch.v1.BatchServiceSettings import com.google.cloud.batch.v1.Job import com.google.cloud.batch.v1.JobName +import com.google.cloud.batch.v1.JobStatus import com.google.cloud.batch.v1.LocationName import com.google.cloud.batch.v1.Task import com.google.cloud.batch.v1.TaskGroupName @@ -107,7 +109,7 @@ class BatchClient { Task describeTask(String jobId, String taskId) { final name = TaskName.of(projectId, location, jobId, 'group0', taskId) - return batchServiceClient.getTask(name) + return apply(()-> batchServiceClient.getTask(name)) } void deleteJob(String jobId) { @@ -119,6 +121,10 @@ class BatchClient { return describeTask(jobId, taskId).getStatus() } + JobStatus getJobStatus(String jobId) { + return describeJob(jobId).getStatus() + } + String getTaskState(String jobId, String taskId) { final status = getTaskStatus(jobId, taskId) return status ? 
status.getState().toString() : null @@ -175,6 +181,8 @@ class BatchClient { return true if( t instanceof TimeoutException || t.cause instanceof TimeoutException ) return true + if( t instanceof NotFoundException || t.cause instanceof NotFoundException ) + return true return false } } diff --git a/plugins/nf-google/src/main/nextflow/cloud/google/batch/client/BatchConfig.groovy b/plugins/nf-google/src/main/nextflow/cloud/google/batch/client/BatchConfig.groovy index 6af5c0abb0..de9b6e2d38 100644 --- a/plugins/nf-google/src/main/nextflow/cloud/google/batch/client/BatchConfig.groovy +++ b/plugins/nf-google/src/main/nextflow/cloud/google/batch/client/BatchConfig.groovy @@ -16,13 +16,11 @@ package nextflow.cloud.google.batch.client - import com.google.auth.oauth2.GoogleCredentials import groovy.transform.CompileStatic import groovy.util.logging.Slf4j import nextflow.Session import nextflow.cloud.google.GoogleOpts -import nextflow.exception.ProcessUnrecoverableException import nextflow.util.MemoryUnit /** * Model Google Batch config settings @@ -33,11 +31,14 @@ import nextflow.util.MemoryUnit @CompileStatic class BatchConfig { - static private List DEFAULT_RETRY_LIST = List.of(50001) + static final private int DEFAULT_MAX_SPOT_ATTEMPTS = 0 + + static final private List DEFAULT_RETRY_LIST = List.of(50001) private GoogleOpts googleOpts private GoogleCredentials credentials private List allowedLocations + private String bootDiskImage private MemoryUnit bootDiskSize private String cpuPlatform private int maxSpotAttempts @@ -54,6 +55,7 @@ class BatchConfig { GoogleOpts getGoogleOpts() { return googleOpts } GoogleCredentials getCredentials() { return credentials } List getAllowedLocations() { allowedLocations } + String getBootDiskImage() { bootDiskImage } MemoryUnit getBootDiskSize() { bootDiskSize } String getCpuPlatform() { cpuPlatform } int getMaxSpotAttempts() { maxSpotAttempts } @@ -72,9 +74,10 @@ class BatchConfig { result.googleOpts = GoogleOpts.create(session) result.credentials = result.googleOpts.credentials result.allowedLocations = session.config.navigate('google.batch.allowedLocations', List.of()) as List + result.bootDiskImage = session.config.navigate('google.batch.bootDiskImage') result.bootDiskSize = session.config.navigate('google.batch.bootDiskSize') as MemoryUnit result.cpuPlatform = session.config.navigate('google.batch.cpuPlatform') - result.maxSpotAttempts = session.config.navigate('google.batch.maxSpotAttempts',5) as int + result.maxSpotAttempts = session.config.navigate('google.batch.maxSpotAttempts', DEFAULT_MAX_SPOT_ATTEMPTS) as int result.installGpuDrivers = session.config.navigate('google.batch.installGpuDrivers',false) result.preemptible = session.config.navigate('google.batch.preemptible',false) result.spot = session.config.navigate('google.batch.spot',false) @@ -83,7 +86,7 @@ class BatchConfig { result.subnetwork = session.config.navigate('google.batch.subnetwork') result.serviceAccountEmail = session.config.navigate('google.batch.serviceAccountEmail') result.retryConfig = new BatchRetryConfig( session.config.navigate('google.batch.retryPolicy') as Map ?: Map.of() ) - result.autoRetryExitCodes = session.config.navigate('google.batch.autoRetryExitCodes',DEFAULT_RETRY_LIST) as List + result.autoRetryExitCodes = session.config.navigate('google.batch.autoRetryExitCodes', DEFAULT_RETRY_LIST) as List return result } diff --git a/plugins/nf-google/src/main/nextflow/cloud/google/lifesciences/GoogleLifeSciencesExecutor.groovy 
b/plugins/nf-google/src/main/nextflow/cloud/google/lifesciences/GoogleLifeSciencesExecutor.groovy index 956b5f858e..9ef4c55894 100644 --- a/plugins/nf-google/src/main/nextflow/cloud/google/lifesciences/GoogleLifeSciencesExecutor.groovy +++ b/plugins/nf-google/src/main/nextflow/cloud/google/lifesciences/GoogleLifeSciencesExecutor.groovy @@ -21,6 +21,7 @@ import java.nio.file.Path import groovy.transform.CompileStatic import groovy.util.logging.Slf4j +import nextflow.SysEnv import nextflow.exception.AbortOperationException import nextflow.executor.Executor import nextflow.executor.SupportedScriptTypes @@ -120,6 +121,8 @@ class GoogleLifeSciencesExecutor extends Executor implements ExtensionPoint { @Override protected void register() { + if( !SysEnv.getBool('NXF_DISABLE_GLS_DEPRECATION_NOTICE', false) ) + log.warn "Google Cloud Life Sciences API is deprecated and will no longer be available on Google Cloud after July 8, 2025. Consider migrating to Google Cloud Batch instead. Read more at this link: https://cloud.google.com/life-sciences/docs/deprecations" super.register() validateWorkDir() createConfig() diff --git a/plugins/nf-google/src/main/nextflow/cloud/google/lifesciences/GoogleLifeSciencesHelper.groovy b/plugins/nf-google/src/main/nextflow/cloud/google/lifesciences/GoogleLifeSciencesHelper.groovy index ea186ff781..8ae4bef53c 100644 --- a/plugins/nf-google/src/main/nextflow/cloud/google/lifesciences/GoogleLifeSciencesHelper.groovy +++ b/plugins/nf-google/src/main/nextflow/cloud/google/lifesciences/GoogleLifeSciencesHelper.groovy @@ -366,7 +366,7 @@ class GoogleLifeSciencesHelper { final remoteTaskDir = getRemoteTaskDir(workDir) def result = 'set -x; ' result += "trap 'err=\$?; exec 1>&2; gsutil -m -q cp -R $localTaskDir/${TaskRun.CMD_LOG} ${remoteTaskDir}/${TaskRun.CMD_LOG} || true; [[ \$err -gt 0 || \$GOOGLE_LAST_EXIT_STATUS -gt 0 || \$NXF_DEBUG -gt 0 ]] && { ls -lah $localTaskDir || true; gsutil -m -q cp -R /google/ ${remoteTaskDir}; } || rm -rf $localTaskDir; exit \$err' EXIT; " - result += "{ cd $localTaskDir; bash ${TaskRun.CMD_RUN} nxf_unstage; } >> $localTaskDir/${TaskRun.CMD_LOG} 2>&1" + result += "{ cd $localTaskDir; bash ${TaskRun.CMD_RUN} nxf_unstage;} >> $localTaskDir/${TaskRun.CMD_LOG} 2>&1" return result } diff --git a/plugins/nf-google/src/resources/META-INF/MANIFEST.MF b/plugins/nf-google/src/resources/META-INF/MANIFEST.MF index 92494dcf38..fb9f7deae5 100644 --- a/plugins/nf-google/src/resources/META-INF/MANIFEST.MF +++ b/plugins/nf-google/src/resources/META-INF/MANIFEST.MF @@ -1,6 +1,6 @@ Manifest-Version: 1.0 Plugin-Class: nextflow.cloud.google.GoogleCloudPlugin Plugin-Id: nf-google -Plugin-Version: 1.14.0 +Plugin-Version: 1.16.0 Plugin-Provider: Seqera Labs Plugin-Requires: >=24.04.4 diff --git a/plugins/nf-google/src/test/nextflow/cloud/google/batch/GoogleBatchTaskHandlerTest.groovy b/plugins/nf-google/src/test/nextflow/cloud/google/batch/GoogleBatchTaskHandlerTest.groovy index b54a5ff7a8..43150d17fd 100644 --- a/plugins/nf-google/src/test/nextflow/cloud/google/batch/GoogleBatchTaskHandlerTest.groovy +++ b/plugins/nf-google/src/test/nextflow/cloud/google/batch/GoogleBatchTaskHandlerTest.groovy @@ -17,6 +17,9 @@ package nextflow.cloud.google.batch +import com.google.cloud.batch.v1.JobStatus +import com.google.cloud.batch.v1.Task + import java.nio.file.Path import com.google.cloud.batch.v1.GCS @@ -105,6 +108,7 @@ class GoogleBatchTaskHandlerTest extends Specification { !instancePolicy.getMachineType() !instancePolicy.getMinCpuPlatform() 
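// The `google.batch.bootDiskImage` option exercised by the assertions here is new in this
// change; a minimal sketch of how it might be enabled in nextflow.config (the image name
// below is a hypothetical example, not a value taken from this diff):
//
//     google.batch.bootDiskImage = 'projects/my-project/global/images/my-batch-image'
//
// Note that this change also lowers the `google.batch.maxSpotAttempts` default from 5 to 0,
// so spot retries now have to be requested explicitly, e.g.:
//
//     google.batch.maxSpotAttempts = 5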
instancePolicy.getProvisioningModel().toString() == 'PROVISIONING_MODEL_UNSPECIFIED' + !instancePolicy.getBootDisk().getImage() and: allocationPolicy.getLocation().getAllowedLocationsCount() == 0 allocationPolicy.getNetwork().getNetworkInterfacesCount() == 0 @@ -121,6 +125,7 @@ class GoogleBatchTaskHandlerTest extends Specification { and: def ACCELERATOR = new AcceleratorResource(request: 1, type: 'nvidia-tesla-v100') def BOOT_DISK = MemoryUnit.of('10 GB') + def BOOT_IMAGE = 'batch-debian' def CONTAINER_IMAGE = 'ubuntu:22.1' def CONTAINER_OPTS = '--this --that' def CPU_PLATFORM = 'Intel Skylake' @@ -134,6 +139,7 @@ class GoogleBatchTaskHandlerTest extends Specification { getConfig() >> Mock(BatchConfig) { getAllowedLocations() >> ['zones/us-central1-a', 'zones/us-central1-c'] getBootDiskSize() >> BOOT_DISK + getBootDiskImage() >> BOOT_IMAGE getCpuPlatform() >> CPU_PLATFORM getMaxSpotAttempts() >> 5 getSpot() >> true @@ -211,6 +217,7 @@ class GoogleBatchTaskHandlerTest extends Specification { and: instancePolicy.getAccelerators(0).getCount() == 1 instancePolicy.getAccelerators(0).getType() == ACCELERATOR.type + instancePolicy.getBootDisk().getImage() == BOOT_IMAGE instancePolicy.getDisks(0).getNewDisk().getSizeGb() == DISK.request.toGiga() instancePolicy.getDisks(0).getNewDisk().getType() == DISK.type instancePolicy.getMachineType() == MACHINE_TYPE @@ -577,4 +584,39 @@ class GoogleBatchTaskHandlerTest extends Specification { and: 0 * client.deleteJob('job1') >> null } + + JobStatus makeJobStatus(JobStatus.State state, String desc = null) { + final builder = JobStatus.newBuilder().setState(state) + if( desc ) { + builder.addStatusEvents( + StatusEvent.newBuilder() + .setDescription(desc) + ) + } + builder.build() + } + + def 'should check job status when no tasks in job '() { + + given: + def jobId = 'job-id' + def taskId = 'task-id' + def client = Mock(BatchClient) + def task = Mock(TaskRun) { + lazyName() >> 'foo (1)' + } + def handler = Spy(new GoogleBatchTaskHandler(jobId: jobId, taskId: taskId, client: client, task: task)) + final message = 'Job failed when Batch tries to schedule it: Batch Error: code - CODE_MACHINE_TYPE_NOT_FOUND' + when: + client.listTasks(jobId) >>> [new LinkedList(), new LinkedList()] + client.getJobStatus(jobId) >>> [ + null, + makeJobStatus(JobStatus.State.FAILED, 'Scheduling Failed'), + makeJobStatus(JobStatus.State.FAILED, message) + ] + then: + handler.getTaskState() == "PENDING" + handler.getTaskState() == "FAILED" + handler.getJobError().message == message + } } diff --git a/plugins/nf-google/src/test/nextflow/cloud/google/batch/client/BatchConfigTest.groovy b/plugins/nf-google/src/test/nextflow/cloud/google/batch/client/BatchConfigTest.groovy index e364a01667..c6fc08f104 100644 --- a/plugins/nf-google/src/test/nextflow/cloud/google/batch/client/BatchConfigTest.groovy +++ b/plugins/nf-google/src/test/nextflow/cloud/google/batch/client/BatchConfigTest.groovy @@ -18,6 +18,7 @@ package nextflow.cloud.google.batch.client import nextflow.Session +import nextflow.util.MemoryUnit import spock.lang.Requires import spock.lang.Specification /** @@ -29,21 +30,20 @@ class BatchConfigTest extends Specification { @Requires({System.getenv('GOOGLE_APPLICATION_CREDENTIALS')}) def 'should create batch config' () { given: - def CONFIG = [google: [ - batch: [ - spot: true - ] - ] ] + def CONFIG = [:] def session = Mock(Session) { getConfig()>>CONFIG } when: def config = BatchConfig.create(session) then: - config.getSpot() + !config.getSpot() and: config.retryConfig.maxAttempts 
== 5 - config.maxSpotAttempts == 5 + config.maxSpotAttempts == 0 config.autoRetryExitCodes == [50001] + and: + !config.bootDiskImage + !config.bootDiskSize } @Requires({System.getenv('GOOGLE_APPLICATION_CREDENTIALS')}) @@ -54,7 +54,9 @@ class BatchConfigTest extends Specification { spot: true, maxSpotAttempts: 8, autoRetryExitCodes: [50001, 50003, 50005], - retryPolicy: [maxAttempts: 10] + retryPolicy: [maxAttempts: 10], + bootDiskImage: 'batch-foo', + bootDiskSize: '100GB' ] ] ] def session = Mock(Session) { getConfig()>>CONFIG } @@ -67,6 +69,9 @@ class BatchConfigTest extends Specification { config.retryConfig.maxAttempts == 10 config.maxSpotAttempts == 8 config.autoRetryExitCodes == [50001, 50003, 50005] + and: + config.bootDiskImage == 'batch-foo' + config.bootDiskSize == MemoryUnit.of('100GB') } } diff --git a/plugins/nf-google/src/test/nextflow/cloud/google/lifesciences/GoogleLifeSciencesHelperTest.groovy b/plugins/nf-google/src/test/nextflow/cloud/google/lifesciences/GoogleLifeSciencesHelperTest.groovy index 35cda62f0b..9db824a902 100644 --- a/plugins/nf-google/src/test/nextflow/cloud/google/lifesciences/GoogleLifeSciencesHelperTest.groovy +++ b/plugins/nf-google/src/test/nextflow/cloud/google/lifesciences/GoogleLifeSciencesHelperTest.groovy @@ -548,7 +548,7 @@ class GoogleLifeSciencesHelperTest extends GoogleSpecification { def unstage = helper.getUnstagingScript(dir) then: unstage == - 'set -x; trap \'err=$?; exec 1>&2; gsutil -m -q cp -R /work/dir/.command.log gs://my-bucket/work/dir/.command.log || true; [[ $err -gt 0 || $GOOGLE_LAST_EXIT_STATUS -gt 0 || $NXF_DEBUG -gt 0 ]] && { ls -lah /work/dir || true; gsutil -m -q cp -R /google/ gs://my-bucket/work/dir; } || rm -rf /work/dir; exit $err\' EXIT; { cd /work/dir; bash .command.run nxf_unstage; } >> /work/dir/.command.log 2>&1' + 'set -x; trap \'err=$?; exec 1>&2; gsutil -m -q cp -R /work/dir/.command.log gs://my-bucket/work/dir/.command.log || true; [[ $err -gt 0 || $GOOGLE_LAST_EXIT_STATUS -gt 0 || $NXF_DEBUG -gt 0 ]] && { ls -lah /work/dir || true; gsutil -m -q cp -R /google/ gs://my-bucket/work/dir; } || rm -rf /work/dir; exit $err\' EXIT; { cd /work/dir; bash .command.run nxf_unstage;} >> /work/dir/.command.log 2>&1' } @Unroll diff --git a/plugins/nf-google/src/test/nextflow/cloud/google/lifesciences/bash-wrapper-gcp.txt b/plugins/nf-google/src/test/nextflow/cloud/google/lifesciences/bash-wrapper-gcp.txt index 70a68452aa..c7382062a1 100644 --- a/plugins/nf-google/src/test/nextflow/cloud/google/lifesciences/bash-wrapper-gcp.txt +++ b/plugins/nf-google/src/test/nextflow/cloud/google/lifesciences/bash-wrapper-gcp.txt @@ -168,7 +168,10 @@ nxf_fs_fcp() { } on_exit() { - exit_status=${nxf_main_ret:=$?} + local last_err=$? 
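# The rewritten on_exit handler below resolves the task exit code by precedence: the main
# script status (nxf_main_ret) wins, then a non-zero unstage status (nxf_unstage_ret), and
# only as a last resort the status of the last command seen by the trap (last_err).
# A standalone sketch of the same rule, assuming the three variables have been set by the
# surrounding wrapper:
#
#   resolve_exit_status() {
#       local ret=${nxf_main_ret:=0}
#       [[ $ret -eq 0 && ${nxf_unstage_ret:=0} -ne 0 ]] && ret=$nxf_unstage_ret
#       [[ $ret -eq 0 && ${last_err:=0} -ne 0 ]] && ret=$last_err
#       echo $ret
#   }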
+ local exit_status=${nxf_main_ret:=0} + [[ ${exit_status} -eq 0 && ${nxf_unstage_ret:=0} -ne 0 ]] && exit_status=${nxf_unstage_ret:=0} + [[ ${exit_status} -eq 0 && ${last_err} -ne 0 ]] && exit_status=${last_err} printf -- $exit_status > {{folder}}/.exitcode set +u exit $exit_status @@ -192,12 +195,23 @@ nxf_stage() { nxf_parallel "${downloads[@]}" } -nxf_unstage() { +nxf_unstage_outputs() { + true +} + +nxf_unstage_controls() { true gsutil -m -q cp -R .command.out gs://bucket/work/dir/.command.out || true gsutil -m -q cp -R .command.err gs://bucket/work/dir/.command.err || true gsutil -m -q cp -R .exitcode gs://bucket/work/dir/.exitcode || true - [[ ${nxf_main_ret:=0} != 0 ]] && return +} + +nxf_unstage() { + if [[ ${nxf_main_ret:=0} == 0 ]]; then + (set -e -o pipefail; (nxf_unstage_outputs | tee -a .command.out) 3>&1 1>&2 2>&3 | tee -a .command.err) + nxf_unstage_ret=$? + fi + nxf_unstage_controls } nxf_main() { diff --git a/plugins/nf-tower/build.gradle b/plugins/nf-tower/build.gradle index ca7dae0a27..347d127762 100644 --- a/plugins/nf-tower/build.gradle +++ b/plugins/nf-tower/build.gradle @@ -37,6 +37,6 @@ dependencies { api "com.fasterxml.jackson.core:jackson-databind:2.12.7.1" testImplementation(testFixtures(project(":nextflow"))) - testImplementation "org.apache.groovy:groovy:4.0.22" - testImplementation "org.apache.groovy:groovy-nio:4.0.22" + testImplementation "org.apache.groovy:groovy:4.0.24" + testImplementation "org.apache.groovy:groovy-nio:4.0.24" } diff --git a/plugins/nf-tower/changelog.txt b/plugins/nf-tower/changelog.txt index 776e4828d1..bcc8d343cf 100644 --- a/plugins/nf-tower/changelog.txt +++ b/plugins/nf-tower/changelog.txt @@ -1,5 +1,8 @@ nf-tower changelog =================== +1.9.3 - 27 Oct 2024 +- Fix nf-tower plugin to upload logs on early failures (#5434) [4222442a] + 1.9.2 - 5 Aug 2024 - Bump pf4j to version 3.12.0 [96117b9a] diff --git a/plugins/nf-tower/src/main/io/seqera/tower/plugin/LogsCheckpoint.groovy b/plugins/nf-tower/src/main/io/seqera/tower/plugin/LogsCheckpoint.groovy index 514a0c0c48..11aa44d786 100644 --- a/plugins/nf-tower/src/main/io/seqera/tower/plugin/LogsCheckpoint.groovy +++ b/plugins/nf-tower/src/main/io/seqera/tower/plugin/LogsCheckpoint.groovy @@ -21,7 +21,9 @@ import groovy.transform.CompileStatic import groovy.util.logging.Slf4j import nextflow.Session import nextflow.SysEnv +import nextflow.processor.TaskHandler import nextflow.trace.TraceObserver +import nextflow.trace.TraceRecord import nextflow.util.Duration import nextflow.util.Threads /** @@ -47,6 +49,7 @@ class LogsCheckpoint implements TraceObserver { this.config = session.config this.handler = new LogsHandler(session, SysEnv.get()) this.interval = config.navigate('tower.logs.checkpoint.interval', defaultInterval()) as Duration + thread = Threads.start('tower-logs-checkpoint', this.&run) } private String defaultInterval() { @@ -54,12 +57,12 @@ } @Override - void onFlowBegin() { - thread = Threads.start('tower-logs-checkpoint', this.&run) + void onFlowComplete() { + this.terminated = true + thread.join() } - @Override - void onFlowComplete() { + void onFlowError(TaskHandler handler, TraceRecord trace) { this.terminated = true thread.join() } diff --git a/plugins/nf-tower/src/resources/META-INF/MANIFEST.MF b/plugins/nf-tower/src/resources/META-INF/MANIFEST.MF index ec179c25c6..9b912922bf 100644 --- a/plugins/nf-tower/src/resources/META-INF/MANIFEST.MF +++ b/plugins/nf-tower/src/resources/META-INF/MANIFEST.MF @@ -1,6 +1,6 @@ 
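The LogsCheckpoint change above starts the checkpoint thread when the observer is created instead of in `onFlowBegin`, and stops it from both `onFlowComplete` and the new `onFlowError` hook, so logs keep being uploaded even when a run aborts before the flow begins. A minimal sketch of the same lifecycle pattern (class and method names below are illustrative, not the plugin's API):

    // start the worker eagerly at construction; any terminal event stops it
    class PeriodicWorker {
        private volatile boolean terminated
        private Thread thread = Thread.startDaemon('worker') { run() }
        private void run() { while( !terminated ) { /* checkpoint logs */ sleep 1000 } }
        void stop() { terminated = true; thread.join() }  // invoked on completion or on error
    }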
Manifest-Version: 1.0 Plugin-Class: io.seqera.tower.plugin.TowerPlugin Plugin-Id: nf-tower -Plugin-Version: 1.9.2 +Plugin-Version: 1.9.3 Plugin-Provider: Seqera Labs Plugin-Requires: >=24.04.4 diff --git a/plugins/nf-wave/build.gradle b/plugins/nf-wave/build.gradle index ec6d173fe4..7f4f40d744 100644 --- a/plugins/nf-wave/build.gradle +++ b/plugins/nf-wave/build.gradle @@ -36,10 +36,10 @@ dependencies { api 'org.apache.commons:commons-lang3:3.12.0' api 'com.google.code.gson:gson:2.10.1' api 'org.yaml:snakeyaml:2.2' - api 'io.seqera:wave-api:0.11.1' - api 'io.seqera:wave-utils:0.12.1' + api 'io.seqera:wave-api:0.14.0' + api 'io.seqera:wave-utils:0.15.0' testImplementation(testFixtures(project(":nextflow"))) - testImplementation "org.apache.groovy:groovy:4.0.22" - testImplementation "org.apache.groovy:groovy-nio:4.0.22" + testImplementation "org.apache.groovy:groovy:4.0.24" + testImplementation "org.apache.groovy:groovy-nio:4.0.24" } diff --git a/plugins/nf-wave/changelog.txt b/plugins/nf-wave/changelog.txt index 57b257b5ef..1a13763b31 100644 --- a/plugins/nf-wave/changelog.txt +++ b/plugins/nf-wave/changelog.txt @@ -1,5 +1,30 @@ nf-wave changelog ================== +1.8.0 - 3 Dec 2024 +- Fix missing wave response (#5547) [ci fast] [ee252173] +- Update wave deps [09ccd295] +- Fix isContainerReady when wave is disabled (#5509) [ci fast] [3215afa8] +- Bump groovy 4.0.24 [dd71ad31] + +1.7.2 - 27 Oct 2024 +- Add wave mirror vs module bundles conflicts warning [b37a8a5b] + +1.7.1 - 14 Oct 2024 +- Change to scan.levels to scan.allowedLevels (#5401) [88a1b1b5] +- Fix inspect concretize option [0ee29a87] + +1.7.0 - 13 Oct 2024 +- Add support for Wave container status API (#5384) [873703ad] [9ed18a88] +- Remove unused reportsOpts (#5379) [e794e868] +- Sunsetting Spack support in Wave [3a54cb3b] + +1.6.0 - 2 Oct 2024 +- Improve Wave build timeout handling (#5304) [05bef7e4] +- Bump groovy 4.0.23 (#5303) [ci fast] [fe3e3ac7] + +1.5.1 - 4 Sep 2024 +- Wave client logs improvement [5a37e617] + 1.5.0 - 5 Aug 2024 - Await build completion for all Wave containers [2b8117e9] - Bump pf4j to version 3.12.0 [96117b9a] diff --git a/plugins/nf-wave/src/main/io/seqera/wave/plugin/ContainerConfig.groovy b/plugins/nf-wave/src/main/io/seqera/wave/plugin/ContainerConfig.groovy index 926d7f7835..2823c5c060 100644 --- a/plugins/nf-wave/src/main/io/seqera/wave/plugin/ContainerConfig.groovy +++ b/plugins/nf-wave/src/main/io/seqera/wave/plugin/ContainerConfig.groovy @@ -29,7 +29,7 @@ import nextflow.util.CacheHelper */ @Canonical @CompileStatic -@ToString(includePackage = false, includeNames = true) +@ToString(includePackage = false, includeNames = true, ignoreNulls = true) class ContainerConfig { List entrypoint @@ -108,4 +108,16 @@ class ContainerConfig { } return CacheHelper.hasher(allMeta).hash().toString() } + + boolean asBoolean() { + return !empty() + } + + boolean empty() { + return !entrypoint && + !cmd && + !env && + !workingDir && + !layers + } } diff --git a/plugins/nf-wave/src/main/io/seqera/wave/plugin/SubmitContainerTokenRequest.groovy b/plugins/nf-wave/src/main/io/seqera/wave/plugin/SubmitContainerTokenRequest.groovy index 86a9606b55..90e86df645 100644 --- a/plugins/nf-wave/src/main/io/seqera/wave/plugin/SubmitContainerTokenRequest.groovy +++ b/plugins/nf-wave/src/main/io/seqera/wave/plugin/SubmitContainerTokenRequest.groovy @@ -21,7 +21,10 @@ package io.seqera.wave.plugin import groovy.transform.CompileStatic import groovy.transform.EqualsAndHashCode import groovy.transform.ToString +import 
io.seqera.wave.api.ImageNameStrategy import io.seqera.wave.api.PackagesSpec +import io.seqera.wave.api.ScanLevel +import io.seqera.wave.api.ScanMode /** * Model a request for an augmented container @@ -135,4 +138,26 @@ class SubmitContainerTokenRequest { */ PackagesSpec packages + /** + * The strategy applied to name a container built by Wave when using + * the freeze option. + */ + ImageNameStrategy nameStrategy; + + /** + * Whether to use container "mirror" mode + */ + boolean mirror; + + /** + * The request security scan mode + */ + ScanMode scanMode; + + /** + * Define the allowed security vulnerabilities in the container request. + * Empty or null means no vulnerabilities are allowed. + */ + List scanLevels + } diff --git a/plugins/nf-wave/src/main/io/seqera/wave/plugin/SubmitContainerTokenResponse.groovy b/plugins/nf-wave/src/main/io/seqera/wave/plugin/SubmitContainerTokenResponse.groovy index 5c2bb110fe..2cf5e97f2c 100644 --- a/plugins/nf-wave/src/main/io/seqera/wave/plugin/SubmitContainerTokenResponse.groovy +++ b/plugins/nf-wave/src/main/io/seqera/wave/plugin/SubmitContainerTokenResponse.groovy @@ -20,7 +20,6 @@ package io.seqera.wave.plugin import groovy.transform.CompileStatic import groovy.transform.EqualsAndHashCode import groovy.transform.ToString - /** * Model a response for an augmented container * @@ -30,6 +29,12 @@ import groovy.transform.ToString @ToString(includeNames = true, includePackage = false) @CompileStatic class SubmitContainerTokenResponse { + + /** + * Unique Id for this request + */ + String requestId + /** * A unique authorization token assigned to this request */ @@ -60,4 +65,20 @@ class SubmitContainerTokenResponse { */ Boolean freeze; + /** + * Whether the result is a mirror container. Version v1alpha2 or later. + */ + Boolean mirror + + /** + * The id of the security scan associated with this container + */ + String scanId + + /** + * Whether the container has been provisioned successfully or not. If false + * the current status needs to be checked via the container status API + */ + Boolean succeeded + } diff --git a/plugins/nf-wave/src/main/io/seqera/wave/plugin/WaveClient.groovy b/plugins/nf-wave/src/main/io/seqera/wave/plugin/WaveClient.groovy index 4b64f509ca..c3640ce42d 100644 --- a/plugins/nf-wave/src/main/io/seqera/wave/plugin/WaveClient.groovy +++ b/plugins/nf-wave/src/main/io/seqera/wave/plugin/WaveClient.groovy @@ -27,7 +27,7 @@ import java.time.Duration import java.time.Instant import java.time.OffsetDateTime import java.time.temporal.ChronoUnit -import java.util.concurrent.Callable +import java.util.concurrent.ConcurrentHashMap import java.util.concurrent.Executors import java.util.concurrent.TimeUnit import java.util.function.Predicate @@ -44,9 +44,12 @@ import dev.failsafe.event.ExecutionAttemptedEvent import dev.failsafe.function.CheckedSupplier import groovy.json.JsonOutput import groovy.json.JsonSlurper +import groovy.transform.Canonical import groovy.transform.CompileStatic import groovy.transform.Memoized import io.seqera.wave.api.BuildStatusResponse +import io.seqera.wave.api.ContainerStatus +import io.seqera.wave.api.ContainerStatusResponse import io.seqera.wave.api.PackagesSpec import io.seqera.wave.plugin.config.TowerConfig import io.seqera.wave.plugin.config.WaveConfig @@ -57,6 +60,7 @@ import nextflow.Session import nextflow.SysEnv import nextflow.container.inspect.ContainerInspectMode import nextflow.container.resolver.ContainerInfo +import nextflow.exception.ProcessUnrecoverableException import nextflow.fusion.FusionConfig import nextflow.processor.Architecture import nextflow.processor.TaskRun @@ -73,6 +77,13 @@ import org.slf4j.LoggerFactory @CompileStatic class WaveClient { + @Canonical + static class Handle { + final SubmitContainerTokenResponse response + final Instant createdAt + int iteration + } + final static public String DEFAULT_S5CMD_AMD64_URL = 'https://nf-xpack.seqera.io/s5cmd/linux_amd64_2.2.2.json' final static public String DEFAULT_S5CMD_ARM64_URL = 'https://nf-xpack.seqera.io/s5cmd/linux_arm64_2.2.2.json' @@ -80,8 +91,6 @@ class WaveClient { public static final List DEFAULT_CONDA_CHANNELS = ['conda-forge','bioconda'] - private static final String DEFAULT_SPACK_ARCH = 'x86_64' - private static final String DEFAULT_DOCKER_PLATFORM = 'linux/amd64' final private HttpClient httpClient @@ -98,6 +107,8 @@ class WaveClient { private Cache cache + private Map responses = new ConcurrentHashMap<>() + private Session session private volatile String accessToken @@ -127,7 +138,7 @@ class WaveClient { this.packer = new Packer().withPreserveTimestamp(config.preserveFileTimestamp()) this.waveRegistry = new URI(endpoint).getAuthority() // create cache - cache = CacheBuilder + this.cache = CacheBuilder .newBuilder() .expireAfterWrite(config.tokensCacheMaxDuration().toSeconds(), TimeUnit.SECONDS) .build() @@ -137,6 +148,9 @@ this.httpClient = newHttpClient() } + /* only for testing */ + protected WaveClient() { } + protected HttpClient newHttpClient() { final builder = HttpClient.newBuilder() .version(HttpClient.Version.HTTP_1_1) @@ -160,7 +174,7 @@ } SubmitContainerTokenRequest makeRequest(WaveAssets assets) { - final containerConfig = assets.containerConfig ?: new ContainerConfig() + ContainerConfig containerConfig = assets.containerConfig ?: new ContainerConfig() // prepend the bundle layer if( assets.moduleResources!=null && assets.moduleResources.hasEntries() ) { 
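// prepending (rather than appending) keeps the module bundle as the base layer,
// so any layers added to the container config afterwards are applied on top of it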
containerConfig.prependLayer(makeLayer(assets.moduleResources)) @@ -181,7 +195,21 @@ if( assets.containerFile && assets.packagesSpec ) throw new IllegalArgumentException("Wave containerFile and packages spec cannot be specified in the same request") - + + if( config.mirrorMode() && config.freezeMode() ) + throw new IllegalArgumentException("Wave configuration settings 'wave.mirror' and 'wave.freeze' conflict with each other") + + if( config.mirrorMode() && !config.buildRepository() ) + throw new IllegalArgumentException("Wave configuration setting 'wave.mirror' requires the use of 'wave.build.repository' to define the target registry") + + if( config.mirrorMode() && !assets.containerImage ) + throw new IllegalArgumentException("Invalid container mirror operation - missing source container") + + if( config.mirrorMode() && containerConfig ) { + log.warn1("Wave configuration setting 'wave.mirror' conflicts with the use of module bundles - ignoring custom config for container: $assets.containerImage") + containerConfig = null + } + return new SubmitContainerTokenRequest( containerImage: assets.containerImage, containerPlatform: assets.containerPlatform, @@ -194,7 +222,10 @@ fingerprint: assets.fingerprint(), freeze: config.freezeMode(), format: assets.singularity ? 'sif' : null, - dryRun: ContainerInspectMode.active() + dryRun: ContainerInspectMode.active(), + mirror: config.mirrorMode(), + scanMode: config.scanMode(), + scanLevels: config.scanAllowedLevels() ) } @@ -219,6 +250,9 @@ workflowId: tower.workflowId, freeze: config.freezeMode(), dryRun: ContainerInspectMode.active(), + mirror: config.mirrorMode(), + scanMode: config.scanMode(), + scanLevels: config.scanAllowedLevels() ) return sendRequest(request) } @@ -265,13 +299,30 @@ throw new UnauthorizedException("Unauthorized [401] - Verify you have provided a valid access token") } else - throw new BadResponseException("Wave invalid response: [${resp.statusCode()}] ${resp.body()}") + throw new BadResponseException("Wave invalid response: POST ${uri} [${resp.statusCode()}] ${resp.body()}") } catch (IOException e) { throw new IllegalStateException("Unable to connect Wave service: $endpoint") } } + protected ContainerStatusResponse jsonToContainerStatusResponse(String body) { + final obj = new JsonSlurper().parseText(body) as Map + return new ContainerStatusResponse( + obj.id as String, + obj.status as ContainerStatus, + obj.buildId as String, + obj.mirrorId as String, + obj.scanId as String, + obj.vulnerabilities as Map, + obj.succeeded as Boolean, + obj.reason as String, + obj.detailsUri as String, + Instant.parse(obj.creationTime as String), + null + ) + } + protected BuildStatusResponse jsonToBuildStatusResponse(String body) { final obj = new JsonSlurper().parseText(body) as Map new BuildStatusResponse( @@ -350,12 +401,6 @@ if( attrs.container && attrs.conda ) { throw new IllegalArgumentException("Process '${name}' declares both 'container' and 'conda' directives that conflict each other") } - if( attrs.container && attrs.spack ) { - throw new IllegalArgumentException("Process '${name}' declares both 'container' and 'spack' directives that conflict each other") - } - if( attrs.spack && attrs.conda ) { - throw new IllegalArgumentException("Process '${name}' declares both 'spack' and 'conda' directives that conflict each other") - } checkConflicts0(attrs, name, 'dockerfile') checkConflicts0(attrs, name, 'singularityfile') } @@ -367,9 +412,6 @@ 
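The validation rules added above imply the minimal configuration needed to use mirror mode; a sketch of a nextflow.config fragment that satisfies them (the target registry is an example value, echoed by the test fixtures later in this diff):

    wave.enabled = true
    wave.mirror = true
    wave.build.repository = 'quay.io'   // required: the registry the images are mirrored to
    // wave.freeze must remain disabled, since it conflicts with wave.mirror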
class WaveClient { if( attrs.container && attrs.get(fileType) ) { throw new IllegalArgumentException("Process '${name}' declares both a 'container' directive and a module bundle $fileType that conflict each other") } - if( attrs.get(fileType) && attrs.spack ) { - throw new IllegalArgumentException("Process '${name}' declares both a 'spack' directive and a module bundle $fileType that conflict each other") - } } Map resolveConflicts(Map attrs, List strategy) { @@ -411,15 +453,13 @@ class WaveClient { WaveAssets resolveAssets(TaskRun task, String containerImage, boolean singularity) { // get the bundle final bundle = task.getModuleBundle() - // get the Spack architecture + // get the architecture final arch = task.config.getArchitecture() - final spackArch = arch ? arch.spackArch : DEFAULT_SPACK_ARCH final dockerArch = arch? arch.dockerArch : DEFAULT_DOCKER_PLATFORM // compose the request attributes def attrs = new HashMap() attrs.container = containerImage attrs.conda = task.config.conda as String - attrs.spack = task.config.spack as String if( bundle!=null && bundle.dockerfile ) { attrs.dockerfile = bundle.dockerfile.text } @@ -435,10 +475,10 @@ class WaveClient { checkConflicts(attrs, task.lazyName()) // resolve the wave assets - return resolveAssets0(attrs, bundle, singularity, dockerArch, spackArch) + return resolveAssets0(attrs, bundle, singularity, dockerArch) } - protected WaveAssets resolveAssets0(Map attrs, ResourcesBundle bundle, boolean singularity, String dockerArch, String spackArch) { + protected WaveAssets resolveAssets0(Map attrs, ResourcesBundle bundle, boolean singularity, String dockerArch) { final scriptType = singularity ? 'singularityfile' : 'dockerfile' String containerScript = attrs.get(scriptType) @@ -483,33 +523,6 @@ class WaveClient { } } - /* - * If 'spack' directive is specified use it to create a container file - * to assemble the target container - */ - if( attrs.spack ) { - if( containerScript ) - throw new IllegalArgumentException("Unexpected spack and dockerfile conflict while resolving wave container") - - if( isSpackFile(attrs.spack) ) { - // create a minimal spack file with package spec from user input - final spackFile = Path.of(attrs.spack) - final spackEnv = addPackagesToSpackYaml(spackFile.text, config.spackOpts()) - packagesSpec = new PackagesSpec() - .withType(PackagesSpec.Type.SPACK) - .withSpackOpts(config.spackOpts()) - .withEnvironment(spackEnv.bytes.encodeBase64().toString()) - } - else { - // create a minimal spack file with package spec from user input - final spackEnv = spackPackagesToSpackYaml(attrs.spack, config.spackOpts()) - packagesSpec = new PackagesSpec() - .withType(PackagesSpec.Type.SPACK) - .withSpackOpts(config.spackOpts()) - .withEnvironment(spackEnv.bytes.encodeBase64().toString()) - } - } - /* * The process should declare at least a container image name via 'container' directive * or a dockerfile file to build, otherwise there's no job to be done by wave @@ -562,28 +575,84 @@ class WaveClient { final key = assets.fingerprint() log.trace "Wave fingerprint: $key; assets: $assets" // get from cache or submit a new request - final response = cache.get(key, { sendRequest(assets) } as Callable ) - if( response.buildId && !response.cached && !ContainerInspectMode.active() ) { - // await the image to be available when a new image is being built - awaitCompletion(response.buildId) - } - // assemble the container info response - return new ContainerInfo(assets.containerImage, response.targetImage, key) + final resp = cache.get(key, () 
-> { + final ret = sendRequest(assets); + responses.put(key,new Handle(ret,Instant.now())); + return ret + }) + return new ContainerInfo(assets.containerImage, resp.targetImage, key) } catch ( UncheckedExecutionException e ) { throw e.cause } } - void awaitCompletion(String buildId) { - final long maxAwait = Duration.ofMinutes(15).toMillis(); - final long startTime = Instant.now().toEpochMilli(); - while( !isComplete(buildId) ) { - if( System.currentTimeMillis()-startTime > maxAwait ) { - break - } - Thread.sleep(randomRange(10,15) * 1_000) - } + protected boolean checkContainerCompletion(Handle handle) { + final long maxAwait = config.buildMaxDuration().toMillis() + final startTime = handle.createdAt.toEpochMilli() + final containerImage = handle.response.targetImage + final requestId = handle.response.requestId + final resp = containerStatus(requestId) + if( resp.status==ContainerStatus.DONE ) { + if( resp.succeeded ) + return true + def msg = "Wave provisioning for container '${containerImage}' did not complete successfully" + if( resp.reason ) + msg += "\n- Reason: ${resp.reason}" + if( resp.detailsUri ) + msg += "\n- Find out more here: ${resp.detailsUri}" + throw new ProcessUnrecoverableException(msg) + } + if( System.currentTimeMillis()-startTime > maxAwait ) { + final msg = "Wave provisioning for container '${containerImage}' is exceeding max allowed duration (${config.buildMaxDuration()}) - check details here: ${endpoint}/view/containers/${requestId}" + throw new ProcessUnrecoverableException(msg) + } + // this is expected to be invoked ~ every second, therefore + // print an info message after 10 seconds or every 200 seconds + if( ((handle.iteration++)-10) % 200 == 0 ) { + log.info "Awaiting container provisioning: $containerImage" + } + return false + } + + protected boolean checkBuildCompletion(Handle handle) { + final long maxAwait = config.buildMaxDuration().toMillis() + final startTime = handle.createdAt.toEpochMilli() + final containerImage = handle.response.targetImage + final buildId = handle.response.buildId + final resp = buildStatus(buildId) + if( resp.status==BuildStatusResponse.Status.COMPLETED ) { + if( resp.succeeded ) + return true + final msg = "Wave provisioning for container '${containerImage}' did not complete successfully - check details here: ${endpoint}/view/builds/${buildId}" + throw new ProcessUnrecoverableException(msg) + } + if( System.currentTimeMillis()-startTime > maxAwait ) { + final msg = "Wave provisioning for container '${containerImage}' is exceeding max allowed duration (${config.buildMaxDuration()}) - check details here: ${endpoint}/view/builds/${buildId}" + throw new ProcessUnrecoverableException(msg) + } + // this is expected to be invoked ~ every second, therefore + // print an info message after 10 seconds or every 200 seconds + if( ((handle.iteration++)-10) % 200 == 0 ) { + log.info "Awaiting container provisioning: $containerImage" + } + return false + } + + boolean isContainerReady(String key) { + final handle = responses.get(key) + if( !handle ) + throw new IllegalStateException("Unable to find any container with key: $key") + final resp = handle.response + if( resp.requestId ) { + return resp.succeeded + ? 
true + : checkContainerCompletion(handle) + } + if( resp.buildId && !resp.cached ) + return checkBuildCompletion(handle) + else + return true } protected static int randomRange(int min, int max) { @@ -592,8 +661,28 @@ class WaveClient { return rand.nextInt((max - min) + 1) + min; } - protected boolean isComplete(String buildId) { - final String statusEndpoint = endpoint + "/v1alpha1/builds/"+buildId+"/status"; + protected ContainerStatusResponse containerStatus(String requestId) { + final String statusEndpoint = endpoint + "/v1alpha2/container/${requestId}/status"; + final HttpRequest req = HttpRequest.newBuilder() + .uri(URI.create(statusEndpoint)) + .headers("Content-Type","application/json") + .GET() + .build(); + + final HttpResponse resp = httpSend(req); + log.debug("Wave container status response: statusCode={}; body={}", resp.statusCode(), resp.body()) + if( resp.statusCode()==200 ) { + return jsonToContainerStatusResponse(resp.body()) + } + else { + String msg = String.format("Wave invalid response: GET %s [%s] %s", statusEndpoint, resp.statusCode(), resp.body()); + throw new BadResponseException(msg) + } + } + + @Deprecated + protected BuildStatusResponse buildStatus(String buildId) { + final String statusEndpoint = endpoint + "/v1alpha1/builds/${buildId}/status"; final HttpRequest req = HttpRequest.newBuilder() .uri(URI.create(statusEndpoint)) .headers("Content-Type","application/json") @@ -603,8 +692,7 @@ class WaveClient { final HttpResponse resp = httpSend(req); log.debug("Wave build status response: statusCode={}; body={}", resp.statusCode(), resp.body()) if( resp.statusCode()==200 ) { - final result = jsonToBuildStatusResponse(resp.body()) - return result.status == BuildStatusResponse.Status.COMPLETED + return jsonToBuildStatusResponse(resp.body()) } else { String msg = String.format("Wave invalid response: GET %s [%s] %s", statusEndpoint, resp.statusCode(), resp.body()); @@ -626,12 +714,6 @@ class WaveClient { value.startsWith('http://') || value.startsWith('https://') } - protected boolean isSpackFile(String value) { - if( value.contains('\n') ) - return false - return value.endsWith('.yaml') || value.endsWith('.yml') - } - protected boolean refreshJwtToken0(String refresh) { log.debug "Token refresh request >> $refresh" @@ -684,7 +766,7 @@ class WaveClient { final cfg = config.retryOpts() final listener = new EventListener>() { @Override - void accept(ExecutionAttemptedEvent event) throws Throwable { + void accept(ExecutionAttemptedEvent event) throws Throwable { def msg = "Wave connection failure - attempt: ${event.attemptCount}" if( event.lastResult!=null ) msg += "; response: ${event.lastResult}" diff --git a/plugins/nf-wave/src/main/io/seqera/wave/plugin/config/WaveConfig.groovy b/plugins/nf-wave/src/main/io/seqera/wave/plugin/config/WaveConfig.groovy index 09c5afadaf..af40573172 100644 --- a/plugins/nf-wave/src/main/io/seqera/wave/plugin/config/WaveConfig.groovy +++ b/plugins/nf-wave/src/main/io/seqera/wave/plugin/config/WaveConfig.groovy @@ -20,6 +20,8 @@ package io.seqera.wave.plugin.config import groovy.transform.CompileStatic import groovy.transform.ToString import groovy.util.logging.Slf4j +import io.seqera.wave.api.ScanLevel +import io.seqera.wave.api.ScanMode import io.seqera.wave.config.CondaOpts import io.seqera.wave.config.SpackOpts import nextflow.file.FileHelper @@ -30,7 +32,7 @@ import nextflow.util.Duration * @author Paolo Di Tommaso */ @Slf4j -@ToString(includeNames = true, includePackage = false, includeFields = true, excludes = 'reportOpts') 
+@ToString(includeNames = true, includePackage = false, includeFields = true) @CompileStatic class WaveConfig { final private static String DEF_ENDPOINT = 'https://wave.seqera.io' @@ -45,16 +47,20 @@ class WaveConfig { final private Boolean bundleProjectResources final private String buildRepository final private String cacheRepository - final private ReportOpts reportOpts final private RetryOpts retryOpts final private HttpOpts httpClientOpts final private Boolean freezeMode final private Boolean preserveFileTimestamp + final private Duration buildMaxDuration + final private Boolean mirrorMode + final private ScanMode scanMode + final private List scanAllowedLevels WaveConfig(Map opts, Map env=System.getenv()) { this.enabled = opts.enabled this.endpoint = (opts.endpoint?.toString() ?: env.get('WAVE_API_ENDPOINT') ?: DEF_ENDPOINT)?.stripEnd('/') - this.freezeMode = opts.freeze as Boolean + this.freezeMode = opts.freeze + this.mirrorMode = opts.mirror this.preserveFileTimestamp = opts.preserveFileTimestamp as Boolean this.containerConfigUrl = parseConfig(opts, env) this.tokensCacheMaxDuration = opts.navigate('tokens.cache.maxDuration', '30m') as Duration @@ -64,9 +70,11 @@ class WaveConfig { this.cacheRepository = opts.navigate('build.cacheRepository') as String this.strategy = parseStrategy(opts.strategy) this.bundleProjectResources = opts.bundleProjectResources - this.reportOpts = new ReportOpts(opts.report as Map ?: Map.of()) this.retryOpts = retryOpts0(opts) this.httpClientOpts = new HttpOpts(opts.httpClient as Map ?: Map.of()) + this.buildMaxDuration = opts.navigate('build.maxDuration', '40m') as Duration + this.scanMode = opts.navigate('scan.mode') as ScanMode + this.scanAllowedLevels = parseScanLevels(opts.navigate('scan.allowedLevels')) // some validation validateConfig() } @@ -87,6 +95,8 @@ class WaveConfig { boolean freezeMode() { return this.freezeMode } + boolean mirrorMode() { return this.mirrorMode } + boolean preserveFileTimestamp() { return this.preserveFileTimestamp } boolean bundleProjectResources() { bundleProjectResources } @@ -95,6 +105,8 @@ class WaveConfig { String cacheRepository() { cacheRepository } + Duration buildMaxDuration() { buildMaxDuration } + private void validateConfig() { def scheme= FileHelper.getUrlProtocol(endpoint) if( scheme !in ['http','https'] ) @@ -164,6 +176,24 @@ class WaveConfig { return tokensCacheMaxDuration } - @Deprecated - ReportOpts reportOpts() { reportOpts } + ScanMode scanMode() { + return scanMode + } + + List scanAllowedLevels() { + return scanAllowedLevels + } + + protected List parseScanLevels(value) { + if( !value ) + return null + if( value instanceof CharSequence ) { + final str = value.toString() + value = str.tokenize(',').collect(it->it.trim()) + } + if( value instanceof List ) { + return (value as List).collect(it-> ScanLevel.valueOf(it.toString().toUpperCase())) + } + throw new IllegalArgumentException("Invalid value for 'wave.scan.levels' setting - offending value: $value; type: ${value.getClass().getName()}") + } } diff --git a/plugins/nf-wave/src/main/io/seqera/wave/plugin/resolver/WaveContainerResolver.groovy b/plugins/nf-wave/src/main/io/seqera/wave/plugin/resolver/WaveContainerResolver.groovy index 914d73befe..69521b9e78 100644 --- a/plugins/nf-wave/src/main/io/seqera/wave/plugin/resolver/WaveContainerResolver.groovy +++ b/plugins/nf-wave/src/main/io/seqera/wave/plugin/resolver/WaveContainerResolver.groovy @@ -127,4 +127,11 @@ class WaveContainerResolver implements ContainerResolver { return null } + @Override + 
boolean isContainerReady(String key) { + final c=client() + return c.enabled() + ? c.isContainerReady(key) + : defaultResolver.isContainerReady(key) + } } diff --git a/plugins/nf-wave/src/resources/META-INF/MANIFEST.MF b/plugins/nf-wave/src/resources/META-INF/MANIFEST.MF index 696026aa07..bb42c60b97 100644 --- a/plugins/nf-wave/src/resources/META-INF/MANIFEST.MF +++ b/plugins/nf-wave/src/resources/META-INF/MANIFEST.MF @@ -1,6 +1,6 @@ Manifest-Version: 1.0 Plugin-Class: io.seqera.wave.plugin.WavePlugin Plugin-Id: nf-wave -Plugin-Version: 1.5.0 +Plugin-Version: 1.8.0 Plugin-Provider: Seqera Labs Plugin-Requires: >=24.04.4 diff --git a/plugins/nf-wave/src/test/io/seqera/wave/plugin/ContainerConfigTest.groovy b/plugins/nf-wave/src/test/io/seqera/wave/plugin/ContainerConfigTest.groovy index ec634488f6..87e4f0c927 100644 --- a/plugins/nf-wave/src/test/io/seqera/wave/plugin/ContainerConfigTest.groovy +++ b/plugins/nf-wave/src/test/io/seqera/wave/plugin/ContainerConfigTest.groovy @@ -161,4 +161,20 @@ class ContainerConfigTest extends Specification { fusion1.fingerprint() != fusion2.fingerprint() config1.fingerprint() == config2.fingerprint() } + + def 'should validate empty' () { + expect: + new ContainerConfig().empty() + new ContainerConfig([], null, null, null, null).empty() + new ContainerConfig(null, [], null, null, null).empty() + new ContainerConfig(null, null, [], null, null).empty() + new ContainerConfig(null, null, null, '', null).empty() + new ContainerConfig(null, null, null, null, []).empty() + and: + !new ContainerConfig(['x'], null, null, null, null).empty() + !new ContainerConfig(null, ['x'], null, null, null).empty() + !new ContainerConfig(null, null, ['x'], null, null).empty() + !new ContainerConfig(null, null, null, 'x', null).empty() + !new ContainerConfig(null, null, null, null, [new ContainerLayer()]).empty() + } } diff --git a/plugins/nf-wave/src/test/io/seqera/wave/plugin/WaveClientTest.groovy b/plugins/nf-wave/src/test/io/seqera/wave/plugin/WaveClientTest.groovy index 9b7eab4781..1f54b0a3d7 100644 --- a/plugins/nf-wave/src/test/io/seqera/wave/plugin/WaveClientTest.groovy +++ b/plugins/nf-wave/src/test/io/seqera/wave/plugin/WaveClientTest.groovy @@ -18,7 +18,6 @@ package io.seqera.wave.plugin import static java.nio.file.StandardOpenOption.* -import static test.TestHelper.* import java.net.http.HttpRequest import java.nio.file.Files @@ -35,11 +34,16 @@ import groovy.json.JsonOutput import groovy.transform.CompileStatic import groovy.util.logging.Slf4j import io.seqera.wave.api.BuildStatusResponse +import io.seqera.wave.api.ContainerStatus +import io.seqera.wave.api.ContainerStatusResponse import io.seqera.wave.api.PackagesSpec +import io.seqera.wave.api.ScanLevel +import io.seqera.wave.api.ScanMode import io.seqera.wave.config.CondaOpts import nextflow.Session import nextflow.SysEnv import nextflow.container.inspect.ContainerInspectMode +import nextflow.exception.ProcessUnrecoverableException import nextflow.extension.FilesEx import nextflow.file.FileHelper import nextflow.processor.TaskRun @@ -48,9 +52,11 @@ import org.apache.commons.compress.archivers.ArchiveStreamFactory import org.apache.commons.compress.archivers.tar.TarArchiveEntry import org.apache.commons.compress.archivers.tar.TarArchiveInputStream import org.apache.commons.compress.compressors.gzip.GzipCompressorInputStream +import org.junit.Rule import spock.lang.Shared import spock.lang.Specification import spock.lang.Unroll +import test.OutputCapture /** * * @author Paolo Di Tommaso @@ -99,6 +105,9 @@ class 
WaveClientTest extends Specification { } } + @Rule + OutputCapture capture = new OutputCapture() + def 'should tar file' () { given: def LAST_MODIFIED = FileTime.fromMillis(1_000_000_000_000) @@ -219,6 +228,7 @@ class WaveClientTest extends Specification { !req.condaFile !req.spackFile !req.containerConfig.layers + !req.mirror and: req.freeze and: @@ -226,6 +236,53 @@ class WaveClientTest extends Specification { req.timestamp instanceof String } + def 'should create request object with mirror mode' () { + given: + def session = Mock(Session) { getConfig() >> [wave:[mirror:true, build: [repository: 'quay.io']]]} + def IMAGE = 'foo:latest' + def wave = new WaveClient(session) + + when: + def req = wave.makeRequest(WaveAssets.fromImage(IMAGE)) + then: + req.containerImage == IMAGE + !req.containerPlatform + !req.containerFile + !req.condaFile + !req.spackFile + !req.containerConfig.layers + !req.freeze + and: + req.mirror + req.buildRepository == 'quay.io' + and: + req.fingerprint == 'bd2cb4b32df41f2d290ce2366609f2ad' + req.timestamp instanceof String + } + + def 'should create request object with scan mode and levels' () { + given: + def session = Mock(Session) { getConfig() >> [wave:[scan:[mode: 'required', allowedLevels: 'low,medium']]]} + def IMAGE = 'foo:latest' + def wave = new WaveClient(session) + + when: + def req = wave.makeRequest(WaveAssets.fromImage(IMAGE)) + then: + req.containerImage == IMAGE + !req.containerPlatform + !req.containerFile + !req.condaFile + !req.spackFile + !req.containerConfig.layers + and: + req.scanMode == ScanMode.required + req.scanLevels == List.of(ScanLevel.LOW, ScanLevel.MEDIUM) + and: + req.fingerprint == 'bd2cb4b32df41f2d290ce2366609f2ad' + req.timestamp instanceof String + } + def 'should create request object with dry-run mode' () { given: ContainerInspectMode.activate(true) @@ -565,33 +622,6 @@ class WaveClientTest extends Specification { assets.packagesSpec.channels == WaveClient.DEFAULT_CONDA_CHANNELS } - def 'should create asset with spack recipe' () { - given: - def session = Mock(Session) { getConfig() >> [:]} - and: - def task = Mock(TaskRun) {getConfig() >> [spack:"rseqc@3.0.1 'rbase@3.5'", arch:"amd64"] } - and: - def client = new WaveClient(session) - - when: - def assets = client.resolveAssets(task, null, false) - then: - !assets.containerFile - !assets.moduleResources - !assets.containerImage - !assets.containerConfig - !assets.projectResources - and: - assets.packagesSpec.type == PackagesSpec.Type.SPACK - assets.packagesSpec.entries == null - and: - new String(assets.packagesSpec.environment.decodeBase64()) == '''\ - spack: - specs: [rseqc@3.0.1, rbase@3.5] - concretizer: {unify: true, reuse: false} - '''.stripIndent(true) - } - def 'should create asset with conda file' () { given: def folder = Files.createTempDirectory('test') @@ -619,32 +649,6 @@ class WaveClientTest extends Specification { folder?.deleteDir() } - def 'should create asset with spack file' () { - given: - def folder = Files.createTempDirectory('test') - def spackFile = folder.resolve('spack.yaml'); spackFile.text = 'the-spack-recipe-here' - and: - def session = Mock(Session) { getConfig() >> [:]} - def task = Mock(TaskRun) {getConfig() >> [spack:spackFile.toString(), arch: 'amd64'] } - and: - def client = new WaveClient(session) - - when: - def assets = client.resolveAssets(task, null, false) - then: - !assets.containerFile - !assets.moduleResources - !assets.containerImage - !assets.containerConfig - !assets.projectResources - and: - assets.packagesSpec.type == 
PackagesSpec.Type.SPACK - new String(assets.packagesSpec.environment.decodeBase64()) == 'the-spack-recipe-here' - !assets.packagesSpec.entries - - cleanup: - folder?.deleteDir() - } // ==== singularity native build + conda ==== @@ -723,70 +727,6 @@ class WaveClientTest extends Specification { folder?.deleteDir() } - def 'should create assets with spack recipe for singularity' () { - given: - def session = Mock(Session) { getConfig() >> [wave:[build:[spack:[commands: ['cmd-foo','cmd-bar']]]]]} - and: - def task = Mock(TaskRun) {getConfig() >> [spack:"rseqc@3.0.1 'rbase@3.5'", arch:"amd64"] } - and: - def client = new WaveClient(session) - - when: - def assets = client.resolveAssets(task, null, true) - then: - !assets.containerFile - !assets.moduleResources - !assets.containerImage - !assets.containerConfig - !assets.projectResources - and: - assets.packagesSpec.type == PackagesSpec.Type.SPACK - assets.packagesSpec.entries == null - assets.packagesSpec.spackOpts.commands == ['cmd-foo','cmd-bar'] - decodeBase64(assets.packagesSpec.environment) == '''\ - spack: - specs: [rseqc@3.0.1, rbase@3.5] - concretizer: {unify: true, reuse: false} - '''.stripIndent(true) - } - - def 'should create asset with spack file for singularity' () { - given: - def folder = Files.createTempDirectory('test') - def spackFile = folder.resolve('spack.yml'); - spackFile.text = '''\ - spack: - specs: [rseqc@3.0.1, rbase@3.5] - concretizer: {unify: true, reuse: false} - '''.stripIndent(true) - and: - def session = Mock(Session) { getConfig() >> [wave:[build:[spack:[basePackages: 'nano@1.2.3']]]]} - def task = Mock(TaskRun) {getConfig() >> [spack:spackFile.toString()] } - and: - def client = new WaveClient(session) - - when: - def assets = client.resolveAssets(task, null, true) - then: - assets.singularity - and: - !assets.containerFile - !assets.moduleResources - !assets.containerImage - !assets.containerConfig - !assets.projectResources - and: - assets.packagesSpec.type == PackagesSpec.Type.SPACK - assets.packagesSpec.spackOpts.basePackages == 'nano@1.2.3' - decodeBase64(assets.packagesSpec.environment) == '''\ - spack: - specs: [rseqc@3.0.1, rbase@3.5, nano@1.2.3] - concretizer: {unify: true, reuse: false} - '''.stripIndent(true) - - cleanup: - folder?.deleteDir() - } def 'should create assets with project resources' () { given: @@ -895,24 +835,6 @@ class WaveClientTest extends Specification { e = thrown(IllegalArgumentException) e.message == "Process 'foo' declares both a 'container' directive and a module bundle dockerfile that conflict each other" - when: - client.checkConflicts([spack:'this', dockerfile:'that'], 'foo') - then: - e = thrown(IllegalArgumentException) - e.message == "Process 'foo' declares both a 'spack' directive and a module bundle dockerfile that conflict each other" - - when: - client.checkConflicts([spack:'this', container:'that'], 'foo') - then: - e = thrown(IllegalArgumentException) - e.message == "Process 'foo' declares both 'container' and 'spack' directives that conflict each other" - - when: - client.checkConflicts([conda:'this', spack:'that'], 'foo') - then: - e = thrown(IllegalArgumentException) - e.message == "Process 'foo' declares both 'spack' and 'conda' directives that conflict each other" - // singularity file checks when: client.checkConflicts([conda:'this', singularityfile:'that'], 'foo') @@ -926,12 +848,6 @@ class WaveClientTest extends Specification { e = thrown(IllegalArgumentException) e.message == "Process 'foo' declares both a 'container' directive and a module bundle 
singularityfile that conflict each other" - when: - client.checkConflicts([spack:'this', singularityfile:'that'], 'foo') - then: - e = thrown(IllegalArgumentException) - e.message == "Process 'foo' declares both a 'spack' directive and a module bundle singularityfile that conflict each other" - } def 'should get project resource bundle' () { @@ -1178,27 +1094,53 @@ class WaveClientTest extends Specification { def 'should deserialize build status' () { given: + def timestamp = Instant.parse('2024-10-07T20:41:00.804699Z') def sess = Mock(Session) {getConfig() >> [:] } and: def wave = Spy(new WaveClient(sess)) and: - def b1 = '{"id":"3449a9d02831c406_1","status":"PENDING","startTime":"2024-04-11T20:42:56.917524490Z"}' - def b2 = '{"id":"f76b765d2a9cec8b_1","status":"COMPLETED","startTime":"2024-04-11T21:46:55.960337916Z","duration":51.092386813,"succeeded":true}' + def json = ''' + { + "id":"1234", + "buildId":"bd-12345", + "creationTime":"2024-10-07T20:41:00.804699Z", + "detailsUri":"http://foo.com/view/123", + "duration":"60000000000", + "mirrorId":"mr-12345", + "reason":"Some err message", + "scanId":"sc-12345", + "status":"DONE", + "succeeded":true, + "vulnerabilities":{ + "LOW":1, + "MEDIUM":2 + } + } + '''.stripIndent() - expect: - wave.jsonToBuildStatusResponse(b1) == new BuildStatusResponse( - '3449a9d02831c406_1', - BuildStatusResponse.Status.PENDING, - Instant.parse("2024-04-11T20:42:56.917524490Z"), + when: + def resp = wave.jsonToContainerStatusResponse(json) + then: + resp == new ContainerStatusResponse( + '1234', + ContainerStatus.DONE, + 'bd-12345', + 'mr-12345', + 'sc-12345', + [LOW: 1, MEDIUM: 2], + true, + 'Some err message', + 'http://foo.com/view/123', + timestamp, null, - null) - and: - wave.jsonToBuildStatusResponse(b2) == new BuildStatusResponse( - 'f76b765d2a9cec8b_1', - BuildStatusResponse.Status.COMPLETED, - Instant.parse("2024-04-11T21:46:55.960337916Z"), - Duration.ofMillis(51.092386813 * 1_000 as long), - true) + ) + } + + def 'should deserialize container status' () { + given: + def sess = Mock(Session) {getConfig() >> [:] } + and: + def wave = Spy(new WaveClient(sess)) } def 'should test range' () { @@ -1209,4 +1151,249 @@ class WaveClientTest extends Specification { 100 .times { assert WaveClient.randomRange(0, 10) >= 0 } } + def 'should report true on build completion' () { + given: + def sess = Mock(Session) {getConfig() >> [:] } + and: + def wave = Spy(new WaveClient(sess)) + def BUILD_ID = 'build-123' + def PENDING = new BuildStatusResponse('123', BuildStatusResponse.Status.PENDING, Instant.now(), null, null) + def COMPLETED = new BuildStatusResponse('123', BuildStatusResponse.Status.COMPLETED, Instant.now(), Duration.ofMillis(1), true) + and: + def response = new SubmitContainerTokenResponse(buildId: BUILD_ID, targetImage: 'my/container:latest') + + when: + def done = wave.checkBuildCompletion(new WaveClient.Handle(response,Instant.now())) + then: + 1 * wave.buildStatus(BUILD_ID) >> PENDING + and: + !done + + when: + done = wave.checkBuildCompletion(new WaveClient.Handle(response,Instant.now())) + then: + 1 * wave.buildStatus(BUILD_ID) >> COMPLETED + and: + done + } + + def 'should report an exception on build failure' () { + given: + def sess = Mock(Session) {getConfig() >> [:] } + and: + def wave = Spy(new WaveClient(sess)) + def BUILD_ID = 'build-123' + def PENDING = new BuildStatusResponse('123', BuildStatusResponse.Status.PENDING, Instant.now(), null, null) + def FAILED = new BuildStatusResponse('123', BuildStatusResponse.Status.COMPLETED, 
+        and:
+        def response = new SubmitContainerTokenResponse(buildId: BUILD_ID, targetImage: 'my/container:latest')
+
+        when:
+        def done = wave.checkBuildCompletion(new WaveClient.Handle(response,Instant.now()))
+        then:
+        1 * wave.buildStatus(BUILD_ID) >> PENDING
+        and:
+        !done
+
+        when:
+        wave.checkBuildCompletion(new WaveClient.Handle(response,Instant.now()))
+        then:
+        1 * wave.buildStatus(BUILD_ID) >> FAILED
+        and:
+        def err = thrown(ProcessUnrecoverableException)
+        err.message == "Wave provisioning for container 'my/container:latest' did not complete successfully - check details here: https://wave.seqera.io/view/builds/build-123"
+    }
+
+    def 'should fail on build timeout' () {
+        given:
+        def sess = Mock(Session) {getConfig() >> [wave: [build:[maxDuration: '500ms']]] }
+        and:
+        def wave = Spy(new WaveClient(sess))
+        def BUILD_ID = 'build-123'
+        def PENDING = new BuildStatusResponse('123', BuildStatusResponse.Status.PENDING, Instant.now(), null, null)
+        and:
+        def response = new SubmitContainerTokenResponse(buildId: BUILD_ID, targetImage: 'my/container:latest')
+
+        when:
+        wave.checkBuildCompletion(new WaveClient.Handle(response,Instant.now().minusSeconds(10)))
+        then:
+        1 * wave.buildStatus(BUILD_ID) >> PENDING
+
+        then:
+        def err = thrown(ProcessUnrecoverableException)
+        err.message == "Wave provisioning for container 'my/container:latest' is exceeding max allowed duration (500ms) - check details here: https://wave.seqera.io/view/builds/build-123"
+    }
+
+    // == new api
+
+    def 'should report true on container completion' () {
+        given:
+        def sess = Mock(Session) {getConfig() >> [:] }
+        and:
+        def wave = Spy(new WaveClient(sess))
+        def ID = '123'
+        def PENDING = new ContainerStatusResponse('123', ContainerStatus.PENDING, 'bd-123', null, 'sc-123', [:], null, null, null, Instant.now(), null )
+        def COMPLETED = new ContainerStatusResponse('123', ContainerStatus.DONE, 'bd-123', null, 'sc-123', [:], true, null, null, Instant.now(), Duration.ofMinutes(1))
+        and:
+        def response = new SubmitContainerTokenResponse(requestId: ID, buildId: 'bd-123', targetImage: 'my/container:latest')
+
+        when:
+        def done = wave.checkContainerCompletion(new WaveClient.Handle(response,Instant.now()))
+        then:
+        1 * wave.containerStatus(ID) >> PENDING
+        and:
+        !done
+
+        when:
+        done = wave.checkContainerCompletion(new WaveClient.Handle(response,Instant.now()))
+        then:
+        1 * wave.containerStatus(ID) >> COMPLETED
+        and:
+        done
+    }
+
+    def 'should report an exception on container failure' () {
+        given:
+        def sess = Mock(Session) {getConfig() >> [:] }
+        and:
+        def wave = Spy(new WaveClient(sess))
+        def ID = '123'
+        def PENDING = new ContainerStatusResponse('123', ContainerStatus.PENDING, 'bd-123', null, 'sc-123', [:], null, null, null, Instant.now(), null )
+        def FAILED = new ContainerStatusResponse('123', ContainerStatus.DONE, 'bd-123', null, 'sc-123', [:], false, null, 'https://wave.seqera.io/view/builds/build-123', Instant.now(), Duration.ofMinutes(1))
+        and:
+        def response = new SubmitContainerTokenResponse(requestId: ID, buildId: 'bd-123', targetImage: 'my/container:latest')
+
+        when:
+        def done = wave.checkContainerCompletion(new WaveClient.Handle(response,Instant.now()))
+        then:
+        1 * wave.containerStatus(ID) >> PENDING
+        and:
+        !done
+
+        when:
+        wave.checkContainerCompletion(new WaveClient.Handle(response,Instant.now()))
+        then:
+        1 * wave.containerStatus(ID) >> FAILED
+        and:
+        def err = thrown(ProcessUnrecoverableException)
+        err.message == "Wave provisioning for container 'my/container:latest' did not complete successfully\n- Find out more here: https://wave.seqera.io/view/builds/build-123"
+    }
+
+    def 'should fail on container timeout' () {
+        given:
+        def sess = Mock(Session) {getConfig() >> [wave: [build:[maxDuration: '500ms']]] }
+        and:
+        def wave = Spy(new WaveClient(sess))
+        def ID = '123'
+        def PENDING = new ContainerStatusResponse('123', ContainerStatus.PENDING, 'bd-123', null, 'sc-123', [:], null, null, null, Instant.now(), null )
+        and:
+        def response = new SubmitContainerTokenResponse(requestId: ID, buildId: 'bd-123', targetImage: 'my/container:latest')
+
+        when:
+        wave.checkContainerCompletion(new WaveClient.Handle(response,Instant.now().minusSeconds(10)))
+        then:
+        1 * wave.containerStatus(ID) >> PENDING
+
+        then:
+        def err = thrown(ProcessUnrecoverableException)
+        err.message == "Wave provisioning for container 'my/container:latest' is exceeding max allowed duration (500ms) - check details here: https://wave.seqera.io/view/containers/123"
+    }
+
+    def 'should validate isContainerReady' () {
+        given:
+        def sess = Mock(Session) {getConfig() >> [wave: [build:[maxDuration: '500ms']]] }
+        def cache = Mock(Map)
+        and:
+        def resp = Mock(SubmitContainerTokenResponse)
+        def handle = new WaveClient.Handle(resp,Instant.now())
+        def wave = Spy(new WaveClient(session:sess, responses: cache))
+        boolean ready
+
+        // container succeeded
+        when:
+        ready = wave.isContainerReady('xyz')
+        then:
+        cache.get('xyz') >> handle
+        and:
+        resp.requestId >> '12345'
+        resp.succeeded >> true
+        and:
+        0 * wave.checkContainerCompletion(handle) >> null
+        0 * wave.checkBuildCompletion(_) >> null
+        and:
+        ready
+
+        // container is pending
+        when:
+        ready = wave.isContainerReady('xyz')
+        then:
+        cache.get('xyz') >> handle
+        and:
+        resp.requestId >> '12345'
+        resp.succeeded >> null
+        and:
+        1 * wave.checkContainerCompletion(handle) >> true
+        0 * wave.checkBuildCompletion(_) >> null
+        and:
+        ready
+
+        // container failed
+        when:
+        ready = wave.isContainerReady('xyz')
+        then:
+        cache.get('xyz') >> handle
+        and:
+        resp.requestId >> '12345'
+        resp.succeeded >> false
+        and:
+        1 * wave.checkContainerCompletion(handle) >> false
+        0 * wave.checkBuildCompletion(_) >> null
+        and:
+        !ready
+
+        // build is ready
+        when:
+        ready = wave.isContainerReady('xyz')
+        then:
+        cache.get('xyz') >> handle
+        and:
+        resp.buildId >> 'bd-5678'
+        resp.cached >> false
+        and:
+        0 * wave.checkContainerCompletion(_) >> null
+        1 * wave.checkBuildCompletion(handle) >> true
+        and:
+        ready
+
+        // build is not ready
+        when:
+        ready = wave.isContainerReady('xyz')
+        then:
+        cache.get('xyz') >> handle
+        and:
+        resp.requestId >> null
+        resp.buildId >> 'bd-5678'
+        resp.cached >> false
+        and:
+        0 * wave.checkContainerCompletion(_) >> null
+        1 * wave.checkBuildCompletion(handle) >> false
+        and:
+        !ready
+
+        // build is cached
+        when:
+        ready = wave.isContainerReady('xyz')
+        then:
+        cache.get('xyz') >> handle
+        and:
+        resp.requestId >> null
+        resp.buildId >> 'bd-5678'
+        resp.cached >> true
+        and:
+        0 * wave.checkContainerCompletion(_) >> null
+        0 * wave.checkBuildCompletion(handle) >> null
+        and:
+        ready
+    }
 }
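Taken together, the isContainerReady interactions above pin down a simple decision rule: a response carrying a requestId is tracked through the new container-status API and is immediately ready only when the service already reported success, while a response carrying only a buildId goes through the legacy build API and is immediately ready only when the build was cached; anything else is polled. As a rough Groovy sketch of that contract (an illustration distilled from the expectations above, not the actual WaveClient implementation):

    // polling helpers return false while pending, true when done, and throw
    // ProcessUnrecoverableException on failure or on exceeding build.maxDuration
    boolean isReady(SubmitContainerTokenResponse resp, WaveClient.Handle handle) {
        if( resp.requestId )
            return resp.succeeded ?: checkContainerCompletion(handle)
        if( resp.buildId )
            return resp.cached ?: checkBuildCompletion(handle)
        return true
    }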
diff --git a/plugins/nf-wave/src/test/io/seqera/wave/plugin/config/WaveConfigTest.groovy b/plugins/nf-wave/src/test/io/seqera/wave/plugin/config/WaveConfigTest.groovy
index 613a14ee4b..fe95425528 100644
--- a/plugins/nf-wave/src/test/io/seqera/wave/plugin/config/WaveConfigTest.groovy
+++ b/plugins/nf-wave/src/test/io/seqera/wave/plugin/config/WaveConfigTest.groovy
@@ -17,6 +17,8 @@ package io.seqera.wave.plugin.config
 
+import io.seqera.wave.api.ScanLevel
+import io.seqera.wave.api.ScanMode
 import nextflow.util.Duration
 import spock.lang.Specification
 import spock.lang.Unroll
@@ -86,7 +88,7 @@ class WaveConfigTest extends Specification {
         when:
         def opts = new WaveConfig([:])
         then:
-        opts.condaOpts().mambaImage == 'mambaorg/micromamba:1.5.8-lunar'
+        opts.condaOpts().mambaImage == 'mambaorg/micromamba:1.5.10-noble'
         opts.condaOpts().commands == null
 
         when:
@@ -199,10 +201,10 @@ class WaveConfigTest extends Specification {
         given:
         def config = new WaveConfig([enabled: true])
         expect:
-        config.toString() == 'WaveConfig(enabled:true, endpoint:https://wave.seqera.io, containerConfigUrl:[], tokensCacheMaxDuration:30m, condaOpts:CondaOpts(mambaImage=mambaorg/micromamba:1.5.8-lunar; basePackages=conda-forge::procps-ng, commands=null), spackOpts:SpackOpts(basePackages=null, commands=null), strategy:[container, dockerfile, conda, spack], bundleProjectResources:null, buildRepository:null, cacheRepository:null, retryOpts:RetryOpts(delay:450ms, maxDelay:1m 30s, maxAttempts:10, jitter:0.25), httpClientOpts:HttpOpts(), freezeMode:null, preserveFileTimestamp:null)'
+        config.toString() == 'WaveConfig(enabled:true, endpoint:https://wave.seqera.io, containerConfigUrl:[], tokensCacheMaxDuration:30m, condaOpts:CondaOpts(mambaImage=mambaorg/micromamba:1.5.10-noble; basePackages=conda-forge::procps-ng, commands=null), spackOpts:SpackOpts(basePackages=null, commands=null), strategy:[container, dockerfile, conda, spack], bundleProjectResources:null, buildRepository:null, cacheRepository:null, retryOpts:RetryOpts(delay:450ms, maxDelay:1m 30s, maxAttempts:10, jitter:0.25), httpClientOpts:HttpOpts(), freezeMode:null, preserveFileTimestamp:null, buildMaxDuration:40m, mirrorMode:null, scanMode:null, scanAllowedLevels:null)'
     }
 
-    def 'should not allow invalid settinga' () {
+    def 'should not allow invalid setting' () {
         when:
         new WaveConfig(endpoint: 'foo')
         then:
@@ -240,4 +242,38 @@ class WaveConfigTest extends Specification {
         config.preserveFileTimestamp()
     }
 
+    def 'should enable mirror mode' () {
+        expect:
+        !new WaveConfig([:]).mirrorMode()
+        and:
+        new WaveConfig([mirror:true]).mirrorMode()
+    }
+
+    @Unroll
+    def 'should validate scan mode' () {
+        expect:
+        new WaveConfig(scan: [mode: MODE]).scanMode() == EXPECTED
+        where:
+        MODE        | EXPECTED
+        null        | null
+        'none'      | ScanMode.none
+        'async'     | ScanMode.async
+        'required'  | ScanMode.required
+    }
+
+    @Unroll
+    def 'should validate scan levels' () {
+        expect:
+        new WaveConfig(scan: [allowedLevels: LEVEL]).scanAllowedLevels() == EXPECTED
+        where:
+        LEVEL               | EXPECTED
+        null                | null
+        'low'               | List.of(ScanLevel.LOW)
+        'LOW'               | List.of(ScanLevel.LOW)
+        'low,high'          | List.of(ScanLevel.LOW,ScanLevel.HIGH)
+        'LOW, HIGH'         | List.of(ScanLevel.LOW,ScanLevel.HIGH)
+        ['medium','high']   | List.of(ScanLevel.MEDIUM,ScanLevel.HIGH)
+    }
+
 }
diff --git a/test-ci.sh b/test-ci.sh
index 41c008b53e..5444674369 100755
--- a/test-ci.sh
+++ b/test-ci.sh
@@ -9,17 +9,12 @@ X_PULL_REQUEST=${TRAVIS_PULL_REQUEST:-false}
 [[ $GITHUB_REF ]] && X_BRANCH=$(echo $GITHUB_REF | awk '{n=split($1,A,"/"); print A[n]}')
 [[ $GITHUB_EVENT_NAME == pull_request ]] && X_PULL_REQUEST=true
 
-if [ "$TEST_JDK" -ge 19 ]; then
-  export NXF_ENABLE_VIRTUAL_THREADS=true
-fi
-
 export WITH_DOCKER='-with-docker'
 export NXF_PLUGINS_DIR=$PWD/build/plugins
 export NXF_CMD=$PWD/nextflow;
 export TEST_JDK
 export TEST_MODE
-unset JAVA_TOOL_OPTIONS # this variable mess-up Capsule loader Java version parsing
 
 (
 $NXF_CMD info
 cd validation
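For reference, the new WaveConfig options exercised by the specs above surface as user-facing settings in the wave scope; a minimal configuration sketch follows (values are illustrative only, assuming the config keys mirror the map keys used in the tests):

    // nextflow.config
    wave.enabled = true
    wave.mirror = true                       // enables container mirror mode
    wave.scan.mode = 'required'              // one of: none, async, required
    wave.scan.allowedLevels = 'low,medium'   // severity levels tolerated by a required scan
    wave.build.maxDuration = '40m'           // provisioning timeout enforced by WaveClient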
diff --git a/test-e2e/.gitignore b/test-e2e/.gitignore
new file mode 100644
index 0000000000..53a548845f
--- /dev/null
+++ b/test-e2e/.gitignore
@@ -0,0 +1,3 @@
+.nextflow
+nextflow
+build-info.properties
diff --git a/test-e2e/Dockerfile b/test-e2e/Dockerfile
new file mode 100644
index 0000000000..eb8df06a67
--- /dev/null
+++ b/test-e2e/Dockerfile
@@ -0,0 +1,11 @@
+FROM amazoncorretto:17-al2023
+COPY .nextflow /.nextflow
+COPY nextflow /usr/bin/nextflow
+ENV NXF_HOME=/.nextflow
+RUN nextflow info
+RUN NXF_PLUGINS_DEFAULT=false nextflow plugin install xpack-amzn,xpack-google
+
+FROM scratch
+COPY --from=0 /.nextflow /.nextflow
+COPY --from=0 /usr/bin/nextflow /usr/bin/nextflow
+
diff --git a/test-e2e/run.sh b/test-e2e/run.sh
new file mode 100644
index 0000000000..4591606921
--- /dev/null
+++ b/test-e2e/run.sh
@@ -0,0 +1,89 @@
+#
+# Copyright 2013-2024, Seqera Labs
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# cleanup
+rm -rf .nextflow && mkdir .nextflow
+# build the nextflow runtime and plugins
+(cd ..
+./gradlew compile assemble
+BUILD_PACK=1 ./gradlew installScratch publishToMavenLocal
+)
+
+# copy nextflow plugins
+cp -r ../build/plugins .nextflow/
+# copy nextflow launcher script
+cp ../nextflow . && chmod +x nextflow
+cp ../modules/nextflow/src/main/resources/META-INF/build-info.properties .
+source build-info.properties
+
+if [ -z "$version" ]; then
+  echo "Error: version is empty or missing"; exit 1
+fi
+if [ -z "$build" ]; then
+  echo "Error: build is empty or missing"; exit 1
+fi
+if [ -z "$commitId" ]; then
+  echo "Error: commitId is empty or missing"; exit 1
+fi
+
+#
+# build a scratch container image with the assembled nextflow runtime and plugins
+#
+tag=${version}-${commitId}
+base=${base:-'public.cr.seqera.io/platform/nf-launcher:j17-base'}
+repository=${repository:-'public.cr.seqera.io/snapshots/nextflow-scratch'}
+image=${repository}:${tag}
+
+docker buildx build \
+    --platform linux/amd64 \
+    --push \
+    --progress=plain \
+    --tag ${image} \
+    --build-arg TARGETPLATFORM=linux/amd64 \
+    .
+echo "Nextflow snapshots launcher image $image"
+
+#
+# Create an ephemeral container with the scratch image and base Platform launcher image
+#
+launcher=$(wave -i ${base} --include ${image} --config-env NXF_HOME=/.nextflow)
+echo "Running Platform tests using image launcher: $launcher"
+
+# Determine the e2e test environment by checking $COMMIT_MESSAGE,
+# which is set by the GitHub Actions workflow; if it is not set, fall back
+# to the commit message in the git repo
+if [ -z "$COMMIT_MESSAGE" ]; then
+  COMMIT_MESSAGE=$(git show -s --format='%s')
+  echo "Commit message [from git]: $COMMIT_MESSAGE"
+else
+  echo "Commit message [from gha]: $COMMIT_MESSAGE"
+fi
+if echo "$COMMIT_MESSAGE" | grep -q "\[e2e prod\]"; then
+  ENVIRONMENT="production"
+else
+  ENVIRONMENT="staging"
+fi
+
+#
+# Finally launch the showcase automation
+# see https://github.com/seqeralabs/showcase-automation/
+#
+echo "Launching seqera-showcase-${ENVIRONMENT}"
+gh workflow run \
+  seqera-showcase-${ENVIRONMENT}.yml \
+  --repo seqeralabs/showcase-automation \
+  -f launch_container=${launcher}
+
diff --git a/tests/checks/error-ignore-then-fail.nf/.checks b/tests/checks/error-ignore-then-fail.nf/.checks
index 688d88fb3e..6e91cd7067 100644
--- a/tests/checks/error-ignore-then-fail.nf/.checks
+++ b/tests/checks/error-ignore-then-fail.nf/.checks
@@ -8,5 +8,6 @@ set -e
 
 [ $status -ne 0 ] || false
 
-[[ `< .nextflow.log grep -c 'Submitted process > foo'` == 1 ]] || false
+[[ `< .nextflow.log grep -c 'Submitted process > foo'` == 3 ]] || false
+[[ `< .nextflow.log grep -c 'Submitted process > bar'` == 1 ]] || false
 [[ `< .nextflow.log grep -c 'Error is ignored'` == 1 ]] || false
diff --git a/tests/checks/error-ignore-then-fail.nf/.config b/tests/checks/error-ignore-then-fail.nf/.config
index ae0cd92bff..09a06e5e06 100644
--- a/tests/checks/error-ignore-then-fail.nf/.config
+++ b/tests/checks/error-ignore-then-fail.nf/.config
@@ -1 +1,6 @@
-workflow.failOnIgnore = true
+workflow {
+  failOnIgnore = true
+}
+process {
+  errorStrategy = 'ignore'
+}
diff --git a/tests/checks/run.sh b/tests/checks/run.sh
old mode 100644
new mode 100755
diff --git a/tests/checks/stub-retry.nf/.checks b/tests/checks/stub-retry.nf/.checks
new file mode 100644
index 0000000000..6a5c757beb
--- /dev/null
+++ b/tests/checks/stub-retry.nf/.checks
@@ -0,0 +1,11 @@
+set -e
+
+echo ''
+$NXF_RUN -stub | tee stdout
+
+[[ `grep 'INFO' .nextflow.log | grep -c 'Submitted process > stubtest'` == 1 ]] || false
+[[ `grep 'INFO' .nextflow.log | grep -c 'Re-submitted process > stubtest'` == 1 ]] || false
+
+[[ `grep -c 'Stubbing. Creating file' stdout` == 1 ]] || false
+
+
diff --git a/tests/checks/trace-access.nf/.checks b/tests/checks/trace-access.nf/.checks
new file mode 100644
index 0000000000..887f37fd3b
--- /dev/null
+++ b/tests/checks/trace-access.nf/.checks
@@ -0,0 +1,11 @@
+set -e
+
+echo ''
+$NXF_RUN | tee stdout
+
+[[ `grep 'INFO' .nextflow.log | grep -c 'Submitted process > foo'` == 1 ]] || false
+[[ `grep 'INFO' .nextflow.log | grep -c 'Re-submitted process > foo'` == 3 ]] || false
+
+[[ `grep -c 'mem: 8 GB (previous: 4294967296) (error: nextflow.exception.ProcessFailedException: Process .* terminated with an error exit status (137))' stdout` == 1 ]] || false
+
+
diff --git a/tests/error-ignore-then-fail.nf b/tests/error-ignore-then-fail.nf
index de2371e865..07cfa2b3e5 100644
--- a/tests/error-ignore-then-fail.nf
+++ b/tests/error-ignore-then-fail.nf
@@ -15,15 +15,39 @@
  * limitations under the License.
*/ +workflow { + input_channel = channel.of("SAMP1", "SAMP2", "SAMP3") + foo(input_channel) + bar(foo.out.sample_ids.collect()) +} + process foo { - errorStrategy 'ignore' + input: + val sample_id + + output: + val sample_id, emit: sample_ids script: - ''' - exit 1 - ''' + """ + if [[ $sample_id == "SAMP1" ]]; then + exit 2 + fi + ls -lah .* + """ } -workflow { - foo() +process bar { + input: + val ready + + output: + stdout + + script: + """ + ls -lah .* + """ + } + diff --git a/tests/output-dsl.nf b/tests/output-dsl.nf index 22d9cea365..f3ed165185 100644 --- a/tests/output-dsl.nf +++ b/tests/output-dsl.nf @@ -26,6 +26,7 @@ process align { path("*.bam") path("${x}.bai") + script: """ echo ${x} > ${x}.bam echo ${x} | rev > ${x}.bai @@ -40,6 +41,7 @@ process my_combine { output: path 'result.txt' + script: """ cat $bamfile > result.txt cat $baifile >> result.txt @@ -50,6 +52,7 @@ process foo { output: path 'xxx' + script: ''' mkdir xxx touch xxx/A @@ -59,13 +62,14 @@ process foo { } workflow { - def input = Channel.of('alpha','beta','delta') + main: + input = Channel.of('alpha','beta','delta') align(input) - def bam = align.out[0].toSortedList { it.name } - def bai = align.out[1].toSortedList { it.name } - my_combine( bam, bai ) - my_combine.out.view{ it.text } + bams = align.out[0].toSortedList { bam -> bam.name } + bais = align.out[1].toSortedList { bai -> bai.name } + my_combine( bams, bais ) + my_combine.out.view { it -> it.text } foo() @@ -76,10 +80,8 @@ workflow { } output { - directory 'results' - mode 'copy' - 'data' { + path { val -> { file -> file } } index { path 'index.csv' mapper { val -> [filename: val] } diff --git a/tests/stub-retry.nf b/tests/stub-retry.nf new file mode 100644 index 0000000000..a682d899c4 --- /dev/null +++ b/tests/stub-retry.nf @@ -0,0 +1,30 @@ +process stubtest { + debug true + errorStrategy 'retry' + + output: + path("*.txt") + + script: + """ + echo "Not stubbing" + touch script.txt + """ + + stub: + if( task.attempt < 2 ) { + """ + echo "Stubbing. Not creating file" + """ + } else { + """ + echo "Stubbing. Creating file" + touch script.txt + """ + } +} + +workflow { + main: + stubtest() +} diff --git a/tests/trace-access.nf b/tests/trace-access.nf new file mode 100644 index 0000000000..ff55dbdb84 --- /dev/null +++ b/tests/trace-access.nf @@ -0,0 +1,24 @@ +process foo { + memory { task.attempt > 1 ? 
task.previousTrace.memory * 2 : (1.GB) }
+    errorStrategy 'retry'
+    maxRetries 3
+    input:
+    val i
+    output:
+    stdout
+    script:
+    if( task.attempt <= 3 ){
+        """
+        exit 137
+        """
+    } else {
+        """
+        echo 'mem: $task.memory (previous: $task.previousTrace.memory) (error: $task.previousException)'
+        exit 0
+        """
+    }
+}
+
+workflow {
+    foo(channel.of(1)).view()
+}
diff --git a/validation/awsbatch-unstage-fail.config b/validation/awsbatch-unstage-fail.config
new file mode 100644
index 0000000000..81b96579d7
--- /dev/null
+++ b/validation/awsbatch-unstage-fail.config
@@ -0,0 +1,12 @@
+/*
+ * do not include plugin requirements otherwise latest
+ * published version will be downloaded instead of using local build
+ */
+
+workDir = 's3://nextflow-ci/work'
+process.executor = 'awsbatch'
+process.queue = 'nextflow-ci'
+process.container = 'quay.io/nextflow/test-aws-unstage-fail:1.0'
+aws.region = 'eu-west-1'
+aws.batch.maxTransferAttempts = 3
+aws.batch.delayBetweenAttempts = '5 sec'
diff --git a/validation/awsbatch.sh b/validation/awsbatch.sh
index d58727e7e8..b73571cbd6 100644
--- a/validation/awsbatch.sh
+++ b/validation/awsbatch.sh
@@ -7,6 +7,13 @@ get_abs_filename() {
 
 export NXF_CMD=${NXF_CMD:-$(get_abs_filename ../launch.sh)}
 
+# Execution is expected to fail; ignore the error so the assertions below run
+$NXF_CMD run test-aws-unstage-fail.nf -c awsbatch-unstage-fail.config || true
+[[ `grep -c "Error executing process > 'test (1)'" .nextflow.log` == 1 ]] || false
+[[ `grep -c " Essential container in task exited" .nextflow.log` == 1 ]] || false
+[[ `grep -cozP "Command exit status:\n 1" .nextflow.log` == 1 ]] || false
+[[ `grep -c "Producing a failure in aws" .nextflow.log` == 2 ]] || false
+
 $NXF_CMD run test-complexpaths.nf -c awsbatch.config
 [[ -d foo ]] || false
 [[ -e 'foo/.alpha' ]] || false
@@ -73,4 +80,4 @@ $NXF_CMD run nextflow-io/hello \
     -process.array 10 \
     -with-wave \
     -with-fusion \
-    -c awsbatch.config
\ No newline at end of file
+    -c awsbatch.config
diff --git a/validation/google.sh b/validation/google.sh
index 8dcda8ab99..0dcb9606f8 100644
--- a/validation/google.sh
+++ b/validation/google.sh
@@ -6,9 +6,6 @@ get_abs_filename() {
 
 export NXF_CMD=${NXF_CMD:-$(get_abs_filename ../launch.sh)}
 
-echo $GOOGLE_SECRET | base64 -d > $PWD/google_credentials.json
-export GOOGLE_APPLICATION_CREDENTIALS=$PWD/google_credentials.json
-
 [[ $TOWER_ACCESS_TOKEN ]] && OPTS='-with-tower' || OPTS=''
 
 set -x
diff --git a/validation/test-aws-unstage-fail-container/Dockerfile b/validation/test-aws-unstage-fail-container/Dockerfile
new file mode 100644
index 0000000000..0dd281ba58
--- /dev/null
+++ b/validation/test-aws-unstage-fail-container/Dockerfile
@@ -0,0 +1,11 @@
+FROM ubuntu
+
+RUN apt-get update && apt-get -y install curl unzip && apt-get clean
+
+
+RUN curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "awscliv2.zip" && \
+    unzip awscliv2.zip && ./aws/install && rm -rf aws*
+
+ADD fake_aws /fake_aws
+
+ENV PATH=/fake_aws/bin/:$PATH
diff --git a/validation/test-aws-unstage-fail-container/fake_aws/bin/aws b/validation/test-aws-unstage-fail-container/fake_aws/bin/aws
new file mode 100755
index 0000000000..80985d9e08
--- /dev/null
+++ b/validation/test-aws-unstage-fail-container/fake_aws/bin/aws
@@ -0,0 +1,9 @@
+#!/bin/bash
+
+if [[ "$*" == *".command."* ]] || [[ "$*" == *".exitcode"* ]]; then
+  /usr/local/bin/aws "$@"
+else
+  >&2 echo "Producing a failure in aws $@"
+  exit 2
+fi
+
diff --git a/validation/test-aws-unstage-fail.nf b/validation/test-aws-unstage-fail.nf
new file mode 100644
index 0000000000..96bcb9af1e
--- /dev/null
+++
b/validation/test-aws-unstage-fail.nf @@ -0,0 +1,16 @@ +process test { + input: + val i + output: + file("test${i}") + file("test_2_${i}") + script: + """ + dd if=/dev/urandom of=test${i} bs=1K count=90 + dd if=/dev/urandom of=test_2_${i} bs=1K count=90 + """ +} + +workflow { + Channel.of(1) | test +} diff --git a/validation/wave-tests/example6/nextflow.config b/validation/wave-tests/example6/nextflow.config index f58834306f..c757b3bb7b 100644 --- a/validation/wave-tests/example6/nextflow.config +++ b/validation/wave-tests/example6/nextflow.config @@ -1,12 +1,3 @@ -process { - container = 'quay.io/nextflow/rnaseq-nf:v1.1' -} - -params { - reads = 's3://rnaseq-nf/data/ggal/lung_{1,2}.fq' - transcriptome = 's3://rnaseq-nf/data/ggal/transcript.fa' -} - docker { enabled = true envWhitelist = 'AWS_ACCESS_KEY_ID,AWS_SECRET_ACCESS_KEY' @@ -16,5 +7,3 @@ fusion { enabled = true } -workDir = 's3://nextflow-ci/wave' - diff --git a/validation/wave-tests/example6/run-aws.sh b/validation/wave-tests/example6/run-aws.sh index b63724d33c..0f2d2247fa 100644 --- a/validation/wave-tests/example6/run-aws.sh +++ b/validation/wave-tests/example6/run-aws.sh @@ -1,6 +1,6 @@ $NXF_CMD run \ rnaseq-nf \ - -profile batch \ + -profile batch,s3-data \ -with-wave \ -with-fusion \ -process.scratch false diff --git a/validation/wave-tests/example6/run-gcp.sh b/validation/wave-tests/example6/run-gcp.sh index b522ac13c5..c55dcdb3ec 100644 --- a/validation/wave-tests/example6/run-gcp.sh +++ b/validation/wave-tests/example6/run-gcp.sh @@ -1,6 +1,6 @@ $NXF_CMD run \ rnaseq-nf \ - -profile gcb \ + -profile google-batch,gs-data \ -with-wave \ -with-fusion \ -process.scratch false diff --git a/validation/wave-tests/example6/run.sh b/validation/wave-tests/example6/run.sh index 0bb6d254c5..971a65f351 100644 --- a/validation/wave-tests/example6/run.sh +++ b/validation/wave-tests/example6/run.sh @@ -1,2 +1,5 @@ -$NXF_CMD run rnaseq-nf -with-wave +$NXF_CMD run \ + rnaseq-nf \ + -with-wave \ + -w s3://nextflow-ci/wave diff --git a/validation/wave.sh b/validation/wave.sh index 6b8953a7f3..b24b718be6 100644 --- a/validation/wave.sh +++ b/validation/wave.sh @@ -25,4 +25,4 @@ echo "Test Wave running rnaseq-nf with Fusion on AWS Batch" (cd wave-tests/example6; bash run-aws.sh) echo "Test Wave running rnaseq-nf with Fusion on Google Batch" -(cd wave-tests/example6; bash run-aws.sh) +(cd wave-tests/example6; bash run-gcp.sh)
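Note that the example6 run scripts above now pull cloud and data settings from profiles (batch,s3-data and google-batch,gs-data) rather than from the params and workDir entries removed from nextflow.config. An illustrative sketch of what such a profile would provide on the rnaseq-nf side (hypothetical, simply mirroring the removed values):

    profiles {
        's3-data' {
            params.reads = 's3://rnaseq-nf/data/ggal/lung_{1,2}.fq'
            params.transcriptome = 's3://rnaseq-nf/data/ggal/transcript.fa'
        }
    }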