1.5.10.post: fix * in dependency versions #38516

Workflow file for this run
name: Test
# see: https://help.github.com/en/actions/reference/events-that-trigger-workflows
on: # Trigger the workflow on push or pull request, but only for the master and release branches
  push:
    branches: [master, "release/*"]
  pull_request:
    branches: [master, "release/*"]
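# note: "release/*" is a glob, so pushes to maintenance branches (e.g. release/1.5.x) also trigger this workflow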
jobs:
  conda:
    runs-on: ubuntu-20.04
    container: pytorchlightning/pytorch_lightning:base-conda-py${{ matrix.python-version }}-torch${{ matrix.pytorch-version }}
    strategy:
      fail-fast: false
      matrix:
        python-version: ["3.8"] # second-to-latest Python version; the latest is already exercised in test-full
        pytorch-version: ["1.7", "1.8", "1.9", "1.10"] # nightly: add when there's a release candidate
    timeout-minutes: 30
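    # the matrix expands to 4 jobs (py3.8 x torch 1.7/1.8/1.9/1.10), each in its matching conda container;
    # fail-fast: false lets the remaining combinations finish even when one of them fails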
    steps:
      - uses: actions/checkout@v2
      - name: Update dependencies
        run: |
          conda info
          conda list
          # adjust versions according to the installed Torch version
          python ./requirements/adjust_versions.py requirements/extra.txt
          python ./requirements/adjust_versions.py requirements/examples.txt
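          # (adjust_versions.py rewrites the pins in these files so that torch-coupled packages, e.g. torchtext,
          # match the torch release installed in the container; see the script itself for the exact mapping)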
          pip install --requirement requirements/devel.txt --find-links https://download.pytorch.org/whl/nightly/torch_nightly.html
          # set a per-test timeout of 2.5 minutes to fail sooner; this helps surface hanging tests
          pip install pytest-timeout
          pip list
      - name: Pull checkpoints from S3
        working-directory: ./legacy
        run: |
          # download and unpack the legacy checkpoints from S3
          curl https://pl-public-data.s3.amazonaws.com/legacy/checkpoints.zip --output checkpoints.zip
          unzip -o checkpoints.zip
          ls -l checkpoints/
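      # the checkpoints unpacked above are consumed by the legacy-compatibility tests,
      # which load models saved by older releases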
      - name: Tests
        run: |
          coverage run --source pytorch_lightning -m pytest --timeout 150 pytorch_lightning tests -v --durations=50 --junitxml=junit/test-results-${{ runner.os }}-torch${{ matrix.pytorch-version }}.xml
        shell: bash -l {0}
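      # bash -l {0} runs the step in a login shell so the container's conda environment is activated
      # via its profile scripts; --timeout 150 is the 2.5-minute per-test limit from pytest-timeout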
      - name: Upload pytest results
        uses: actions/upload-artifact@v2
        with:
          name: pytest-results-${{ runner.os }}-py${{ matrix.python-version }}-torch${{ matrix.pytorch-version }}
          path: junit/test-results-${{ runner.os }}-torch${{ matrix.pytorch-version }}.xml
        if: failure()
      - name: Statistics
        if: success()
        run: |
          coverage report
          coverage xml
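      # coverage xml writes coverage.xml into the workspace, which the Codecov step below uploads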
      - name: Upload coverage to Codecov
        uses: codecov/codecov-action@v1
        if: always()
        # see: https://github.com/actions/toolkit/issues/399
        continue-on-error: true
        with:
          token: ${{ secrets.CODECOV_TOKEN }}
          file: coverage.xml
          flags: cpu,pytest,torch${{ matrix.pytorch-version }}
          name: CPU-coverage
          fail_ci_if_error: false