
make jobs run sequentially to reduce benchmark interference #121

Workflow file for this run

name: bench
on:
  workflow_dispatch:
  push:
  pull_request:
jobs:
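  # The three benchmark jobs are chained with `needs` so they run one after
  # another instead of in parallel (the point of this change): bench-Julia
  # waits for bench-R, and bench-Python waits for both.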
  bench-R:
    runs-on: ubuntu-latest
    env:
      RENV_PATHS_ROOT: ~/.local/share/renv
    defaults:
      run:
        working-directory: R
    steps:
      - uses: actions/checkout@v3
      # now we need to install R
      - uses: r-lib/actions/setup-r@v2
        with:
          r-version: '4.2.2'
      # we need to manually cache the packages
      - name: Cache R packages
        uses: actions/cache@v2
        with:
          path: ${{ env.RENV_PATHS_ROOT }}
          key: ${{ runner.os }}-renv-${{ hashFiles('**/renv.lock') }}
          restore-keys: |
            ${{ runner.os }}-renv-
      - name: Restore packages
        shell: Rscript {0}
        run: |
          if (!requireNamespace("renv", quietly = TRUE)) install.packages("renv")
          renv::restore()
      - name: Benchmark
        run: Rscript -e 'source("main.R")'
      - run: ls
      - name: upload R benchmark
        uses: actions/upload-artifact@v3
        with:
          name: R_benchmark
          path: R/benchmark_results.yaml
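  # bench-Julia only starts once bench-R has finished. Julia dependencies are
  # resolved with Pkg.instantiate() inside the benchmark step itself; no
  # package cache is configured for this job.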
  bench-Julia:
    needs: bench-R
    runs-on: ubuntu-latest
    defaults:
      run:
        working-directory: Julia
    steps:
      - uses: actions/checkout@v3
      - uses: julia-actions/setup-julia@v1
        with:
          version: '1.9.3'
      - name: Benchmark
        run: julia --project -e 'using Pkg; Pkg.instantiate(); include("main.jl")'
      - run: ls
      - name: upload Julia benchmark
        uses: actions/upload-artifact@v3
        with:
          name: Julia_benchmark
          path: Julia/benchmark_results.yaml
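  # bench-Python waits on both earlier benchmark jobs, so no two benchmarks
  # ever run at the same time. The pip cache is restored before
  # `pip install -r requirements.txt` so the cached downloads are reused.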
  bench-Python:
    needs: [bench-R, bench-Julia]
    runs-on: ubuntu-latest
    defaults:
      run:
        working-directory: Python
    steps:
      - uses: actions/checkout@v3
      - uses: actions/setup-python@v2
        with:
          python-version: '3.11'
      # cache the pip download cache (restored before install so it is actually used)
      - name: Cache pip packages
        uses: actions/cache@v2
        with:
          path: ~/.cache/pip
          key: ${{ runner.os }}-pip-${{ hashFiles('**/requirements.txt') }}
          restore-keys: |
            ${{ runner.os }}-pip-
      - run: pip install -r requirements.txt
      - name: Benchmark
        run: python main.py
      - run: ls
      - name: upload Python benchmark
        uses: actions/upload-artifact@v3
        with:
          name: Python_benchmark
          path: Python/benchmark_results.yaml
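  # create-README runs after all benchmarks: it downloads the three
  # benchmark_results.yaml artifacts, regenerates README.md, and pushes the
  # result, but only on main and never for pull requests.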
  create-README:
    runs-on: ubuntu-latest
    needs: [bench-R, bench-Julia, bench-Python]
    steps:
      - uses: actions/checkout@v3
      - name: Download R benchmark
        uses: actions/download-artifact@v2
        with:
          name: R_benchmark
          path: R
      - name: Download Julia benchmark
        uses: actions/download-artifact@v2
        with:
          name: Julia_benchmark
          path: Julia
      - name: Download Python benchmark
        uses: actions/download-artifact@v2
        with:
          name: Python_benchmark
          path: Python
      # set up Python and install dependencies
      - uses: actions/setup-python@v2
        with:
          python-version: '3.11'
      - run: pip install -r requirements.txt
      - run: python generate_readme.py
      - run: cat README.md
      # commit and push the updated README.md
      - name: Commit and push
        if: github.ref == 'refs/heads/main' && github.event_name != 'pull_request'
        run: |
          git config --local user.email "github-actions[bot]@users.noreply.github.com"
          git config --local user.name "github-actions[bot]"
          git add .
          git commit -m "Bench and update README.md"
          git push
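
The generate_readme.py script invoked above is not part of this workflow file, so the following is only a minimal sketch of how the downloaded results might be turned into a README table. It assumes (hypothetically) that each benchmark_results.yaml is a flat mapping of benchmark name to a timing value and that PyYAML is listed in requirements.txt; the real script may differ.

# hypothetical sketch, not the repository's actual generate_readme.py
from pathlib import Path
import yaml

LANGS = ["R", "Julia", "Python"]  # matches the artifact download paths above

def load_results(lang: str) -> dict:
    """Read <lang>/benchmark_results.yaml into a dict (assumed: name -> time)."""
    with open(Path(lang) / "benchmark_results.yaml") as f:
        return yaml.safe_load(f) or {}

def main() -> None:
    results = {lang: load_results(lang) for lang in LANGS}
    # union of benchmark names across all three languages, in stable order
    names = sorted({name for res in results.values() for name in res})
    lines = [
        "# Benchmarks",
        "",
        "| benchmark | " + " | ".join(LANGS) + " |",
        "|---|" + "---|" * len(LANGS),
    ]
    for name in names:
        row = [str(results[lang].get(name, "-")) for lang in LANGS]
        lines.append(f"| {name} | " + " | ".join(row) + " |")
    Path("README.md").write_text("\n".join(lines) + "\n")

if __name__ == "__main__":
    main()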