Merge pull request #3 from clouren/SPEX-3.1.x #20

Workflow file for this run

name: build
on:
  workflow_dispatch:
  push:
    branches-ignore:
      - '**/dev2'
      - '**/*dev2'
  pull_request:
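# All runs on the same ref share one concurrency group, so at most one run
# per branch or pull request executes at a time.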
concurrency: ci-${{ github.ref }}
env:
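  # Note: these colon-separated lists are split with `IFS=':' read -r -a libs`
  # in the build, check, install, and test steps below.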
  # colon-separated list of the libraries to be built
  BUILD_LIBS: "SuiteSparse_config:AMD:COLAMD:SPEX"
  # colon-separated list of the libraries to be checked
  CHECK_LIBS: "SuiteSparse_config:AMD:COLAMD:SPEX"
  # colon-separated list of the libraries that are installed
  INSTALLED_LIBS: "SuiteSparse_config:AMD:COLAMD:SPEX"
jobs:
  ubuntu:
    # For available GitHub-hosted runners, see:
    # https://docs.github.com/en/actions/using-github-hosted-runners/about-github-hosted-runners
    runs-on: ubuntu-latest
    name: ubuntu (${{ matrix.compiler }} ${{ matrix.cuda }} CUDA ${{ matrix.openmp }} OpenMP, ${{ matrix.link }})
    strategy:
      # Allow other runners in the matrix to continue if some fail
      fail-fast: false
      matrix:
        compiler: [gcc, clang]
        cuda: [without]
        openmp: [with]
        link: [both]
        include:
          - compiler: gcc
            compiler-pkgs: "g++ gcc"
            cc: "gcc"
            cxx: "g++"
          - compiler: clang
            compiler-pkgs: "clang libomp-dev"
            cc: "clang"
            cxx: "clang++"
          # Clang seems to generally require less cache size (smaller object files?).
          - compiler: gcc
            ccache-max: 600M
          - compiler: clang
            ccache-max: 500M
          - cuda: without
          # - cuda: with
          #   cuda-pkgs: "nvidia-cuda-dev nvidia-cuda-toolkit"
          #   cuda-cmake-flags:
          #     -DSUITESPARSE_USE_CUDA=ON
          #     -DSUITESPARSE_USE_STRICT=ON
          #     -DCUDAToolkit_INCLUDE_DIRS="/usr/include"
          #     -DCMAKE_CUDA_COMPILER_LAUNCHER="ccache"
          - compiler: gcc
            compiler-pkgs: "g++ gcc"
            cc: "gcc"
            cxx: "g++"
            ccache-max: 600M
            cuda: without
            # cuda: with
            # cuda-pkgs: "nvidia-cuda-dev nvidia-cuda-toolkit"
            # cuda-cmake-flags:
            #   -DSUITESPARSE_USE_CUDA=ON
            #   -DSUITESPARSE_USE_STRICT=ON
            #   -DCUDAToolkit_INCLUDE_DIRS="/usr/include"
            #   -DCMAKE_CUDA_COMPILER_LAUNCHER="ccache"
            openmp: with
            link: static
            # "Fake" a cross-compilation to exercise that build system path
            link-cmake-flags:
              -DBUILD_SHARED_LIBS=OFF
              -DBUILD_STATIC_LIBS=ON
              -DCMAKE_SYSTEM_NAME="Linux"
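            # Setting CMAKE_SYSTEM_NAME explicitly makes CMake treat the build as a
            # cross-compilation (CMAKE_CROSSCOMPILING=TRUE) even though the target is
            # the build host, which exercises that code path in the build system.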
    env:
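      # CMake honors the CC and CXX environment variables, so each matrix entry
      # selects its compiler through them.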
      CC: ${{ matrix.cc }}
      CXX: ${{ matrix.cxx }}
    steps:
      - name: get CPU information
        run: lscpu
      - name: checkout repository
        uses: actions/checkout@v4
      - name: install dependencies
        env:
          COMPILER_PKGS: ${{ matrix.compiler-pkgs }}
          CUDA_PKGS: ${{ matrix.cuda-pkgs }}
        run: |
          sudo apt -qq update
          sudo apt install -y ${COMPILER_PKGS} autoconf automake ccache cmake \
            dvipng gfortran libgmp-dev liblapack-dev libmpfr-dev valgrind \
            libopenblas-dev ${CUDA_PKGS}
      - name: prepare ccache
        # create key with human readable timestamp
        # used in action/cache/restore and action/cache/save steps
        id: ccache-prepare
        run: |
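          # The generated key looks like, e.g.:
          #   ccache:ubuntu:gcc:without:with:both:refs/heads/<branch>:<timestamp>:<sha>
          # (illustrative; ref, timestamp, and commit hash are filled in at run time)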
echo "key=ccache:ubuntu:${{ matrix.compiler }}:${{ matrix.cuda }}:${{ matrix.openmp }}:${{ matrix.link }}:${{ github.ref }}:$(date +"%Y-%m-%d_%H-%M-%S"):${{ github.sha }}" >> $GITHUB_OUTPUT
- name: restore ccache
# setup the GitHub cache used to maintain the ccache from one job to the next
uses: actions/cache/restore@v4
with:
path: ~/.ccache
key: ${{ steps.ccache-prepare.outputs.key }}
          # Prefer caches from the same branch. Fall back to caches from other branches.
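          # (restore-keys are prefix-matched: the most recently saved cache whose key
          # starts with one of these strings is restored)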
          restore-keys: |
            ccache:ubuntu:${{ matrix.compiler }}:${{ matrix.cuda }}:${{ matrix.openmp }}:${{ matrix.link }}:${{ github.ref }}
            ccache:ubuntu:${{ matrix.compiler }}:${{ matrix.cuda }}:${{ matrix.openmp }}:${{ matrix.link }}:
      - name: create empty libraries
        # This is to work around a bug in nvlink.
        # See: https://forums.developer.nvidia.com/t/nvlink-fatal-could-not-open-input-file-when-linking-with-empty-static-library/208517
        if: matrix.cuda == 'with'
        run: |
          touch empty.c
          gcc -fPIC -c empty.c -oempty.o
          ar rcsv libdl.a empty.o
          ar rcsv librt.a empty.o
          ar rcsv libpthread.a empty.o
          # overwrite system libraries with "valid" empty libraries
          sudo mv ./libdl.a /usr/lib/x86_64-linux-gnu/libdl.a
          sudo mv ./librt.a /usr/lib/x86_64-linux-gnu/librt.a
          sudo mv ./libpthread.a /usr/lib/x86_64-linux-gnu/libpthread.a
      - name: configure ccache
        env:
          CCACHE_MAX: ${{ matrix.ccache-max }}
        run: |
          test -d ~/.ccache || mkdir ~/.ccache
          echo "max_size = $CCACHE_MAX" >> ~/.ccache/ccache.conf
          echo "compression = true" >> ~/.ccache/ccache.conf
          ccache -s
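          # /usr/lib/ccache contains compiler wrapper symlinks (gcc, g++, ...);
          # putting it first on PATH routes plain compiler calls through ccache.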
echo "/usr/lib/ccache" >> $GITHUB_PATH
- name: build
run: |
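          # Split the colon-separated BUILD_LIBS list and configure/build each library
          # in its own build directory. The ::group::/::endgroup:: lines are GitHub
          # Actions workflow commands that fold each library's output in the log view.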
          IFS=':' read -r -a libs <<< "${BUILD_LIBS}"
          for lib in "${libs[@]}"; do
            printf " \033[0;32m==>\033[0m Building library \033[0;32m${lib}\033[0m\n"
            echo "::group::Configure $lib"
            cd ${GITHUB_WORKSPACE}/${lib}/build
            cmake -DCMAKE_BUILD_TYPE="Release" \
                  -DCMAKE_INSTALL_PREFIX="${GITHUB_WORKSPACE}" \
                  -DCMAKE_C_COMPILER_LAUNCHER="ccache" \
                  -DCMAKE_CXX_COMPILER_LAUNCHER="ccache" \
                  -DCMAKE_Fortran_COMPILER_LAUNCHER="ccache" \
                  -DBLA_VENDOR="OpenBLAS" \
                  -DSUITESPARSE_DEMOS=OFF \
                  -DBUILD_TESTING=OFF \
                  ${{ matrix.cuda-cmake-flags }} \
                  ${{ matrix.openmp-cmake-flags }} \
                  ${{ matrix.link-cmake-flags }} \
                  ..
            echo "::endgroup::"
            echo "::group::Build $lib"
            cmake --build . --config Release
            echo "::endgroup::"
          done
      - name: check
        timeout-minutes: 20
        run: |
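          # Each library ships a Makefile whose 'demos' target re-runs cmake with the
          # given CMAKE_OPTIONS and builds and runs the demo programs (assumed from
          # how it is invoked here).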
          IFS=':' read -r -a libs <<< "${CHECK_LIBS}"
          for lib in "${libs[@]}"; do
            printf "::group:: \033[0;32m==>\033[0m Checking library \033[0;32m${lib}\033[0m\n"
            cd ${GITHUB_WORKSPACE}/${lib}
            make demos CMAKE_OPTIONS="-DSUITESPARSE_DEMOS=ON -DBUILD_TESTING=ON"
            echo "::endgroup::"
          done
      - name: ccache status
        continue-on-error: true
        run: ccache -s
      - name: save ccache
        # Save the cache after we are done (successfully) building.
        # This helps to retain the ccache even if subsequent steps fail.
        uses: actions/cache/save@v4
        with:
          path: ~/.ccache
          key: ${{ steps.ccache-prepare.outputs.key }}
      - name: install
        run: |
          IFS=':' read -r -a libs <<< "${BUILD_LIBS}"
          for lib in "${libs[@]}"; do
            printf "::group::\033[0;32m==>\033[0m Installing library \033[0;32m${lib}\033[0m\n"
            cd ${GITHUB_WORKSPACE}/${lib}/build
            cmake --install .
            echo "::endgroup::"
          done
      - name: test Config
        run: |
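          # Each TestConfig/<lib> directory holds a minimal consumer project that
          # locates the freshly installed package (presumably via find_package),
          # verifying that the installed <lib>Config.cmake files are usable.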
          IFS=':' read -r -a libs <<< "${INSTALLED_LIBS}"
          for lib in "${libs[@]}"; do
            printf "::group:: \033[0;32m==>\033[0m Building with Config.cmake with library \033[0;32m${lib}\033[0m\n"
            cd ${GITHUB_WORKSPACE}/TestConfig/${lib}
            cd build
            cmake \
              -DCMAKE_PREFIX_PATH="${GITHUB_WORKSPACE}/lib/cmake" \
              ..
            cmake --build . --config Release
            echo "::endgroup::"
          done
  mingw:
    # For available GitHub-hosted runners, see:
    # https://docs.github.com/en/actions/using-github-hosted-runners/about-github-hosted-runners
    runs-on: windows-latest
    defaults:
      run:
        # Use MSYS2 as default shell
        shell: msys2 {0}
    strategy:
      # Allow other runners in the matrix to continue if some fail
      fail-fast: false
      # CLANG32 is broken, as of Mar 21, 2024. The stable branch CI also fails
      # the same way, even though it succeeded on Mar 2, with the same
      # SuiteSparse 7.6.1 and same workflow file. Between that time, clang and
      # openmp were upgraded. clang-openmp went from 17.0.6-1 to 18.1.1-1, and
      # clang itself went from 17.0.6-7 to 18.1.1-3. Nothing else changed, so
      # it must be a problem with the github runner (changed from 2.313.0 to
      # 2.314.1). So for now, CLANG32 is excluded from this CI test.
      matrix:
        # CLANG32 disabled for now:
        # msystem: [MINGW64, MINGW32, CLANG64, CLANG32]
        msystem: [MINGW64, MINGW32, UCRT64, CLANG64]
        include:
          - msystem: MINGW64
            target-prefix: mingw-w64-x86_64
            f77-package: mingw-w64-x86_64-fc
          - msystem: MINGW32
            target-prefix: mingw-w64-i686
            f77-package: mingw-w64-i686-fc
          - msystem: UCRT64
            target-prefix: mingw-w64-ucrt-x86_64
            # Purposefully don't install a Fortran compiler to test that configuration
            f77-package: mingw-w64-ucrt-x86_64-cc
          - msystem: CLANG64
            target-prefix: mingw-w64-clang-x86_64
            f77-package: mingw-w64-clang-x86_64-fc
          # CLANG32 disabled for now:
          # - msystem: CLANG32
          #   target-prefix: mingw-w64-clang-i686
          #   # There's no Fortran compiler for this environment.
          #   f77-package: mingw-w64-clang-i686-cc
    env:
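      # Start the MSYS2 shell in the directory it is invoked from (the checked-out
      # workspace) instead of the MSYS2 home directory.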
      CHERE_INVOKING: 1
    steps:
      - name: get CPU name
        shell: pwsh
        run: |
          Get-CIMInstance -Class Win32_Processor | Select-Object -Property Name
      - name: install MSYS2 build environment
        uses: msys2/setup-msys2@v2
        with:
          update: true
          # Use pre-installed version to save disc space on partition with source.
          release: false
          install: >-
            base-devel
            ${{ matrix.target-prefix }}-autotools
            ${{ matrix.target-prefix }}-cmake
            ${{ matrix.target-prefix }}-cc
            ${{ matrix.f77-package }}
            ${{ matrix.target-prefix }}-ccache
            ${{ matrix.target-prefix }}-openblas
            ${{ matrix.target-prefix }}-omp
            ${{ matrix.target-prefix }}-python
            ${{ matrix.target-prefix }}-gmp
            ${{ matrix.target-prefix }}-mpfr
          msystem: ${{ matrix.msystem }}
      - name: checkout repository
        uses: actions/checkout@v4
      - name: prepare ccache
        # create key with human readable timestamp
        # used in action/cache/restore and action/cache/save steps
        id: ccache-prepare
        run: |
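          # 'ccache -k cache_dir' prints ccache's configured cache directory;
          # cygpath -m converts that MSYS path to a mixed Windows-style path
          # (C:/...) that the actions/cache steps can use.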
echo "ccachedir=$(cygpath -m $(ccache -k cache_dir))" >> $GITHUB_OUTPUT
echo "key=ccache:mingw:${{ matrix.msystem }}:${{ github.ref }}:$(date +"%Y-%m-%d_%H-%M-%S"):${{ github.sha }}" >> $GITHUB_OUTPUT
- name: restore ccache
# Setup the GitHub cache used to maintain the ccache from one job to the next
uses: actions/cache/restore@v4
with:
path: ${{ steps.ccache-prepare.outputs.ccachedir }}
key: ${{ steps.ccache-prepare.outputs.key }}
          # Prefer caches from the same branch. Fall back to caches from other branches.
          restore-keys: |
            ccache:mingw:${{ matrix.msystem }}:${{ github.ref }}
            ccache:mingw:${{ matrix.msystem }}
      - name: configure ccache
        # Limit the maximum size and switch on compression to avoid exceeding the total disk or cache quota.
        run: |
          which ccache
          test -d ${{ steps.ccache-prepare.outputs.ccachedir }} || mkdir -p ${{ steps.ccache-prepare.outputs.ccachedir }}
          echo "max_size = 250M" > ${{ steps.ccache-prepare.outputs.ccachedir }}/ccache.conf
          echo "compression = true" >> ${{ steps.ccache-prepare.outputs.ccachedir }}/ccache.conf
          ccache -p
          ccache -s
      - name: build
        run: |
          IFS=':' read -r -a libs <<< "${BUILD_LIBS}"
          for lib in "${libs[@]}"; do
            printf " \033[0;32m==>\033[0m Building library \033[0;32m${lib}\033[0m\n"
            echo "::group::Configure $lib"
            cd ${GITHUB_WORKSPACE}/${lib}/build
            cmake -DCMAKE_BUILD_TYPE="Release" \
                  -DCMAKE_INSTALL_PREFIX="${GITHUB_WORKSPACE}" \
                  -DCMAKE_C_COMPILER_LAUNCHER="ccache" \
                  -DCMAKE_CXX_COMPILER_LAUNCHER="ccache" \
                  -DCMAKE_Fortran_COMPILER_LAUNCHER="ccache" \
                  -DBLA_VENDOR="OpenBLAS" \
                  -DPython_EXECUTABLE="$(which python)" \
                  -DSUITESPARSE_DEMOS=OFF \
                  -DBUILD_TESTING=OFF \
                  ..
            echo "::endgroup::"
            echo "::group::Build $lib"
            cmake --build . --config Release
            echo "::endgroup::"
          done
      - name: check
        timeout-minutes: 20
        # Need to install the libraries for the tests
        run: |
          echo "::group::Install libraries"
          make install
          echo "::endgroup::"
          IFS=':' read -r -a libs <<< "${CHECK_LIBS}"
          for lib in "${libs[@]}"; do
            printf "::group:: \033[0;32m==>\033[0m Checking library \033[0;32m${lib}\033[0m\n"
            cd ${GITHUB_WORKSPACE}/${lib}
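            # Prepend the install prefix's bin directory so the demo executables
            # can find the freshly installed DLLs at run time.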
PATH="${GITHUB_WORKSPACE}/bin:${PATH}" \
make demos CMAKE_OPTIONS="-DSUITESPARSE_DEMOS=ON -DBUILD_TESTING=ON"
echo "::endgroup::"
done
- name: ccache status
continue-on-error: true
run: ccache -s
- name: save ccache
# Save the cache after we are done (successfully) building
# This helps to retain the ccache even if the subsequent steps are failing.
uses: actions/cache/save@v4
with:
path: ${{ steps.ccache-prepare.outputs.ccachedir }}
key: ${{ steps.ccache-prepare.outputs.key }}
- name: test Config
run: |
IFS=':' read -r -a libs <<< "${INSTALLED_LIBS}"
for lib in "${libs[@]}"; do
printf "::group:: \033[0;32m==>\033[0m Building with Config.cmake with library \033[0;32m${lib}\033[0m\n"
cd ${GITHUB_WORKSPACE}/TestConfig/${lib}
cd build
cmake \
-DCMAKE_PREFIX_PATH="${GITHUB_WORKSPACE}/lib/cmake" \
..
cmake --build . --config Release
echo "::endgroup::"
done