
Commit 7f5e703

Merge pull request DrTimothyAldenDavis#898 from mmuetzel/ci-ubuntu-clang
CI: Overhaul build matrix for Ubuntu runners and work around issue with Clang
2 parents 18ab5b9 + c9310a7 commit 7f5e703

5 files changed: +94 −110 lines changed

5 files changed

+94
-110
lines changed
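The change that runs through both workflow files is the same: the per-include matrix variables (compiler-pkgs, cc, cxx, ccache-max, and the *-cmake-flags blocks) are dropped, and the values are computed inline with GitHub Actions expressions instead. Actions expressions have no ternary operator, but cond && 'a' || 'b' yields 'a' when cond is true (and 'a' is non-empty) and 'b' otherwise. A minimal, hypothetical workflow illustrating the pattern the diffs below rely on (not part of this PR):

    name: expression-demo
    on: workflow_dispatch
    jobs:
      demo:
        runs-on: ubuntu-latest
        strategy:
          matrix:
            compiler: [gcc, clang]
        env:
          # toolchain derived from the matrix value; no per-include cc/cxx keys needed
          CC: ${{ matrix.compiler == 'gcc' && 'gcc' || 'clang' }}
          CXX: ${{ matrix.compiler == 'gcc' && 'g++' || 'clang++' }}
        steps:
          - run: echo "building with CC=$CC and CXX=$CXX"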

.github/workflows/build.yaml

Lines changed: 49 additions & 52 deletions
@@ -32,62 +32,42 @@ jobs:
       fail-fast: false

       matrix:
-        compiler: [gcc, clang]
-        cuda: [with, without]
+        compiler: [gcc]
+        cuda: [with]
         openmp: [with]
         link: [both]
         include:
           - compiler: gcc
-            compiler-pkgs: "g++ gcc"
-            cc: "gcc"
-            cxx: "g++"
-          - compiler: clang
-            compiler-pkgs: "clang libomp-dev"
-            cc: "clang"
-            cxx: "clang++"
-            # Clang seems to generally require less cache size (smaller object files?).
+            cuda: with
+            openmp: with
+            link: both
           - compiler: gcc
-            ccache-max: 600M
+            cuda: without
+            openmp: with
+            link: both
           - compiler: clang
-            ccache-max: 500M
-          - cuda: with
-            cuda-pkgs: "nvidia-cuda-dev nvidia-cuda-toolkit"
-            cuda-cmake-flags:
-              -DSUITESPARSE_USE_CUDA=ON
-              -DSUITESPARSE_USE_STRICT=ON
-              -DCUDAToolkit_INCLUDE_DIRS="/usr/include"
-              -DCMAKE_CUDA_COMPILER_LAUNCHER="ccache"
+            cuda: with
+            openmp: with
+            link: both
+          - compiler: clang
+            cuda: without
+            openmp: with
+            link: both
           - compiler: gcc
-            compiler-pkgs: "g++ gcc"
-            cc: "gcc"
-            cxx: "g++"
-            ccache-max: 600M
             cuda: without
             openmp: without
-            openmp-cmake-flags: "-DSUITESPARSE_USE_OPENMP=OFF"
+            link: both
           - compiler: gcc
-            compiler-pkgs: "g++ gcc"
-            cc: "gcc"
-            cxx: "g++"
-            ccache-max: 600M
             cuda: with
-            cuda-pkgs: "nvidia-cuda-dev nvidia-cuda-toolkit"
-            cuda-cmake-flags:
-              -DSUITESPARSE_USE_CUDA=ON
-              -DSUITESPARSE_USE_STRICT=ON
-              -DCUDAToolkit_INCLUDE_DIRS="/usr/include"
-              -DCMAKE_CUDA_COMPILER_LAUNCHER="ccache"
             openmp: with
             link: static
             # "Fake" a cross-compilation to exercise that build system path
-            link-cmake-flags:
-              -DBUILD_SHARED_LIBS=OFF
-              -DBUILD_STATIC_LIBS=ON
+            extra-cmake-flags:
               -DCMAKE_SYSTEM_NAME="Linux"

     env:
-      CC: ${{ matrix.cc }}
-      CXX: ${{ matrix.cxx }}
+      CC: ${{ matrix.compiler == 'gcc' && 'gcc' || 'clang' }}
+      CXX: ${{ matrix.compiler == 'gcc' && 'g++' || 'clang++' }}

     steps:
       - name: get CPU information
@@ -97,14 +77,15 @@ jobs:
         uses: actions/checkout@v4

       - name: install dependencies
-        env:
-          COMPILER_PKGS: ${{ matrix.compiler-pkgs }}
-          CUDA_PKGS: ${{ matrix.cuda-pkgs }}
         run: |
           sudo apt -qq update
-          sudo apt install -y ${COMPILER_PKGS} autoconf automake ccache cmake \
+          sudo apt install -y \
+            ${{ matrix.compiler == 'gcc' && 'g++ gcc' || 'clang' }} \
+            ${{ matrix.compiler == 'clang' && matrix.openmp == 'with' && 'libomp-dev' || '' }} \
+            autoconf automake ccache cmake \
             dvipng gfortran libgmp-dev liblapack-dev libmpfr-dev valgrind \
-            libopenblas-dev ${CUDA_PKGS}
+            libopenblas-dev \
+            ${{ matrix.cuda == 'with' && 'nvidia-cuda-dev nvidia-cuda-toolkit' || '' }}

       - name: prepare ccache
         # create key with human readable timestamp
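Because the ${{ ... }} expressions are substituted into the script before the shell runs, an expression that evaluates to the empty string simply contributes no package names. For the clang / without-CUDA / with-OpenMP job, the install step above reduces to roughly this (an illustrative expansion, not a literal CI log):

    sudo apt -qq update
    sudo apt install -y \
      clang \
      libomp-dev \
      autoconf automake ccache cmake \
      dvipng gfortran libgmp-dev liblapack-dev libmpfr-dev valgrind \
      libopenblas-dev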
@@ -144,7 +125,8 @@ jobs:
           CCACHE_MAX: ${{ matrix.ccache-max }}
         run: |
           test -d ~/.ccache || mkdir ~/.ccache
-          echo "max_size = $CCACHE_MAX" >> ~/.ccache/ccache.conf
+          # Clang seems to generally require less cache size (smaller object files?).
+          echo "max_size = ${{ matrix.compiler == 'gcc' && '600M' || '500M' }}" >> ~/.ccache/ccache.conf
           echo "compression = true" >> ~/.ccache/ccache.conf
           ccache -s
           echo "/usr/lib/ccache" >> $GITHUB_PATH
@@ -156,21 +138,29 @@ jobs:
             printf " \033[0;32m==>\033[0m Building library \033[0;32m${lib}\033[0m\n"
             echo "::group::Configure $lib"
             cd ${GITHUB_WORKSPACE}/${lib}/build
-            cmake -DCMAKE_BUILD_TYPE="Release" \
+            cmake -DCMAKE_BUILD_TYPE=${{ matrix.compiler == 'clang' && 'Debug' || 'Release' }} \
                   -DCMAKE_INSTALL_PREFIX="${GITHUB_WORKSPACE}" \
                   -DCMAKE_C_COMPILER_LAUNCHER="ccache" \
                   -DCMAKE_CXX_COMPILER_LAUNCHER="ccache" \
                   -DCMAKE_Fortran_COMPILER_LAUNCHER="ccache" \
                   -DBLA_VENDOR="OpenBLAS" \
                   -DSUITESPARSE_DEMOS=OFF \
                   -DBUILD_TESTING=OFF \
-                  ${{ matrix.cuda-cmake-flags }} \
-                  ${{ matrix.openmp-cmake-flags }} \
-                  ${{ matrix.link-cmake-flags }} \
+                  ${{ matrix.cuda == 'with'
+                      && '-DSUITESPARSE_USE_CUDA=ON \
+                          -DSUITESPARSE_USE_STRICT=ON \
+                          -DCUDAToolkit_INCLUDE_DIRS="/usr/include" \
+                          -DCMAKE_CUDA_COMPILER_LAUNCHER="ccache"'
+                      || '-DSUITESPARSE_USE_CUDA=OFF' }} \
+                  -DSUITESPARSE_USE_OPENMP=${{ matrix.openmp == 'without' && 'OFF' || 'ON' }} \
+                  ${{ matrix.link == 'static'
+                      && '-DBUILD_SHARED_LIBS=OFF -DBUILD_STATIC_LIBS=ON'
+                      || '' }} \
+                  ${{ matrix.extra-cmake-flags }} \
                   ..
             echo "::endgroup::"
             echo "::group::Build $lib"
-            cmake --build . --config Release
+            cmake --build .
             echo "::endgroup::"
           done

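Expanded for a concrete job, say gcc with CUDA, OpenMP, and the default link setting, the configure call above amounts to roughly the following (an illustrative expansion of the expressions, using the same options as the workflow):

    cmake -DCMAKE_BUILD_TYPE=Release \
          -DCMAKE_INSTALL_PREFIX="${GITHUB_WORKSPACE}" \
          -DCMAKE_C_COMPILER_LAUNCHER="ccache" \
          -DCMAKE_CXX_COMPILER_LAUNCHER="ccache" \
          -DCMAKE_Fortran_COMPILER_LAUNCHER="ccache" \
          -DBLA_VENDOR="OpenBLAS" \
          -DSUITESPARSE_DEMOS=OFF \
          -DBUILD_TESTING=OFF \
          -DSUITESPARSE_USE_CUDA=ON \
          -DSUITESPARSE_USE_STRICT=ON \
          -DCUDAToolkit_INCLUDE_DIRS="/usr/include" \
          -DCMAKE_CUDA_COMPILER_LAUNCHER="ccache" \
          -DSUITESPARSE_USE_OPENMP=ON \
          ..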
@@ -214,8 +204,15 @@ jobs:
           cmake \
             -DCMAKE_PREFIX_PATH="${GITHUB_WORKSPACE}/lib/cmake" \
             -DBLA_VENDOR="OpenBLAS" \
-            ${{ matrix.cuda-cmake-flags }} \
-            ${{ matrix.link-cmake-flags }} \
+            ${{ matrix.cuda == 'with'
+                && '-DSUITESPARSE_USE_CUDA=ON \
+                    -DSUITESPARSE_USE_STRICT=ON \
+                    -DCUDAToolkit_INCLUDE_DIRS="/usr/include" \
+                    -DCMAKE_CUDA_COMPILER_LAUNCHER="ccache"'
+                || '' }} \
+            ${{ matrix.link == 'static'
+                && '-DBUILD_SHARED_LIBS=OFF -DBUILD_STATIC_LIBS=ON'
+                || '' }} \
             ..
           echo "::endgroup::"
           printf "::group::\033[0;32m==>\033[0m Building example\n"

.github/workflows/root-cmakelists.yaml

Lines changed: 40 additions & 47 deletions
@@ -30,50 +30,29 @@ jobs:
       fail-fast: false

       matrix:
-        compiler: [gcc, clang]
-        cuda: [with, without]
+        compiler: [gcc]
+        cuda: [with]
         link: [both]
         include:
           - compiler: gcc
-            compiler-pkgs: "g++ gcc"
-            cc: "gcc"
-            cxx: "g++"
-          - compiler: clang
-            compiler-pkgs: "clang libomp-dev"
-            cc: "clang"
-            cxx: "clang++"
-            # Clang seems to generally require less cache size (smaller object files?).
+            cuda: with
+            link: both
           - compiler: gcc
-            ccache-max: 600M
+            cuda: without
+            link: both
           - compiler: clang
-            ccache-max: 500M
-          - cuda: with
-            cuda-pkgs: "nvidia-cuda-dev nvidia-cuda-toolkit"
-            cuda-cmake-flags:
-              -DSUITESPARSE_USE_CUDA=ON
-              -DSUITESPARSE_USE_STRICT=ON
-              -DCUDAToolkit_INCLUDE_DIRS="/usr/include"
-              -DCMAKE_CUDA_COMPILER_LAUNCHER="ccache"
+            cuda: with
+            link: both
+          - compiler: clang
+            cuda: without
+            link: both
           - compiler: gcc
-            compiler-pkgs: "g++ gcc"
-            cc: "gcc"
-            cxx: "g++"
-            ccache-max: 600M
             cuda: with
-            cuda-pkgs: "nvidia-cuda-dev nvidia-cuda-toolkit"
-            cuda-cmake-flags:
-              -DSUITESPARSE_USE_CUDA=ON
-              -DSUITESPARSE_USE_STRICT=ON
-              -DCUDAToolkit_INCLUDE_DIRS="/usr/include"
-              -DCMAKE_CUDA_COMPILER_LAUNCHER="ccache"
             link: static
-            link-cmake-flags:
-              -DBUILD_SHARED_LIBS=OFF
-              -DBUILD_STATIC_LIBS=ON

     env:
-      CC: ${{ matrix.cc }}
-      CXX: ${{ matrix.cxx }}
+      CC: ${{ matrix.compiler == 'gcc' && 'gcc' || 'clang' }}
+      CXX: ${{ matrix.compiler == 'gcc' && 'g++' || 'clang++' }}

     steps:
       - name: get CPU information
@@ -83,14 +62,15 @@ jobs:
         uses: actions/checkout@v4

       - name: install dependencies
-        env:
-          COMPILER_PKGS: ${{ matrix.compiler-pkgs }}
-          CUDA_PKGS: ${{ matrix.cuda-pkgs }}
         run: |
           sudo apt -qq update
-          sudo apt install -y ${COMPILER_PKGS} autoconf automake ccache cmake \
+          sudo apt install -y \
+            ${{ matrix.compiler == 'gcc' && 'g++ gcc' || 'clang' }} \
+            ${{ matrix.compiler == 'clang' && 'libomp-dev' || '' }} \
+            autoconf automake ccache cmake \
             dvipng gfortran libgmp-dev liblapack-dev libmpfr-dev \
-            libopenblas-dev ${CUDA_PKGS}
+            libopenblas-dev \
+            ${{ matrix.cuda == 'with' && 'nvidia-cuda-dev nvidia-cuda-toolkit' || '' }}

       - name: prepare ccache
         # create key with human readable timestamp
@@ -126,11 +106,10 @@ jobs:
           sudo mv ./libpthread.a /usr/lib/x86_64-linux-gnu/libpthread.a

       - name: configure ccache
-        env:
-          CCACHE_MAX: ${{ matrix.ccache-max }}
         run: |
           test -d ~/.ccache || mkdir ~/.ccache
-          echo "max_size = $CCACHE_MAX" >> ~/.ccache/ccache.conf
+          # Clang seems to generally require less cache size (smaller object files?).
+          echo "max_size = ${{ matrix.compiler == 'gcc' && '600M' || '500M' }}" >> ~/.ccache/ccache.conf
           echo "compression = true" >> ~/.ccache/ccache.conf
           ccache -s
           echo "/usr/lib/ccache" >> $GITHUB_PATH
@@ -154,16 +133,23 @@ jobs:
       - name: configure
         run: |
           mkdir -p ${GITHUB_WORKSPACE}/build && cd ${GITHUB_WORKSPACE}/build
-          cmake -DCMAKE_BUILD_TYPE="Release" \
+          cmake -DCMAKE_BUILD_TYPE=${{ matrix.compiler == 'clang' && 'Debug' || 'Release' }} \
                 -DCMAKE_INSTALL_PREFIX=".." \
                 -DCMAKE_C_COMPILER_LAUNCHER="ccache" \
                 -DCMAKE_CXX_COMPILER_LAUNCHER="ccache" \
                 -DCMAKE_Fortran_COMPILER_LAUNCHER="ccache" \
                 -DBLA_VENDOR="OpenBLAS" \
                 -DSUITESPARSE_DEMOS=OFF \
                 -DBUILD_TESTING=OFF \
-                ${{ matrix.cuda-cmake-flags }} \
-                ${{ matrix.link-cmake-flags }} \
+                ${{ matrix.cuda == 'with'
+                    && '-DSUITESPARSE_USE_CUDA=ON \
+                        -DSUITESPARSE_USE_STRICT=ON \
+                        -DCUDAToolkit_INCLUDE_DIRS="/usr/include" \
+                        -DCMAKE_CUDA_COMPILER_LAUNCHER="ccache"'
+                    || '' }} \
+                ${{ matrix.link == 'static'
+                    && '-DBUILD_SHARED_LIBS=OFF -DBUILD_STATIC_LIBS=ON'
+                    || '' }} \
                 ..

       - name: build libraries
@@ -213,8 +199,15 @@ jobs:
           cmake \
             -DCMAKE_PREFIX_PATH="${GITHUB_WORKSPACE}/lib/cmake" \
             -DBLA_VENDOR="OpenBLAS" \
-            ${{ matrix.cuda-cmake-flags }} \
-            ${{ matrix.link-cmake-flags }} \
+            ${{ matrix.cuda == 'with'
+                && '-DSUITESPARSE_USE_CUDA=ON \
+                    -DSUITESPARSE_USE_STRICT=ON \
+                    -DCUDAToolkit_INCLUDE_DIRS="/usr/include" \
+                    -DCMAKE_CUDA_COMPILER_LAUNCHER="ccache"'
+                || '' }} \
+            ${{ matrix.link == 'static'
+                && '-DBUILD_SHARED_LIBS=OFF -DBUILD_STATIC_LIBS=ON'
+                || '' }} \
             ..
           echo "::endgroup::"
           printf "::group::\033[0;32m==>\033[0m Building example\n"

CHOLMOD/CMakeLists.txt

Lines changed: 3 additions & 9 deletions
@@ -573,15 +573,9 @@ endif ( )

 # CHOLMOD_CUDA
 if ( CHOLMOD_HAS_CUDA )
-    # cmake now configures cholmod.h with "#define CHOLMOD_HAS_CUDA" if CHOLMOD is
-    # being compiled with CUDA, so the -DCHOLMOD_HAS_CUDA flag is no longer needed.
-    # if ( BUILD_SHARED_LIBS )
-    #     target_compile_definitions ( CHOLMOD PUBLIC "CHOLMOD_HAS_CUDA" )
-    # endif ( )
-    # set ( CHOLMOD_CFLAGS "${CHOLMOD_CFLAGS} -DCHOLMOD_HAS_CUDA" )
-    # if ( BUILD_STATIC_LIBS )
-    #     target_compile_definitions ( CHOLMOD_static PUBLIC "CHOLMOD_HAS_CUDA" )
-    # endif ( )
+    if ( NOT CHOLMOD_HAS_OPENMP )
+        message ( FATAL_ERROR "CHOLMOD_CUDA requires OpenMP. But it has been disabled, or no working OpenMP could be found." )
+    endif ()
     if ( BUILD_SHARED_LIBS )
         target_link_libraries ( CHOLMOD PRIVATE CUDA::nvrtc CUDA::cudart_static CUDA::cublas )
         target_include_directories ( CHOLMOD INTERFACE
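The stale commented-out CHOLMOD_HAS_CUDA definitions are removed, and in their place CHOLMOD now fails fast at configure time if CUDA is requested while OpenMP is unavailable, since the CUDA backend depends on OpenMP. A hypothetical standalone configure showing the effect, assuming the usual SUITESPARSE_USE_* options feed CHOLMOD_HAS_OPENMP and CHOLMOD_HAS_CUDA:

    cmake -S CHOLMOD -B CHOLMOD/build \
          -DSUITESPARSE_USE_CUDA=ON \
          -DSUITESPARSE_USE_STRICT=ON \
          -DSUITESPARSE_USE_OPENMP=OFF
    # expected result: CMake Error ... "CHOLMOD_CUDA requires OpenMP. ..."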

GraphBLAS/Demo/Program/grow_demo.c

Lines changed: 1 addition & 1 deletion
@@ -19,8 +19,8 @@
 #include "import_test.c"
 #include "read_matrix.c"

-#include "omp.h"
 #if defined ( _OPENMP )
+#include <omp.h>
 #define WALLCLOCK omp_get_wtime ( )
 #else
 #define WALLCLOCK 0
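Previously the demo included "omp.h" unconditionally, which fails with compilers that do not provide the header (for example Clang when libomp-dev is not installed). Moving the include behind the _OPENMP guard keeps both configurations building. A self-contained sketch of the pattern (the main function here is hypothetical, not part of the demo):

    #include <stdio.h>

    #if defined ( _OPENMP )
    #include <omp.h>                        /* only available when OpenMP is enabled */
    #define WALLCLOCK omp_get_wtime ( )     /* wall-clock time in seconds */
    #else
    #define WALLCLOCK 0                     /* fallback: timings simply report 0 */
    #endif

    int main (void)
    {
        double t = WALLCLOCK ;
        /* ... work to be timed ... */
        printf ("elapsed: %g sec\n", WALLCLOCK - t) ;
        return (0) ;
    }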

GraphBLAS/Demo/Program/wathen_demo.c

Lines changed: 1 addition & 1 deletion
@@ -16,7 +16,7 @@
 #include "simple_rand.c"
 #include "wathen.c"
 #ifdef _OPENMP
-#include "omp.h"
+#include <omp.h>
 #endif

 // macro used by OK(...) to free workspace if an error occurs
