Commit f277445
test(LuxLib): migrate to paralleltestrunners

1 parent 13321ef
31 files changed: +973 −1129 lines
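Most of the diff below is mechanical: each ReTestItems @testitem block becomes a plain @testset in a file that carries its own using statements and includes the shared setup, so ParallelTestRunner can schedule whole files. A minimal sketch of the pattern (the file name and test body here are illustrative, not from this commit):

# Before (ReTestItems): the runner discovered tagged @testitem blocks.
#
#   @testitem "Foo" tags = [:misc] setup = [SharedTestSetup] begin
#       @test sum(abs2, [1, 2]) == 5
#   end
#
# After (ParallelTestRunner): each test file is an ordinary script that
# declares its own dependencies and can be run or scheduled as a whole.
using Test
# include("../shared_testsetup.jl")  # as the real files in this commit do

@testset "Foo" begin
    @test sum(abs2, [1, 2]) == 5
end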

.buildkite/testing_luxlib.yml

Lines changed: 4 additions & 4 deletions
@@ -7,7 +7,7 @@ steps:
           version: "{{matrix.julia}}"
       - JuliaCI/julia-test#v1:
           project: "lib/LuxLib"
-          test_args: "BACKEND_GROUP=CUDA LUXLIB_TEST_GROUP={{matrix.group}}"
+          test_args: "--BACKEND_GROUP=CUDA {{matrix.group}}"
       - JuliaCI/julia-coverage#v1:
           codecov: true
           dirs:
@@ -28,9 +28,9 @@ steps:
       julia:
         - "1.12"
       group:
-        - "common"
+        - "common_ops"
         - "normalization"
-        - "misc"
+        - "others"

  # - group: ":julia: (LuxLib) AMD GPU"
  #   steps:
@@ -40,7 +40,7 @@ steps:
  #         version: "{{matrix.julia}}"
  #       - JuliaCI/julia-test#v1:
  #           project: "lib/LuxLib"
-  #           test_args: "BACKEND_GROUP=AMDGPU"
+  #           test_args: "--BACKEND_GROUP=AMDGPU"
  #       - JuliaCI/julia-coverage#v1:
  #           codecov: true
  #           dirs:
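Note the shape of the test_args change: configuration moves from environment-variable pairs (BACKEND_GROUP=CUDA LUXLIB_TEST_GROUP=...) to CLI-style flags plus a positional test-group name (--BACKEND_GROUP=CUDA {{matrix.group}}). The commit does not show how the runner consumes these; below is a hedged sketch of the kind of ARGS parsing a runtests.jl entry point might do — a hypothetical helper, not ParallelTestRunner's actual API:

# Hypothetical sketch: split ARGS into --KEY=value flags and positional
# test-group names. Illustrative only; not ParallelTestRunner internals.
function parse_test_args(args::Vector{String})
    flags = Dict{String,String}()
    positional = String[]
    for arg in args
        m = match(r"^--([A-Za-z_]+)=(.*)$", arg)
        m === nothing ? push!(positional, arg) : (flags[m.captures[1]] = m.captures[2])
    end
    return flags, positional
end

flags, groups = parse_test_args(["--BACKEND_GROUP=CUDA", "common_ops"])
@assert flags["BACKEND_GROUP"] == "CUDA" && groups == ["common_ops"]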

.github/workflows/CI_LuxLib.yml

Lines changed: 8 additions & 8 deletions
@@ -27,39 +27,39 @@ jobs:
       fail-fast: false
       matrix:
         test_group:
-          - "common"
+          - "common_ops"
           - "normalization"
-          - "misc"
+          - "others"
         blas_backend:
           - "default"
         loopvec:
           - "true"
         include:
-          - test_group: "common"
+          - test_group: "common_ops"
             blas_backend: "default"
             loopvec: "false"
-          - test_group: "misc"
+          - test_group: "others"
             blas_backend: "default"
             loopvec: "false"
     uses: ./.github/workflows/CommonCI.yml
     with:
       julia_version: "1.12"
       project: "lib/LuxLib"
-      test_args: "BACKEND_GROUP=cpu LUXLIB_TEST_GROUP=${{ matrix.test_group }} LUXLIB_BLAS_BACKEND=${{ matrix.blas_backend }} LUXLIB_LOAD_LOOPVEC=${{ matrix.loopvec }}"
+      test_args: "--BACKEND_GROUP=cpu --BLAS_BACKEND=${{ matrix.blas_backend }} --LOOP_VECTORIZATION=${{ matrix.loopvec }} ${{ matrix.test_group }}"

   downgrade:
     strategy:
       fail-fast: false
       matrix:
         test_group:
-          - "common"
+          - "common_ops"
           - "normalization"
-          - "misc"
+          - "others"
     uses: ./.github/workflows/CommonCI.yml
     with:
       julia_version: "1.11"
       project: "lib/LuxLib"
       downgrade_testing: true
       local_dependencies: "lib/LuxCore,lib/MLDataDevices"
       local_test_dependencies: "lib/LuxTestUtils,lib/MLDataDevices"
-      test_args: "BACKEND_GROUP=cpu LUXLIB_TEST_GROUP=${{ matrix.test_group }} LUXLIB_BLAS_BACKEND=default LUXLIB_LOAD_LOOPVEC=true"
+      test_args: "--BACKEND_GROUP=cpu --BLAS_BACKEND=default --LOOP_VECTORIZATION=true ${{ matrix.test_group }}"

lib/LuxLib/Project.toml

Lines changed: 1 addition & 1 deletion
@@ -79,7 +79,7 @@ EnzymeCore = "0.8.16"
 FastClosures = "0.3.2"
 ForwardDiff = "0.10.36, 1"
 Functors = "0.5"
-KernelAbstractions = "0.9.30"
+KernelAbstractions = "0.9.32"
 LinearAlgebra = "1.10"
 LoopVectorization = "0.12.171"
 LuxCore = "1.5"

lib/LuxLib/test/Project.toml

Lines changed: 12 additions & 12 deletions
@@ -1,30 +1,31 @@
 [deps]
+AMDGPU = "21141c5a-9bdb-4563-92ae-f87d6854732e"
 AppleAccelerate = "13e28ba4-7ad8-5781-acae-3021b1ed3924"
 Aqua = "4c88cf16-eb10-579e-8560-4a9242c79595"
 BLISBLAS = "6f275bd8-fec0-4d39-945b-7e95a765fa1e"
 BenchmarkTools = "6e4b80f9-dd63-53aa-95a3-0cdb28fa8baf"
-CPUSummary = "2a0fbf3d-bb9c-48f3-b0a9-814d99fd7ab9"
+CUDA = "052768ef-5323-5732-b1bb-66c8b64840ba"
 ChainRulesCore = "d360d2e6-b24c-11e9-a2a3-2a2ae2dbcce4"
 ComponentArrays = "b0b7db55-cfe3-40fc-9ded-d10e2dbeff66"
 Enzyme = "7da242da-08ed-463a-9acd-ee780be4f1d9"
 EnzymeCore = "f151be2c-9106-41f4-ab19-57ee4f262869"
 ExplicitImports = "7d51a73a-1435-4ff3-83d9-f097790105c7"
 ForwardDiff = "f6369f11-7733-5829-9624-2563aa707210"
-InteractiveUtils = "b77e0a4c-d291-57a0-90e8-8db25a27a240"
 JLArrays = "27aeb0d3-9eb9-45fb-866b-73c2ecf80fcb"
 LoopVectorization = "bdcacae8-1622-11e9-2a5c-532679323890"
+LuxCUDA = "d0bbae9a-e099-4d5b-a835-1c6931763bda"
 LuxLib = "82251201-b29d-42c6-8e01-566dec8acb11"
 LuxTestUtils = "ac9de150-d08f-4546-94fb-7472b5760531"
 MKL = "33e6dc65-8f57-5167-99aa-e5a354878fb2"
 MLDataDevices = "7e8f7934-dd98-4c1a-8fe8-92b47a384d40"
+Metal = "dde4c033-4e86-420c-a63e-0dd931031962"
 Mooncake = "da2b9cff-9c12-43a0-ae48-6db2b0edb7d6"
 NNlib = "872c559c-99b0-510c-b3b7-b6c96a88d5cd"
 Octavian = "6fd5a793-0b7e-452c-907f-f8bfe9c57db4"
+ParallelTestRunner = "d3525ed8-44d0-4b2c-a655-542cee43accc"
 Pkg = "44cfe95a-1eb2-52ea-b672-e2afdf69b78f"
 Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"
-ReTestItems = "817f1d60-ba6b-4fd5-9520-3cf149f6a823"
 Reactant = "3c362404-f566-11ee-1572-e11a4b42c853"
-Reexport = "189a3867-3050-52da-a836-e630ba90ab69"
 ReverseDiff = "37e2e3b7-166d-5795-8a7a-e32c996b4267"
 StableRNGs = "860ef19b-820b-49d6-a774-d7a799459cd3"
 Static = "aedffcd0-7271-4cad-89d0-dc628f76c6d3"
@@ -33,38 +34,37 @@ Statistics = "10745b16-79ce-11e8-11f9-7d13ad32a3b2"
 Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40"
 Tracker = "9f7883ad-71c0-57eb-9f7f-b5c9e6d3789c"
 Zygote = "e88e6eb3-aa80-5325-afca-941959d7151f"
+cuDNN = "02a925ec-e4fe-4b08-9a7e-0d78e3d38ccd"
+oneAPI = "8f75cd03-7ff8-4ecb-9b8f-daf728133b1b"

 [sources]
-LuxLib = { path = ".." }
-LuxTestUtils = { path = "../../LuxTestUtils" }
-MLDataDevices = { path = "../../MLDataDevices" }
+LuxLib = {path = ".."}
+LuxTestUtils = {path = "../../LuxTestUtils"}
+MLDataDevices = {path = "../../MLDataDevices"}

 [compat]
 AppleAccelerate = "0.4, 0.5"
 Aqua = "0.8.7"
 BLISBLAS = "0.1, 0.2"
 BenchmarkTools = "1.5"
-CPUSummary = "0.2.6"
 ChainRulesCore = "1.25.1"
 ComponentArrays = "0.15.22"
 Enzyme = "0.13.120"
 EnzymeCore = "0.8.16"
 ExplicitImports = "1.9.0"
 ForwardDiff = "0.10.36, =1"
-InteractiveUtils = "<0.0.1, 1"
 JLArrays = "0.1.5, 0.2, 0.3"
 LoopVectorization = "0.12.171"
 LuxTestUtils = "2"
 MKL = "0.7, 0.8, 0.9"
 MLDataDevices = "1.17"
-Mooncake = "0.4"
+Mooncake = "0.4, 0.5"
 NNlib = "0.9.27"
 Octavian = "0.3.28"
+ParallelTestRunner = "2.1"
 Pkg = "1.10"
 Random = "1.10"
-ReTestItems = "1.24.0"
 Reactant = "0.2.179"
-Reexport = "1"
 ReverseDiff = "1.15"
 StableRNGs = "1.0.2"
 Static = "0.8.4, 1"

lib/LuxLib/test/common_ops/activation_tests.jl

Lines changed: 8 additions & 6 deletions
@@ -1,11 +1,13 @@
-@testitem "Activation Functions" tags = [:misc] setup = [SharedTestSetup] begin
-    using Enzyme
+using Enzyme, LuxLib, Test, NNlib

-    rng = StableRNG(1234)
+include("../shared_testsetup.jl")
+
+apply_act(f::F, x) where {F} = sum(abs2, f.(x))
+apply_act_fast(f::F, x) where {F} = sum(abs2, fast_activation!!(f, copy(x)))
+apply_act_fast2(f::F, x) where {F} = sum(abs2, fast_activation(f, x))

-    apply_act(f::F, x) where {F} = sum(abs2, f.(x))
-    apply_act_fast(f::F, x) where {F} = sum(abs2, fast_activation!!(f, copy(x)))
-    apply_act_fast2(f::F, x) where {F} = sum(abs2, fast_activation(f, x))
+@testset "Activation Functions" begin
+    rng = StableRNG(1234)

     @testset "$mode" for (mode, aType, ongpu, fp64) in MODES
         @testset "$f: $T" for f in [

lib/LuxLib/test/common_ops/attention_tests.jl

Lines changed: 4 additions & 2 deletions
@@ -1,6 +1,8 @@
-@testitem "Scaled Dot Product Attention" tags = [:misc] setup = [SharedTestSetup] begin
-    using LuxLib, Reactant, NNlib, Random, MLDataDevices, Enzyme, Statistics
+include("../shared_testsetup.jl")

+using LuxLib, Reactant, NNlib, Random, MLDataDevices, Enzyme, Statistics, Test
+
+@testset "Scaled Dot Product Attention" begin
     @testset "$mode" for (mode, aType, ongpu, fp64) in MODES
         @testset "Different Batch Sizes" begin
             n, lenq, lenkv = 15, 3, 4

lib/LuxLib/test/common_ops/bias_act_tests.jl

Lines changed: 17 additions & 14 deletions
@@ -1,15 +1,20 @@
-@testitem "Bias Activation" tags = [:misc] setup = [SharedTestSetup] begin
-    rng = StableRNG(1234)
+include("../shared_testsetup.jl")

-    bias_act_loss1(act, x, b) = sum(abs2, act.(x .+ LuxLib.Impl.reshape_bias(x, b)))
-    bias_act_loss2(act, x, b) = sum(abs2, bias_activation(act, x, b))
-    bias_act_loss3(act, x, b) = sum(abs2, bias_activation!!(act, copy(x), b))
+using LuxLib, Test, StableRNGs, NNlib, LuxTestUtils
+using ReverseDiff, Tracker

-    struct __Fix1{F,A}
-        f::F
-        act::A
-    end
-    (f::__Fix1)(x, b) = f.f(f.act, x, b)
+bias_act_loss1(act, x, b) = sum(abs2, act.(x .+ LuxLib.Impl.reshape_bias(x, b)))
+bias_act_loss2(act, x, b) = sum(abs2, bias_activation(act, x, b))
+bias_act_loss3(act, x, b) = sum(abs2, bias_activation!!(act, copy(x), b))
+
+struct __Fix1{F,A}
+    f::F
+    act::A
+end
+(f::__Fix1)(x, b) = f.f(f.act, x, b)
+
+@testset "Bias Activation" begin
+    rng = StableRNG(1234)

     @testset "$mode" for (mode, aType, ongpu, fp64) in MODES
         @testset "$act, $T, $sz" for act in [
@@ -93,9 +98,7 @@
     end
 end

-@testitem "Bias Activation (ReverseDiff)" tags = [:misc] setup = [SharedTestSetup] begin
-    using ReverseDiff, Tracker
-
+@testset "Bias Activation (ReverseDiff)" begin
     x = rand(Float32, 3, 4)
     b = rand(Float32, 3)
     act = tanh
@@ -113,7 +116,7 @@ end
     @test z isa Tracker.TrackedArray
 end

-@testitem "Bias Activation: Zero-sized Arrays" tags = [:misc] setup = [SharedTestSetup] begin
+@testset "Bias Activation: Zero-sized Arrays" begin
     @testset "$mode" for (mode, aType, ongpu) in MODES
         x = aType(rand(Float32, 4, 3, 2, 0))
         b = aType(rand(Float32, 2))
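The __Fix1 struct that moved to top level is a hand-rolled analogue of Base.Fix1: it pins the activation as the first argument so each three-argument loss becomes a two-argument callable over (x, b), which is convenient for gradient-testing helpers. A small usage sketch against the definitions above:

# __Fix1 partially applies the activation to a three-argument loss:
loss = __Fix1(bias_act_loss2, tanh)
x, b = rand(Float32, 3, 4), rand(Float32, 3)
@assert loss(x, b) == bias_act_loss2(tanh, x, b)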

lib/LuxLib/test/common_ops/conv_tests.jl

Lines changed: 4 additions & 39 deletions
@@ -1,4 +1,5 @@
-@testsetup module ConvSetup
+include("../shared_testsetup.jl")
+
 using LuxLib, LuxTestUtils, Random, Test, NNlib

 expand(_, i::Tuple) = i
@@ -76,11 +77,7 @@ end

 anonact = x -> gelu(x)

-# const ELTYPES = [(Float32, Float32), (Float32, Float64), (Float64, Float64)]
 const ELTYPES = [(Float32, Float32), (Float64, Float64)]
-# const ACTIVATIONS = [
-#     identity, tanh, tanh_fast, sigmoid, sigmoid_fast, relu, gelu, swish, anonact
-# ]
 const ACTIVATIONS = [identity, sigmoid, gelu]

 const ALL_TEST_CONFIGS = Iterators.product(
@@ -95,43 +92,11 @@ const ALL_TEST_CONFIGS = Iterators.product(
     ),
 )

-const TEST_BLOCKS = collect(
-    Iterators.partition(ALL_TEST_CONFIGS, ceil(Int, length(ALL_TEST_CONFIGS) / 2))
-)
-
-export expand, convfilter, calc_padding, anonact, TEST_BLOCKS, run_conv_testing
-
-end
-
-@testitem "Fused Conv: Group 1" tags = [:common] setup = [SharedTestSetup, ConvSetup] begin
-    @testset "$mode" for (mode, aType, ongpu, fp64) in MODES
-        @testset "$(Tw) x $(Tx) hasbias: $(hasbias) activation: $(activation) kernel: $(kernel) padding: $(padding) stride: $(stride) groups: $(groups)" for (
-            (Tx, Tw), hasbias, activation, (kernel, padding, stride, groups)
-        ) in TEST_BLOCKS[1]
-            !fp64 && (Tx == Float64 || Tw == Float64) && continue
-            run_conv_testing(
-                generate_fixed_array,
-                activation,
-                kernel,
-                stride,
-                padding,
-                hasbias,
-                groups,
-                Tw,
-                Tx,
-                aType,
-                mode,
-                ongpu,
-            )
-        end
-    end
-end
-
-@testitem "Fused Conv: Group 2" tags = [:common] setup = [SharedTestSetup, ConvSetup] begin
+@testset "Fused Conv" begin
     @testset "$mode" for (mode, aType, ongpu, fp64) in MODES
         @testset "$(Tw) x $(Tx) hasbias: $(hasbias) activation: $(activation) kernel: $(kernel) padding: $(padding) stride: $(stride) groups: $(groups)" for (
             (Tx, Tw), hasbias, activation, (kernel, padding, stride, groups)
-        ) in TEST_BLOCKS[2]
+        ) in ALL_TEST_CONFIGS
             !fp64 && (Tx == Float64 || Tw == Float64) && continue
             run_conv_testing(
                 generate_fixed_array,
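The deleted TEST_BLOCKS machinery existed only to split ALL_TEST_CONFIGS into two halves so ReTestItems could schedule "Fused Conv: Group 1" and "Group 2" as separate test items; with ParallelTestRunner parallelizing at file granularity, a single loop over all configs suffices. For reference, a minimal sketch of what the removed partition did (stand-in data, not the real configs):

# Iterators.partition split the configs into two roughly equal blocks:
configs = 1:10  # stand-in for ALL_TEST_CONFIGS
blocks = collect(Iterators.partition(configs, ceil(Int, length(configs) / 2)))
@assert length(blocks) == 2                # was "Group 1" and "Group 2"
@assert collect(blocks[1]) == 1:5 && collect(blocks[2]) == 6:10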
