@@ -8,6 +8,20 @@ using Libdl
88
99const __version = Ref{VersionNumber}()
1010
"""
    version()

Return the version of the CUDA toolkit in use.
"""
function version()
    return @after_init(__version[])
end
17+
"""
    release()

Return the CUDA release part (major.minor) of the version as returned by
[`version`](@ref).
"""
function release()
    ver = @after_init(__version[])
    return VersionNumber(ver.major, ver.minor)
end
24+
# Paths to the discovered CUDA toolkit libraries, populated during
# package initialization (see the discovery section below).
const __libcublas = Ref{String}()
const __libcusparse = Ref{String}()
const __libcusolver = Ref{String}()
const __libcufft = Ref{String}()
const __libcurand = Ref{String}()
# CUDNN and CUTENSOR are optional; `nothing` means the library was not found.
const __libcudnn = Ref{Union{Nothing,String}}(nothing)
const __libcutensor = Ref{Union{Nothing,String}}(nothing)
1832
# Accessors for the discovered library paths; each triggers package
# initialization on first use via `@after_init`.
libcublas()   = @after_init(__libcublas[])
libcusparse() = @after_init(__libcusparse[])
libcusolver() = @after_init(__libcusolver[])
libcufft()    = @after_init(__libcufft[])
libcurand()   = @after_init(__libcurand[])
libcudnn()    = @after_init(__libcudnn[])
libcutensor() = @after_init(__libcutensor[])
40+
export has_cudnn, has_cutensor

# Predicates for the optional libraries: true iff discovery found them.
# Note: identity comparison against `nothing` (`!==`), never `!=`.
has_cudnn() = libcudnn() !== nothing
has_cutensor() = libcutensor() !== nothing
44+
1945
2046# # discovery
2147
@@ -186,109 +212,20 @@ function use_local_cutensor(cuda_dirs)
186212 return true
187213end
188214
189-
190- # # initialization
191-
192- const __initialized__ = Ref{Union{Nothing,Bool}}(nothing )
193-
194- """
195- functional(show_reason=false)
196-
197- Check if the package has been initialized successfully and is ready to use.
198-
199- This call is intended for packages that support conditionally using an available GPU. If you
200- fail to check whether CUDA is functional, actual use of functionality might warn and error.
201- """
202- function functional(show_reason:: Bool = false )
203- if __initialized__[] === nothing
204- __runtime_init__(show_reason)
205- end
206- __initialized__[]
207- end
208-
209- function __runtime_init__(show_reason:: Bool )
210- __initialized__[] = false
211-
212- # if any dependent GPU package failed, expect it to have logged an error and bail out
213- if ! CUDAdrv. functional(show_reason) || ! CUDAnative. functional(show_reason)
214- show_reason && @warn " CuArrays.jl did not initialize because CUDAdrv.jl or CUDAnative.jl failed to"
215- return
216- end
217-
218-
219- # CUDA toolkit
# Locate a usable CUDA toolkit, preferring BinaryBuilder-provided artifacts
# (unless disabled through JULIA_CUDA_USE_BINARYBUILDER) and falling back to a
# local installation. Returns whether a toolkit was found; when `show_reason`
# is set, logs an error on failure.
function __configure_dependencies__(show_reason::Bool)
    # artifacts first, if the user hasn't opted out
    use_artifacts = parse(Bool, get(ENV, "JULIA_CUDA_USE_BINARYBUILDER", "true"))
    found = use_artifacts ? use_artifact_cuda() : false

    # fall back to whatever is installed on the system
    found || (found = use_local_cuda())

    if !found
        show_reason && @error "Could not find a suitable CUDA installation"
    end

    return found
end
259-
260-
261- # # getters
262-
263- macro initialized(ex)
264- quote
265- @assert functional(true ) " CuArrays.jl is not functional"
266- $ (esc(ex))
267- end
268- end
269-
270- """
271- version()
272-
273- Returns the version of the CUDA toolkit in use.
274- """
275- version() = @initialized(__version[])
276-
277- """
278- release()
279-
280- Returns the CUDA release part of the version as returned by [`version`](@ref).
281- """
282- release() = @initialized(VersionNumber(__version[]. major, __version[]. minor))
283-
284- libcublas() = @initialized(__libcublas[])
285- libcusparse() = @initialized(__libcusparse[])
286- libcusolver() = @initialized(__libcusolver[])
287- libcufft() = @initialized(__libcufft[])
288- libcurand() = @initialized(__libcurand[])
289- libcudnn() = @initialized(__libcudnn[])
290- libcutensor() = @initialized(__libcutensor[])
291-
292- export has_cudnn, has_cutensor
293- has_cudnn() = libcudnn() != = nothing
294- has_cutensor() = libcutensor() != = nothing
0 commit comments