This repository was archived by the owner on May 27, 2021. It is now read-only.

Commit cbbbeb4

Adapt to CUDAapi and CUDAdrv changes.
1 parent ff0cd45 commit cbbbeb4

File tree: 6 files changed (+58 / −40 lines)


Manifest.toml

Lines changed: 4 additions & 6 deletions
@@ -16,17 +16,15 @@ version = "0.2.0"
 
 [[CUDAapi]]
 deps = ["Libdl", "Logging"]
-git-tree-sha1 = "56a813440ac98a1aa64672ab460a1512552211a7"
+git-tree-sha1 = "d9614968b9a13df433870115acff20f41e7b400a"
 uuid = "3895d2a7-ec45-59b8-82bb-cfc6a382f9b3"
-version = "2.1.0"
+version = "3.0.0"
 
 [[CUDAdrv]]
 deps = ["CEnum", "CUDAapi", "Printf"]
-git-tree-sha1 = "fcb6f610289397bdadac2c0aca0acc5be60bc832"
-repo-rev = "181808ff35f9deb7360edf556d4131dc55ebeee5"
-repo-url = "https://github.com/JuliaGPU/CUDAdrv.jl.git"
+git-tree-sha1 = "01e90fa34e25776bc7c8661183d4519149ebfe59"
 uuid = "c5f51814-7f29-56b8-a69c-e4d8f6be1fde"
-version = "5.1.0"
+version = "6.0.0"
 
 [[DataStructures]]
 deps = ["InteractiveUtils", "OrderedCollections"]

Project.toml

Lines changed: 2 additions & 2 deletions
@@ -17,8 +17,8 @@ TimerOutputs = "a759f4b9-e2f1-59dc-863e-4aeb61b1ea8f"
 [compat]
 Adapt = "0.4, 1.0"
 CEnum = "0.2"
-CUDAapi = "2.0"
-CUDAdrv = "5.0"
+CUDAapi = "3.0"
+CUDAdrv = "6.0"
 DataStructures = "0.15, 0.16, 0.17"
 LLVM = "1.2"
 TimerOutputs = "0.5"
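
These [compat] entries use Pkg's default caret semantics: "3.0" admits any 3.x release but excludes 4.0. This can be checked with Pkg's semver_spec helper, though that is an unexported internal, so treat the snippet as illustrative:

julia> using Pkg

julia> v"3.1.2" in Pkg.Types.semver_spec("3.0")
true

julia> v"4.0.0" in Pkg.Types.semver_spec("3.0")
false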

src/CUDAnative.jl

Lines changed: 33 additions & 29 deletions
@@ -15,6 +15,15 @@ using Libdl
 
 ## global state
 
+const toolkit_dirs = Ref{Vector{String}}()
+
+"""
+    prefix()
+
+Returns the installation prefix directories of the CUDA toolkit in use.
+"""
+prefix() = toolkit_dirs[]
+
 const toolkit_version = Ref{VersionNumber}()
 
 """
@@ -39,7 +48,6 @@ const ptx_support = Ref{Vector{VersionNumber}}()
 const libdevice = Ref{Union{String,Dict{VersionNumber,String}}}()
 const libcudadevrt = Ref{String}()
 const nvdisasm = Ref{String}()
-const ptxas = Ref{String}()
 
 
 ## source code includes
@@ -109,58 +117,54 @@ function __init__()
 
     # CUDA
 
-    toolkit_dirs = find_toolkit()
-    toolkit_version[] = find_toolkit_version(toolkit_dirs)
+    toolkit_dirs[] = find_toolkit()
+
+    let val = find_cuda_binary("nvdisasm", toolkit_dirs[])
+        val === nothing && error("Your CUDA installation does not provide the nvdisasm binary")
+        nvdisasm[] = val
+    end
+
+    toolkit_version[] = parse_toolkit_version(nvdisasm[])
     if release() < v"9"
         silent || @warn "CUDAnative.jl only supports CUDA 9.0 or higher (your toolkit provides CUDA $(release()))"
     elseif release() > CUDAdrv.release()
         silent || @warn """You are using CUDA toolkit $(release()) with a driver that only supports up to $(CUDAdrv.release()).
                            It is recommended to upgrade your driver."""
     end
 
-    llvm_support = llvm_compat(llvm_version)
-    cuda_support = cuda_compat()
-
-    target_support[] = sort(collect(llvm_support.cap ∩ cuda_support.cap))
-    isempty(target_support[]) && error("Your toolchain does not support any device capability")
-
-    ptx_support[] = sort(collect(llvm_support.ptx ∩ cuda_support.ptx))
-    isempty(ptx_support[]) && error("Your toolchain does not support any PTX ISA")
-
-    @debug("CUDAnative supports devices $(verlist(target_support[])); PTX $(verlist(ptx_support[]))")
-
-    let val = find_libdevice(target_support[], toolkit_dirs)
+    let val = find_libdevice(toolkit_dirs[])
         val === nothing && error("Your CUDA installation does not provide libdevice")
         libdevice[] = val
     end
 
-    let val = find_libcudadevrt(toolkit_dirs)
+    let val = find_libcudadevrt(toolkit_dirs[])
         val === nothing && error("Your CUDA installation does not provide libcudadevrt")
         libcudadevrt[] = val
     end
 
-    let val = find_cuda_binary("nvdisasm", toolkit_dirs)
-        val === nothing && error("Your CUDA installation does not provide the nvdisasm binary")
-        nvdisasm[] = val
-    end
-
-    let val = find_cuda_binary("ptxas", toolkit_dirs)
-        val === nothing && error("Your CUDA installation does not provide the ptxas binary")
-        ptxas[] = val
-    end
-
-    let val = find_cuda_library("nvtx", toolkit_dirs)
+    let val = find_cuda_library("nvtx", toolkit_dirs[], [v"1"])
         val === nothing && error("Your CUDA installation does not provide the NVTX library")
         NVTX.libnvtx[] = val
    end
 
-    toolkit_extras_dirs = filter(dir->isdir(joinpath(dir, "extras")), toolkit_dirs)
+    toolkit_extras_dirs = filter(dir->isdir(joinpath(dir, "extras")), toolkit_dirs[])
     cupti_dirs = map(dir->joinpath(dir, "extras", "CUPTI"), toolkit_extras_dirs)
-    let val = find_cuda_library("cupti", cupti_dirs)
+    let val = find_cuda_library("cupti", cupti_dirs, [toolkit_version[]])
         val === nothing && error("Your CUDA installation does not provide the CUPTI library")
         CUPTI.libcupti[] = val
     end
 
+    llvm_support = llvm_compat(llvm_version)
+    cuda_support = cuda_compat()
+
+    target_support[] = sort(collect(llvm_support.cap ∩ cuda_support.cap))
+    isempty(target_support[]) && error("Your toolchain does not support any device capability")
+
+    ptx_support[] = sort(collect(llvm_support.ptx ∩ cuda_support.ptx))
+    isempty(ptx_support[]) && error("Your toolchain does not support any PTX ISA")
+
+    @debug("CUDAnative supports devices $(verlist(target_support[])); PTX $(verlist(ptx_support[]))")
+
 
 ## actual initialization
 
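
Taken together, __init__ now discovers the toolkit directories once, locates nvdisasm first, and derives the toolkit version from that binary (parse_toolkit_version) instead of from the directories (find_toolkit_version); the compat computation moves after library discovery. The new prefix() accessor exposes the discovered directories. A hedged REPL sketch — the path and version shown are illustrative, not real output:

julia> using CUDAnative

julia> CUDAnative.prefix()    # installation prefix directories, via the new accessor
1-element Array{String,1}:
 "/usr/local/cuda"

julia> CUDAnative.version()   # toolkit version, now parsed from nvdisasm
v"10.1.0"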

src/compatibility.jl

Lines changed: 17 additions & 0 deletions
@@ -6,6 +6,23 @@ const highest = v"999"
 verlist(vers) = join(map(ver->"$(ver.major).$(ver.minor)", sort(collect(vers))), ", ", " and ")
 
 
+## version range
+
+struct VersionRange
+    lower::VersionNumber
+    upper::VersionNumber
+end
+
+Base.in(v::VersionNumber, r::VersionRange) = (v >= r.lower && v <= r.upper)
+
+import Base.(:)
+(:)(a::VersionNumber, b::VersionNumber) = VersionRange(a, b)
+
+Base.intersect(v::VersionNumber, r::VersionRange) =
+    v < r.lower ? (r.lower:v) :
+    v > r.upper ? (v:r.upper) : (v:v)
+
+
 ## devices supported by the CUDA toolkit
 
 # Source:
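
Behavior of the new helpers, derived directly from the definitions above (runnable after loading the package, since the (:) method is added to Base):

julia> r = v"9.0" : v"10.1"              # builds a VersionRange
julia> v"10.0" in r
true

julia> v"8.0" in intersect(v"8.0", r)    # out-of-range intersect yields an empty range
false

Note that the out-of-range branches return reversed ranges (e.g. v"9.0":v"8.0"), which contain no versions; that is how emptiness is represented here.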

src/exceptions.jl

Lines changed: 1 addition & 3 deletions
@@ -27,9 +27,7 @@ function create_exceptions!(mod::CuModule)
         flag_ptr[] = reinterpret(Ptr{Cvoid}, convert(CuPtr{Cvoid}, exception_flag))
     catch err
         # modules that do not throw exceptions will not contain the indicator flag
-        if err !== CUDAdrv.ERROR_NOT_FOUND
-            rethrow()
-        end
+        err.code == CUDAdrv.ERROR_NOT_FOUND || rethrow()
     end
 
     return
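
The check changes shape because, with CUDAdrv 6, the caught exception is evidently an error object whose code field carries the driver status, rather than a value comparable to the error constant itself — an inference from this diff, not something it states. A minimal sketch of the pattern, where attach_exception_flag is a hypothetical stand-in for the module-global lookup:

try
    attach_exception_flag()   # may fail with ERROR_NOT_FOUND
catch err
    # swallow only the expected status; anything else propagates
    err.code == CUDAdrv.ERROR_NOT_FOUND || rethrow()
end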

test/runtests.jl

Lines changed: 1 addition & 0 deletions
@@ -9,6 +9,7 @@ include("util.jl")
 
 @test CUDAnative.functional()
 
+CUDAnative.prefix()
 CUDAnative.version()
 
 CUDAnative.enable_timings()
