Skip to content

Commit d00834c

Browse files
Add warm-up call to auto_throughput.py
Add the throughput.py example. It is based on the same kernel as auto_throughput.py, but it records global memory read/write amounts so that the output includes a BWUtil metric measuring bandwidth utilization as a percentage of the speed of light (%SOL).
1 parent 275f8ee commit d00834c

File tree

2 files changed

+99
-5
lines changed

2 files changed

+99
-5
lines changed

python/examples/auto_throughput.py

Lines changed: 16 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -15,13 +15,18 @@
1515
# limitations under the License.
1616

1717
import sys
18+
from collections.abc import Callable
1819

1920
import cuda.nvbench as nvbench
2021
import numpy as np
2122
from numba import cuda
2223

2324

24-
def make_kernel(items_per_thread: int):
25+
def as_cuda_Stream(cs: nvbench.CudaStream) -> cuda.cudadrv.driver.Stream:
    """Wrap an nvbench CUDA stream as a Numba external stream.

    The raw stream address is taken from nvbench and handed to Numba
    as an externally-created stream, so both libraries operate on the
    same underlying CUDA stream.
    """
    stream_address = cs.addressof()
    return cuda.external_stream(stream_address)
28+
29+
def make_kernel(items_per_thread: int) -> Callable:
2530
@cuda.jit
2631
def kernel(stride: np.uintp, elements: np.uintp, in_arr, out_arr):
2732
tid = cuda.grid(1)
@@ -35,27 +40,33 @@ def kernel(stride: np.uintp, elements: np.uintp, in_arr, out_arr):
3540
return kernel
3641

3742

38-
def throughput_bench(state: nvbench.State):
43+
def throughput_bench(state: nvbench.State) -> None:
3944
stride = state.getInt64("Stride")
4045
ipt = state.getInt64("ItemsPerThread")
4146

4247
nbytes = 128 * 1024 * 1024
4348
elements = nbytes // np.dtype(np.int32).itemsize
4449

45-
alloc_stream = cuda.external_stream(state.getStream().addressof())
50+
alloc_stream = as_cuda_Stream(state.getStream())
4651
inp_arr = cuda.device_array(elements, dtype=np.int32, stream=alloc_stream)
4752
out_arr = cuda.device_array(elements * ipt, dtype=np.int32, stream=alloc_stream)
4853

49-
state.addElementCount(elements, "Elements")
54+
state.addElementCount(elements, column_name="Elements")
5055
state.collectCUPTIMetrics()
5156

5257
threads_per_block = 256
5358
blocks_in_grid = (elements + threads_per_block - 1) // threads_per_block
5459

5560
krn = make_kernel(ipt)
5661

62+
# warm-up call ensures that kernel is loaded into context
63+
# before blocking kernel is launched
64+
krn[blocks_in_grid, threads_per_block, alloc_stream, 0](
65+
stride, elements, inp_arr, out_arr
66+
)
67+
5768
def launcher(launch: nvbench.Launch):
58-
exec_stream = cuda.external_stream(launch.getStream().addressof())
69+
exec_stream = as_cuda_Stream(launch.getStream())
5970
krn[blocks_in_grid, threads_per_block, exec_stream, 0](
6071
stride, elements, inp_arr, out_arr
6172
)

python/examples/throughput.py

Lines changed: 83 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,83 @@
1+
# Copyright 2025 NVIDIA Corporation
2+
#
3+
# Licensed under the Apache License, Version 2.0 with the LLVM exception
4+
# (the "License"); you may not use this file except in compliance with
5+
# the License.
6+
#
7+
# You may obtain a copy of the License at
8+
#
9+
# http://llvm.org/foundation/relicensing/LICENSE.txt
10+
#
11+
# Unless required by applicable law or agreed to in writing, software
12+
# distributed under the License is distributed on an "AS IS" BASIS,
13+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14+
# See the License for the specific language governing permissions and
15+
# limitations under the License.
16+
17+
import sys
18+
from collections.abc import Callable
19+
20+
import cuda.nvbench as nvbench
21+
import numpy as np
22+
from numba import cuda
23+
24+
25+
def as_cuda_Stream(cs: nvbench.CudaStream) -> cuda.cudadrv.driver.Stream:
    """Adapt an ``nvbench.CudaStream`` into a Numba driver ``Stream``.

    Numba receives only the raw stream address, so the resulting stream
    object refers to the very stream nvbench manages.
    """
    raw_handle = cs.addressof()
    return cuda.external_stream(raw_handle)
28+
29+
def make_kernel(items_per_thread: int) -> Callable:
    """Build a CUDA kernel that copies from ``in_arr`` to ``out_arr``.

    ``items_per_thread`` is baked into the compiled kernel (it is closed
    over, not passed at launch), so each distinct value produces a
    separately specialized kernel. Each thread moves ``items_per_thread``
    values per outer-loop iteration; the read index wraps modulo
    ``elements`` while writes land at ``tid + j * elements``, i.e. into
    ``items_per_thread`` disjoint ``elements``-sized sections of
    ``out_arr``.
    """
    @cuda.jit
    def kernel(stride: np.uintp, elements: np.uintp, in_arr, out_arr):
        # Global linear thread id and total number of threads in the grid.
        tid = cuda.grid(1)
        step = cuda.gridDim.x * cuda.blockDim.x
        # Outer loop advances by the grid-wide thread count, scaled by
        # ``stride``; threads with stride*tid >= stride*elements do nothing.
        for i in range(stride * tid, stride * elements, stride * step):
            for j in range(items_per_thread):
                # Reads wrap around the input; note write_id does not
                # depend on ``i``, so extra outer iterations rewrite the
                # same output slots.
                read_id = (items_per_thread * i + j) % elements
                write_id = tid + j * elements
                out_arr[write_id] = in_arr[read_id]

    return kernel
41+
42+
43+
def throughput_bench(state: nvbench.State) -> None:
    """nvbench benchmark body: measure copy-kernel memory throughput.

    Reads the ``Stride`` and ``ItemsPerThread`` axis values from
    ``state``, allocates device buffers, records element/byte counters
    for nvbench's bandwidth summaries, and times the kernel via
    ``state.exec``.
    """
    stride = state.getInt64("Stride")
    ipt = state.getInt64("ItemsPerThread")

    # Fixed 128 MiB input; elements is the number of int32 values.
    nbytes = 128 * 1024 * 1024
    elements = nbytes // np.dtype(np.int32).itemsize

    # Allocate on the stream nvbench associates with this state.
    alloc_stream = as_cuda_Stream(state.getStream())
    inp_arr = cuda.device_array(elements, dtype=np.int32, stream=alloc_stream)
    out_arr = cuda.device_array(elements * ipt, dtype=np.int32, stream=alloc_stream)

    state.addElementCount(elements, column_name="Elements")
    state.addGlobalMemoryReads(inp_arr.nbytes, column_name="Datasize")
    # NOTE(review): writes are recorded as inp_arr.nbytes, but the kernel
    # writes out_arr, which holds elements * ipt values (ipt times larger).
    # If the intent is to count actual write traffic this should likely be
    # out_arr.nbytes — confirm against the intended BWUtil definition.
    state.addGlobalMemoryWrites(inp_arr.nbytes)

    threads_per_block = 256
    # Ceiling division so every element is covered by at least one thread.
    blocks_in_grid = (elements + threads_per_block - 1) // threads_per_block

    # Kernel is specialized on ItemsPerThread (closed over at compile time).
    krn = make_kernel(ipt)

    # warm-up call ensures that kernel is loaded into context
    # before blocking kernel is launched
    krn[blocks_in_grid, threads_per_block, alloc_stream, 0](
        stride, elements, inp_arr, out_arr
    )

    def launcher(launch: nvbench.Launch):
        # Launch on the per-measurement stream nvbench provides, not the
        # allocation stream, so timing is attributed correctly.
        exec_stream = as_cuda_Stream(launch.getStream())
        krn[blocks_in_grid, threads_per_block, exec_stream, 0](
            stride, elements, inp_arr, out_arr
        )

    state.exec(launcher)
76+
77+
78+
if __name__ == "__main__":
    # Register the benchmark and declare its parameter sweep: every
    # combination of Stride x ItemsPerThread is run.
    bench = nvbench.register(throughput_bench)
    bench.addInt64Axis("Stride", [1, 2, 4])
    bench.addInt64Axis("ItemsPerThread", [1, 2, 3, 4])

    # Hand command-line arguments to nvbench's runner.
    nvbench.run_all_benchmarks(sys.argv)

0 commit comments

Comments (0)