Commit 453a164

Improvements to readability of examples per PR review

1 parent c91204f · commit 453a164

6 files changed: +30 −30 lines changed

python/examples/auto_throughput.py

Lines changed: 5 additions & 5 deletions
@@ -21,11 +21,11 @@
 from numba import cuda


-def as_cuda_Stream(cs: nvbench.CudaStream) -> cuda.cudadrv.driver.Stream:
+def as_cuda_stream(cs: nvbench.CudaStream) -> cuda.cudadrv.driver.Stream:
     return cuda.external_stream(cs.addressof())


-def make_kernel(items_per_thread: int) -> cuda.dispatcher.CUDADispatcher:
+def make_throughput_kernel(items_per_thread: int) -> cuda.dispatcher.CUDADispatcher:
     @cuda.jit
     def kernel(stride: np.uintp, elements: np.uintp, in_arr, out_arr):
         tid = cuda.grid(1)
@@ -46,7 +46,7 @@ def throughput_bench(state: nvbench.State) -> None:
     nbytes = 128 * 1024 * 1024
     elements = nbytes // np.dtype(np.int32).itemsize

-    alloc_stream = as_cuda_Stream(state.get_stream())
+    alloc_stream = as_cuda_stream(state.get_stream())
     inp_arr = cuda.device_array(elements, dtype=np.int32, stream=alloc_stream)
     out_arr = cuda.device_array(elements * ipt, dtype=np.int32, stream=alloc_stream)

@@ -56,7 +56,7 @@ def throughput_bench(state: nvbench.State) -> None:
     threads_per_block = 256
     blocks_in_grid = (elements + threads_per_block - 1) // threads_per_block

-    krn = make_kernel(ipt)
+    krn = make_throughput_kernel(ipt)

     # warm-up call ensures that kernel is loaded into context
     # before blocking kernel is launched. Kernel loading may cause
@@ -66,7 +66,7 @@ def throughput_bench(state: nvbench.State) -> None:
     )

     def launcher(launch: nvbench.Launch):
-        exec_stream = as_cuda_Stream(launch.get_stream())
+        exec_stream = as_cuda_stream(launch.get_stream())
         krn[blocks_in_grid, threads_per_block, exec_stream, 0](
             stride, elements, inp_arr, out_arr
         )
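Aside: the factory pattern that make_throughput_kernel keeps (and merely renames) exists because items_per_thread must be a compile-time constant for the jitted kernel, captured by closure rather than passed at launch. A minimal sketch of the idea, with a simplified kernel body that is not the one from this file:

import numpy as np
from numba import cuda


def make_copy_kernel(items_per_thread: int):
    # The closed-over items_per_thread is a constant to Numba, so each
    # distinct value produces its own kernel specialization.
    @cuda.jit
    def kernel(elements, in_arr, out_arr):
        tid = cuda.grid(1)
        if tid < elements:
            for i in range(items_per_thread):
                out_arr[tid * items_per_thread + i] = in_arr[tid]

    return kernel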

python/examples/axes.py

Lines changed: 1 addition & 1 deletion
@@ -193,7 +193,7 @@ def launcher(launch: nvbench.Launch):
 nvbench.register(default_value)
 # specify axis
 nvbench.register(single_float64_axis).add_float64_axis(
-    "Duration", [7e-5, 1e-4, 5e-4]
+    "Duration (s)", [7e-5, 1e-4, 5e-4]
 )

 copy1_bench = nvbench.register(copy_sweep_grid_shape)

python/examples/cccl_parallel_segmented_reduce.py

Lines changed: 2 additions & 4 deletions
@@ -61,10 +61,6 @@ def segmented_reduce(state: nvbench.State):
     dev_id = state.get_device()
     cp_stream = as_cp_ExternalStream(state.get_stream(), dev_id)

-    with cp_stream:
-        rng = cp.random.default_rng()
-        mat = rng.integers(low=-31, high=32, dtype=np.int32, size=(n_rows, n_cols))
-
     def add_op(a, b):
         return a + b

@@ -84,6 +80,8 @@ def scale(row_id):

     h_init = np.zeros(tuple(), dtype=np.int32)
     with cp_stream:
+        rng = cp.random.default_rng()
+        mat = rng.integers(low=-31, high=32, dtype=np.int32, size=(n_rows, n_cols))
         d_input = mat
         d_output = cp.empty(n_rows, dtype=d_input.dtype)
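The point of the move: CuPy's stream context manager makes the given stream current for kernels launched inside the with block, so drawing the random matrix there runs it on the benchmark's stream alongside the rest of the setup. A standalone sketch of the pattern, using an ordinary CuPy stream in place of the external nvbench one:

import cupy as cp

stream = cp.cuda.Stream(non_blocking=True)  # stand-in for the external nvbench stream
with stream:
    # Both the random draw and the output allocation are issued on `stream`.
    rng = cp.random.default_rng()
    mat = rng.integers(low=-31, high=32, dtype=cp.int32, size=(16, 64))
    out = cp.empty(mat.shape[0], dtype=mat.dtype)
stream.synchronize()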

python/examples/cupy_extract.py

Lines changed: 6 additions & 4 deletions
@@ -36,14 +36,16 @@ def cupy_extract_by_mask(state: nvbench.State):

     state.collect_cupti_metrics()
     state.add_element_count(n_rows * n_cols, "# Elements")
+    int32_dt = cp.dtype(cp.int32)
+    bool_dt = cp.dtype(cp.bool_)
     state.add_global_memory_reads(
-        n_rows * n_cols * (cp.dtype(cp.int32).itemsize + cp.dtype("?").itemsize)
+        n_rows * n_cols * (int32_dt.itemsize + bool_dt.itemsize)
     )
-    state.add_global_memory_writes(n_rows * n_cols * (cp.dtype(cp.int32).itemsize))
+    state.add_global_memory_writes(n_rows * n_cols * (int32_dt.itemsize))

     with cp_s:
-        X = cp.full((n_cols, n_rows), fill_value=3, dtype=cp.int32)
-        mask = cp.ones((n_cols, n_rows), dtype="?")
+        X = cp.full((n_cols, n_rows), fill_value=3, dtype=int32_dt)
+        mask = cp.ones((n_cols, n_rows), dtype=bool_dt)
         _ = X[mask]

     def launcher(launch: nvbench.Launch):
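Hoisting the dtype objects removes the repeated cp.dtype(...) construction and makes it visible that "?" is just the type character for bool, so the rewrite is behavior-preserving. A quick check:

import cupy as cp

int32_dt = cp.dtype(cp.int32)
bool_dt = cp.dtype(cp.bool_)

# "?" is the NumPy type character for bool; the itemsizes drive the
# memory-traffic accounting above.
assert bool_dt == cp.dtype("?")
assert int32_dt.itemsize == 4 and bool_dt.itemsize == 1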

python/examples/throughput.py

Lines changed: 5 additions & 5 deletions
@@ -21,11 +21,11 @@
 from numba import cuda


-def as_cuda_Stream(cs: nvbench.CudaStream) -> cuda.cudadrv.driver.Stream:
+def as_cuda_stream(cs: nvbench.CudaStream) -> cuda.cudadrv.driver.Stream:
     return cuda.external_stream(cs.addressof())


-def make_kernel(items_per_thread: int) -> cuda.dispatcher.CUDADispatcher:
+def make_throughput_kernel(items_per_thread: int) -> cuda.dispatcher.CUDADispatcher:
     @cuda.jit
     def kernel(stride: np.uintp, elements: np.uintp, in_arr, out_arr):
         tid = cuda.grid(1)
@@ -46,7 +46,7 @@ def throughput_bench(state: nvbench.State) -> None:
     nbytes = 128 * 1024 * 1024
     elements = nbytes // np.dtype(np.int32).itemsize

-    alloc_stream = as_cuda_Stream(state.get_stream())
+    alloc_stream = as_cuda_stream(state.get_stream())
     inp_arr = cuda.device_array(elements, dtype=np.int32, stream=alloc_stream)
     out_arr = cuda.device_array(elements * ipt, dtype=np.int32, stream=alloc_stream)

@@ -57,7 +57,7 @@ def throughput_bench(state: nvbench.State) -> None:
     threads_per_block = 256
     blocks_in_grid = (elements + threads_per_block - 1) // threads_per_block

-    krn = make_kernel(ipt)
+    krn = make_throughput_kernel(ipt)

     # warm-up call ensures that kernel is loaded into context
     # before blocking kernel is launched. Kernel loading may
@@ -67,7 +67,7 @@ def throughput_bench(state: nvbench.State) -> None:
     )

     def launcher(launch: nvbench.Launch):
-        exec_stream = as_cuda_Stream(launch.get_stream())
+        exec_stream = as_cuda_stream(launch.get_stream())
         krn[blocks_in_grid, threads_per_block, exec_stream, 0](
             stride, elements, inp_arr, out_arr
         )

python/test/run_1.py

Lines changed: 11 additions & 11 deletions
@@ -14,12 +14,15 @@ def kernel(a, b, c):
         c[tid] = a[tid] + b[tid]


-def get_numba_stream(launch):
+def get_numba_stream(launch: nvbench.Launch):
     return cuda.external_stream(launch.get_stream().addressof())


-def add_two(state):
-    # state.skip("Skipping this benchmark for no reason")
+def skipit(state: nvbench.State) -> None:
+    state.skip("Skipping this benchmark for no reason")
+
+
+def add_two(state: nvbench.State):
     N = state.get_int64("elements")
     a = cuda.to_device(np.random.random(N))
     c = cuda.device_array_like(a)
@@ -44,7 +47,7 @@ def kernel_launcher(launch):
     state.exec(kernel_launcher, batched=True, sync=True)


-def add_float(state):
+def add_float(state: nvbench.State):
     N = state.get_int64("elements")
     v = state.get_float64("v")
     name = state.get_string("name")
@@ -75,7 +78,7 @@ def kernel_launcher(launch):
     state.exec(kernel_launcher, batched=True, sync=True)


-def add_three(state):
+def add_three(state: nvbench.State):
     N = state.get_int64("elements")
     a = cuda.to_device(np.random.random(N).astype(np.float32))
     b = cuda.to_device(np.random.random(N).astype(np.float32))
@@ -105,13 +108,10 @@ def register_benchmarks():
         nvbench.register(add_float)
         .add_float64_axis("v", [0.1, 0.3])
         .add_string_axis("name", ["Anne", "Lynda"])
-        .add_int64_axis("elements", [2**pow2 for pow2 in range(20, 23)])
-    )
-    (
-        nvbench.register(add_three).add_int64_axis(
-            "elements", [2**pow2 for pow2 in range(20, 22)]
-        )
+        .add_int64_power_of_two_axis("elements", range(20, 23))
     )
+    (nvbench.register(add_three).add_int64_power_of_two_axis("elements", range(20, 22)))
+    nvbench.register(skipit)


 if __name__ == "__main__":
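The switch to add_int64_power_of_two_axis states the sweep in terms of exponents instead of a spelled-out list of powers, which (as in C++ NVBench) the axis expands so the benchmark still reads the full value. A minimal sketch, assuming the registration API shown in this diff:

import nvbench


def bench(state: nvbench.State) -> None:
    n = state.get_int64("elements")  # expanded value, e.g. 2**20 .. 2**22
    state.skip("sketch only")


nvbench.register(bench).add_int64_power_of_two_axis("elements", range(20, 23))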
