|
| 1 | +# Copyright 2025 NVIDIA Corporation |
| 2 | +# |
| 3 | +# Licensed under the Apache License, Version 2.0 with the LLVM exception |
| 4 | +# (the "License"); you may not use this file except in compliance with |
| 5 | +# the License. |
| 6 | +# |
| 7 | +# You may obtain a copy of the License at |
| 8 | +# |
| 9 | +# http://llvm.org/foundation/relicensing/LICENSE.txt |
| 10 | +# |
| 11 | +# Unless required by applicable law or agreed to in writing, software |
| 12 | +# distributed under the License is distributed on an "AS IS" BASIS, |
| 13 | +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
| 14 | +# See the License for the specific language governing permissions and |
| 15 | +# limitations under the License. |
| 16 | + |
| 17 | +import sys |
| 18 | +from collections.abc import Callable |
| 19 | + |
| 20 | +import cuda.nvbench as nvbench |
| 21 | +import numpy as np |
| 22 | +from numba import cuda |
| 23 | + |
| 24 | + |
def as_cuda_Stream(cs: nvbench.CudaStream) -> cuda.cudadrv.driver.Stream:
    """Wrap an nvbench CUDA stream handle as a Numba external stream object."""
    raw_handle = cs.addressof()
    return cuda.external_stream(raw_handle)
| 27 | + |
| 28 | + |
def make_kernel(items_per_thread: int) -> Callable:
    """Build a CUDA kernel that copies ``items_per_thread`` items per thread
    from ``in_arr`` to ``out_arr``.

    ``items_per_thread`` is captured from the enclosing scope, so each value
    produces its own specialized kernel (presumably letting Numba treat the
    inner loop bound as a compile-time constant — TODO confirm).
    """

    @cuda.jit
    def kernel(stride: np.uintp, elements: np.uintp, in_arr, out_arr):
        # Global linear thread id and total thread count of the launch.
        tid = cuda.grid(1)
        step = cuda.gridDim.x * cuda.blockDim.x
        # Each thread walks the index space in jumps of stride * step;
        # consecutive threads start stride elements apart.
        for i in range(stride * tid, stride * elements, stride * step):
            for j in range(items_per_thread):
                # Modulo wraps the read index back into in_arr's range.
                read_id = (items_per_thread * i + j) % elements
                # Write index depends only on tid and j, so successive i
                # iterations of a thread overwrite the same slots.
                write_id = tid + j * elements
                out_arr[write_id] = in_arr[read_id]

    return kernel
| 41 | + |
| 42 | + |
def throughput_bench(state: nvbench.State) -> None:
    """Measure global-memory copy throughput under a strided access pattern.

    Axes:
        Stride: gap (in elements) between a thread's loop iterations.
        ItemsPerThread: items copied per thread per iteration.
    """
    stride = state.getInt64("Stride")
    ipt = state.getInt64("ItemsPerThread")

    # 128 MiB of int32 input data.
    nbytes = 128 * 1024 * 1024
    elements = nbytes // np.dtype(np.int32).itemsize

    alloc_stream = as_cuda_Stream(state.getStream())
    inp_arr = cuda.device_array(elements, dtype=np.int32, stream=alloc_stream)
    out_arr = cuda.device_array(elements * ipt, dtype=np.int32, stream=alloc_stream)

    state.addElementCount(elements, column_name="Elements")
    state.addGlobalMemoryReads(inp_arr.nbytes, column_name="Datasize")
    # The kernel writes every slot of out_arr (elements * ipt items), so
    # report the full output size. Reporting inp_arr.nbytes here would
    # undercount writes by a factor of ItemsPerThread.
    state.addGlobalMemoryWrites(out_arr.nbytes)

    threads_per_block = 256
    blocks_in_grid = (elements + threads_per_block - 1) // threads_per_block

    krn = make_kernel(ipt)

    # warm-up call ensures that kernel is loaded into context
    # before blocking kernel is launched
    krn[blocks_in_grid, threads_per_block, alloc_stream, 0](
        stride, elements, inp_arr, out_arr
    )

    def launcher(launch: nvbench.Launch):
        # Timed launches must go on the stream nvbench provides.
        exec_stream = as_cuda_Stream(launch.getStream())
        krn[blocks_in_grid, threads_per_block, exec_stream, 0](
            stride, elements, inp_arr, out_arr
        )

    state.exec(launcher)
| 76 | + |
| 77 | + |
if __name__ == "__main__":
    # Register the benchmark and declare its parameter sweep.
    bench = nvbench.register(throughput_bench)
    bench.addInt64Axis("Stride", [1, 2, 4])
    bench.addInt64Axis("ItemsPerThread", [1, 2, 3, 4])

    # Hand control to nvbench's CLI driver.
    nvbench.run_all_benchmarks(sys.argv)
0 commit comments