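"""Benchmark PyTorch work on an nvbench-managed CUDA stream.

Each measured iteration runs one gradient-descent step of fitting a
cubic polynomial to sin(x), with all kernels issued on the stream that
nvbench creates and times.
"""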
import sys

import cuda.nvbench as nvbench
import torch


def as_torch_cuda_Stream(
    cs: nvbench.CudaStream, dev: int | None
) -> torch.cuda.ExternalStream:
    """Wrap an nvbench CUDA stream as a torch.cuda.ExternalStream so
    that PyTorch work can be enqueued on the stream nvbench times."""
    return torch.cuda.ExternalStream(
        stream_ptr=cs.addressof(), device=torch.cuda.device(dev)
    )


def torch_bench(state: nvbench.State) -> None:
    # Discard samples taken while the GPU clocks are throttled.
    state.set_throttle_threshold(0.25)

    dev_id = state.get_device()
    tc_s = as_torch_cuda_Stream(state.get_stream(), dev_id)

    dt = torch.float32
    scalar_shape: tuple = tuple()
    n = 2**28
    # One-time setup outside the measured region: four scalar polynomial
    # coefficients and the data (x, sin(x)) to fit. The allocations are
    # placed on the GPU explicitly; torch factory functions otherwise
    # default to the CPU, which would leave the measured stream idle.
    with torch.cuda.stream(tc_s):
        a3 = torch.randn(scalar_shape, dtype=dt, device="cuda")
        a2 = torch.randn(scalar_shape, dtype=dt, device="cuda")
        a1 = torch.randn(scalar_shape, dtype=dt, device="cuda")
        a0 = torch.randn(scalar_shape, dtype=dt, device="cuda")
        x = torch.linspace(-3, 3, n, dtype=dt, device="cuda")
        y = torch.sin(x)

    learning_rate = 1e-4

    def launcher(launch: nvbench.Launch) -> None:
        tc_s = as_torch_cuda_Stream(launch.get_stream(), dev_id)
        with torch.cuda.stream(tc_s):
            # Forward pass: y_pred = a3 + a2*x + a1*x^2 + a0*x^3.
            x2 = torch.square(x)
            y_pred = (a3 + x2 * a1) + x * (a2 + a0 * x2)

            # Squared-error loss and a manual backward pass; the loss
            # value itself is discarded and is computed only so the
            # measured workload matches a full training step.
            _ = torch.square(y_pred - y).sum()
            grad_y_pred = 2 * (y_pred - y)
            grad_a3 = grad_y_pred.sum()
            grad_a2 = (grad_y_pred * x).sum()
            grad_a1 = (grad_y_pred * x2).sum()
            grad_a0 = (grad_y_pred * x2 * x).sum()

            # Gradient-descent update. The results are discarded so each
            # measured iteration starts from identical coefficients.
            _ = a3 - grad_a3 * learning_rate
            _ = a2 - grad_a2 * learning_rate
            _ = a1 - grad_a1 * learning_rate
            _ = a0 - grad_a0 * learning_rate

    # sync=True has nvbench synchronize around each measurement.
    state.exec(launcher, sync=True)


if __name__ == "__main__":
    nvbench.register(torch_bench)

    nvbench.run_all_benchmarks(sys.argv)
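
# A sketch of a typical invocation; this assumes run_all_benchmarks
# forwards sys.argv to nvbench's usual command-line parser, so its
# standard options (e.g. --list, --devices) would apply:
#
#   python <this_script>.py --devices 0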