launch.cu
#include <algorithm>
#include <chrono>
#include <cmath>
#include <iostream>
#include <random>
#include <cuda_bf16.h>
#include <cuda_runtime.h>
#include "../common.cuh"
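
// Hypothetical build line (an assumption, not taken from the repo): bf16
// arithmetic needs an sm_80+ target, and ../common.cuh is assumed to provide
// matmul() and reference_gemm() header-only, e.g.:
//
//   nvcc -O3 -arch=sm_80 launch.cu -o launch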
int run_benchmark(size_t M, size_t N, size_t K) {
    cudaError_t cudaStatus;

    std::cout << "-------------------- M=" << M << " N=" << N << " K=" << K << " --------------------\n";

    // Allocate host memory
    float *h_A = new float[M * K];
    float *h_B = new float[K * N];
    float *h_C = new float[M * N]; // later receives the kernel output, converted back to float
    std::cout << "Allocated host memory" << std::endl;

    // Fixed-seed random number generator, so runs are reproducible
    std::mt19937 gen(42);
    std::uniform_real_distribution<float> dis(-0.5f, 0.5f);

    // Initialize the input matrices with random values
    for (size_t i = 0; i < M * K; ++i) h_A[i] = dis(gen);
    for (size_t i = 0; i < K * N; ++i) h_B[i] = dis(gen);
    std::cout << "Initialized matrices" << std::endl;
    // Allocate device memory
    __nv_bfloat16 *d_A, *d_B, *d_C, *d_C_ref;
    cudaMalloc(&d_A, M * K * sizeof(__nv_bfloat16));
    cudaMalloc(&d_B, K * N * sizeof(__nv_bfloat16));
    cudaMalloc(&d_C, M * N * sizeof(__nv_bfloat16));
    cudaMalloc(&d_C_ref, M * N * sizeof(__nv_bfloat16));

    // Check for CUDA errors from the allocations
    cudaStatus = cudaGetLastError();
    if (cudaStatus != cudaSuccess) {
        std::cerr << "CUDA error: " << cudaGetErrorString(cudaStatus) << std::endl;
        return -1;
    }
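
    // A sketch of a common alternative to polling cudaGetLastError() by hand:
    // wrap each runtime call in a checking macro. CUDA_CHECK is a hypothetical
    // helper, not something defined in common.cuh (std::exit needs <cstdlib>):
    //
    //   #define CUDA_CHECK(call) do { cudaError_t e_ = (call);                  \
    //       if (e_ != cudaSuccess) {                                            \
    //           std::cerr << "CUDA error: " << cudaGetErrorString(e_) << "\n";  \
    //           std::exit(1); } } while (0)
    //
    //   CUDA_CHECK(cudaMalloc(&d_A, M * K * sizeof(__nv_bfloat16)));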
    std::cout << "Allocated device memory" << std::endl;

    // Convert the inputs to __nv_bfloat16 and copy them to the device
    __nv_bfloat16 *h_A_bf16 = new __nv_bfloat16[M * K];
    __nv_bfloat16 *h_B_bf16 = new __nv_bfloat16[K * N];
    for (size_t i = 0; i < M * K; ++i) h_A_bf16[i] = __float2bfloat16(h_A[i]);
    for (size_t i = 0; i < K * N; ++i) h_B_bf16[i] = __float2bfloat16(h_B[i]);
    cudaMemcpy(d_A, h_A_bf16, M * K * sizeof(__nv_bfloat16), cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, h_B_bf16, K * N * sizeof(__nv_bfloat16), cudaMemcpyHostToDevice);
    std::cout << "Copied matrices to device" << std::endl;

    // Compute the reference GEMM on the GPU
    reference_gemm<__nv_bfloat16, __nv_bfloat16, false>(d_C_ref, d_A, d_B, M, N, K);
    cudaDeviceSynchronize();
    std::cout << "Computed reference GEMM on device" << std::endl;
    std::cout << "\n";

    // Warm up the kernel before timing; matmul() (from ../common.cuh) takes a
    // single dimension, so this benchmark effectively assumes M == N == K
    for (int i = 0; i < 2; i++) {
        matmul(d_A, d_B, d_C, M);
    }

    // Time the kernel: synchronize so only the timed launches are measured
    cudaDeviceSynchronize();
    auto start = std::chrono::high_resolution_clock::now();
    constexpr int ITERS = 1;
    for (int i = 0; i < ITERS; i++) {
        matmul(d_A, d_B, d_C, M);
    }
    cudaDeviceSynchronize();
    auto end = std::chrono::high_resolution_clock::now();

    // Average duration per iteration, in microseconds
    std::chrono::duration<double> diff = end - start;
    double useconds = diff.count() * 1e6 / ITERS;

    // A GEMM does 2*M*N*K FLOPs: one multiply and one add per accumulated term
    double flops = 2.0 * M * N * K;
    double tflops = (flops / useconds) / 1e6; // FLOPs per us -> TFLOP/s
    std::cout << "Avg Kernel execution time: " << useconds << " us\n";
    std::cout << "Achieved performance: " << tflops << " TFLOPs\n";
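
    // An alternative timing sketch using CUDA events, which time the GPU
    // stream directly instead of host wall-clock time (ev_start/ev_stop are
    // illustrative names, not part of the original code):
    //
    //   cudaEvent_t ev_start, ev_stop;
    //   cudaEventCreate(&ev_start);
    //   cudaEventCreate(&ev_stop);
    //   cudaEventRecord(ev_start);
    //   for (int i = 0; i < ITERS; i++) matmul(d_A, d_B, d_C, M);
    //   cudaEventRecord(ev_stop);
    //   cudaEventSynchronize(ev_stop);
    //   float ms = 0.0f;
    //   cudaEventElapsedTime(&ms, ev_start, ev_stop);
    //   cudaEventDestroy(ev_start);
    //   cudaEventDestroy(ev_stop);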
    // Check for CUDA errors from the kernel launches
    cudaStatus = cudaGetLastError();
    if (cudaStatus != cudaSuccess) {
        std::cerr << "CUDA error: " << cudaGetErrorString(cudaStatus) << std::endl;
        return -1;
    }

    // Copy both results back to host
    __nv_bfloat16 *h_C_bf16 = new __nv_bfloat16[M * N];
    __nv_bfloat16 *h_C_ref_bf16 = new __nv_bfloat16[M * N];
    cudaMemcpy(h_C_bf16, d_C, M * N * sizeof(__nv_bfloat16), cudaMemcpyDeviceToHost);
    cudaMemcpy(h_C_ref_bf16, d_C_ref, M * N * sizeof(__nv_bfloat16), cudaMemcpyDeviceToHost);
    std::cout << "Copied result back to host" << std::endl;

    // Convert both results back to float for comparison
    float *h_C_ref = new float[M * N];
    for (size_t i = 0; i < M * N; ++i) h_C[i] = __bfloat162float(h_C_bf16[i]);
    for (size_t i = 0; i < M * N; ++i) h_C_ref[i] = __bfloat162float(h_C_ref_bf16[i]);
    std::cout << "Converted result back to float" << std::endl;

    // Check the kernel result against the reference
    float max_error = 0.0f;
    int error_count = 0;
    for (size_t i = 0; i < M * N; ++i) {
        float error = std::abs(h_C[i] - h_C_ref[i]);
        if (error > 0.2f) { // loose tolerance because of bf16 vs fp32 numerics
            if (error_count < 20) std::cout << "Error at row " << i / N << " col " << i % N << ": " << h_C[i] << " != " << h_C_ref[i] << " (ref)" << std::endl;
            else if (error_count == 20) std::cout << "Too many errors to show them all.\n";
            error_count++;
        }
        max_error = std::max(max_error, error);
    }
    std::cout << "Max error: " << max_error << std::endl;
    std::cout << "Error count: " << error_count << std::endl;
    std::cout << "Total count: " << M * N << std::endl;

    // Clean up
    delete[] h_A;
    delete[] h_B;
    delete[] h_C;
    delete[] h_C_ref;
    delete[] h_A_bf16;
    delete[] h_B_bf16;
    delete[] h_C_bf16;
    delete[] h_C_ref_bf16;
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
    cudaFree(d_C_ref);
    return 0;
}
int main() {
    int N = 4096;
    run_benchmark(N, N, N);
    return 0;
}