-
Notifications
You must be signed in to change notification settings - Fork 1
Expand file tree
/
Copy pathbaseline.cu
More file actions
296 lines (243 loc) · 10.3 KB
/
baseline.cu
File metadata and controls
296 lines (243 loc) · 10.3 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
#include "main.h"
// Multi-GPU plan structure: one instance per device, holding that device's
// managed (UM) buffers and the stream its work is issued on.
typedef struct
{
float *h_A, *h_B,*h_AR,*h_BR; // managed copies of A/B plus same-sized scratch buffers AR/BR (passed as `R` to unroll_get_Fnorm)
float *A_normmap,*B_normmap; // per-[LoNum x LoNum]-tile Frobenius norms of A and B
float *h_C; // managed output matrix C
cudaStream_t stream; // stream used for all kernels launched for this device
} TGPUplan;
// Copy a size_row x size_col sub-matrix of `src` (starting at off_row/off_col,
// row stride total_col) into the 2-D array `dst`.
// NOTE(review): multi-statement macro is not wrapped in do{...}while(0), so it
// must not be used as the bare body of an `if`/`else`; also introduces loop
// variables `i`/`j` into the caller's scope. Unused in this file — verify callers.
#define MATRIXOFFSETCPY(dst,src,size_row,size_col,off_row,off_col,total_col) \
for(int i=0;i<size_row;i++){ \
for(int j=0;j<size_col;j++){ \
dst[i][j]=GETELEMENT21(src,off_row+i,off_col+j,total_col); \
} \
}
#define SHAREFLAG 1 // compile-time switch; not referenced in this file
// Input: matrix A of size m x n (runtime parameters so the kernel can be reused).
// Each block computes the Frobenius norm of one [LoNum x LoNum] tile of A and
// writes it to A_normmap[blockIdx.x]; each thread loads exactly one element.
// Launch expectation: blockDim.x == LoNum*LoNum (must be a power of two for the
// tree reduction below), one block per tile.
__global__ void get_Fnorm(float *A,float *A_normmap,int m,int n){
    int id = blockIdx.x * blockDim.x + threadIdx.x;
    int kId = blockIdx.x;   // tile index
    int thId = threadIdx.x; // element index within the tile
    __shared__ float sdata[LoNum*LoNum];
    int myBlockRow = kId / (CBLMUN);
    int myBlockCol = kId % (CBLMUN);
    int myThreadRow = thId / LoNum;
    int myThreadCol = thId % LoNum;
    // One element per thread (could be widened to several per thread).
    // BUG FIX: the original guard `id > m*n` let the thread with id == m*n read
    // out of bounds, and out-of-range lanes left sdata uninitialized — the
    // reduction then summed garbage. Zero-fill invalid lanes instead.
    if (id < m*n) {
        float t = GETELEMENT21(A,myBlockRow*LoNum+myThreadRow,myBlockCol*LoNum+myThreadCol,n);
        sdata[thId] = t*t;
    } else {
        sdata[thId] = 0.0f;
    }
    __syncthreads();
    // Naive tree reduction over the full tile; assumes blockDim.x is a power of two.
    #pragma unroll
    for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1)
    {
        if (thId < s)
        {
            sdata[thId] += sdata[thId + s];
        }
        __syncthreads(); // make sure all adds at one stage are done!
    }
    // sqrtf keeps the computation in single precision (sqrt would promote to double).
    if(thId==0) A_normmap[kId] = sqrtf(sdata[0]);
}
// Each thread handles 8 consecutive elements of one row; each block computes the
// Frobenius norm of one [LoNum x LoNum] tile using LoNum*LoNum/8 threads.
// `blockRowOff` shifts which tile-row this launch covers (multi-GPU / PART split).
// `R` is a scratch buffer the same size as A; this kernel still mirrors each
// thread's 8 elements of A into R as the original did, in case a caller reads it.
// Launch expectation: blockDim.x == LoNum*LoNum/8, a power of two.
__global__ void unroll_get_Fnorm(float* A,float *A_normmap,int m,int n,int blockRowOff, float *R){
    int id = blockIdx.x * blockDim.x + threadIdx.x;
    int kId = blockIdx.x;
    int thId = threadIdx.x;
    __shared__ float sdata[LoNum*LoNum]; // was declared but unused in the original
    const int myBlockRow = kId / (CBLMUN)+blockRowOff;
    const int myBlockCol = kId % (CBLMUN);
    const int myBlockId = myBlockRow*(T/LoNum)+myBlockCol;
    const int myThreadRow = thId*8 / LoNum;
    const int myThreadCol = thId*8 % LoNum;
    const int myFinalRow = myBlockRow*LoNum+myThreadRow;
    const int myFinalCol = myBlockCol*LoNum+myThreadCol;
    const int reduceNum = blockDim.x;
    int tadd = myFinalRow*n+myFinalCol;
    // Per-thread partial sum of squares over this thread's 8 elements.
    // BUG FIX: the original guard `id*8 > m*n` let the last thread run past the
    // end of A; require the whole 8-element chunk to be in range.
    float partial = 0.0f;
    if ((id+1)*8 <= m*n) {
        for (int i = 0; i < 8; i++) {
            float v = A[tadd+i];
            R[tadd+i] = v;  // keep the A -> R mirror from the original kernel
            partial += v*v; // BUG FIX: elements must be squared for a Frobenius norm
        }
    }
    sdata[thId] = partial; // zero for out-of-range threads
    __syncthreads();
    // Shared-memory tree reduction across the block's partial sums.
    // BUG FIX: the original "reduction" did `R[thId+tadd] += R[thId + tadd]`,
    // which adds each element to itself and never combines values across
    // threads, so the stored "norm" was meaningless.
    for (unsigned int s = reduceNum / 2; s > 0; s >>= 1) {
        if (thId < s) {
            sdata[thId] += sdata[thId + s];
        }
        __syncthreads();
    }
    if(thId==0) A_normmap[myBlockId] = sqrtf(sdata[0]);
}
// Each block computes one [LoNum x LoNum] tile of C; each thread computes one
// element (static, no dynamic work assignment). Row myBlockRow of A_normmap and
// column myBlockCol of B_normmap are consulted first: any k-tile whose norm
// product is <= Norm is skipped entirely (the block-sparse filter).
// `main_row_offset` shifts the tile-row for the multi-GPU / PART split.
// Launch expectation: blockDim.x == LoNum*LoNum.
__global__ void get_C_Threads1Element_Mul(float* A,float* A_normmap, float* B, float* B_normmap,float* C,const int main_row_offset){
    int kId = blockIdx.x;
    int thId = threadIdx.x;
    __shared__ int sC_bitmap[CBLMUN]; // which k-tiles survive the norm filter; fully written below before use
    float myCresult = 0.0f; // register accumulator (declared but unused in the original)
    const int myBlockRow = kId / (CBLMUN) + main_row_offset;
    const int myBlockCol = kId % (CBLMUN); // this block produces tile C[myBlockRow, myBlockCol]
    const int myBlockRowOff = myBlockRow*LoNum;
    const int myBlockColOff = myBlockCol*LoNum;
    const int myThreadRow = thId/LoNum;
    const int myThreadCol = thId%LoNum;
    const int myFinalRow = myBlockRowOff+myThreadRow;
    const int myFinalCol = myBlockColOff+myThreadCol;
    // Threads cooperatively fill the bitmap. (The original carried a redundant
    // inner `if (thId < CBLMUN)`: since i starts at thId, the loop condition
    // already guarantees it.)
    for(int i=thId;i<CBLMUN;i+=blockDim.x){
        float norm_mul = GETELEMENT21(A_normmap,myBlockRow,i,CBLMUN) * GETELEMENT21(B_normmap,i,myBlockCol,CBLMUN);
        sC_bitmap[i] = norm_mul>Norm? 1:0; // norm values carry ~1e-6 relative error
    }
    __syncthreads(); // every thread must see the complete bitmap before reading it
    // Walk the bitmap; accumulate in a register and store C exactly once.
    // The original zeroed C in global memory and then did a global read-modify-
    // write per multiply-add — same addition order and result, far more traffic.
    for(int b=0;b<CBLMUN;b++){
        if(sC_bitmap[b]==1){
            for(int i=0;i<LoNum;i++){
                myCresult += GETELEMENT21(A,myBlockRowOff+myThreadRow,b*LoNum+i,K) * GETELEMENT21(B,b*LoNum+i,myBlockColOff+myThreadCol,N);
            }
        }
    }
    GETELEMENT21(C,myFinalRow,myFinalCol,N) = myCresult;
}
// Host driver: allocates per-GPU managed buffers, fills A/B on the host, runs
// the norm-filtered block-sparse GEMM TESTTIME times, and reports the average
// wall time of iterations 1..TESTTIME-1 (iteration 0 is treated as warm-up).
int main(int argc, char **argv){
    // Sanity-check the PART split: each device must own at least one tile-row per part.
    if(T/LoNum/DEVICEDIM/PART<=0){
        printf("PART error! too many parts!\n");
        return 1; // BUG FIX: bare `return;` is invalid in a function returning int
    }
    TGPUplan plan[DEVICEDIM];
    // One stream per device. BUG FIX: the original called cudaStreamCreate twice
    // per device (here and again in the allocation loop), leaking the first stream.
    for(int i=0;i<DEVICEDIM;i++){
        cudaSetDevice(i);
        cudaStreamCreate(&plan[i].stream);
    }
    // Host copies of A and B used to seed every device.
    float *h_A = (float *)malloc(sizeof(float)*T*T);
    float *h_B = (float *)malloc(sizeof(float)*T*T);
    // Fill A and B according to the compile-time matrix-type switches.
    if(MATRIXNOR) getNormMatrix(h_A,h_B);
    if(MATRIXEXP){
        getDecayMatrixExp(h_A,1,0.9,M,K);
        getDecayMatrixExp(h_B,1,0.9,K,N);
    }
    if(MATRIXALG){
        getDecayMatrixAlg(h_A,0.1,0.1,K,N);
        getDecayMatrixAlg(h_B,0.1,0.1,K,N);
    }
    // printf("---A---\n");MATRIXSHOW21D(h_A,M,K);
    for(int i=0;i<DEVICEDIM;i++){
        // Per-device managed (UM) allocations: inputs, scratch, output, norm maps.
        cudaSetDevice(i);
        cudaMallocManaged((void **)&plan[i].h_A, sizeof(float)*T*T);
        cudaMallocManaged((void **)&plan[i].h_B, sizeof(float)*T*T);
        cudaMallocManaged((void **)&plan[i].h_AR, sizeof(float)*T*T);
        cudaMallocManaged((void **)&plan[i].h_BR, sizeof(float)*T*T);
        cudaMallocManaged((void **)&plan[i].h_C, sizeof(float)*T*T);
        cudaMallocManaged((void **)&plan[i].A_normmap, sizeof(float)*(T/LoNum)*(T/LoNum));
        cudaMallocManaged((void **)&plan[i].B_normmap, sizeof(float)*(T/LoNum)*(T/LoNum));
        // UM hints: prefetch the working set to this device; A/B are read-mostly.
        cudaMemPrefetchAsync(plan[i].h_A, sizeof(float)*T*T, i);
        cudaMemPrefetchAsync(plan[i].h_B, sizeof(float)*T*T, i);
        cudaMemPrefetchAsync(plan[i].h_C, sizeof(float)*T*T, i);
        cudaMemAdvise(plan[i].h_A, sizeof(float)*T*T, cudaMemAdviseSetReadMostly, i);
        cudaMemAdvise(plan[i].h_B, sizeof(float)*T*T, cudaMemAdviseSetReadMostly, i);
        // Seed this device's managed copies from the host matrices.
        cudaMemcpy(plan[i].h_A,h_A,sizeof(float)*T*T,cudaMemcpyHostToDevice);
        cudaMemcpy(plan[i].h_B,h_B,sizeof(float)*T*T,cudaMemcpyHostToDevice);
    }
    printf("INIT DONE--------------\n");
    //printf("para: T=%d Norm=%f DEVICE=%d PARTS=%d ALG=%d EXP=%d\n",T,Norm,DEVICEDIM,PART,MATRIXALG,MATRIXEXP);
    // Timing. BUG FIX: the original created a fresh event pair every iteration
    // but destroyed only the last pair — create once, destroy once.
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    float elapsed = 0.0f;
    double sum = 0.0;
    for(int i=0;i<TESTTIME;i++){
        cudaEventRecord(start, 0);
        #pragma unroll 2
        for(int device=0;device<DEVICEDIM;device++){
            cudaSetDevice(device);
            const int partBlockOffset=T/LoNum/PART; // all tile-rows are processed in PART passes
            // Norms of every tile of B.
            int A_blocks = M*K/(LoNum*LoNum),B_blocks = (K*N)/(LoNum*LoNum),F_threads = LoNum*LoNum;
            for(int p=0;p<PART;p++){
                unroll_get_Fnorm<<<B_blocks/PART,F_threads/8,0,plan[device].stream>>>(plan[device].h_B,plan[device].B_normmap,M,K,p*partBlockOffset,plan[device].h_BR);
                // if(p!=PART-1) cudaStreamSynchronize(plan[device].stream);
            }
            // Norms of this device's slice of A, then the corresponding C rows.
            int C_blocks = M*N/(LoNum*LoNum),C_threads=LoNum*LoNum;
            for(int p=0;p<PART;p++){
                unroll_get_Fnorm<<<A_blocks/DEVICEDIM/PART,F_threads/8,0,plan[device].stream>>>(plan[device].h_A,plan[device].A_normmap,M,K,device*(T/LoNum/DEVICEDIM)+p*(partBlockOffset/DEVICEDIM),plan[device].h_AR);
                cudaStreamSynchronize(plan[device].stream); // C kernel reads the norm maps just produced
                get_C_Threads1Element_Mul<<<C_blocks/DEVICEDIM/PART,C_threads,0,plan[device].stream>>>(plan[device].h_A,plan[device].A_normmap,plan[device].h_B,plan[device].B_normmap,plan[device].h_C,device*(T/LoNum/DEVICEDIM)+p*(partBlockOffset/DEVICEDIM));
            }
        }
        // Host-side sync before stopping the clock.
        // NOTE(review): this synchronizes (and times with events on) the
        // last-set device only — confirm that is the intended measurement.
        cudaDeviceSynchronize();
        cudaEventRecord(stop, 0);
        cudaEventSynchronize(stop);
        cudaEventElapsedTime(&elapsed, start, stop);
        elapsed /= 1000.0f; // ms -> s
        if(i!=0) sum += elapsed; // skip iteration 0 as warm-up
    }
    printf("time=%fs\n",sum/(TESTTIME-1));
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    // Result verification (disabled).
    if(0) {
        // Stitch the per-device C slices into one matrix.
        float* result_C;
        cudaMallocManaged((void **)&result_C, sizeof(float)*T*T);
        for(int i=0;i<T;i++){
            for(int j=0;j<T;j++){
                result_C[i*T+j]=plan[i/(T/DEVICEDIM)].h_C[i*T+j];
            }
        }
        check_simple_matrix_mul(h_A,h_B,result_C);
        // check_simple_gpu(h_A,h_B,result_C);
        // Validate using device 0's norm map slices.
        float *h_Amap;
        cudaMallocManaged((void **)&h_Amap, sizeof(float)*T*T/LoNum/LoNum);
        const int ndim = T*T/LoNum/LoNum/DEVICEDIM;
        for(int device=0;device<DEVICEDIM;device++){
            for(int i=0;i<ndim;i++){
                h_Amap[i+device*ndim] = plan[device].A_normmap[i+device*ndim];
            }
        }
        countValid(h_Amap,plan[0].B_normmap);
        // printf("A norm");
        // checkNormMap(h_A,h_Amap);
        // printf("B norm");
        // checkNormMap(h_B,h_Bmap);
    }
    // printf("---NORM squrt A:---\n"); MATRIXSHOW21D(A_normmap,CBLMUN,CBLMUN);
    // printf("---NORM squrt B:---\n"); MATRIXSHOW21D(B_normmap,CBLMUN,CBLMUN);
    // printf("!!! NORM mul setting = %f!!!\n\n",Norm);
    // Cleanup (the original leaked every allocation and stream).
    for(int i=0;i<DEVICEDIM;i++){
        cudaSetDevice(i);
        cudaStreamDestroy(plan[i].stream);
        cudaFree(plan[i].h_A);
        cudaFree(plan[i].h_B);
        cudaFree(plan[i].h_AR);
        cudaFree(plan[i].h_BR);
        cudaFree(plan[i].h_C);
        cudaFree(plan[i].A_normmap);
        cudaFree(plan[i].B_normmap);
    }
    free(h_A);
    free(h_B);
    return 0;
}