Skip to content

Commit b295055

Browse files
committed
[ADD] Add experiments
1 parent bd9dea3 commit b295055

File tree

8 files changed

+1831
-0
lines changed

8 files changed

+1831
-0
lines changed

tests/icde_tmp/Makefile

Lines changed: 59 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,59 @@
1+
# Author: Gabriele Mencagli
# Date: 04/10/2023
#
# Builds the ICDE experiment binaries. Every test source is compiled as CUDA
# (-x cu) with nvcc, then linked by nvcc as well.

FF_ROOT = $(HOME)/fastflow
WF_INCLUDES = $(HOME)/WindFlow-3.6.0/wf

CXX = g++
CXXFLAGS = -std=c++17
INCLUDES = -I $(FF_ROOT) -I $(WF_INCLUDES) -I .
MACRO = -DFF_BOUNDED_BUFFER -DDEFAULT_BUFFER_CAPACITY=32786
OPTFLAGS = -g -O3
LDFLAGS = -pthread

NVXX = /usr/local/cuda/bin/nvcc
NVXXFLAGS = -std=c++17 -x cu
NVOPTFLAGS = -w --expt-extended-lambda -O3 -g -gencode arch=compute_80,code=sm_80 -Wno-deprecated-gpu-targets --expt-relaxed-constexpr

# all experiment binaries produced by this Makefile
TARGETS = test_synth_gpu test_synth_gpu_keyed test_synth_gpu_delayed test_saber test_saber_v2

all: $(TARGETS)

# one pattern rule replaces the five identical per-file compile rules
%.o: %.cpp
	$(NVXX) $(NVXXFLAGS) $(NVOPTFLAGS) $(INCLUDES) $(MACRO) $(OPTFLAGS) $< -c

# link step: forward $(LDFLAGS) (-pthread) — it was defined but never used
# in the original link rules; FastFlow-based binaries need pthreads
$(TARGETS): %: %.o
	$(NVXX) $< -o $@ $(LDFLAGS)

clean:
	rm -f $(TARGETS) *.o

.DEFAULT_GOAL := all
.PHONY: all clean

tests/icde_tmp/aggregates.hpp

Lines changed: 311 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,311 @@
1+
/*******************************************************************************
2+
* This program is free software; you can redistribute it and/or modify it
3+
* under the terms of the GNU Lesser General Public License version 3 as
4+
* published by the Free Software Foundation.
5+
*
6+
* This program is distributed in the hope that it will be useful, but WITHOUT
7+
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
8+
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
9+
* License for more details.
10+
*
11+
* You should have received a copy of the GNU Lesser General Public License
12+
* along with this program; if not, write to the Free Software Foundation,
13+
* Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
14+
******************************************************************************
15+
*/
16+
17+
/*
18+
* Functors (lift and combine) of the different associative and commutative
19+
* aggregation functions.
20+
*/
21+
22+
// includes
23+
#include<cmath>
24+
#include<string>
25+
#include<fstream>
26+
27+
using namespace std;
28+
29+
// lift functor of the SUM aggregate
class Lift_SUM_GPU
{
public:
    // operator(): turns one input tuple into a partial SUM result,
    // copying the key and seeding the running sum with the tuple value
    __host__ __device__ void operator()(const input_t &t, output_v1_t &out)
    {
        out.key = t.key;
        out._1 = t._1;
    }
};
// combine functor of the SUM aggregate
class Combine_SUM_GPU
{
public:
    // operator(): merges two partial sums; associative and commutative,
    // key is taken from the first operand (both share the same key)
    __host__ __device__ void operator()(const output_v1_t &a, const output_v1_t &b, output_v1_t &out)
    {
        out.key = a.key;
        out._1 = a._1 + b._1;
    }
};
// lift functor of the COUNT aggregate
class Lift_COUNT_GPU
{
public:
    // operator(): every tuple contributes a count of one under its key
    __host__ __device__ void operator()(const input_t &t, output_v1_t &out)
    {
        out.key = t.key;
        out._1 = 1;
    }
};
// combine functor of the COUNT aggregate
class Combine_COUNT_GPU
{
public:
    // operator(): merges two partial counts by adding them
    __host__ __device__ void operator()(const output_v1_t &a, const output_v1_t &b, output_v1_t &out)
    {
        out.key = a.key;
        out._1 = a._1 + b._1;
    }
};
// lift functor of the MAX aggregate
class Lift_MAX_GPU
{
public:
    // operator(): a single tuple's value is its own running maximum
    __host__ __device__ void operator()(const input_t &t, output_v1_t &out)
    {
        out.key = t.key;
        out._1 = t._1;
    }
};
// combine functor of the MAX aggregate
class Combine_MAX_GPU
{
public:
    // operator(): merges two partial maxima, keeping the larger value
    __host__ __device__ void operator()(const output_v1_t &a, const output_v1_t &b, output_v1_t &out)
    {
        out.key = a.key;
        out._1 = a._1;
        if (b._1 > out._1) {
            out._1 = b._1;
        }
    }
};
// lift functor of the MAX_COUNT aggregate
class Lift_MAX_COUNT_GPU
{
public:
    // operator(): seeds the pair (occurrence count, candidate maximum)
    // from a single tuple: count starts at one, the value is the tuple's
    __host__ __device__ void operator()(const input_t &t, output_v2_t &out)
    {
        out.key = t.key;
        out._1 = 1;
        out._2 = t._2;
    }
};
// combine functor of the MAX_COUNT aggregate
class Combine_MAX_COUNT_GPU
{
public:
    // operator(): merges two (count, max) pairs. The larger maximum wins
    // and carries its count; when both maxima are equal the counts add up.
    __host__ __device__ void operator()(const output_v2_t &a, const output_v2_t &b, output_v2_t &out)
    {
        out.key = a.key;
        if (a._2 > b._2) {  // a holds the strictly larger maximum
            out._2 = a._2;
            out._1 = a._1;
        }
        else {              // b's maximum wins, or the two maxima tie
            out._2 = b._2;
            out._1 = (a._2 == b._2) ? (a._1 + b._1) : b._1;
        }
    }
};
// lift functor of the MIN aggregate
class Lift_MIN_GPU
{
public:
    // operator(): a single tuple's value is its own running minimum
    __host__ __device__ void operator()(const input_t &t, output_v1_t &out)
    {
        out.key = t.key;
        out._1 = t._1;
    }
};
// combine functor of the MIN aggregate
class Combine_MIN_GPU
{
public:
    // operator(): merges two partial minima, keeping the smaller value
    __host__ __device__ void operator()(const output_v1_t &a, const output_v1_t &b, output_v1_t &out)
    {
        out.key = a.key;
        out._1 = a._1;
        if (b._1 < out._1) {
            out._1 = b._1;
        }
    }
};
// lift functor of the MIN_COUNT aggregate
class Lift_MIN_COUNT_GPU
{
public:
    // operator(): seeds the pair (occurrence count, candidate minimum)
    // from a single tuple: count starts at one, the value is the tuple's
    __host__ __device__ void operator()(const input_t &t, output_v2_t &out)
    {
        out.key = t.key;
        out._1 = 1;
        out._2 = t._2;
    }
};
// combine functor of the MIN_COUNT aggregate
class Combine_MIN_COUNT_GPU
{
public:
    // operator(): merges two (count, min) pairs. The smaller minimum wins
    // and carries its count; when both minima are equal the counts add up.
    __host__ __device__ void operator()(const output_v2_t &a, const output_v2_t &b, output_v2_t &out)
    {
        out.key = a.key;
        if (a._2 < b._2) {  // a holds the strictly smaller minimum
            out._2 = a._2;
            out._1 = a._1;
        }
        else {              // b's minimum wins, or the two minima tie
            out._2 = b._2;
            out._1 = (a._2 == b._2) ? (a._1 + b._1) : b._1;
        }
    }
};
// lift functor of the AVG aggregate
class Lift_AVG_GPU
{
public:
    // operator(): seeds the pair (element count, running mean) from one
    // tuple: count is one, so the mean is the tuple's value itself
    __host__ __device__ void operator()(const input_t &t, output_v2_t &out)
    {
        out.key = t.key;
        out._1 = 1;
        out._2 = t._2;
    }
};
// combine functor of the AVG aggregate
class Combine_AVG_GPU
{
public:
    // operator(): merges two partial averages.
    // _1 carries the element count and _2 the partial mean; the merged mean
    // is the count-weighted combination of the two partial means.
    __host__ __device__ void operator()(const output_v2_t &input1, const output_v2_t &input2, output_v2_t &result)
    {
        result.key = input1.key;
        float alpha1 = (((float) input1._1) / (input1._1 + input2._1));
        float alpha2 = (((float) input2._1) / (input1._1 + input2._1));
        result._2 = alpha1 * input1._2 + alpha2 * input2._2;
        // FIX: the original wrote 'result._1 + input1._1 + input2._1;' — a
        // no-op expression that left the merged count unassigned
        result._1 = input1._1 + input2._1;
    }
};
// lift functor of the GEOM aggregate
class Lift_GEOM_GPU
{
public:
    // operator(): seeds the pair (element count, geometric mean) from one
    // tuple: with a single element the geometric mean is the value itself
    __host__ __device__ void operator()(const input_t &t, output_v2_t &out)
    {
        out.key = t.key;
        out._1 = 1;
        out._2 = t._2;
    }
};
// combine functor of the GEOM aggregate
class Combine_GEOM_GPU
{
public:
    // operator(): merges two partial geometric means.
    // _1 is the element count and _2 the partition's geometric mean, so
    // _2^_1 reconstructs the product of that partition's elements; the
    // merged mean is the (n1+n2)-th ROOT of the combined product.
    __host__ __device__ void operator()(const output_v2_t &input1, const output_v2_t &input2, output_v2_t &result)
    {
        result.key = input1.key;
        float r1 = pow(input1._2, input1._1); // product of first partition
        float r2 = pow(input2._2, input2._1); // product of second partition
        result._1 = input1._1 + input2._1;
        // FIX: the original raised the product to the power result._1;
        // the geometric mean requires the reciprocal exponent 1/result._1
        result._2 = pow((r1 * r2), 1.0f / result._1);
    }
};
// lift functor of the SSTD aggregate
class Lift_SSTD_GPU
{
public:
    // operator(): seeds the std-dev accumulators from one tuple:
    // _1 = count, _2 = sum, _3 = sum of squares, _4 = running deviation
    // (algebraically zero for a single sample, since _3 == _2^2 here)
    __host__ __device__ void operator()(const input_t &t, output_v3_t &out)
    {
        out.key = t.key;
        out._1 = 1;
        out._2 = t._2;
        out._3 = pow(t._2, 2);
        out._4 = sqrt((1.0/((float) out._1)) * (out._3 - pow(out._2, 2)/out._1));
    }
};
// combine functor of the SSTD aggregate
class Combine_SSTD_GPU
{
public:
    // operator(): merges two std-dev accumulators and refreshes the sample
    // standard deviation (Bessel's correction: divide by n-1).
    // NOTE(review): floating-point cancellation can make the radicand
    // slightly negative in this one-pass formula — verify inputs' scale.
    __host__ __device__ void operator()(const output_v3_t &a, const output_v3_t &b, output_v3_t &out)
    {
        out.key = a.key;
        out._1 = a._1 + b._1;  // merged count
        out._2 = a._2 + b._2;  // merged sum
        out._3 = a._3 + b._3;  // merged sum of squares
        out._4 = sqrt((1.0/((float) (out._1 - 1))) * (out._3 - pow(out._2, 2)/out._1));
    }
};
// lift functor of the PSTD aggregate
class Lift_PSTD_GPU
{
public:
    // operator(): seeds the std-dev accumulators from one tuple:
    // _1 = count, _2 = sum, _3 = sum of squares, _4 = running deviation
    // (algebraically zero for a single sample, since _3 == _2^2 here)
    __host__ __device__ void operator()(const input_t &t, output_v3_t &out)
    {
        out.key = t.key;
        out._1 = 1;
        out._2 = t._2;
        out._3 = pow(t._2, 2);
        out._4 = sqrt((1.0/((float) out._1)) * (out._3 - pow(out._2, 2)/out._1));
    }
};
// combine functor of the PSTD aggregate
class Combine_PSTD_GPU
{
public:
    // operator(): merges two std-dev accumulators and refreshes the
    // population standard deviation (divide by n, no Bessel correction).
    // NOTE(review): floating-point cancellation can make the radicand
    // slightly negative in this one-pass formula — verify inputs' scale.
    __host__ __device__ void operator()(const output_v3_t &a, const output_v3_t &b, output_v3_t &out)
    {
        out.key = a.key;
        out._1 = a._1 + b._1;  // merged count
        out._2 = a._2 + b._2;  // merged sum
        out._3 = a._3 + b._3;  // merged sum of squares
        out._4 = sqrt((1/((float) out._1)) * (out._3 - pow(out._2, 2)/out._1));
    }
};

0 commit comments

Comments
 (0)