-
Notifications
You must be signed in to change notification settings - Fork 315
Expand file tree
/
Copy pathutils.cpp
More file actions
347 lines (312 loc) · 14.4 KB
/
utils.cpp
File metadata and controls
347 lines (312 loc) · 14.4 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
#include <algorithm>
#include "pbs/programmable_bootstrap.h"
#include "pbs/programmable_bootstrap_multibit.h"
#include "pbs/programmable_bootstrap_testing.h"
#include <cmath>
#include <cstdint>
#include <cstdlib>
#include <device.h>
#include <functional>
#include <random>
#include <utils.h>
#include "checked_arithmetic.h"
// Reset a Seed to the all-zero starting state.
void init_seed(Seed *seed) {
  seed->hi = 0;
  seed->lo = 0;
}
// Advance the seed to the next value.
// A truly random reseed (std::random_device + std::mt19937, adding a
// uniformly drawn 64-bit value to each half) was deliberately replaced by
// deterministic unit increments: this keeps test runs reproducible.
void shuffle_seed(Seed *seed) {
  seed->hi += 1;
  seed->lo += 1;
}
// For each repetition and sample, draw `number_of_inputs` random plaintexts.
// `payload_modulus` is the full message modulus (message modulus times carry
// modulus); every drawn value is reduced modulo it and then scaled by `delta`
// to place the payload in the high bits.
// Returns a heap array of repetitions * samples * number_of_inputs values;
// the caller owns it and must free() it.
uint64_t *generate_plaintexts(uint64_t payload_modulus, uint64_t delta,
                              int number_of_inputs, const unsigned repetitions,
                              const unsigned samples) {
  uint64_t *plaintext_array = (uint64_t *)malloc(safe_mul_sizeof<uint64_t>(
      repetitions, samples, number_of_inputs));
  std::random_device rd;
  std::mt19937 gen(rd());
  std::uniform_int_distribution<unsigned long long> dis(
      std::numeric_limits<std::uint64_t>::min(),
      std::numeric_limits<std::uint64_t>::max());
  // Fill the array in row-major (repetition, sample, input) order using a
  // single running index instead of recomputing the 3-term offset.
  size_t write_idx = 0;
  for (uint r = 0; r < repetitions; r++) {
    for (uint s = 0; s < samples; s++) {
      for (int i = 0; i < number_of_inputs; i++) {
        plaintext_array[write_idx++] = (dis(gen) % payload_modulus) * delta;
      }
    }
  }
  return plaintext_array;
}
// Build a trivial GLWE lookup table ("test vector") that evaluates `func`
// through the PBS: polynomial coefficients are partitioned into
// modulus_sup boxes of size polynomial_size / modulus_sup, box i holds
// func(i) * delta, and the table is adjusted for the negacyclic rotation
// (first half-box negated, then rotated by half a box).
// Returns a heap array of (glwe_dimension + 1) * polynomial_size values:
// glwe_dimension zero mask polynomials followed by the LUT body polynomial.
// The caller owns the result and must free() it.
uint64_t *generate_identity_lut_pbs(int polynomial_size, int glwe_dimension,
                                    int message_modulus, int carry_modulus,
                                    std::function<uint64_t(uint64_t)> func) {
  // Modulus of the msg contained in the msg bits and operations buffer
  uint64_t modulus_sup = message_modulus * carry_modulus;
  // N/p = size of each box
  uint64_t box_size = polynomial_size / modulus_sup;
  // Value of the shift we multiply our messages by
  uint64_t delta = ((uint64_t)1 << 63) / (uint64_t)(modulus_sup);
  // Create the plaintext lut_pbs
  uint64_t *plaintext_lut_pbs =
      (uint64_t *)malloc(safe_mul_sizeof<uint64_t>(polynomial_size));
  // Fill box i with func(i) scaled into the high bits by delta.
  // (The previous comment claimed this "extracts the carry bits" — it
  // actually encodes whatever `func` computes.)
  for (uint64_t i = 0; i < modulus_sup; i++) {
    uint64_t *box_begin = plaintext_lut_pbs + i * box_size;
    std::fill(box_begin, box_begin + box_size, func(i) * delta);
  }
  uint64_t half_box_size = box_size / 2;
  // Negate the first half_box_size coefficients (unsigned negation is the
  // intended wrap-around modulo 2^64)
  for (uint64_t i = 0; i < half_box_size; i++) {
    plaintext_lut_pbs[i] = -plaintext_lut_pbs[i];
  }
  // Rotate the plaintext_lut_pbs
  std::rotate(plaintext_lut_pbs, plaintext_lut_pbs + half_box_size,
              plaintext_lut_pbs + polynomial_size);
  // Create the GLWE lut_pbs: zero mask polynomials + the LUT as body
  uint64_t *lut_pbs = (uint64_t *)malloc(
      safe_mul_sizeof<uint64_t>(polynomial_size, glwe_dimension + 1));
  uint64_t *body = lut_pbs + (size_t)glwe_dimension * polynomial_size;
  std::fill(lut_pbs, body, (uint64_t)0);
  std::copy(plaintext_lut_pbs, plaintext_lut_pbs + polynomial_size, body);
  free(plaintext_lut_pbs);
  return lut_pbs;
}
// Generate `repetitions` LWE secret keys, written back-to-back into a
// freshly allocated array returned through `lwe_sk_array` (caller frees).
void generate_lwe_secret_keys(uint64_t **lwe_sk_array, int lwe_dimension,
                              Seed *seed, const unsigned repetitions) {
  *lwe_sk_array =
      (uint64_t *)malloc(safe_mul_sizeof<uint64_t>(lwe_dimension, repetitions));
  // Walk a running output pointer instead of an integer shift
  uint64_t *out = *lwe_sk_array;
  for (uint r = 0; r < repetitions; r++) {
    // One secret key per repetition
    core_crypto_lwe_secret_key(out, lwe_dimension, seed->lo, seed->hi);
    out += lwe_dimension;
  }
}
// Generate `repetitions` GLWE secret keys (each glwe_dimension *
// polynomial_size coefficients), written back-to-back into a freshly
// allocated array returned through `glwe_sk_array` (caller frees).
void generate_glwe_secret_keys(uint64_t **glwe_sk_array, int glwe_dimension,
                               int polynomial_size, Seed *seed,
                               const unsigned repetitions) {
  size_t glwe_sk_array_size =
      safe_mul(glwe_dimension, polynomial_size, repetitions);
  *glwe_sk_array =
      (uint64_t *)malloc(safe_mul_sizeof<uint64_t>(glwe_sk_array_size));
  const int key_len = glwe_dimension * polynomial_size;
  // Walk a running output pointer instead of an integer shift
  uint64_t *out = *glwe_sk_array;
  for (uint r = 0; r < repetitions; r++) {
    // One GLWE secret key per repetition
    core_crypto_lwe_secret_key(out, key_len, seed->lo, seed->hi);
    out += key_len;
  }
}
// Generate `repetitions` LWE bootstrap keys on the host and convert each to
// the Fourier domain on the GPU. The device buffer is allocated on `stream`
// and returned via d_fourier_bsk_array (caller releases it); the host-side
// scratch copy is freed before returning.
void generate_lwe_programmable_bootstrap_keys(cudaStream_t stream, uint32_t gpu_index,
                                              double **d_fourier_bsk_array,
                                              uint64_t *lwe_sk_in_array,
                                              uint64_t *lwe_sk_out_array, int lwe_dimension,
                                              int glwe_dimension, int polynomial_size,
                                              int pbs_level, int pbs_base_log, Seed *seed,
                                              DynamicDistribution noise_distribution,
                                              const unsigned repetitions) {
  size_t bsk_size = safe_mul(
      safe_mul(glwe_dimension + 1, glwe_dimension + 1, pbs_level,
               polynomial_size),
      (size_t)(lwe_dimension + 1));
  size_t bsk_array_size = safe_mul(bsk_size, repetitions);
  uint64_t *bsk_array =
      (uint64_t *)malloc(safe_mul_sizeof<uint64_t>(bsk_array_size));
  *d_fourier_bsk_array =
      (double *)cuda_malloc_async(safe_mul_sizeof<double>(bsk_array_size), stream, gpu_index);
  // BUGFIX: offsets must be size_t — they accumulate bsk_size (a size_t)
  // and overflow a 32-bit int once the total exceeds INT_MAX elements.
  size_t shift_in = 0;
  size_t shift_out = 0;
  size_t shift_bsk = 0;
  for (uint r = 0; r < repetitions; r++) {
    // Generate the bootstrap key for this repetition on the host
    core_crypto_par_generate_lwe_bootstrapping_key(
        bsk_array + shift_bsk, pbs_base_log, pbs_level,
        lwe_sk_in_array + shift_in, lwe_dimension,
        lwe_sk_out_array + shift_out, glwe_dimension,
        polynomial_size, noise_distribution, seed->lo, seed->hi);
    double *d_fourier_bsk = *d_fourier_bsk_array + shift_bsk;
    uint64_t *bsk = bsk_array + shift_bsk;
    // NOTE(review): this sync appears to order the stream-ordered
    // allocation / previous conversion before the next async conversion
    // reads the host buffer — confirm against the conversion's contract.
    cuda_synchronize_stream(stream, gpu_index);
    cuda_convert_lwe_programmable_bootstrap_key_64_async(
        stream, gpu_index, (void *)(d_fourier_bsk), (void *)(bsk),
        lwe_dimension, glwe_dimension, pbs_level, polynomial_size);
    shift_in += (size_t)lwe_dimension;
    shift_out += (size_t)glwe_dimension * polynomial_size;
    shift_bsk += bsk_size;
  }
  // Wait for the last conversion to finish before freeing the host scratch
  cuda_synchronize_stream(stream, gpu_index);
  free(bsk_array);
}
// Force the vanilla (standard) BSK layout for the classical TBC path:
// same generation as generate_lwe_programmable_bootstrap_keys, but the
// GPU conversion uses the *_standard variant. Device buffer is returned
// via d_fourier_bsk_array (caller releases); host scratch is freed here.
void generate_lwe_programmable_bootstrap_keys_standard(
    cudaStream_t stream, uint32_t gpu_index, double **d_fourier_bsk_array,
    uint64_t *lwe_sk_in_array, uint64_t *lwe_sk_out_array, int lwe_dimension,
    int glwe_dimension, int polynomial_size, int pbs_level, int pbs_base_log,
    Seed *seed, DynamicDistribution noise_distribution,
    const unsigned repetitions) {
  size_t bsk_size = safe_mul(
      safe_mul(glwe_dimension + 1, glwe_dimension + 1, pbs_level,
               polynomial_size),
      (size_t)(lwe_dimension + 1));
  size_t bsk_array_size = safe_mul(bsk_size, repetitions);
  uint64_t *bsk_array =
      (uint64_t *)malloc(safe_mul_sizeof<uint64_t>(bsk_array_size));
  *d_fourier_bsk_array = (double *)cuda_malloc_async(
      safe_mul_sizeof<double>(bsk_array_size), stream, gpu_index);
  // BUGFIX: offsets must be size_t — they accumulate bsk_size (a size_t)
  // and overflow a 32-bit int once the total exceeds INT_MAX elements.
  size_t shift_in = 0;
  size_t shift_out = 0;
  size_t shift_bsk = 0;
  for (uint r = 0; r < repetitions; r++) {
    // Generate the bootstrap key for this repetition on the host
    core_crypto_par_generate_lwe_bootstrapping_key(
        bsk_array + shift_bsk, pbs_base_log, pbs_level,
        lwe_sk_in_array + shift_in, lwe_dimension,
        lwe_sk_out_array + shift_out, glwe_dimension,
        polynomial_size, noise_distribution, seed->lo, seed->hi);
    double *d_fourier_bsk = *d_fourier_bsk_array + shift_bsk;
    uint64_t *bsk = bsk_array + shift_bsk;
    cuda_synchronize_stream(stream, gpu_index);
    cuda_convert_lwe_programmable_bootstrap_key_standard_64_async(
        stream, gpu_index, (void *)(d_fourier_bsk), (void *)(bsk),
        lwe_dimension, glwe_dimension, pbs_level, polynomial_size);
    shift_in += (size_t)lwe_dimension;
    shift_out += (size_t)glwe_dimension * polynomial_size;
    shift_bsk += bsk_size;
  }
  // Wait for the last conversion to finish before freeing the host scratch
  cuda_synchronize_stream(stream, gpu_index);
  free(bsk_array);
}
// Use the BSK layout required by the specialized 2_2 classical PBS kernel:
// same generation as generate_lwe_programmable_bootstrap_keys, but the GPU
// conversion uses the *_specialized_2_2 variant. Device buffer is returned
// via d_fourier_bsk_array (caller releases); host scratch is freed here.
void generate_lwe_programmable_bootstrap_keys_specialized_2_2(
    cudaStream_t stream, uint32_t gpu_index, double **d_fourier_bsk_array,
    uint64_t *lwe_sk_in_array, uint64_t *lwe_sk_out_array, int lwe_dimension,
    int glwe_dimension, int polynomial_size, int pbs_level, int pbs_base_log,
    Seed *seed, DynamicDistribution noise_distribution,
    const unsigned repetitions) {
  size_t bsk_size = safe_mul(
      safe_mul(glwe_dimension + 1, glwe_dimension + 1, pbs_level,
               polynomial_size),
      (size_t)(lwe_dimension + 1));
  size_t bsk_array_size = safe_mul(bsk_size, repetitions);
  uint64_t *bsk_array =
      (uint64_t *)malloc(safe_mul_sizeof<uint64_t>(bsk_array_size));
  *d_fourier_bsk_array = (double *)cuda_malloc_async(
      safe_mul_sizeof<double>(bsk_array_size), stream, gpu_index);
  // BUGFIX: offsets must be size_t — they accumulate bsk_size (a size_t)
  // and overflow a 32-bit int once the total exceeds INT_MAX elements.
  size_t shift_in = 0;
  size_t shift_out = 0;
  size_t shift_bsk = 0;
  for (uint r = 0; r < repetitions; r++) {
    // Generate the bootstrap key for this repetition on the host
    core_crypto_par_generate_lwe_bootstrapping_key(
        bsk_array + shift_bsk, pbs_base_log, pbs_level,
        lwe_sk_in_array + shift_in, lwe_dimension,
        lwe_sk_out_array + shift_out, glwe_dimension,
        polynomial_size, noise_distribution, seed->lo, seed->hi);
    double *d_fourier_bsk = *d_fourier_bsk_array + shift_bsk;
    uint64_t *bsk = bsk_array + shift_bsk;
    cuda_synchronize_stream(stream, gpu_index);
    // Use forced-specialized BSK conversion (CC bypass) so the BSK layout
    // matches what the specialized kernel (device_programmable_bootstrap_
    // specialized_2_2_params) expects.
    cuda_convert_lwe_programmable_bootstrap_key_specialized_2_2_64_async(
        stream, gpu_index, (void *)(d_fourier_bsk), (void *)(bsk),
        lwe_dimension, glwe_dimension, pbs_level, polynomial_size);
    shift_in += (size_t)lwe_dimension;
    shift_out += (size_t)glwe_dimension * polynomial_size;
    shift_bsk += bsk_size;
  }
  // Wait for the last conversion to finish before freeing the host scratch
  cuda_synchronize_stream(stream, gpu_index);
  free(bsk_array);
}
// Generate `repetitions` multi-bit bootstrap keys on the host and convert
// each on the GPU. Device buffer is returned via d_bsk_array (allocated on
// `stream`; caller releases); the host scratch copy is freed here.
void generate_lwe_multi_bit_programmable_bootstrap_keys(
    cudaStream_t stream, uint32_t gpu_index, uint64_t **d_bsk_array, uint64_t *lwe_sk_in_array,
    uint64_t *lwe_sk_out_array, int lwe_dimension, int glwe_dimension,
    int polynomial_size, int grouping_factor, int pbs_level, int pbs_base_log,
    DynamicDistribution noise_distribution,
    const unsigned repetitions) {
  // Multiply all factors first, then divide by grouping_factor at the end
  // to preserve integer division semantics (the full product is always
  // divisible by grouping_factor, but partial sub-products may not be)
  size_t bsk_size =
      safe_mul(
          safe_mul((size_t)lwe_dimension, (size_t)pbs_level,
                   (size_t)(glwe_dimension + 1),
                   (size_t)(glwe_dimension + 1)),
          safe_mul((size_t)polynomial_size,
                   (size_t)(1 << grouping_factor))) /
      grouping_factor;
  size_t bsk_array_size = safe_mul(bsk_size, repetitions);
  uint64_t *bsk_array = (uint64_t *)malloc(safe_mul_sizeof<uint64_t>(bsk_array_size));
  *d_bsk_array =
      (uint64_t *)cuda_malloc_async(safe_mul_sizeof<uint64_t>(bsk_array_size), stream, gpu_index);
  // BUGFIX: the offsets were previously (re)declared inside the loop, so
  // every repetition read the first secret keys and overwrote the key at
  // offset 0 on both host and device instead of appending its own. They
  // must persist across iterations (and be size_t, matching bsk_size).
  size_t shift_in = 0;
  size_t shift_out = 0;
  size_t shift_bsk = 0;
  for (uint r = 0; r < repetitions; r++) {
    // NOTE(review): the seed is hard-coded to (0, 0) here, unlike the
    // classical PBS generators which take a Seed — confirm intentional.
    core_crypto_par_generate_lwe_multi_bit_bootstrapping_key(
        lwe_sk_in_array + shift_in, lwe_dimension,
        lwe_sk_out_array + shift_out, glwe_dimension,
        polynomial_size, bsk_array + shift_bsk, pbs_base_log,
        pbs_level, grouping_factor, noise_distribution, 0, 0);
    uint64_t *d_bsk = *d_bsk_array + shift_bsk;
    uint64_t *bsk = bsk_array + shift_bsk;
    cuda_convert_lwe_multi_bit_programmable_bootstrap_key_64_async(
        stream, gpu_index, d_bsk, bsk, lwe_dimension, glwe_dimension, pbs_level,
        polynomial_size, grouping_factor);
    shift_in += (size_t)lwe_dimension;
    shift_out += (size_t)glwe_dimension * polynomial_size;
    shift_bsk += bsk_size;
  }
  // Wait for all conversions to finish before freeing the host scratch
  cuda_synchronize_stream(stream, gpu_index);
  free(bsk_array);
}
// Generate `repetitions` LWE keyswitch keys on the host and copy each to the
// GPU. Device buffer is returned via d_ksk_array (allocated on `stream`;
// caller releases); the host scratch copy is freed here.
void generate_lwe_keyswitch_keys(
    cudaStream_t stream, uint32_t gpu_index, uint64_t **d_ksk_array, uint64_t *lwe_sk_in_array,
    uint64_t *lwe_sk_out_array, int input_lwe_dimension,
    int output_lwe_dimension, int ksk_level, int ksk_base_log, Seed *seed,
    DynamicDistribution noise_distribution, const unsigned repetitions) {
  size_t ksk_size = safe_mul(ksk_level, output_lwe_dimension + 1, input_lwe_dimension);
  size_t ksk_array_size = safe_mul(ksk_size, repetitions);
  uint64_t *ksk_array = (uint64_t *)malloc(safe_mul_sizeof<uint64_t>(ksk_array_size));
  *d_ksk_array =
      (uint64_t *)cuda_malloc_async(safe_mul_sizeof<uint64_t>(ksk_array_size), stream, gpu_index);
  // BUGFIX: offsets must be size_t — they accumulate ksk_size (a size_t)
  // and overflow a 32-bit int once the total exceeds INT_MAX elements.
  size_t shift_in = 0;
  size_t shift_out = 0;
  size_t shift_ksk = 0;
  for (uint r = 0; r < repetitions; r++) {
    // Generate the keyswitch key for this repetition
    core_crypto_par_generate_lwe_keyswitch_key(
        ksk_array + shift_ksk, ksk_base_log, ksk_level,
        lwe_sk_in_array + shift_in, input_lwe_dimension,
        lwe_sk_out_array + shift_out, output_lwe_dimension,
        noise_distribution, seed->lo, seed->hi);
    uint64_t *d_ksk = *d_ksk_array + shift_ksk;
    uint64_t *ksk = ksk_array + shift_ksk;
    cuda_memcpy_async_to_gpu(d_ksk, ksk, safe_mul_sizeof<uint64_t>(ksk_size), stream, gpu_index);
    shift_in += (size_t)input_lwe_dimension;
    shift_out += (size_t)output_lwe_dimension;
    shift_ksk += ksk_size;
  }
  // Wait for all copies to finish before freeing the host scratch
  cuda_synchronize_stream(stream, gpu_index);
  free(ksk_array);
}